code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import geopandas as gpd import pandas as pd import matplotlib.pyplot as plt import numpy as np import imageio import geoplot as gplt # %matplotlib inline # - # ## Reading data for merging # # - Shape files to draw and visualize maps # - Meta data of all the boundaries # - Facebook movement data # Please change the base path according to where the data has been downloaded locally on your machine base_path = '../../DataKind/' # + # Reading shape files. All admin levels have different shape files. admin0_shp = gpd.read_file(base_path + 'bgd_adm_bbs_20201113_SHP/bgd_admbnda_adm0_bbs_20201113.shp') admin1_shp = gpd.read_file(base_path + 'bgd_adm_bbs_20201113_SHP/bgd_admbnda_adm1_bbs_20201113.shp') admin2_shp = gpd.read_file(base_path + 'bgd_adm_bbs_20201113_SHP/bgd_admbnda_adm2_bbs_20201113.shp') admin3_shp = gpd.read_file(base_path + 'bgd_adm_bbs_20201113_SHP/bgd_admbnda_adm3_bbs_20201113.shp') # + # Visualize what shape files look like.. gplt.polyplot(admin2_shp) plt.title('Bangladesh map with Admin level 2 boundaries') plt.show() # + # Reading file having names of all admin levels. # Can serve as an intermediary for joining shape files with other data admin = pd.read_excel(base_path + 'bgd_adminboundaries_tabulardata.xlsx') print(admin.shape) admin.head() # + # Reading movement data provided by facebook. Note the delimeter param df = pd.read_csv(base_path + 'movement-range-data-2021-03-03/movement-range-2021-03-03.txt', delimiter='\t') # Keeping data only for Bangladesh df = df[df.country == 'BGD'] df.head() # - # In facebook movement data, the polygon name corresponds to Admin level 2 print(df.polygon_name.unique()) # + # Adding different datetime columns to index on them later # Will take some time... 
df['ds'] = pd.to_datetime(df.ds) df['ds_year'] = df.ds.apply(lambda y: y.year) df['ds_month'] = df.ds.apply(lambda y: y.month) df['ds_day'] = df.ds.apply(lambda y: y.day) # + # Join data: shape file to facebook data # 2 notes: Filtered on bangladesh country and selected the latest date for the purpose of visualization merged_data = admin2_shp.set_index('ADM2_EN').join( df[(df.ds == df.ds.max())].set_index('polygon_name') ) # + """ Plotting the graph for 1 day showing: Positive proportion of users stayiung put within a single location relative to February 2020 """ ax = gplt.polyplot(merged_data) gplt.choropleth( merged_data, hue='all_day_ratio_single_tile_users', edgecolor='black', linewidth=1, cmap='Reds', legend=True, ax=ax ) plt.title('Positive proportion of users staying put within a single location relative to February 2020 \n Note: This is only for a single day') plt.show() # + def single_plot(shp, ts_data, dt): # Join data: shape file to facebook data # 2 notes: Filtered on bangladesh country and selected the latest date for the purpose of visualization merged_data = shp.set_index('ADM2_EN').join( ts_data[(ts_data.ds == dt)].set_index('polygon_name') ) merged_data.reset_index(inplace=True) """ Plotting the graph for 1 day showing: Positive proportion of users stayiung put within a single location relative to February 2020 """ ax = gplt.polyplot(merged_data) gplt.choropleth( merged_data, hue='all_day_ratio_single_tile_users', edgecolor='black', linewidth=1, cmap='Reds', legend=True, ax=ax ) file_name = f'plots/facebook_merge_{dt}.png' plt.title(f'Positive proportion of users staying put within a single location relative to previous month\n For {dt}') plt.savefig(file_name) plt.close() return file_name def animate_plot(shp, ts_data, dt_range): file_names = [] for dt in dt_range: file_name = single_plot(shp, ts_data, dt) file_names.append(file_name) print(f'Saved the plot at {file_name}') images = [] for file_name in file_names: 
images.append(imageio.imread(file_name)) gif_path = 'plots/time_series_movement.gif' imageio.mimsave(gif_path, images, fps=1) # + # Create a date range for time series plot dt_range = pd.Series(df[(df.ds_month == 2) & (df.ds_year == 2021)].ds.unique()) animate_plot(admin2_shp, df, dt_range) # -
Bangladesh/facebook_data_merge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="1T6_tQRr5LsU" import pandas as pd import numpy as np from keras.models import Model, load_model from keras.layers import Input, LSTM, Dense, Reshape from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.layers.embeddings import Embedding import tensorflow as tf import tensorflow.keras.backend as K import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/"} id="Cy-GTRyQdjjj" outputId="6792acd3-baac-417f-e587-82cdd9304ec1" # %cd /content/drive/MyDrive/Colab/Transliterate/ # + colab={"base_uri": "https://localhost:8080/"} id="9SQgkSs5drK0" outputId="e62497b8-fdc4-415d-f31e-c2eeca1a0d95" # %ls # + [markdown] id="h87l7bzjN4V0" # # Preprocessing Data # + [markdown] id="Kl9QsQKYac2q" # ## load affidavits dataset # # + id="pAUukayNPVZY" affidavits_df = pd.read_csv('affidavits.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 625} id="A0ZOV5i6Pe_k" outputId="93c55c6f-2046-459a-8b2f-eeac1384d47e" affidavits_df.head() # + id="TnwMig87P1yV" af_df = affidavits_df[['fathers_or_husbands_name_hindi', 'fathers_or_husbands_name_english', 'name_hindi', 'name_english']] # + id="XYj3W5R3bdii" # split and merge input_target_df = pd.concat([ af_df[['fathers_or_husbands_name_hindi','fathers_or_husbands_name_english']].rename(columns={"fathers_or_husbands_name_hindi": "hindi", "fathers_or_husbands_name_english": "english"}), af_df[['name_hindi','name_english']].rename(columns={"name_hindi": "hindi", "name_english": "english"}) ], ignore_index=True) # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="q6_4pn8Wbt-A" outputId="27873e76-dbe5-4590-ccd5-d432b7035942" input_target_df.head() # + [markdown] id="sRjQYPAjajwg" # ## load cricket dataset # + id="QIlUaSeP54a0" cricket_df = 
pd.read_json('players_with_hindi_names.json') # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="-8AW_DdX5934" outputId="1a6e609a-9479-4194-b3e0-88cb5e4e65d4" cricket_df.head() # + id="Zyc762NtcILn" input_target_df = pd.concat([ input_target_df, cricket_df[['hindi_long_name', 'english_name']].rename(columns={"hindi_long_name": "hindi", "english_name": "english"}) ], ignore_index=True) # + [markdown] id="iDtsnXBQa7m2" # ## load Dakshina Dataset # + id="YWUoa66ea0xR" dakshina_df = pd.read_csv('hi.translit.sampled.train.tsv', sep='\t', names=["hindi", "english", "freq"]) # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="LvzPQO06bONy" outputId="6cfbd065-1cf0-41ea-943a-ff8da9afe559" dakshina_df.head() # + id="X8aCM1ehbqWb" input_target_df = pd.concat([ input_target_df, dakshina_df[['hindi', 'english']] ], ignore_index=True) # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="c_rJ_vXPcpGE" outputId="77e9dea9-0b39-4c38-833d-b77fa42350e0" input_target_df.head() # + colab={"base_uri": "https://localhost:8080/"} id="oxaiXS4yc90U" outputId="1880929e-35dd-4a3b-84a0-266ae17b200a" input_target_df.info() # + id="7WVfnG3RSDz0" input_target_df = input_target_df.dropna() # + colab={"base_uri": "https://localhost:8080/"} id="rs0mMz4ndDCG" outputId="9813a65e-00ce-4e35-b55d-cb8bd9a20dd2" input_target_df.info() # + id="7gujOMDzS1O_" # strip and make lower case input_target_df['hindi'] = input_target_df['hindi'].str.strip() # hindi has no caps input_target_df['english'] = input_target_df['english'].str.strip().str.lower() # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="aJDDFPvhTv3e" outputId="0ff1d8d2-6a9c-4a41-b60b-2c665f628210" input_target_df.head() # + id="tUmbiLe8T7rk" # Strip out any abbreviated names input_target_df = input_target_df[~input_target_df['english'].str.contains('\.')] input_target_df = input_target_df[~input_target_df['english'].str.contains(',')] # + colab={"base_uri": 
"https://localhost:8080/"} id="GRkMX8QcVbKg" outputId="2fbc4648-4a74-4b59-af8c-2340eed53be6" input_target_df.info() # + id="dyzG08H1Z3Ch" # drop duplicates input_target_df = input_target_df.drop_duplicates() # + colab={"base_uri": "https://localhost:8080/"} id="52H8_u4maYM2" outputId="26cb23ca-2ee1-4a68-bb94-3b14d415f652" input_target_df.info() # + id="DnQHxhVye07D" # Remove if target has non-english def isEnglish(s): try: s.encode(encoding='utf-8').decode('ascii') except UnicodeDecodeError: return False else: return True input_target_df = input_target_df[input_target_df['english'].apply(isEnglish)] # + colab={"base_uri": "https://localhost:8080/"} id="sEGVeRyrgJJc" outputId="1c0bad41-3570-4283-cc3a-33041f35cab1" input_target_df.info() # + id="URH4OE1pq4rM" # append start and end of sequence for target sos = '^' eos = '$' input_target_df['english'] = sos + input_target_df['english'].astype(str) + eos # + id="AahQxw3J5_ax" input_words = input_target_df['hindi'].tolist() target_words = input_target_df['english'].tolist() # + colab={"base_uri": "https://localhost:8080/"} id="lpsCJ3kv7LkA" outputId="bace413b-6505-4c4e-f4be-ec97964560fa" print(input_words[0]) print(target_words[0]) # + colab={"base_uri": "https://localhost:8080/"} id="X3pGuZOyQ1D2" outputId="eee96d86-fcfa-4cb2-f588-b8a60ca16037" print(f"Total number of input words {len(input_words)}") print(f"Total number of target words {len(target_words)}") # + id="U5Trw_fmOLi7" def build_vocab(input_words, target_words): input_vocab = set() target_vocab = set() for input_word, target_word in zip(input_words, target_words): input_vocab.update(set(input_word)) target_vocab.update(set(target_word)) return input_vocab, target_vocab # + id="RlgD8jM0OsHG" input_vocab, target_vocab = build_vocab(input_words, target_words) # + colab={"base_uri": "https://localhost:8080/"} id="W_wQMLvrOyu_" outputId="e75e47d5-a7f4-45d6-80bf-e3212b137f53" print(f"Total hindi characters {len(input_vocab)}") print(f"Total english characters 
{len(target_vocab)}") # + id="ztVnKKGsYlFK" input_vocab = sorted(list(input_vocab)) target_vocab = sorted(list(target_vocab)) # + id="fbJSDwkvXvQp" input_token_index = dict([(char, i) for i, char in enumerate(input_vocab)]) target_token_index = dict([(char, i) for i, char in enumerate(target_vocab)]) # + id="-dxT9ms4YG1y" num_encoder_tokens = len(input_vocab) num_decoder_tokens = len(target_vocab) # + id="DZguL8LRY8Bl" max_encoder_seq_length = max([len(txt) for txt in input_words]) max_decoder_seq_length = max([len(txt) for txt in target_words]) # + colab={"base_uri": "https://localhost:8080/"} id="HZLsjCHrZGuP" outputId="05b74353-43dc-4b42-90cb-49244cd84f3c" print('Number of samples:', len(input_words)) print('Number of unique input tokens:', num_encoder_tokens) print('Number of unique output tokens:', num_decoder_tokens) print('Max sequence length for inputs:', max_encoder_seq_length) print('Max sequence length for outputs:', max_decoder_seq_length) # + id="nbXBdUGOZO3_" encoder_input_data = np.zeros((len(input_words), max_encoder_seq_length, num_encoder_tokens), dtype='float32') decoder_input_data = np.zeros((len(input_words), max_decoder_seq_length, num_decoder_tokens), dtype='float32') decoder_target_data = np.zeros( (len(input_words), max_decoder_seq_length, num_decoder_tokens), dtype='float32') # + colab={"base_uri": "https://localhost:8080/"} id="HO9DgOJOZgpe" outputId="4f0769a0-7cc7-4977-d9db-013c6073dede" print(f"encoder input shape {encoder_input_data.shape}") print(f"decoder input shape {decoder_input_data.shape}") print(f"decoder target shape {decoder_target_data.shape}") # + id="9KGNey6fZ05q" for i, (input_text, target_text) in enumerate(zip(input_words, target_words)): for t, char in enumerate(input_text): encoder_input_data[i, t, input_token_index[char]] = 1.0 # for the rest of sentence, mark it as space #encoder_input_data[i, t + 1 :, input_token_index[" "]] = 1.0 for t, char in enumerate(target_text): # decoder_target_data is ahead of 
decoder_input_data by one timestep decoder_input_data[i, t, target_token_index[char]] = 1.0 if t > 0: # decoder_target_data will be ahead by one timestep # and will not include the start character. decoder_target_data[i, t-1, target_token_index[char]] = 1.0 # for the rest of sentence, mark it as space #decoder_input_data[i, t + 1 :, target_token_index[" "]] = 1.0 # + [markdown] id="0CZd0uprOC5u" # # Define Model # + id="0F1B0ezJcehK" colab={"base_uri": "https://localhost:8080/"} outputId="b3df9826-5bc7-4061-e777-bb38dbbe546f" latent_dim = 256 # Latent dimensionality of the encoding space. EMBEDDING_SIZE = 100 # Define an input sequence and process it. encoder_inputs = Input(shape=(None, num_encoder_tokens)) print(encoder_inputs.shape) # Define embedding layer encoder_embedding = Embedding(num_encoder_tokens, EMBEDDING_SIZE, input_length=max_encoder_seq_length) encoder_inputs_em = encoder_embedding(encoder_inputs) print(encoder_inputs_em.shape) # since embedding layer will add one more dim, we need to flatten last 2 dims encoder_reshape = Reshape((-1, encoder_inputs_em.shape[2] * encoder_inputs_em.shape[3])) encoder_inputs_re = encoder_reshape(encoder_inputs_em) print(encoder_inputs_re.shape) # Note that LSTM takes only 3 dims, (samples, timesteps, features) encoder = LSTM(latent_dim, return_state=True) encoder_outputs, state_h, state_c = encoder(encoder_inputs_re) # We discard `encoder_outputs` and only keep the states. encoder_states = [state_h, state_c] # + id="D4g-jGmUeeL4" colab={"base_uri": "https://localhost:8080/"} outputId="e4791789-9d0d-437f-8233-f9471b909846" # Set up the decoder, using `encoder_states` as initial state. 
decoder_inputs = Input(shape=(None, num_decoder_tokens)) print(decoder_inputs.shape) # Define embeddling layer decoder_embedding = Embedding(num_decoder_tokens, latent_dim) decoder_inputs_em = decoder_embedding(decoder_inputs) print(decoder_inputs_em.shape) # since embedding layer will add one more dim, we need to flatten last 2 dims decoder_reshape = Reshape((-1, decoder_inputs_em.shape[2] * decoder_inputs_em.shape[3])) decoder_inputs_re = decoder_reshape(decoder_inputs_em) print(decoder_inputs_re.shape) # We set up our decoder to return full output sequences, # and to return internal states as well. We don't use the # return states in the training model, but we will use them in inference. decoder = LSTM(latent_dim, return_sequences=True, return_state=True) decoder_outputs, _, _ = decoder(decoder_inputs_re, initial_state=encoder_states) print(decoder_outputs.shape) decoder_dense = Dense(num_decoder_tokens, activation='softmax') decoder_outputs = decoder_dense(decoder_outputs) print(decoder_outputs.shape) # + id="voLTGIiBeyKO" # Define the model that will turn # `encoder_input_data` & `decoder_input_data` into `decoder_target_data` model = Model([encoder_inputs, decoder_inputs], decoder_outputs) # + colab={"base_uri": "https://localhost:8080/"} id="iShaSKTZYbxl" outputId="170b8551-2874-490a-9cfb-e64600d19314" model.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 644} id="mfGcinDOYqKl" outputId="4cedc360-47bc-4cef-db62-de7e94c3bb05" tf.keras.utils.plot_model(model, show_shapes=True, show_layer_names=True) # + [markdown] id="eMp54YzsOKwn" # # Train # + colab={"base_uri": "https://localhost:8080/"} id="CbARhubxe2yA" outputId="65d91e9f-0758-4ff3-ca38-18f3133bee25" # Training batch_size = 64 # Batch size for training. epochs = 500 # Number of epochs to train for. 
initial_learning_rate = 0.001 # Add callbacks: # 'EarlyStopping' to stop training when the model is not enhancing anymore # 'ModelCheckPoint' to always keep the model that has the best val_accuracy model_save_filename = "model.h5" model.compile( optimizer=tf.keras.optimizers.RMSprop(learning_rate=initial_learning_rate), loss='categorical_crossentropy', metrics=['accuracy']) earlystopping_cb = EarlyStopping( monitor="val_accuracy", mode="max", verbose=1, patience=20, restore_best_weights=True) mdlcheckpoint_cb = ModelCheckpoint( model_save_filename, monitor="val_accuracy", mode="max", verbose=1, save_best_only=True ) decay = initial_learning_rate / epochs def lr_time_based_decay(epoch, lr): return lr * 1 / (1 + decay * epoch) lrscheduler_cb = tf.keras.callbacks.LearningRateScheduler(lr_time_based_decay) history = model.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=batch_size, epochs=epochs, validation_split=0.2, callbacks=[earlystopping_cb, mdlcheckpoint_cb, lrscheduler_cb]) # + colab={"base_uri": "https://localhost:8080/"} id="OAvJi9RndsTf" outputId="8c8d6633-a8d0-4a04-d354-fd9ada0fa750" # list all data in history print(history.history.keys()) # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="CRUKMJidtBC2" outputId="fd24b2da-b6b3-40f5-b9f3-cb91eefb725a" # summarize history for accuracy plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="arLbJGpHa-JC" outputId="6f1117f7-01e7-4859-b8ba-04eece73ce32" # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() # + colab={"base_uri": "https://localhost:8080/"} 
id="Rmx3_vQbfh7J" outputId="1b6ddd66-d7dc-4b4c-9adc-1159b31fc2c8" # %ls -ltr # + colab={"base_uri": "https://localhost:8080/"} id="vm5myRi5tOKl" outputId="460a12c2-77c7-412d-d4b0-595189c5c835" # !du -sh model.h5 # + [markdown] id="qFGXM_d7OOm-" # # Inference # + id="ICjit4jSOU4O" # Restore the model and construct the encoder and decoder. model = load_model(model_save_filename) # model = load_model("model_v1.h5") # + colab={"base_uri": "https://localhost:8080/"} id="sZ0dTUtE_3_y" outputId="fda6ac12-6a87-4bb7-b83e-6cfe676e7055" model.layers[-1] # + id="7ZowNfcXmWlL" colab={"base_uri": "https://localhost:8080/"} outputId="23a5df65-a4f5-4383-f920-41ef8bb011d3" # Predictions encoder_inputs = model.input[0] # input_1 encoder_outputs, state_h_enc, state_c_enc = model.layers[6].output # lstm_1 encoder_states = [state_h_enc, state_c_enc] encoder_model = Model(encoder_inputs, encoder_states) decoder_inputs = model.input[1] # input_2 decoder_inputs_em = decoder_embedding(decoder_inputs) # since embedding layer will add one more dim, we need to flatten last 2 dims decoder_reshape = Reshape((-1, decoder_inputs_em.shape[2] * decoder_inputs_em.shape[3])) decoder_inputs_re = decoder_reshape(decoder_inputs_em) print(decoder_inputs_re.shape) decoder_state_input_h = Input(shape=(latent_dim,)) decoder_state_input_c = Input(shape=(latent_dim,)) decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] decoder_lstm = model.layers[7] # lstm_2 decoder_outputs, state_h_dec, state_c_dec = decoder_lstm( decoder_inputs_re, initial_state=decoder_states_inputs ) decoder_states = [state_h_dec, state_c_dec] decoder_dense = model.layers[-1] decoder_outputs = decoder_dense(decoder_outputs) decoder_model = Model( [decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states ) # + id="P-waZOKNPJaS" # Reverse-lookup token index to decode sequences back to # something readable. 
reverse_input_char_index = dict((i, char) for char, i in input_token_index.items()) reverse_target_char_index = dict((i, char) for char, i in target_token_index.items()) # + colab={"base_uri": "https://localhost:8080/", "height": 470} id="90myy-NNvctl" outputId="38356718-4222-4a5c-c61d-fa21a4c56562" tf.keras.utils.plot_model(decoder_model, to_file='model_plot_dec.png', show_shapes=True, show_layer_names=True) # + id="CP-O29K3zeut" def predict_target(input_string): # convert for encoding input_data = np.zeros((1, max_encoder_seq_length, num_encoder_tokens), dtype='float32') for t, char in enumerate(input_string): input_data[0, t, input_token_index[char]] = 1. # Encode the input as state vectors. states_value = encoder_model(input_data) # Generate empty target sequence of length 1. target_seq = np.zeros((1, 1, num_decoder_tokens), dtype='float32') # Populate the first character of target sequence with the start character. target_seq[0, 0, target_token_index['^']] = 1.0 stop_condition = False decoded_sentence = "" while True: output_tokens, d_h, d_c = decoder_model.predict([target_seq] + states_value) # Sample a token sampled_token_index = np.argmax(output_tokens[0, -1, :]) sampled_char = reverse_target_char_index[sampled_token_index] # Exit condition: either hit max length # or find stop character. if sampled_char == "$" or len(decoded_sentence) > max_decoder_seq_length: break decoded_sentence += sampled_char # Update the target sequence (of length 1). 
target_seq = np.zeros((1, 1, num_decoder_tokens)) target_seq[0, 0, sampled_token_index] = 1.0 # Update states states_value = [d_h, d_c] return decoded_sentence # + id="0O9eWBgtvGeM" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1d910d26-b1d5-42ec-9821-315303fa19fc" test_input = 'राजशेखर' predicted_string = predict_target(test_input) predicted_string # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Uy2yjJXZ0MdK" outputId="0e9bedd2-7365-4d6c-9c67-eab5d0d4e56f" test_input = 'गौरव' predicted_string = predict_target(test_input) predicted_string # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="q8W7E0Kzka0e" outputId="bacc696d-b934-46ea-d7f4-74e063c07836" test_input = 'सूद' predicted_string = predict_target(test_input) predicted_string # + id="NeUfRfSlyGX4"
indicate/notebooks/Transliterate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # pendulum使用整理 # # [pendulum](https://github.com/sdispater/pendulum)使的操作时间变得更简单. 下面参照官网列举一些常见的操作,整的很简单的哦! # # 官网也介绍了` pendulum`在`sqlite3`、`mysqlclient`、`django`使用需要的设置,日常开发绝对够了 # + [markdown] deletable=true editable=true # ## 安装 # + deletable=true editable=true # !pip install pendulum # + [markdown] deletable=true editable=true # ## 基本使用 # # > 只有`now()`使用的本地时区,其他方法都是使用的`UTC` # + deletable=true editable=true # 当前时间的获取 import pendulum now = pendulum.now()# 当前时间 print("当前时间:",now) now_in_paris = pendulum.now('Europe/Paris') print("巴黎时间:",now_in_paris) print("当前时间转UTC时间:", now.in_timezone('UTC')) # + deletable=true editable=true now = pendulum.create(2017, 5, 4, 0, 0, 0, 0, 'Asia/Shanghai') print("5.4青年节:", now) # + [markdown] deletable=true editable=true # ## 日期计算 # + deletable=true editable=true import pendulum now = pendulum.now() # 今天 tomorrow = now.add(days = 1) # 明天 last_week = now.subtract(weeks=1) # 上周 print("今天:",now) print("明天:",tomorrow) print("上周:",last_week) if now.is_weekend(): print("哇,周末了!") delta = now - last_week print("今天距离上周:", delta.days, "天") print("上周到今天每一天:") for day in delta: print("day:", day) # + [markdown] deletable=true editable=true # ## 日期转换 # > 支持各种格式. 
包括`星期日历表示法`,可以查看一下[ISO_8601](https://zh.wikipedia.org/wiki/ISO_8601) # + deletable=true editable=true import pendulum pendulum.parse('2017-5-1') # + deletable=true editable=true pendulum.parse('20160413') # + deletable=true editable=true pendulum.parse('2016-W07-5') # + [markdown] deletable=true editable=true # ## 时间间隔 # + deletable=true editable=true # Interval it = pendulum.interval(days=15) print(it.weeks,it.days,it.hours) print(it.in_hours()) # 以小时表示间隔 # + deletable=true editable=true # Period 是 Interval的 特例 dt1 = pendulum.now() dt2 = dt1.add(days=3) # A period is the difference between 2 instances period = dt2 - dt1 print("日期相差:",period.in_hours(),"小时") print("日期相差(日期之间包含的工作日):",period.in_weekdays(),"天") print("日期相差(日期之间包含的周末):",period.in_weekend_days(),"天") # A period is iterable for dt in period: print(dt) # - # ## 时间戳 import pendulum print(pendulum.from_timestamp(1477548306).to_datetime_string()) print(pendulum.from_timestamp(1477548306, 'Asia/Shanghai').to_datetime_string())
books/datetime/pendulum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SMS Spam Collection v.1 # ------------------------- # # 1. DESCRIPTION # -------------- # # The SMS Spam Collection v.1 (hereafter the corpus) is a set of SMS tagged messages that have been collected for SMS Spam research. It contains one set of SMS messages in English of 5,574 messages, tagged acording being ham (legitimate) or spam. # # 1.1. Compilation # ---------------- # # This corpus has been collected from free or free for research sources at the Web: # # - A collection of between 425 SMS spam messages extracted manually from the Grumbletext Web site. This is a UK forum in which cell phone users make public claims about SMS spam messages, most of them without reporting the very spam message received. The identification of the text of spam messages in the claims is a very hard and time-consuming task, and it involved carefully scanning hundreds of web pages. The Grumbletext Web site is: http://www.grumbletext.co.uk/ # - A list of 450 SMS ham messages collected from <NAME>'s PhD Theses available at http://etheses.bham.ac.uk/253/1/Tagg09PhD.pdf # - A subset of 3,375 SMS ham messages of the NUS SMS Corpus (NSC), which is a corpus of about 10,000 legitimate messages collected for research at the Department of Computer Science at the National University of Singapore. The messages largely originate from Singaporeans and mostly from students attending the University. These messages were collected from volunteers who were made aware that their contributions were going to be made publicly available. 
The NUS SMS Corpus is avalaible at: http://www.comp.nus.edu.sg/~rpnlpir/downloads/corpora/smsCorpus/ # - The amount of 1,002 SMS ham messages and 322 spam messages extracted from the SMS Spam Corpus v.0.1 Big created by <NAME> and public available at: http://www.esp.uem.es/jmgomez/smsspamcorpus/ # # # 1.2. Statistics # --------------- # # There is one collection: # # - The SMS Spam Collection v.1 (text file: smsspamcollection) has a total of 4,827 SMS legitimate messages (86.6%) and a total of 747 (13.4%) spam messages. # # # 1.3. Format # ----------- # # The files contain one message per line. Each line is composed by two columns: one with label (ham or spam) and other with the raw text. Here are some examples: # # ham What you doing?how are you? # ham Ok lar... Joking wif u oni... # ham dun say so early hor... U c already then say... # ham MY NO. IN LUTON 0125698789 RING ME IF UR AROUND! H* # ham Siva is in hostel aha:-. # ham Cos i was out shopping wif darren jus now n i called him 2 ask wat present he wan lor. Then he started guessing who i was wif n he finally guessed darren lor. # spam FreeMsg: Txt: CALL to No: 86888 & claim your reward of 3 hours talk time to use from your phone now! ubscribe6GBP/ mnth inc 3hrs 16 stop?txtStop # spam Sunshine Quiz! Win a super Sony DVD recorder if you canname the capital of Australia? Text MQUIZ to 82277. B # spam URGENT! Your Mobile No 07808726822 was awarded a L2,000 Bonus Caller Prize on 02/09/03! This is our 2nd attempt to contact YOU! Call 0871-872-9758 BOX95QU # # Note: messages are not chronologically sorted. 
# + # importing libraries and the Dataset import pandas as pd import re import nltk import numpy as np import matplotlib.pyplot as plt messages = pd.read_csv('spam.csv',encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'],axis=1,inplace=True) messages.columns = ['label','message'] # - messages.head() messages['label'].value_counts().plot(kind='bar') #Data cleaning and preprocessing from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer lem = WordNetLemmatizer() corpus = [] for i in range(0, len(messages)): review = re.sub('[^a-zA-Z]', ' ', messages['message'][i]) review = review.lower() review = review.split() review = [lem.lemmatize(word) for word in review if not word in stopwords.words('english')] review = ' '.join(review) corpus.append(review) # Creating the Bag of Words model where 0= ham and 1= spam from sklearn.feature_extraction.text import CountVectorizer cv = CountVectorizer(max_features=2500) X = cv.fit_transform(corpus).toarray() y=pd.get_dummies(messages['label']) y=y.iloc[:,1].values # Creating the TF-IDF model from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer(max_features=2500) X1 = tfidf.fit_transform(corpus).toarray() y1=pd.get_dummies(messages['label']) y1=y1.iloc[:,1].values # ### Model for Bag of words # + # Train Test Split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0) # Training model using Naive bayes classifier from sklearn.naive_bayes import MultinomialNB spam_detect_model = MultinomialNB().fit(X_train, y_train) y_pred=spam_detect_model.predict(X_test) spam_detect_model.score(X_test,y_test) # - from nltk.tokenize import word_tokenize review= 'Free entry in 2 a wkly comp to win FA Cup ' review= re.sub('[^a-zA-Z]',' ',review) # Replacing anything except character with space review= review.lower() # converting to lower review= word_tokenize(review) # Tokensizing to get 
all words review= [lem.lemmatize(x) for x in review if x not in set(stopwords.words('english'))] # Leammatizing review= [' '.join(review)] spam_detect_model.predict(CountVectorizer(max_features=2500).fit(corpus).transform(review).toarray()) # ### Model for Tf-IDF # + # Train Test Split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size = 0.20, random_state = 0) # Training model using Naive bayes classifier from sklearn.naive_bayes import MultinomialNB spam_detect_model = MultinomialNB().fit(X_train, y_train) y_pred=spam_detect_model.predict(X_test) spam_detect_model.score(X_test,y_test) # - review= 'No calls..messages..missed calls ' review= re.sub('[^a-zA-Z]',' ',review) # Replacing anything except character with space review= review.lower() # converting to lower review= word_tokenize(review) # Tokensizing to get all words review= [lem.lemmatize(x) for x in review if x not in set(stopwords.words('english'))] # Leammatizing review= [' '.join(review)] spam_detect_model.predict(TfidfVectorizer(max_features=2500).fit(corpus).transform(review).toarray()) from sklearn.metrics import roc_auc_score,precision_score,recall_score,f1_score,accuracy_score print('Roc_AUC: {}, Precision: {}, Recall:{}, F1:{}, Accuracy:{}'.format( roc_auc_score(y_test,spam_detect_model.predict(X_test)), precision_score(y_test,spam_detect_model.predict(X_test)), recall_score(y_test,spam_detect_model.predict(X_test)), f1_score(y_test,spam_detect_model.predict(X_test)), accuracy_score(y_test,spam_detect_model.predict(X_test))))
SMS_SPAM_Classfication.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os, platform uname = os.uname() print("Hostname: {}, System: {}, Dist: {}".format(uname.nodename, uname.sysname, ' '.join(platform.dist()))) # ### Import packages from phantasy import disable_warnings disable_warnings() from phantasy import MachinePortal from phantasy.tools import plot_orbit # ### Load machine/lattice mp = MachinePortal("FRIB_VA", "LEBT") lat = mp.work_lattice_conf # ### Sync device live settings lat.sync_settings() path, fm = lat.run() # fm.generate_latfile(latfile='test1.lat') # ### Show the simulated envelope p1 = plot_orbit(('pos', 'xrms'), ('pos', 'yrms'), flame_model=fm) # ### Change initial beam condition # # 1. Read from another FLAME lattice file # ```python # from flame_utils import ModelFlame, generate_source # fm1 = ModelFlame(lat_file='some_lat_file') # bs1 = fm1.bmstate # s1 = generate_source(state=bs1) # ``` # 2. Manually prepare beam condition, i.e. 
# source configuration, see below

from numpy import array

# Hand-written FLAME "source" element configuration used to override the
# model's initial beam condition (the alternative to loading it from another
# lattice file, as described above).
# NOTE(review): P0 appears to be a 7-component centroid vector and S0 a
# flattened 7x7 moment (covariance) matrix in FLAME's internal units —
# confirm against the FLAME / flame_utils documentation.
s1 = {'index': 0,
      'properties': {'IonChargeStates': array([ 0.13865546]),   # charge-to-mass state(s)
                     'IonEk': 11969.995341581,                  # kinetic energy
                     'IonEs': 931494320.0,                      # rest energy
                     'NCharge': array([ 10111.]),               # macro-particle charge count
                     # Centroid vector ("P" / vector_variable below).
                     'P0': array([  1.08055458e-14,  -3.81040863e-18,   1.33339836e-14,
                                   -5.69478990e-18,  -1.49954832e-15,   0.00000000e+00,
                                    1.00000000e+00]),
                     # Moment matrix ("S" / matrix_variable below), row-major 7x7.
                     'S0': array([  6.81917302e+02,  -2.52846423e-01,   3.83669247e+01,
                                   -1.26756931e-02,   2.80851875e-01,  -1.06898705e-16,
                                    0.00000000e+00,
                                   -2.52846423e-01,   9.44804331e-05,  -6.25559687e-03,
                                    1.77946925e-06,  -9.79349148e-05,   4.48060691e-20,
                                    0.00000000e+00,
                                    3.83669247e+01,  -6.25559687e-03,   8.92535761e+02,
                                   -3.39508360e-01,   8.36844956e-02,   0.00000000e+00,
                                    0.00000000e+00,
                                   -1.26756931e-02,   1.77946925e-06,  -3.39508360e-01,
                                    1.29900589e-04,  -3.00943030e-05,   0.00000000e+00,
                                    0.00000000e+00,
                                    2.80851875e-01,  -9.79349148e-05,   8.36844956e-02,
                                   -3.00943030e-05,   1.68488662e-04,   9.87582187e-15,
                                    0.00000000e+00,
                                   -1.06898705e-16,   4.48060691e-20,   0.00000000e+00,
                                    0.00000000e+00,   9.87582187e-15,   1.00000000e-19,
                                    0.00000000e+00,
                                    0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
                                    0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
                                    0.00000000e+00]),
                     'matrix_variable': 'S',
                     'name': 'S',
                     'type': 'source',
                     'vector_variable': 'P'}}

# #### Update beam initial condition

# Push the hand-written source element into the FLAME model.
fm.configure(econf=s1)

# ### Show the updated envelope

p2 = plot_orbit(('pos', 'xrms'), ('pos', 'yrms'), flame_model=fm)
02_Online_model/online_model_with_flame.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="./image/IBM.jpg" width="100%"> # # Recommendations with IBM # # In this notebook, I will be putting my recommendation skills to use on real data from the IBM Watson Studio platform. # # By following the table of contents, I will build out a number of different methods for making recommendations that you can follow it. # # *** # # # ## Table of Contents # # 0. [Loading Libraries - Loading and Inspection of Datasets](#Loading) # 1. [Exploratory Data Analysis](#Exploratory-Data-Analysis) # 2. [Rank Based Recommendations](#Rank) # 3. [User-User Based Collaborative Filtering](#User-User) # 4. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs) # 5. [Matrix Factorization](#Matrix-Fact) # 6. [Extras & Concluding](#conclusions) # # Let's get started by importing the necessary libraries and reading in the data. # ### <a class="anchor" id="Loading">Part 0: Loading Libraries - Loading and Inspection of Datasets</a> # #### 0.1. 
# Loading of Libraries

# +
# Loading Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import project_tests as t
import pickle
import re

# Bug fix: `from nltk.tokenize import word_tokenize` was imported twice;
# imports are deduplicated and grouped here.
import nltk
from nltk.tokenize import word_tokenize
from nltk import WordNetLemmatizer
from nltk.corpus import stopwords

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# Corpora needed by the tokenizer/lemmatizer below.
nltk.download('wordnet')
nltk.download('stopwords')

# %matplotlib inline
# -

# #### 0.2. Loading and Inspection of Data

# +
# Loading Datasets (relative to the notebook's ./data directory).
url = './data/'
path_user_item = url + 'user-item-interactions.csv'
df = pd.read_csv(path_user_item)
path_articles = url + 'articles_community.csv'
df_content = pd.read_csv(path_articles)

# Drop the CSV index column written by a previous to_csv.
del df['Unnamed: 0']
del df_content['Unnamed: 0']

# Show df to get an idea of the data
df.head()
# -

df.info()

# The number of records
df.shape[0]

# +
# Describe the df dataframe
# percentile list
perc = [.15, .25, .30, .50, .75, .90]
# list of dtypes to include
include = ['object', 'float', 'int']
df[['article_id']].describe(percentiles=perc, include=include)

# +
# The number of unique values per feature
display(df_content.nunique())
display(df.nunique())
# -

df_content.info()

# The number of records
df_content.shape[0]

# Describe the df_content dataframe
df_content[['article_id']].describe(percentiles=perc)

df.sort_values(by='article_id', ascending=False).head()

# Show df_content to get an idea of the data
df_content.head()

# ### <a class="anchor" id="Exploratory-Data-Analysis">Part I : Exploratory Data Analysis</a>
#
# Use the dictionary and cells below to provide some insight into the descriptive statistics of the data.
# # **`1.1.`** What is the distribution of how many articles a user interacts with in the dataset?  Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article.

# The number of articles read by each user
df[['email','article_id']].groupby(['email']).count().sort_values(by= 'article_id',ascending=False)

# 50% of individuals interact with '3' number of articles or fewer.
median_val = df.email.value_counts().median()
print('50% of individuals interact with {} number of articles or fewer'.format(median_val))

# The maximum number of user-article interactions.
# value_counts() sorts descending, so the first entry is the max.  Bug fix:
# `[0]` on a string-indexed Series is a label lookup with a deprecated
# positional fallback — use .iloc[0] for explicit positional access.
max_views_by_user = df.email.value_counts().iloc[0]
print('The maximum number of user-article interactions by any 1 user is {}'.format(max_views_by_user))

# Histogram of per-user interaction counts on a log x-axis; irregular bins
# give roughly even visual weight across the long tail.
hist_bins = [0,1,3,7,14,20,30,50,100,150,200,500]
hist_ticks = np.array([0.5, 1,3,7,14,20,30,50,100,150,200,500,1000])
plt.figure(figsize=(15,5))
plt.hist(df[['email','article_id']].groupby(['email']).count()['article_id'],bins=hist_bins,histtype='bar',ec='black')
plt.yscale('linear')
plt.xscale('log')
plt.xticks(hist_ticks,hist_ticks.astype(str))
plt.title('Distribution of user interactions - log scale')
plt.xlabel('Number of article views per user - log scale')
plt.ylabel('User count - log scale')
plt.show()

plt.figure(figsize=(20,5))
sns.distplot(df.groupby('email')['article_id'].count().values, hist_kws={"alpha": 0.2, "color": "b"});

# **`1.2.`** Explore and remove duplicate articles from the **df_content** dataframe.

# Find and explore duplicate articles
# First look.  Bug fix: `.duplicated().any().sum()` collapses to a boolean
# first and therefore always prints 0 or 1; `.duplicated().sum()` reports the
# actual duplicate-row count (matching the "5 records" noted below).
print('Duplicated inputs as rows :', df_content.duplicated().sum())

# Investigation of duplicated rows by 'article_id'
df_content.groupby(['article_id','doc_full_name'])['doc_full_name'].count().sort_values(ascending=False).head(10)

# We found **duplicated 5 records**. _We should remove_ these and only keep the first of duplicated records.
# As we can see from below info, there are 5 Articles that duplicated print('Shape of df_content is',df_content.shape[0]) print() print(df_content.nunique()) # Dropping dublicate rows by 'article_id' print('(Before dropping) The number of record :',df_content.shape[0]) df_content = df_content.drop_duplicates(subset='article_id', keep='first') print('(After dropping) The number of record :',df_content.shape[0]) # **`1.3.`** Use the cells below to find: # # **a.** The number of unique articles that have an interaction with a user. # **b.** The number of unique articles in the dataset (whether they have any interactions or not).<br> # **c.** The number of unique users in the dataset. (excluding null values) <br> # **d.** The number of user-article interactions in the dataset. # **`1.3.a.`** The number of unique articles that have an interaction with a user. unique_articles = df.article_id.nunique() print("The number of 'unique articles' is {}".format(unique_articles)) # **`1.3.b.`** The number of unique articles in the dataset (whether they have any **_interactions or not_**). total_articles = df_content.article_id.nunique() print("The number of 'total articles' is {}".format(total_articles)) # **`1.3.c.`** The number of **unique users** in the dataset. (excluding null values) unique_users = df.email.nunique() print("The number of 'unique users' are {}".format(unique_users)) df.email.isnull().sum() # + # We researche Null value inside feature of 'email' #df.groupby('email').count().sort_values('article_id', ascending=False).iloc[677:715] # - # **`1.3.d.`** The number of **user-article interactions** in the dataset. user_article_interactions = df.shape[0] print('The number of user-article interactions is {}'.format(user_article_interactions)) # **`1.4.`** Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. 
# There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).

# +
# The most viewed article in the dataset as a string with one value following the decimal
most_viewed_article_id = df.article_id.value_counts().index[0]

# The most viewed article in the dataset was viewed how many times?
max_views = df.article_id.value_counts().values[0]
# -

print('The most viewed article in the dataset is {}'.format(most_viewed_article_id))
print()
print('The most viewed article in the dataset is {}'.format(max_views))
print()
print('Most viewed article_id is {}, and viewed as {} many times'.format(int(most_viewed_article_id),max_views))

# +
## No need to change the code here - this will be helpful for later parts of the notebook

# Run this cell to map the user email column and remove the email column
def email_mapper():
    """Assign each email a sequential integer id, in order of first
    appearance, and return the encoded id for every row of df."""
    ids_by_email = dict()
    next_id = 1
    encoded = []
    for email in df['email']:
        if email not in ids_by_email:
            ids_by_email[email] = next_id
            next_id += 1
        encoded.append(ids_by_email[email])
    return encoded

email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded

# show header
df.head()
# -

user_article_interactions

# +
## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell

sol_1_dict = {
    '`50% of individuals have _____ or fewer interactions.`': median_val,
    '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
    '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
    '`The most viewed article in the dataset was viewed _____ times.`': max_views,
    '`The article_id of the most viewed article is ______.`': str(most_viewed_article_id),
    '`The number of unique articles that have at least 1 rating ______.`': unique_articles,
    '`The number of unique users in the dataset is ______`': unique_users,
    '`The number of unique articles on the IBM platform`': total_articles
}

# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)
# -

# ### <a class="anchor" id="Rank">Part II: Rank-Based Recommendations</a>
#
# Unlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not.  We only know that a user has interacted with an article.  In these cases, the popularity of an article can really only be based on how often an article was interacted with.
#
# **`2.1.`** Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.

# +
def get_top_articles(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - the titles of the n most-interacted-with articles,
                   most popular first
    '''
    interaction_counts = df['title'].value_counts()
    return interaction_counts.index[:n].values  # top article titles from df (not df_content)


def get_top_article_ids(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles_ids - the ids of the n most-interacted-with articles,
                       most popular first
    '''
    interaction_counts = df['article_id'].value_counts()
    return interaction_counts.index[:n].values  # top article ids
# -

print(get_top_articles(10))
print(get_top_article_ids(10))

# +
# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)

# Test each of your three lists from above
t.sol_2_test(get_top_articles)
# -

# ### <a class="anchor" id="User-User">Part III: User-User Based Collaborative Filtering</a>
#
#
# **`3.1.`** Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns.
#
# * Each **user** should only appear in each **row** once.
#
#
# * Each **article** should only show up in one **column**.
#
#
# * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**.  It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1.
#
#
# * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**.
#
# Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.

# +
def create_user_item_matrix(df):
    '''
    INPUT:
    df - pandas dataframe with article_id, title, user_id columns

    OUTPUT:
    user_item - user item matrix

    Description:
    Return a matrix with user ids as rows and article ids on the columns,
    with 1 values where a user interacted with an article and a 0 otherwise.
    '''
    # Performance fix: the original built the matrix with nested Python
    # loops over every (user, article) pair.  Count interactions per pair,
    # pivot to a users-by-articles table, and clip counts to a 0/1 float
    # indicator — same result, vectorized.
    counts = df.groupby(['user_id', 'article_id']).size().unstack(fill_value=0)
    user_item = (counts > 0).astype(float)

    # Match the original layout exactly: rows in order of first appearance
    # in df, columns (already) in ascending article_id order, unnamed axes.
    user_item = user_item.reindex(index=df.user_id.unique())
    user_item.index.name = None
    user_item.columns.name = None
    return user_item

user_item = create_user_item_matrix(df)
# -

# Spot-check a few row sums (number of distinct articles each user has seen).
print('1:',user_item.sum(axis=1)[1])
print('2:',user_item.sum(axis=1)[2])
print('3:',user_item.sum(axis=1)[3])
print('5145:',user_item.sum(axis=1)[5145])
print('5149:',user_item.sum(axis=1)[5146])
print('5147:',user_item.sum(axis=1)[5147])
print('5148:',user_item.sum(axis=1)[5148])
print('5149:',user_item.sum(axis=1)[5149])

## Tests: You should just need to run this cell.  Don't change the code.
assert user_item.shape[0] == 5149, "Oops!  The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops!  The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops!  The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests!  Please proceed!")

# **`3.2.`** Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar).  The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users.
#
# Use the tests to test your function.

def find_similar_users(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user_id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    similar_users - (list) an ordered list where the closest users
                    (largest dot product users) are listed first

    Description:
    Computes the similarity of every pair of users based on the dot product.
    Returns an ordered list excluding user_id itself.
    '''
    # One-row frame of dot products between user_id and every user.
    user_sim = user_item[user_item.index == user_id].dot(user_item.T)
    # Most similar first; equal similarities keep an arbitrary (stable) order.
    user_sim.sort_values(by=user_id, axis=1, ascending=False, inplace=True)
    # create list of just the ids
    most_similar_users = user_sim.columns.values.tolist()
    # remove the own user's id
    most_similar_users.remove(user_id)
    return most_similar_users

# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))

# **`3.3.`** Now that you have a function that provides the most similar users to each user, you will want to use
# these users to find articles you can recommend.  Complete the functions below to return the articles you would recommend to each user.

# +
def get_article_names(article_ids, df=df):
    '''
    INPUT:
    article_ids - (list) a list of article ids
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    article_names - (list) the article titles matching article_ids
                    (taken from the title column, in dataframe order)
    '''
    matching = df['article_id'].isin(article_ids)
    return df.loc[matching, 'title'].unique().tolist()


def get_user_articles(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    article_ids - (list) string ids of the articles seen by the user
    article_names - (list) the article titles for those ids

    Description:
    Provides the article ids and article titles that have been seen by a user.
    '''
    seen = df.loc[df.user_id == user_id, 'article_id'].unique()
    ids_as_str = seen.astype(str).tolist()
    return ids_as_str, get_article_names(ids_as_str, df=df)


def user_user_recs(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user

    Description:
    Walks the similarity-ordered user list; for each neighbor, adds the
    articles that neighbor has seen but the target user has not, stopping
    once more than m ids have been collected, then truncates to m.

    Notes:
    Users who are the same closeness are chosen arbitrarily as the 'next'
    user, and the final cut at m keeps an arbitrary subset of the last
    neighbor's articles.
    '''
    ranked_neighbors = find_similar_users(user_id, user_item=user_item)
    already_seen = get_user_articles(user_id, user_item=user_item)[0]

    recs = []
    for neighbor in ranked_neighbors:
        if len(recs) > m:
            break
        neighbor_ids = get_user_articles(neighbor, user_item=user_item)[0]
        # setdiff1d yields the sorted unique ids the target user hasn't seen
        recs.extend(np.setdiff1d(neighbor_ids, already_seen, assume_unique=True))

    return recs[:m]  # return your recommendations for this user_id
# -

# Check Results
get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1

# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests!  Nice job!")

# NOTE(review): this definition is redefined identically in the next cell
# (kept for notebook fidelity); the later definition is the operative one.
def get_top_sorted_users(user_id, df=df, user_item=user_item):
    '''
    INPUT:
    user_id - (int)
    df - (pandas dataframe) df as defined at the top of the notebook
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    neighbors_df - (pandas dataframe) with columns:
        neighbor_id - a neighbor user_id
        similarity - dot-product similarity to the provided user_id
        num_interactions - number of interactions logged for that neighbor

    Other Details - sorted by similarity, then number of interactions,
    highest of each first.  Note the result still includes user_id itself
    (maximal self-similarity puts it at the top).
    '''
    user_user = user_item.dot(user_item.T)
    # Positional assignment works because email_mapper assigned user ids in
    # order of first appearance, so appearance order == sorted id order.
    len_user = df.user_id.value_counts().sort_index().values
    user_user['len_user'] = len_user
    neighbors_df = user_user[[user_id, 'len_user']].sort_values(by=[user_id, 'len_user'], ascending=False)
    neighbors_df.reset_index(inplace=True)
    neighbors_df.rename(columns={'index':'neighbor_id', user_id :'similarity', 'len_user':'num_interactions'}, inplace=True)
    return neighbors_df

# **`3.4.`** Now we are going to improve the consistency of the **user_user_recs** function from above.
#
# * Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
#
#
# * Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose articles with the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.

# +
def get_top_sorted_users(user_id, df=df, user_item=user_item):
    '''
    INPUT:
    user_id - (int)
    df - (pandas dataframe) df as defined at the top of the notebook
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    neighbors_df - (pandas dataframe) with columns:
        neighbor_id - a neighbor user_id
        similarity - dot-product similarity to the provided user_id
        num_interactions - number of interactions logged for that neighbor

    Other Details - sorted by similarity, then number of interactions,
    highest of each first.  Still includes user_id itself at the top.
    '''
    user_user = user_item.dot(user_item.T)
    # See the note on the identical definition above about row alignment.
    len_user = df.user_id.value_counts().sort_index().values
    user_user['len_user'] = len_user
    neighbors_df = user_user[[user_id, 'len_user']].sort_values(by=[user_id, 'len_user'], ascending=False)
    neighbors_df.reset_index(inplace=True)
    neighbors_df.rename(columns={'index':'neighbor_id', user_id :'similarity', 'len_user':'num_interactions'}, inplace=True)
    return neighbors_df

def user_user_recs_part2(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user by article id
    rec_names - (list) a list of recommendations for the user by article title

    Description:
    Loops through the users based on closeness to the input user_id.
    For each neighbor, adds articles the user hasn't seen before until m
    recommendations are found.

    Notes:
    * Neighbors with the most total interactions come first among ties
      (handled by get_top_sorted_users' secondary sort key).
    * Within a neighbor, unseen articles are added most-interacted-with
      first (same ranking as get_top_articles), so the final cut at m keeps
      the most popular articles.
    '''
    neighbors = get_top_sorted_users(user_id)['neighbor_id'].tolist()

    # Articles the target user already saw (string ids, as returned by
    # get_user_articles).
    seen = set(get_user_articles(user_id)[0])

    # Global popularity rank (0 = most interactions), keyed by string id.
    popularity = {str(a): rank for rank, a in enumerate(df.article_id.value_counts().index)}

    # Title lookup so rec_names stays parallel to recs (the original zipped
    # ids with titles returned in dataframe order, mismatching them).
    title_by_id = df.drop_duplicates('article_id').set_index('article_id')['title']

    recs = []
    for neighbor in neighbors:
        if neighbor == user_id:
            continue  # bug fix: the target user is always their own top "neighbor"
        if len(recs) >= m:  # bug fix: was hard-coded `len(recs) < 10`, ignoring m
            break
        candidate_ids = get_user_articles(neighbor)[0]
        new_ids = [a for a in candidate_ids if a not in seen and a not in recs]
        new_ids.sort(key=lambda a: popularity.get(a, len(popularity)))
        recs.extend(new_ids)

    recs = recs[:m]
    rec_names = [title_by_id[float(a)] for a in recs]
    return recs, rec_names
# -

# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)

# **`3.5.`** Use your functions from above to correctly fill in the solutions to the dictionary below.  Then test your dictionary against the solution.  Provide the code you need to answer each following the comments below.
# +
### Tests with a dictionary of results

# Find the user that is most similar to user 1
# get_top_sorted_users includes the query user itself at position 0 (maximal
# self-similarity), so index [1] is the most similar *other* user.
user1_most_sim = get_top_sorted_users(1, df=df, user_item=user_item)['neighbor_id'].values.tolist()[1]

# Find the 10th most similar user to user 131 (index 10, skipping self at 0).
user131_10th_sim = get_top_sorted_users(131, df=df, user_item=user_item)['neighbor_id'].values.tolist()[10]

# +
## Dictionary Test Here
sol_5_dict = {
    'The user that is most similar to user 1.': user1_most_sim,
    'The user that is the 10th most similar to user 131': user131_10th_sim,
}

t.sol_5_test(sol_5_dict)
# -

# **`3.6.`** If we were given a new user, which of the above functions would you be able to use to make recommendations?  Explain.  Can you think of a better way we might make recommendations?

# _**<blockquote>Explanation for Question 6 <br></blockquote>**_
# _<blockquote>New user, new customer, new article means that there is no data available. That problem is called Cold Start Problem. For the cases like that, we can use Popular (Knowledge) Based filtering methods for that problems.</blockquote>_

# **`3.7.`** Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below.  You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.

# +
new_user = '0.0'

# What would your recommendations be for this new user '0.0'?  As a new user, they have no observed articles.
# Provide a list of the top 10 article ids you would give to
# Rank-based fallback: a brand-new user has no interactions, so recommend
# the globally most popular articles (ids converted to strings to match the
# expected format).
new_user_recs = get_top_article_ids(10)
new_user_recs = [str(i) for i in new_user_recs]
new_user_recs

# +
assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops!  It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users."

print("That's right!  Nice job!")
# -
# # # ### <a class="anchor" id="Matrix-Fact">Part V: Matrix Factorization</a> # # In this part of the notebook, you will build use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform. # # **`5.1.`** You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook. # Load the matrix here user_item_matrix = pd.read_pickle('user_item_matrix.p') # quick look at the matrix user_item_matrix.head() # **`5.2.`** In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson. u, s, vt = np.linalg.svd(user_item_matrix) print(u.shape) print(s.shape) print(vt.shape) # _**<blockquote>Explanation for Question 5.2 <br></blockquote>**_ # <blockquote>This time data is a lot larger and we did not emphasize the shape of data this time but in lesson it was given as (20,4)<br> # Also during the lesson, User-Item Matrix contained null values in it and SVD does not work with null values. We used Funk SVD in the lesson to solve this issue. In this case, the matrix does not have any null value that means we can use SVD method.<br></blockquote> # **`5.3.`** Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features. 
# +
# Reconstruct the ratings matrix with k latent features and measure how well
# the rounded reconstruction matches the original 1/0 interactions.
num_latent_feats = np.arange(10, 714, 20)
sum_errs = []

for k in num_latent_feats:
    # restructure with k latent features
    s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]

    # take dot product and round to 0/1 predictions
    user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))

    # compute error for each prediction to actual value
    diffs = np.subtract(user_item_matrix, user_item_est)

    # total errors and keep track of them
    err = np.sum(np.sum(np.abs(diffs)))
    sum_errs.append(err)

# NOTE(review): the error count is normalized by df.shape[0] (number of
# interaction rows), not by the number of cells in user_item_matrix --
# confirm this is the intended accuracy denominator.
plt.plot(num_latent_feats, 1 - np.array(sum_errs) / df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
# -

# **`5.4.`** From the above, we can't really be sure how many features to use,
# because simply having a better way to predict the 1's and 0's of the matrix
# doesn't exactly give us an indication of if we are able to make good
# recommendations. Instead, we might split our dataset into a training and
# test set of data, as shown in the cell below.

# Preview the split and the matrices it produces. (The original repeated the
# df_train/df_test assignments in the next cell as well; the duplicates were
# removed -- the values are identical.)
df_train = df.head(40000)
df_test = df.tail(5993)

df.head()

create_user_item_matrix(df_train).head()

create_user_item_matrix(df_test).head()


# +
def create_test_and_train_user_item(df_train, df_test):
    '''
    Build user-item matrices for a train/test split.

    INPUT:
    df_train - training dataframe
    df_test - test dataframe

    OUTPUT:
    user_item_train - a user-item matrix of the training dataframe
                      (unique users for each row and unique articles for each column)
    user_item_test - a user-item matrix of the testing dataframe
                     (unique users for each row and unique articles for each column);
                     returned as-is, i.e. NOT restricted to users/articles that
                     also appear in training (see note below)
    test_idx - set of all of the test user ids
    test_arts - set of all of the test article ids
    '''
    # Find user-item matrix for train and test
    user_item_train = create_user_item_matrix(df_train)
    user_item_test = create_user_item_matrix(df_test)

    # Find user idx and article of train set
    train_idx = set(user_item_train.index)
    train_arts = set(user_item_train.columns)

    # Find user idx and article of test set
    test_idx = set(user_item_test.index)
    test_arts = set(user_item_test.columns)

    # Intersection of user idx and articles between train and test sets
    common_idx = list(train_idx.intersection(test_idx))
    common_cols = list(train_arts.intersection(test_arts))

    # Deliberately left disabled: restricting the test matrix here would change
    # the cold-start counts computed below. The docstring used to claim this
    # restriction happened -- it does not.
    # user_item_test = user_item_test.loc[common_idx, common_cols]

    return user_item_train, user_item_test, test_idx, test_arts


user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)
# -

# Use the code from question 3 to understand the impact on accuracy of the
# training and test sets of data with different numbers of latent features.
# Using the split below:
#
# * How many users can we make predictions for in the test set?
# * How many users are we not able to make predictions for because of the cold start problem?
# * How many articles can we make predictions for in the test set?
# * How many articles are we not able to make predictions for because of the cold start problem?

# +
# Cold-start accounting: only users/articles seen in training can be predicted.
print('articles of the test set we can make predictions about')
print(len(np.intersect1d(df_train.article_id.unique(), df_test.article_id.unique())))

print('articles of the test set we cant make predictions about')
print(len(df_test.article_id.unique()) - len(np.intersect1d(df_train.article_id.unique(), df_test.article_id.unique())))

print('users of the test set we can make predictions about')
print(len(np.intersect1d(df_train.user_id.unique(), df_test.user_id.unique())))

print('users of the test set we cant make predictions about')
print(len(df_test.user_id.unique()) - len(np.intersect1d(df_train.user_id.unique(), df_test.user_id.unique())))

# +
# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0

sol_4_dict = {
    'How many users can we make predictions for in the test set?': c,
    'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,
    'How many movies can we make predictions for in the test set?': b,
    'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d
}

t.sol_4_test(sol_4_dict)
# -

# **`5.5.`** Now use the **user_item_train** dataset from above to find U, S,
# and V transpose using SVD. Then find the subset of rows in the
# **user_item_test** dataset that you can predict using this matrix
# decomposition with different numbers of latent features to see how many
# features makes sense to keep based on the accuracy on the test data. This
# will require combining what was done in questions `2` - `4`.
#
# Use the cells below to explore how well SVD works towards making predictions
# for recommendations on the test data.

# #### MODEL TRAINING and TESTING

# +
# fit SVD on the user_item_train matrix
u_train, s_train, vt_train = np.linalg.svd(user_item_train)
print(u_train.shape)
print(s_train.shape)
print(vt_train.shape)
# -

# Keep only the rows/columns of the training factors that correspond to users
# and articles which also appear in the test set.
row_idxs = user_item_train.index.isin(test_idx)
col_idxs = user_item_train.columns.isin(test_arts)
u_test = u_train[row_idxs, :]
vt_test = vt_train[:, col_idxs]

# +
# Use these cells to see how well you can use the training decomposition to
# predict on test data
num_latent_feats = np.arange(0, 714, 30)
sum_errs_train = []
sum_errs_test = []
all_errs = []

for k in num_latent_feats:
    # restructure with k latent features
    s_train_lat, u_train_lat, vt_train_lat = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]
    u_test_lat, vt_test_lat = u_test[:, :k], vt_test[:k, :]

    # take dot product
    user_item_train_preds = np.around(np.dot(np.dot(u_train_lat, s_train_lat), vt_train_lat))
    user_item_test_preds = np.around(np.dot(np.dot(u_test_lat, s_train_lat), vt_test_lat))

    # NOTE(review): this "all data" metric adds the sum of the predictions to
    # the sum of the actuals instead of counting mismatches -- confirm the
    # intended formula before trusting the 'All Data' curve.
    all_errs.append(1 - ((np.sum(user_item_test_preds) + np.sum(np.sum(user_item_test))) / (user_item_test.shape[0] * user_item_test.shape[1])))

    # compute error for each prediction to actual value
    # NOTE(review): user_item_test is the full test matrix while the
    # predictions are built from the training-factor subset -- verify the
    # shapes align (the restriction inside create_test_and_train_user_item is
    # commented out).
    diffs_train = np.subtract(user_item_train, user_item_train_preds)
    diffs_test = np.subtract(user_item_test, user_item_test_preds)

    # total errors and keep track of them
    err_train = np.sum(np.sum(np.abs(diffs_train)))
    err_test = np.sum(np.sum(np.abs(diffs_test)))
    sum_errs_train.append(err_train)
    sum_errs_test.append(err_test)

# +
# NOTE(review): the 'Train' curve is normalized by user_item_train.shape[0] *
# user_item_test.shape[1] (mixed train/test dimensions) -- confirm intended.
plt.plot(num_latent_feats, all_errs, label='All Data');
plt.plot(num_latent_feats, 1 - np.array(sum_errs_train) / (user_item_train.shape[0] * user_item_test.shape[1]), label='Train');
plt.plot(num_latent_feats, 1 - np.array(sum_errs_test) / (user_item_test.shape[0] * user_item_test.shape[1]), label='Test');
plt.xlabel('Number of Latent Features', fontsize=13);
plt.ylabel('Accuracy', fontsize=13);
plt.title('Accuracy - Number of Latent Features', fontsize=16);
plt.legend(fontsize=13);
# -

# **`5.6.`** Use the cell below to comment on the results you found in the
# previous question. Given the circumstances of your results, discuss what you
# might do to determine if the recommendations you make with any of the above
# recommendation systems are an improvement to how users currently find
# articles?

print('users of the test set we can make predictions about')
print(len(np.intersect1d(df_train.user_id.unique(), df_test.user_id.unique())))

# > ## **Result and Interpretation of the Model**<br>
# > Overall, the accuracy is very high across all sets for any number of
# **latent** features. <br>
# In this training set, accuracy logarithmically increases as the number of
# latent features increases in the training set. It peaks at nearly **`99.9%`**
# accuracy and troughs at **`98.5%`** accuracy.<br>
# Also, in this testing set and whole set, accuracy logarithmically decreases
# as the number of latent features increases. It peaks at **`98.2%`** accuracy
# and troughs at **`96.1%`** accuracy.<br>
# > Besides that, the accuracy is reported to be very high and it is important
# to verify the results in different ways. Since the dataset is imbalanced, it
# might be interesting to explore undersampling techniques to see if that
# improves results.<br>
# > * Unfortunately we have too few users of the test set (**` 20 `**) we can
# make **predictions about** in the training and testing datasets, so the
# result will not satisfy our questions/search to see how good the predictions
# made through SVD are.
# > * Even if the results and plots show us a great result, in fact we can't
# claim it is true because there is an imbalanced-data problem.
# > * Recommendation engines are great for solving problems but, like most
# methods, they also have a weakness for offline evaluation. There is one
# appropriate way -- an _**Experimental Design - A/B Test approach**_ -- which
# is a solid procedure to understand if the model brings real benefits.
# > * A basic **A/B Test** approach for this situation could be to _assign 50%
# of users to a control group with no recommendations and another group with
# recommendations_. After setting up the A/B test we will understand whether
# the recommendation system increases the number of user-article interactions.
#
# <a id='conclusions'></a>
# ### Extras
# Using your workbook, you could now save your recommendations for each user,
# develop a class to make new predictions and update your results, and make a
# flask app to deploy your results. These tasks are beyond what is required for
# this project. However, from what you learned in the lessons, you are
# certainly capable of taking these tasks on to improve upon your work here!
#
#
# ## Conclusion
#
# > Congratulations! You have reached the end of the Recommendations with IBM
# project!
#
# > **Tip**: Once you are satisfied with your work here, check over your report
# to make sure that it satisfies all the areas of the
# [rubric](https://review.udacity.com/#!/rubrics/2322/view). You should also
# probably remove all of the "Tips" like this one so that the presentation is
# as polished as possible.
# # # ## Directions to Submit # # > Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left). # # > Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button. # # > Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations! from subprocess import call call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])
Recommendations_with_IBM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Count how many records of an example VCF fall on the CrosscheckFingerprint
# haplotype-map sites.
import os
import re

INPUT_DIR = '/xchip/cga_home/bknisbac/CLL/results_cll1085/qc/crosscheck_rna/example_vcfs'
VCF_FILE = INPUT_DIR + '/' + 'CRC-0027-T-04.60k_sites.vcf'
VCF_FILE2 = INPUT_DIR + '/' + 'CW106-L.60k_sites.vcf'
MAP_FILE = '/xchip/cga_home/bknisbac/resources/snps/haplotype_db_60K_forCrossCheckFingerprint_fromGit_hg19_nochr.20200426.map.txt'
OUTPUT_ROOTDIR = '/xchip/cga_home/bknisbac/CLL/results_cll1085/qc/crosscheck_rna/example_vcfs/output_test'
os.makedirs(OUTPUT_ROOTDIR, exist_ok=True)
# -

# def find_intersect_sites(vcf1, vcf2, map_file, outdir):
# (stub removed: its dangling `pd.read` expression raised NameError at cell
# execution -- pandas is never imported in this notebook)

# +
# Read both VCFs and the haplotype map up front; `with` guarantees the file
# handles are closed (the original leaked all three handles).
with open(VCF_FILE, 'r') as vcf1_fh:
    vcf_lines1 = vcf1_fh.readlines()

with open(VCF_FILE2, 'r') as vcf2_fh:
    vcf_lines2 = vcf2_fh.readlines()

with open(MAP_FILE, 'r') as map_fh:
    map_lines = map_fh.readlines()
# -

# init sites: one "chrom:pos" counter per site listed in the haplotype map
site_dict = dict()
for l in map_lines:
    # skip header lines (@ = SAM-style sequence dictionary, # = comments)
    if re.match('^[@#].*', l):
        continue
    site_dict[':'.join(l.split('\t')[0:2])] = 0

# Count, for the first VCF, how many of its records hit each map site.
for l in vcf_lines1:
    if re.match('^#.*', l):  # skip VCF header lines
        continue
    site_key = ':'.join(l.split('\t')[0:2])
    if site_key in site_dict:  # membership on the dict itself, not .keys()
        site_dict[site_key] += 1

site_dict
terra_workspace/crosscheck/analyze_example_vcf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="oauLq-B8z_n8" # !pip install -q sklearn # + id="jF_2A3XFWkLn" # %tensorflow_version 2.x import tensorflow.compat.v2.feature_column as fc import tensorflow.compat.v1.saved_model as saved_model import tensorflow as tf from sklearn.preprocessing import OneHotEncoder from __future__ import absolute_import, division, print_function, unicode_literals from IPython.display import clear_output from six.moves import urllib from copy import deepcopy import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + colab={"base_uri": "https://localhost:8080/"} id="YRaJLoEOWmMA" outputId="454befdf-57d6-4dde-9f0e-56cc82f1ff61" tf.random.set_seed(197) print(tf.__version__) # + [markdown] id="SNY1hnSDh6Nc" # # Data Analysis # # **Linear analysis**: For linearly correlated datapoints in $\mathit{R^{n}}$ space # + [markdown] id="8ui1PHtRo4X4" # ## Loading the data # + id="Q-11N2a9ihLr" df_train = pd.read_csv("https://storage.googleapis.com/tf-datasets/titanic/train.csv") df_eval = pd.read_csv("https://storage.googleapis.com/tf-datasets/titanic/eval.csv") # + id="4td9QbGHdukg" y_train = df_train["survived"] y_eval = df_eval["survived"] # + id="2UCOFv0k687S" df_train_bak = deepcopy(df_train) # + [markdown] id="50SC2MMjq1Pn" # ## Preliminary Descriptive analysis # + id="IhEZ5wjDihCO" colab={"base_uri": "https://localhost:8080/", "height": 234} outputId="a7011b02-ed06-4f3a-99fb-3a0db238d56f" df_train.head(6) # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="m3kvE7phq7QK" outputId="7acb7e6b-b757-473a-daca-f11e331280a3" df_train.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 172} id="4ZMa91wFxAwV" outputId="3ce0e8e1-1557-4c73-b1dc-d8986aeeb89b" df_train.describe(include='object') # + colab={"base_uri": 
"https://localhost:8080/"} id="AeI-GpaLmKxo" outputId="7165a42b-9a12-4b76-ff69-c7858419eb22" df_train.shape # + [markdown] id="ho1ru_bKpHXZ" # ## Exploratory analysis # + colab={"base_uri": "https://localhost:8080/"} id="1Wk7wQwxzJnR" outputId="32daba4e-edde-4607-8e1a-d215483c5103" unique_survived = [val for val in y_train.unique()] unique_survived # + [markdown] id="0J08nFhKbS1M" # ### 1 class # + [markdown] id="uRvqDR-dcRzj" # #### 1.1 Unique values # + colab={"base_uri": "https://localhost:8080/"} id="LU5URho2bjIV" outputId="05ba9444-f042-434c-cba9-4e574ff9b218" unique_class = [val for val in df_train['class'].unique()] unique_class # + [markdown] id="pF2RFIhuEJPq" # #### 1.2 Distribution # + colab={"base_uri": "https://localhost:8080/", "height": 309} id="kNxeVTtuEJp8" outputId="2a3c46dd-9658-407f-9653-076b4d16ab5f" df_train['class'].value_counts().plot(kind = 'bar') # + [markdown] id="nhvp3zU5NzHa" # #### 1.3 Stacked survival stats # + id="HefkCuH42FAH" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="0f65d3b4-5cd1-472f-9a0b-cf3642811749" df = pd.DataFrame(df_train.groupby("class")['survived'].value_counts()) df = df.rename(columns={'survived':'count'}).pivot_table(index = 'class', values = 'count', columns = 'survived').reset_index() df.plot(kind = "bar", stacked = True, x = 'class') # + [markdown] id="KoSbVVF4R9hJ" # ### 2 Alone # + [markdown] id="E-IcqcqYcnOo" # #### 2.1 Unique Values # + id="a259TniNVnM9" colab={"base_uri": "https://localhost:8080/"} outputId="b93d4561-7583-45e1-e3f8-9762ae6b9f64" unique_alone = [val for val in df_train['alone'].unique()] unique_alone # + [markdown] id="KyRuX4KNSIny" # #### 2.2 Stacked survival stats # + id="B-y1ffYp2Sve" colab={"base_uri": "https://localhost:8080/", "height": 293} outputId="84345d09-811e-4196-eb3c-1679f1852d1b" df = pd.DataFrame(df_train.groupby("alone")['survived'].value_counts()) df = df.rename(columns={'survived':'count'}).pivot_table(index = 'alone', values = 'count', columns = 
'survived').reset_index() df.plot(kind = "bar", stacked = True, x = 'alone') # + [markdown] id="e43KNOhDVhL2" # ### 3 Sex # + [markdown] id="Q_vS2LeVcr01" # #### 3.1 Unique values # + id="L6Q504e7Vf2N" colab={"base_uri": "https://localhost:8080/"} outputId="f6c9dada-4fe9-416d-aff5-e1dd7bb1f136" unique_sex = [val for val in df_train['sex'].unique()] unique_sex # + [markdown] id="A8RhdQDvVfqs" # #### 3.2 Stacked survival stats # + id="WovSffgL9T_i" colab={"base_uri": "https://localhost:8080/", "height": 321} outputId="4ee7f5b2-75e9-4417-f7d5-aa03449cc8f2" df = pd.DataFrame(df_train.groupby("sex")['survived'].value_counts()) df = df.rename(columns={'survived':'count'}).pivot_table(index = 'sex', values = 'count', columns = 'survived').reset_index() df.plot(kind = "bar", stacked = True, x = 'sex') # + [markdown] id="quFUCczBVpP9" # ### 4 Embarked Town # + [markdown] id="YmdQJnvocvsg" # #### 4.1 Unique values # + id="mSQzhia5VpDT" colab={"base_uri": "https://localhost:8080/"} outputId="9d98fa63-7cab-4644-91b2-9ad0d3b95cec" unique_embark_town = [val for val in df_train['embark_town'].unique()] unique_embark_town # + [markdown] id="Pz22_jpcVrl2" # #### 4.2 Stacked survival stats # + id="EJsOrI8_DaPs" colab={"base_uri": "https://localhost:8080/", "height": 354} outputId="8edc570a-0128-45fd-9ba3-f5a8a4b209c3" df = pd.DataFrame(df_train.groupby("embark_town")['survived'].value_counts()) df = df.rename(columns={'survived':'count'}).pivot_table(index = 'embark_town', values = 'count', columns = 'survived').reset_index() df.plot(kind = "bar", stacked = True, x = 'embark_town') # + [markdown] id="hwEDu0m1WTIM" # ### 5 Deck # + id="Nv2Ab0BJWS-B" # + [markdown] id="XQqcb161WS00" # #### 5.1 Stacked survival stats # + id="uMew1udXDsMZ" colab={"base_uri": "https://localhost:8080/", "height": 334} outputId="eef203f0-217b-4408-fbae-82f9e39d945b" df = pd.DataFrame(df_train.groupby("deck")['survived'].value_counts()) df = df.rename(columns={'survived':'count'}).pivot_table(index = 
'deck', values = 'count', columns = 'survived').reset_index() df.plot(kind = "bar", stacked = True, x = 'deck') # + [markdown] id="fVySGDBVWgjV" # ### 6 Parch # + [markdown] id="WltVVd_UcMJD" # #### 6.1 Unique values # + id="gjYBAXSmIj1h" colab={"base_uri": "https://localhost:8080/"} outputId="8fa09774-97af-4537-d312-a56c61c9d571" unique_parch = [val for val in df_train['parch'].unique()] unique_parch # + id="XiwUXq3tXGQF" # + [markdown] id="DZPEONF3XF_c" # #### 6.2 Stacked survival stats # + id="ZwXi1ePtIk9q" colab={"base_uri": "https://localhost:8080/", "height": 293} outputId="d9bf7b13-7386-4b8d-ceef-0a6e218559c4" df = pd.DataFrame(df_train.groupby("parch")['survived'].value_counts()) df = df.rename(columns={'survived':'count'}).pivot_table(index = 'parch', values = 'count', columns = 'survived').reset_index() df.plot(kind = "bar", stacked = True, x = 'parch') # + [markdown] id="6-p8CknBZnwy" # ### 7 Fare # + id="7M_J-nxoZoLz" # + [markdown] id="y3XVLYFCZnfq" # #### 7.1 Stacked survival status # + id="NxrOoxxgD7Rr" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="28f6f1e2-3c01-449f-f31b-9b3932b00e7b" df = pd.DataFrame(df_train.groupby("survived")['fare'].mean()) df = df.rename(columns={'fare':'mean'}).pivot_table(values = 'mean', columns = 'survived').reset_index(drop = True) df.plot(kind = "bar", stacked = False, title = 'Fare distribution') # + [markdown] id="Z0SPjmEFaqtx" # ### 8 Age # + [markdown] id="gIURhlNr8j49" # #### 8.1 Distribution # + id="e0K_goQVaqkC" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="a7f62882-6534-4593-c23c-f59f82366c26" df_train['age'].plot(kind = 'hist') # + [markdown] id="0a9AgqR-aqaT" # #### 8.2 Stacked survival stats # + id="_NRF9a8jFEhT" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="a5f2bf0d-ccfa-4e87-e1fe-e7fae4f5f76b" df = pd.DataFrame(df_train.groupby("survived")['age'].median()) df = df.rename(columns={'age':'median'}).pivot_table(values = 'median', 
columns = 'survived').reset_index(drop = True) df.plot(kind = "bar", stacked = False) # + [markdown] id="8yzBBt0KBkys" # # Building model # + [markdown] id="1RmNT6jkrr7p" # ## Preprocessing # + id="QMYZPgT2IVaR" _cols = df_train.columns _label = y_train.name _unused = ['deck'] df_train.drop(_unused, axis = 1, inplace=True) num_cols = df_train.select_dtypes(['int64','int32']).columns ## To convert int to float cat_cols = df_train.select_dtypes(['object']).columns ## To encode categorical data to numeric # + id="QyOkFoGlxnUb" colab={"base_uri": "https://localhost:8080/"} outputId="95d13de3-e616-42b7-a104-38273f179fd7" list(cat_cols)+[_label] # + id="ewIQ3EEFtkF2" _cat_types = { 'sex': pd.api.types.CategoricalDtype(categories=unique_sex), 'class': pd.api.types.CategoricalDtype(categories=unique_class), 'embark_town': pd.api.types.CategoricalDtype(categories=unique_embark_town), 'alone': pd.api.types.CategoricalDtype(categories=unique_alone), 'survived': pd.api.types.CategoricalDtype(categories=unique_survived) } # + id="wYUMc_P6zy0N" df_train[cat_cols] = df_train[cat_cols].apply(lambda x: x.astype(_cat_types[x.name])) df_train[cat_cols] = df_train[cat_cols].apply(lambda x: x.cat.codes) # + id="GpewfqIzFm8R" colab={"base_uri": "https://localhost:8080/"} outputId="0dcd60b2-ea9c-4f30-ac2c-8028ea929323" df_train['sex'].astype(_cat_types[df_train['sex'].name]).cat.codes # + [markdown] id="FuaHOkj74zLf" # ## Standardizing data # + id="Js0j_5pe43if" colab={"base_uri": "https://localhost:8080/"} outputId="5520a56b-37db-4d06-a08c-64f368366a91" [x for x in map(str, df_train.dtypes)] # + id="gMtaHkDI43cf" colab={"base_uri": "https://localhost:8080/"} outputId="bccd98e3-d416-43fa-ea00-43c307fd4844" df_train.dtypes.index # + id="ariN4W_w4o-b" dtypes = list(zip(df_train.dtypes.index, map(str, df_train.dtypes))) # + id="b3RkDanYNDSE" def standardize(dataset): dtypes = list(zip(dataset.dtypes.index, map(str, dataset.dtypes))) for column, dtype in dtypes: if dtype == 'float32': 
dataset[column] -= dataset[column].mean() dataset[column] /= dataset[column].std() return dataset # + [markdown] id="-WXa85JeKbT2" # ## Input function # + id="8PspB6JO5ggr" def input_fn(features, labels, shuffle = True, num_epochs = 10, batch_size = 32): dataset = tf.data.Dataset.from_tensor_slices(inputs) if shuffle: dataset = dataset.shuffle(buffer_size=len(features)) dataset = dataset.repeat(num_epochs) dataset = dataset.batch(batch_size) return dataset # + [markdown] id="aaUdO1l6gadv" # # Helper class # + id="EbLKb3yGg6Ol" class PreProcessHelper(): def __init__(self): pass def create_categorical_type(self, dataframe, dtypes): unique_values = [] _CATEGORICAL_TYPES = {} for column, dtype in dtypes: if dtype == 'object': unique_values = [x for x in dataframe[column].unique()] _CATEGORICAL_TYPES[column] = pd.api.types.CategoricalDtype(categories=unique_values) return _CATEGORICAL_TYPES def removeUnused(self, dataframe, unused): if(unused): dataframe.drop(unused, axis = 1, inplace=True) return dataframe def preprocess(self, dataframe, _CATEGORICAL_TYPES, model_type): # Convert integer valued (numeric) columns to floating point num_cols = dataframe.select_dtypes(['int64']).columns dataframe[num_cols] = dataframe[num_cols].astype('float32') # Convert categorical columns to numeric cat_cols = dataframe.select_dtypes(['object']).columns dataframe[cat_cols] = dataframe[cat_cols].apply(lambda x: x.astype(_CATEGORICAL_TYPES[x.name])) if(model_type == 'neural'): dataframe[cat_cols] = dataframe[cat_cols].apply(lambda x: x.cat.codes) return dataframe def standardize_data(self, dataframe, dtypes, response, standardize): if(standardize): dataframe_x = dataframe.drop(response, axis = 1) dataframe_y = dataframe[response] for column, dtype in dtypes: if dtype == 'float32': dataframe_x[column] -= dataframe_x[column].mean() dataframe_x[column] /= dataframe_x[column].std() dataframe = pd.concat([dataframe_x, dataframe_y], axis = 1) return dataframe def split_data(self, dataframe, 
response): train, eval = dataframe.xs('train'), dataframe.xs('eval') train_x, train_y = train.drop(response, axis = 1), train[response] eval_x, eval_y = eval.drop(response, axis = 1), eval[response] return train_x, train_y, eval_x, eval_y def get_feature_columns(self, train_x, _CATEGORICAL_TYPES): feature_column = [] num_cols = list(train_x.select_dtypes(['float32']).columns) for key,value in _CATEGORICAL_TYPES.items(): unique = train_x[key].unique() if(self.__class__.__name__ == 'TitanicSurvival_DNN'): feature_column.append(tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list(key, unique))) else: feature_column.append(tf.feature_column.categorical_column_with_vocabulary_list(key, unique)) for col in num_cols: feature_column.append(tf.feature_column.numeric_column(col, dtype=tf.dtypes.float32)) return feature_column # + [markdown] id="ONw91_k1tUjZ" # # Linear Classifier model class # + id="UVlE_-gj9k78" df_train = deepcopy(df_train_bak) # + id="QpO07gAhNN4f" class TitanicSurvival_Estimator(PreProcessHelper): def __init__(self, df_train, df_eval, response, unused = [], standardize = False, model_type = 'estimator'): self.df_train = deepcopy(df_train) self.df_eval = deepcopy(df_eval) self.unused = unused self.response = response self.standardize = standardize self.model_type = model_type self.dataframe = pd.concat([self.df_train, self.df_eval], keys=['train', 'eval']) # Remove unused columns self.dataframe = self.removeUnused(self.dataframe, self.unused) self.dtypes = list(zip(self.dataframe.dtypes.index, map(str, self.dataframe.dtypes))) self._CATEGORICAL_TYPES = self.create_categorical_type(self.dataframe, self.dtypes) self.dataframe = self.preprocess(self.dataframe, self._CATEGORICAL_TYPES, self.model_type) self.dataframe = self.standardize_data(self.dataframe, self.dtypes, self.response, self.standardize) ## Train eval split on input-response self.train_x, self.train_y, self.eval_x, self.eval_y = 
self.split_data(self.dataframe, self.response) self.feature_column = self.get_feature_columns(self.train_x, self._CATEGORICAL_TYPES) def make_input_fn(self, features, labels, shuffle = True, num_epochs = 10, batch_size = 32): def input_fn(): dataset = tf.data.Dataset.from_tensor_slices((dict(features),labels)) if shuffle: dataset = dataset.shuffle(buffer_size=len(features)) dataset = dataset.repeat(num_epochs) dataset = dataset.batch(batch_size) return dataset return input_fn def train_model(self, train_x = None, train_y = None): if(train_x == None): train_x = deepcopy(self.train_x) if(train_y == None): train_y = deepcopy(self.train_y) ## Train input function train_input_fn = self.make_input_fn(train_x, train_y) self.lreg = tf.estimator.LinearClassifier(self.feature_column, n_classes=2) self.lreg.train(train_input_fn) clear_output() return(self.lreg) def evaluate_model(self, eval_x = None, eval_y = None, lreg = None): if(eval_x == None): eval_x = deepcopy(self.eval_x) if(eval_y == None): eval_y = deepcopy(self.eval_y) if(lreg == None): lreg = deepcopy(self.lreg) eval_input_fn = self.make_input_fn(eval_x, eval_y, num_epochs=1, shuffle=False) res = lreg.evaluate(eval_input_fn) clear_output() return res # + [markdown] id="CJmiCd5PDCG8" # ## Model training and evaluation # + id="Cv3T_smLc0D_" modelClass = TitanicSurvival_Estimator(df_train=df_train, df_eval=df_eval, response='survived', unused='deck') # + id="bDwOpNp6pzZm" model = modelClass.train_model() # + id="C0hvkYdNydfN" colab={"base_uri": "https://localhost:8080/"} outputId="cecf6ca1-9512-49ca-9e22-78ee0ad44cc1" modelClass.evaluate_model() # + [markdown] id="zTUNWbAw5nkj" # # DNN Classifier # + id="dMgKRv_f5mjq" class TitanicSurvival_DNN(PreProcessHelper): def __init__(self, df_train, df_eval, response, unused = [], standardize = False, model_type = 'estimator', hidden_units = [64,32,16,8]): self.df_train = deepcopy(df_train) self.df_eval = deepcopy(df_eval) self.unused = unused self.response = response 
self.standardize = standardize self.model_type = model_type self.hidden_units = hidden_units tf.random.set_seed(197) self.dataframe = pd.concat([self.df_train, self.df_eval], keys=['train', 'eval']) # Remove unused columns self.dataframe = self.removeUnused(self.dataframe, self.unused) self.dtypes = list(zip(self.dataframe.dtypes.index, map(str, self.dataframe.dtypes))) self._CATEGORICAL_TYPES = self.create_categorical_type(self.dataframe, self.dtypes) self.dataframe = self.preprocess(self.dataframe, self._CATEGORICAL_TYPES, self.model_type) self.dataframe = self.standardize_data(self.dataframe, self.dtypes, self.response, self.standardize) ## Train eval split on input-response self.train_x, self.train_y, self.eval_x, self.eval_y = self.split_data(self.dataframe, self.response) self.feature_column = self.get_feature_columns(self.train_x, self._CATEGORICAL_TYPES) self.model = self.create_model(self.feature_column, self.hidden_units) def input_fn(self, features, labels, shuffle = True, batch_size = 128): dataset = tf.data.Dataset.from_tensor_slices((dict(features),labels)) if shuffle: dataset = dataset.shuffle(buffer_size=len(features)) dataset = dataset.batch(batch_size) if shuffle: dataset = dataset.repeat(1000) return dataset def create_model(self, feature_columns, hidden_units): n_classes = len(self.eval_y.unique()) print(n_classes) model = tf.estimator.DNNClassifier( hidden_units = hidden_units, n_classes = 2, feature_columns = feature_columns ) clear_output() return model def train_model(self): for i in range(15): print(f'Model train step: {i}') self.model.train(input_fn=lambda: self.input_fn(self.train_x, self.train_y), steps=400) clear_output() return model def evaluate_model(self): res = self.model.evaluate(input_fn=lambda: self.input_fn(self.eval_x, self.eval_y, shuffle = False)) clear_output() return res # + id="MxwF6EGXFf4B" modelClass = TitanicSurvival_DNN(df_train=df_train, df_eval=df_eval, response='survived', unused='deck', hidden_units=[16,4]) # + 
id="s_Gu6plAF2Gz" model = modelClass.train_model() # + colab={"base_uri": "https://localhost:8080/"} id="OjXLPHHrGNBR" outputId="32b135d0-8798-4182-fd3f-d7c7c72fc0d1" modelClass.evaluate_model()
Basics/TensorFlow 2.0/Survival_using_Estimator_(tf_dataset_titanic).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# ## Setup
# -

# !pip install statsmodels monthdelta

# +
import numpy as np
import pandas as pd
import math
import statistics
from datetime import datetime as dt
import sklearn
from matplotlib import pyplot as plt
import seaborn as sns
import statsmodels.api as sm
# %matplotlib inline

import warnings
warnings.filterwarnings('ignore')

# + [markdown]
# ## Import raw data

data = pd.read_csv("loans_2007.csv")

# + [markdown]
# ## Data cleaning

# + [markdown]
# ### Explore raw data

# + [markdown]
# #### Data quantity

print("There are {} rows and {} columns".format(data.shape[0], data.shape[1]))

print("Number of unique values by column:")
display(data.nunique())

# + [markdown]
# #### Data quality

# +
print("Columns with nulls:")
num_nulls = data.isnull().sum()
display(num_nulls[num_nulls > 0])

# + [markdown]
# #### Summary statistics

# +
# Show floats with 2 decimals and whole numbers with thousands separators.
pd.set_option('display.float_format', lambda x: f"{x:.2f}" if x % 1 != 0 else f"{int(x):,}")
data.describe()
# -

data.head()

# + [markdown]
# #### Distribution of target variable: loan_status

# +
# Count plot of loan status
plt.figure(figsize=(20, 6))
plt.title("Count plot of loan_status categories")
ax = sns.countplot(y='loan_status', data=data, orient="v")

# + [markdown]
# ### Data cleaning

# + [markdown]
# #### Keep only rows with loan_status "Fully Paid" (0) or "Charged Off" (1)

# +
# Drop rows where loan_status is not "Fully Paid" or "Charged Off"
old_len = len(data)
data = data[data.loan_status.isin(["Fully Paid", "Charged Off"])]
print("Original: {} rows. Dropped: {} rows. Remaining: {} rows.".format(
    old_len, old_len - len(data), len(data)))

# Convert loan_status to binary variable: default = 1 if loan_status = "Charged Off", else default = 0
data["default"] = [
    0 if status == "Fully Paid" else 1 for status in data.loan_status
]
data.drop("loan_status", axis=1, inplace=True)
data.head()

# + [markdown]
# #### Drop duplicate rows

# +
# Drop duplicates, comparing on all but the last three columns
# (presumably the derived/identifier columns -- TODO confirm).
old_len = len(data)
data.drop_duplicates(data.columns[:-3], keep="last", inplace=True)
print("Original: {} rows. Dropped: {} rows. Remaining: {} rows.".format(
    old_len, old_len - len(data), len(data)))

# + [markdown]
# #### Drop rows with NA values
# -

data[data.isnull().any(axis=1)]

# +
# Drop rows containing any NA value (the original comment said "Drop
# duplicates" -- a copy/paste error; this cell drops NAs).
old_len = len(data)
data.dropna(how="any", inplace=True)
print("Original: {} rows. Dropped: {} rows. Remaining: {} rows.".format(
    old_len, old_len - len(data), len(data)))
# -

data.head()

# + [markdown]
# #### Drop columns that contain only 1 unique value
#
# These columns do not add any information to each observation

# +
# Drop columns with only 1 unique value
old_len = len(data.columns)
num_unique = data.nunique()
data.drop(num_unique[num_unique <= 1].index, axis=1, inplace=True)
print(
    "Original: {} columns. Dropped: {} columns. Remaining: {} columns.".format(
        old_len, old_len - len(data.columns), len(data.columns)))
print("Dropped columns:")
for col in num_unique[num_unique <= 1].index:
    print(col)

# + [markdown]
# #### Drop redundant or non-useful columns
#
# Certain columns contain information that are not useful for prediction, or
# redundant information that has been fully captured by another column.
#
# * `id`: arbitrary number assigned by Lending Club
# * `member_id`: same as `id`
# * `emp_title`: highly unstructured text data, not useful unless significant cleaning is performed
# * `title`: same as `emp_title`
# * `zip_code`: redundant since the `addr_state` column already captures all geographical information revealed by the first 3 digits of `zip_code`

# +
# Drop redundant or non-useful columns
drop_cols = ["id", "member_id", "emp_title", "title", "zip_code"]
old_len = len(data.columns)
data.drop(drop_cols, axis=1, inplace=True)
print(
    "Original: {} columns. Dropped: {} columns. Remaining: {} columns.".format(
        old_len, old_len - len(data.columns), len(data.columns)))

# + [markdown]
# #### Drop columns which contain information not available at application
#
# This model aims to predict, at the point of loan application, whether a
# borrower would eventually default. Certain information would not be
# available at the point of loan application and may introduce lookahead bias
# and/or cause overfitting. Columns with such information are listed below,
# and will be removed.
# # * `funded_amnt` and `funded_amnt_inv`: only known after the loan has already been funded # * `total_pymnt` and `total_pymnt_inv`: only known after the loan has started to be paid off # * `total_rec_prncp`, `total_rec_int`, and `total_rec_late_fee`: only known after the loan has started to be paid off # * `recoveries` and `collection_recovery_fee`: only known after the loan has defaulted # * `last_pymnt_d` and `last_pymnt_amnt`: only known after the loan has started to be paid off # * `last_credit_pull_d`: only known after the loan has already been funded # * `grade` and `sub_grade`: assigned by Lending Club after credit scoring, but not available at the point of application # * `int_rate`: depends on `sub_grade` # * `installment`: depends on `int_rate` # # One particular column, `issue_d`, also contains information not available at application time (issue date is only known after funding has completed). However, according to [Lending Club](https://help.lendingclub.com/hc/en-us/articles/215492738-How-long-does-it-take-to-get-approved-), an average application takes around 7 days to be approved and funded. Thus the deviation between issue date and application date is likely to be small. Instead of removing the column, we can thus use `issue_d` as an approximate for time of application, which might contain useful information. # - data["issue_d"].astype('str') # + colab={"base_uri": "https://localhost:8080/"} hidden=true id="ObM2loqBr1AV" outputId="fe70dd0d-f7ac-4666-8aa0-db5bcb692335" # Drop columns with information not available at origination drop_cols = [ "funded_amnt", "funded_amnt_inv", "total_pymnt", "total_pymnt_inv", "total_rec_prncp", "total_rec_int", "total_rec_late_fee", "recoveries", "collection_recovery_fee", "last_pymnt_d", "last_pymnt_amnt", "last_credit_pull_d", "grade", "sub_grade", "int_rate", "installment" ] old_len = len(data.columns) data.drop(drop_cols, axis=1, inplace=True) print( "Original: {} columns. Dropped: {} columns. 
Remaining: {} columns.".format( old_len, old_len - len(data.columns), len(data.columns))) # Use issue date as proxy for application time data.rename({"issue_d": "app_time"}, axis = 1, inplace = True) data["app_time"] = pd.to_datetime(data.app_time.astype(str), format = "%b-%Y") # + [markdown] hidden=true id="EwASK1DWr1AV" # #### Re-format numeric columns # # Some numeric columns, e.g `term`, `revol_util` are formatted as text, and need to be re-formatted to float or integer type. Column `empl_length` contains inherently numeric data but is treated as categorical, thus we re-convert it to numeric type. # + hidden=true id="lxAdCMnQr1AW" # Re-format numeric columns data.term = [int(str(term).strip().split(" ")[0]) for term in data.term] data.revol_util = [float(str(util[:-1])) for util in data.revol_util] # Map employment length to integers: < 1 year is mapped to 0, >= 10 is mapped to 10 data["emp_length"] = ["0 year" if length == "< 1 year" else "10 years" if length == "10+ years" else length for length in data.emp_length] data["emp_length"] = [int(str(length).split(" ")[0]) for length in data.emp_length] # + [markdown] heading_collapsed=true id="76f_qMz8r1AW" # ## Data visualization # + [markdown] hidden=true id="ebqcIXbYr1AW" # #### Univariate distribution of numeric columns # # Observations: # * Many numeric columns appear to be right-skewed or resemble a lognormal distribution, e.g `loan_amnt`, `emp_length`, `open_acc`, `total_acc`, `revol_bal`, etc. # * Some columns may potentially be highly correlated, e.g: `open_acc` and `total_acc`. # * `Annual_income` appears to contain some extreme right ouliers. 
# + hidden=true id="0IcSEBIlr1AX" outputId="c1347800-3d28-49b4-a2b0-047405c44bd4" features = data._get_numeric_data().columns for i in range(len(features)): if i % 4 == 0: fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(15, 5)) ax1.hist(data[features[i]], bins = 50, rwidth = 0.5, label=features[i]) ax1.legend(loc = "best") elif i % 4 == 1: ax2.hist(data[features[i]], bins = 50, rwidth = 0.5, label=features[i]) ax2.legend(loc = "best") elif i % 4 == 2: ax3.hist(data[features[i]], bins = 50, rwidth = 0.5, label=features[i]) ax3.legend(loc = "best") elif i % 4 == 3: ax4.hist(data[features[i]], bins = 50, rwidth = 0.5, label=features[i]) ax4.legend(loc = "best") plt.show() # + [markdown] hidden=true id="m9RsyVuXr1AX" # Looking at annual income without the outliers (the top 1% and bottom 1% is winsorized), the data appears to resemble a lognormal distribution as well. # + hidden=true id="iYroaUATr1AX" outputId="c6091729-f523-4789-8d75-cc56be025b3f" from scipy.stats.mstats import winsorize income_winsorized = winsorize(data.annual_inc, limits = (0.01, 0.01), inplace = False) plt.figure(figsize = (10, 6)) plt.title("Histogram of annual income (winsorized top/bottom 1%)") plt.hist(income_winsorized, bins = 50, rwidth = 0.5, label = "Annual Income") # + [markdown] heading_collapsed=true hidden=true id="chCQKpL2r1AX" # #### Target variable distribution # # The target variable is heavily imbalanced towards the non-defaults. Model training needs to account for this distribution, otherwise it is likely to result in models with high accuracy but low recall (high accuracy can be achieved trivially by predicting all as non-defaults). # + hidden=true id="cUZCsYvvr1AY" outputId="3ecdc445-517a-4596-adbe-90c11677d947" # Count plot of default status plt.figure(figsize = (10, 6)) plt.title("Count of defaulted vs. 
non-default observations") ax = sns.countplot(x = 'default', data = data) # Display the absolute count and percentage of total loans in each category for p in ax.patches: height = p.get_height() ax.text(p.get_x() + p.get_width() / 2., height / 2, '{} ({}%)'.format(height, round(100 * height / len(data), 2)), ha = "center") # + [markdown] heading_collapsed=true hidden=true id="XUvXYPwor1AY" # #### Monthly loan count over time # # Increasing trend in number of loans applied for each month, showing the increasing popularity of Lending Club as an avenue to access personal credit. # + hidden=true id="lhAwd-fSr1AZ" outputId="b6d44700-a594-40d6-8c7b-92c0c079981c" # Plot monthly loan count over time num_loans_by_time = data.groupby("app_time")["default"].count() plt.figure(figsize = (10, 6)) plt.title("Number of loans over time") plt.plot(num_loans_by_time) # + [markdown] heading_collapsed=true hidden=true id="rg351S-vr1AZ" # #### Monthly average default rate over time # # Excluding an abnormally high period at the start, average default rate seems to remain stable at around 20% between 2007 and 2011. Unsurprisingly, given the steady increase in monthly loan count and stable default rate, total defaults increase steadily over time as well. 
# + hidden=true id="iT0IkLyLr1AZ" outputId="0b085741-46fe-486f-b396-765d37c82c3b" # Default rate over time total_default = data.groupby("app_time")["default"].sum() mean_default_rate = data.groupby("app_time")["default"].mean() fig, ax1 = plt.subplots(figsize = (10, 6)) plt.title("Total defaults and mean default rate over time") ax1.set_xlabel('time') ax1.set_ylabel('Total defaults', color = 'tab:red') ax1.plot(total_default, color = 'tab:red') ax1.tick_params(axis = 'y', labelcolor = 'tab:red') ax2 = ax1.twinx() ax2.set_ylabel('Mean default rate', color = 'tab:blue') ax2.plot(mean_default_rate, color = 'tab:blue') ax2.tick_params(axis = 'y', labelcolor = 'tab:blue') fig.tight_layout() plt.show() # + [markdown] heading_collapsed=true hidden=true id="jJ8cXYobr1AZ" # #### Average loan amount by purpose # # Loans for business were on average the highest among all documented purposes, followed by loans for housing needs (house purchase or home improvement) and financial needs (debt consolidation and credit cards). On the other end of the spectrum, discretionary expenses like vacation tend to have the lowest loan amounts. # + hidden=true id="9wkQnXOMr1AZ" outputId="6e2a86c8-a1d2-475d-99c0-d48e90ce036a" loan_by_purpose = data.groupby("purpose")["loan_amnt"].mean().sort_values(ascending=False) plt.figure(figsize = (15, 6)) plt.title("Average loan amount by purpose") plt.barh(width = loan_by_purpose, y = loan_by_purpose.index) # + [markdown] heading_collapsed=true hidden=true id="w-JVD0Uir1Aa" # #### Visualising default rate by state # # States with highest default rates appear to be concentrated in the West Coast (e.g California), as well as South East region. Central states appear less likely to default. 
# + hidden=true id="tm_tfJbWr1Aa" outputId="a7ebab4f-5176-4937-ab7f-939c95c6f78d" import folium from IPython.display import HTML default_by_state = pd.DataFrame(data.groupby("addr_state")["default"].mean()).reset_index() state_geo = r'https://gist.githubusercontent.com/datadave/108b5f382c838c3963d7/raw/3036216d894d49205948dbbfd562754ef3814785/us-states.json' map = folium.Map(location=[40, -100], zoom_start=4) map.choropleth(geo_data=state_geo, data=default_by_state, columns=['addr_state', 'default'], key_on='feature.id', threshold_scale = [0, 0.03, 0.06, 0.09, 0.12, 0.15, 0.18, 0.21, 0.24], fill_color="YlOrRd", fill_opacity=0.75, line_opacity=0.5, legend_name='default rate') map # + [markdown] heading_collapsed=true id="yh2z7hNyr1Aa" # ## Feature engineering # + [markdown] hidden=true id="sobRWYQZr1Aa" # #### Re-format datetime columns as time distance relative to a reference time point # # Datetime columns cannot be passed directly as features into a machine learning model. We thus re-format each datetime column as the time distance to a reference time point, i.e number of days / months / years passed since the reference point. # # Application time, `app_time`, is re-formatted as the number of months passed since January 2007, which is the start of this dataset. # # Earliest credit line, `earliest_cr_line`, is re-formatted as the time distance (in months) to the application time. This potentially extracts more useful information such as: "How long has the earliest credit line been in place, at the point of application?". 
# + hidden=true id="oi1PmtmRr1Aa" import monthdelta as md # Re-format earliest_cr_line as time distance relative to application time data["earliest_cr_line"] = pd.to_datetime(data["earliest_cr_line"], format = "%b-%Y") data["earliest_cr_line"] = [ md.monthmod(dt.date(data.iloc[i]["earliest_cr_line"]), dt.date(data.iloc[i]["app_time"]))[0].months for i in range(len(data)) ] # Re-format app_time as time distance relative to January 2007 (start of dataset) ref_date = dt.date(dt(2007, 1, 1)) data["app_time"] = [ md.monthmod(ref_date, dt.date(data.iloc[i]["app_time"]))[0].months for i in range(len(data)) ] # + [markdown] hidden=true id="rfTWUuEer1Aa" # #### Convert categorical columns to dummy variables # # Column `addr_state` may contain useful information, but there are too many discrete values and we'd need to add too many dummy variables columns to use it for classification. The column is thus dropped instead. # + hidden=true id="eQRbZhiVr1Aa" # Drop add_state column: data.drop("addr_state", axis = 1, inplace = True) # Map verification status to 0 or 1 data["verification_status"] = [0 if status == "Not Verified" else 1 for status in data.verification_status] # Convert "home_ownership" and "purpose" to dummy variables dummy_cols = ["home_ownership", "purpose"] data = pd.concat([data, pd.get_dummies(data[dummy_cols])], axis = 1) data.drop(dummy_cols, axis = 1, inplace = True) # + [markdown] hidden=true id="FRnAsIzBr1Ab" # For each categorical variable converted to dummies, i.e `home_ownership` and `purpose`, one of the original categories must be removed to avoid multicollinearity issues, which would distort coefficients of linear models. 
# + hidden=true id="vRWeuIwSr1Ab" # Remove one category of dummy variables data.drop(["home_ownership_OTHER", "purpose_other"], axis =1, inplace = True) # + [markdown] id="veR3qaqNr1Ab" # ## Model training # + id="DSh9yH-xr1Ab" # Reorganize target variable to the end of dataframe data["default2"] = data["default"] data.drop("default", axis = 1, inplace = True) data.rename({"default2": "default"}, axis = 1, inplace = True) # + [markdown] id="aAzPVNGFr1Ab" # ### Train/test split # # Train-test split must be done before feature selection to avoid using information from the eventual test set during the feature selection process, which may introduce unfair bias. We will use a stratified train-test split with 80% of the data in the training set, and 20% in the test set. As the dataset is highly imbalanced, a stratified split ensures that the proportion of defaults in the train and test set are similar. # + [markdown] id="NTgRWVp6vDEU" # # New section # + id="ZSNngEPMr1Ab" # Extract X and y columns: X = data.iloc[:, :-1] y = data.iloc[:, -1:] # Train/test split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, test_size = 0.2, random_state = 0) # + [markdown] id="BSWyOg40r1Ab" # ### K-fold cross-validation # # Instead of a single train-test split, model performance can be better estimated using a technique called cross validation. In one popular approach, K-fold cross validation, the training set is further split into K equal non-intersecting, complementary subsets. In each iteration of training, the model can be trained on K-1 of the subsets, and validated on the remaining one (not used in training). The validation results from these K iterations can be averaged out to give a more robust, less biased estimate of model performance than in a single train-test split. 
# # ![image.png](attachment:image.png) # + [markdown] id="i5DzbkqBr1Ab" # #### Generate K folds from training set # # We select K = 5 for cross-validation, and each CV fold is generated using a stratified split similar to the train-test split earlier. This is to ensure that the ratio of defaults to non-defaults remain the same in our train and validation sets. # + id="YQV5fyCvr1Ac" from sklearn.model_selection import StratifiedKFold # set random state for reproducibility skf = StratifiedKFold(n_splits = 5, shuffle=True, random_state=100) skf.split(X_train, y_train) X_train_train, X_validate = [], [] y_train_train, y_validate = [], [] for train_index, validate_index in skf.split(X_train, y_train): X_train_train.append(X_train.iloc[list(train_index)]) X_validate.append(X_train.iloc[list(validate_index)]) y_train_train.append(y_train.iloc[list(train_index)]) y_validate.append(y_train.iloc[list(validate_index)]) # + # save train, validate and test sets import pickle for file_name, data in zip(['x_train', 'x_test', 'y_train', 'y_test'], [X_train, X_test, y_train, y_test]): with open(f"{file_name}.pickle", "wb") as f: pickle.dump(data, f)
CS421_LendingClub.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Jupyter Notebook problems in the Essentials of Paleomagnetism Textbook by <NAME>

# ## Problems in Chapter 14

# ## Problem 1a

# First let's set things up for business.

import pmagpy.ipmag as ipmag
import pmagpy.pmag as pmag
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from IPython.display import Image  # added: Image() is used in Problem 1c below
# %matplotlib inline

# We are supposed to use **ipmag.igrf()** to calculate the inclination values in Sicily at a latitude of 38$^{\circ}$ and longitude of 14$^{\circ}$E from 1600 to 1945.

help(ipmag.igrf)

# make a list of desired dates
dates = range(1600, 1910, 10)  # list of dates 10 year increments
mod = 'cals10k_2'  # choose the most recent model
lat, lon, alt = 38, 14, 0  # latitude, longitude and altitude for sicily
Vecs = []  # list for Dec,Inc,Int outputs
for date in dates:  # step through the dates
    Vecs.append(ipmag.igrf([date, alt, lat, lon], mod=mod))  # append to list
vector_df = pd.DataFrame(Vecs)  # make it into a Pandas dataframe
vector_df.columns = ['dec', 'inc', 'int']
vector_df['vadms'] = pmag.b_vdm(vector_df.int.values * 1e-9, lat)  # calculate the VADMs
vector_df['dec_adj'] = vector_df['dec']
vector_df.loc[vector_df.dec > 180, ['dec_adj']] = vector_df.dec - 360  # adjust declinations to be -180 => 180

fig = plt.figure(1, figsize=(7, 9))  # set up the figure
fig.add_subplot(411)  # make 4 rows of plots, this is the first
plt.plot(dates, vector_df.dec_adj)  # plot the adjusted declinations
plt.ylabel('Declination ($^{\circ}$)')
plt.title('Geomagnetic field evaluated at Lat: ' + str(lat) + ' / Lon: ' + str(lon))
fig.add_subplot(412)  # this is the second
plt.plot(dates, vector_df.inc)  # plot the inclinations
plt.ylabel('Inclination ($^{\circ}$)')
fig.add_subplot(413)
plt.plot(dates, vector_df.int * 1e-3)  # plot the intensites (in uT instead of nT)
plt.ylabel('Intensity ($\mu$T)')
fig.add_subplot(414)
# plot the VADMs
plt.plot(dates, vector_df.vadms * 1e-21)  # plot as ZAm^2
plt.ylabel('VADM (ZAm$^2$)')
plt.xlabel('Dates (CE)');

# ## Problem 1b

# We need to read in the data from geomagia_sel.txt. Take a peek and see that it is a comma delimited file with a header in the second line. We read the data in with pandas.

geomagia = pd.read_csv('Chapter_14/geomagia_sel.txt', header=1)
geomagia.head()

# + active=""
# We have to 'clean' the dataset by getting rid of the records with no inclinations (-999) We can use Panda's filtering power for that:
# -

geomagia_incs = geomagia[geomagia['Inc[deg.]'] > -90]
geomagia_incs['Inc[deg.]']

# Let's replot the GUFM data and decorate the plot with the geomagia data.

plt.plot(dates, vector_df.inc)
plt.plot(geomagia_incs['Age[yr.AD]'], geomagia_incs['Inc[deg.]'], 'ro');

# ## Problem 1c

# + active=""
# To hunt around in the MagIC data base, I need to know references that these data came from. So I went to geomagia website (http://geomagia.gfz-potsdam.de), clicked on the Archeomagnetic and volcanic query form tab and filled in the form thusly:
# -

Image(filename="geomagia_screenshot.png")

# I clicked on the 'Perform Query' button and scrolled down to the References part:

Image(filename="geomagia_refs.png")

# Here is the original paper to see whether the samples were demagnetized properly, etc. So I did that. here is the pdf:
# http://ac.els-cdn.com/0305440387900082/1-s2.0-0305440387900082-main.pdf?_tid=6f840d8c-c90e-11e4-a1c8-00000aacb360&acdnat=1426202636_67453570f83c4126d6c152fbb69fd35d
#
# The method used to determine the direction was AF demagnetization in 5mT steps to 50 mT (not very high) and averaged the data from each specimen with Fisher statistics, apparently after looking at orthogonal plots to determine the stable portion of each demag experiment. (This is of course not the way we do things nowadays. One should use the principal component technique by Kirschvink (1980).) After getting a direction from all the samples, they averaged data by flow, also using Fisher statistics.
#
# Any way, you get the idea. This procedure would be done for each reference to really assess the data quality.
#
# Now let's do the equal area plot using the well worn **ipmag** functions (from Chapter 2).

ipmag.plot_net(1)  # make an equal angle net
ipmag.plot_di(geomagia_incs['Dec[deg.]'].values,
              geomagia_incs['Inc[deg.]'].values, color='blue')  # put on the dots

# So.. The data fall into two clumps. Is this because of secular variation? or is there tilting? Or temporal aliasing? Hmmm.

# ## Problem 2a

# To make this problem easier, I saved the PINT.xls as a text file. We can read it in with read_csv and find out what the columns are:

pint = pd.read_excel('Chapter_14/PINT.xls')
pint.columns

# Before we do anything else, let's clean out the records with no VDM/VADM data:

pint = pint.dropna(subset=['VDM/VADM'])
pint['VDM/VADM'].head()

# To filter by age, we use the 'AGE' column, and by method, the 'IntM' column. First let's do age:

pint_last_10 = pint[pint.AGE < 10]
pint_last_10.AGE.head()

# Now let's fish out the 'T+' and 'S' methods separately, starting with T+ (which means Thellier-Thellier plus pTRM checks).

T_plus = pint_last_10[pint_last_10.IntM == 'T+']
T_plus.IntM.unique()

# OK that did it. And now for the Shaw method data:

# +
Shaw = pint_last_10[pint_last_10.IntM == 'S']
Shaw.IntM.unique()
# -

# Now we can plot the two data sets versus age:

# +
plt.plot(T_plus['AGE'], T_plus['VDM/VADM'], 'bs', label='T+')
plt.plot(Shaw['AGE'], Shaw['VDM/VADM'], 'ro', label='Shaw')
plt.legend();
# -

# The Shaw data seem to be more scattered with higher values for the same ages....

# ## Problem 2b
#
# Now we look in the T+ data and separate them into three groups based on polarity.

T_plus_trans = T_plus[T_plus.P == 'T']  # pull out the transitional data
T_plus_norm = T_plus[T_plus.P == 'N']  # the normal data
T_plus_rev = T_plus[T_plus.P == 'R']  # the reverse data
plt.plot(T_plus_trans['AGE'], T_plus_trans['VDM/VADM'], 'g^', label='Transitional')
plt.plot(T_plus_norm['AGE'], T_plus_norm['VDM/VADM'], 'ro', label='Normal')
plt.plot(T_plus_rev['AGE'], T_plus_rev['VDM/VADM'], 'bs', label='Reverse')
plt.xlabel('Age (Ma)')
plt.ylabel('V[A]DM (10$^{22}$ Am$^2$)')
plt.legend();

# Very interesting. The 'transitional' data are maybe lower in general, but there are a lot of high values. The highest values are all normal, but I'm not sure if there is a big difference.

# ## Problem 3a

# I went to the website for the paper Lawrence et al. (2009) here: http://earthref.org/MagIC/doi/10.1029/2008GC002072
# and clicked on the icon under the heading 'Download'. This downloaded a text file which I can unpack with **ipmag.download_magic**.

help(ipmag.download_magic)

ipmag.download_magic('magic_contribution_13436.txt',
                     input_dir_path='Chapter_14/Problem_3',
                     dir_path='Chapter_14/Problem_3')

# ## Problem 3b
#
# Now I want to read in the data from the **sites.txt** file. From the documentation in **_PmagPy_cb.ipynb**, I know that MagIC files are tab delimited and the column names are in the second line, so this should do the trick:

# (sep passed by keyword: positional `sep` is deprecated in recent pandas)
site_means = pd.read_csv('Chapter_14/Problem_3/sites.txt', sep='\t', header=1)
site_means.head()

# To plot them, I first fish out the directions that are not blank (some of the records are just intensity values).

site_means = site_means.dropna(subset=['dir_dec', 'dir_inc'])

# Now plot the net with ipmag.plot_net and the directions with ipmag.plot_di. But first I set up a figure object.

plt.figure(num=1)
ipmag.plot_net(1)
ipmag.plot_di(site_means['dir_dec'].values, site_means['dir_inc'].values,
              color='blue')

# Now for the VGPs from the same records (vgp_lat and vgp_lon are the keys). First we have to drop the records without VGPs.

site_means = site_means.dropna(subset=['vgp_lat', 'vgp_lon'])

# And then take the antipodes of the reverse VGPs using **pmag.flip()**.

help(pmag.flip)

# need to make a nested array of values
vgp_block = site_means[['vgp_lon', 'vgp_lat']].values
norm_vgps, rev_vgps = pmag.flip(vgp_block)

help(ipmag.plot_vgp)

# Let's have fun with **ipmag.plot_vgp( )**. There are two ways to do maps in Python, the old way, **matplotlib**'s **Basemap** module, and the new **cartopy** way. We are shifting to **cartopy**, but there is also the **basemap** way if you have it installed and prefer it.
#
# **ipmag.plot_vgp( )** help page will walk us through this.

help(ipmag.plot_vgp)

has_basemap, Basemap = pmag.import_basemap()
has_cartopy, Cartopy = pmag.import_cartopy()

# +
if not has_cartopy:
    print('You will need to correctly install cartopy in order to continue this cell')
else:
    map_axis = ipmag.make_orthographic_map(central_latitude=60,
                                           figsize=(6, 6),
                                           land_color='bisque')
    ipmag.plot_vgp(map_axis, di_block=norm_vgps, color='red',
                   markersize=30, legend='no')
    ipmag.plot_vgp(map_axis, di_block=rev_vgps, color='blue',
                   markersize=30, legend='no')
# -

if not has_basemap:
    print('You will need to correctly install basemap in order to continue this cell')
else:
    m = Basemap(projection='ortho', lat_0=70, lon_0=230, resolution='c')
    plt.figure(num=1, figsize=(6, 6))
    m.drawcoastlines()
    m.fillcontinents(color='bisque')
    m.drawmeridians(np.arange(0, 360, 30))
    m.drawparallels(np.arange(-60, 90, 30))
    ipmag.plot_vgp_basemap(m, di_block=norm_vgps, color='r', label='normal VGPs')
    # Bug fix: original referenced undefined `rev_flipped`; the reverse
    # antipodes computed by pmag.flip() above are named `rev_vgps`.
    ipmag.plot_vgp_basemap(m, di_block=rev_vgps, color='b', label='reverse VGPs')

# ## Problem 3c
#
# Here we calculate the VGP scatter. We need now to have a single nested array for the normal and the reverse antipodes and then a function to calculate S.

Antarctic_combo = np.array(pmag.flip(vgp_block, combine=True)).transpose()


# +
def Scalc(vgps):
    """Return the VGP scatter S (formatted string, degrees).

    vgps is a 2 x N nested array: row 0 = longitudes, row 1 = latitudes.
    S is the root-mean-square angular distance of the VGPs from the pole,
    computed from the colatitudes with an N-1 denominator.
    """
    N = len(vgps[1])
    colats = 90. - vgps[1]
    Sp2 = np.sum(colats**2) / (N - 1.)
    return '%7.1f' % (np.sqrt(Sp2))


# -

print(Scalc(Antarctic_combo))


# ## Problem 3d

def Scalc_w_cutoff(vgps, c):
    """Return the VGP scatter S using only VGPs with colatitude < c degrees.

    NOTE(review): the denominator uses the total N-1, not
    len(sel_colats)-1 — confirm this is the intended estimator.
    """
    N = len(vgps[1])
    colats = 90. - vgps[1]
    Good = []
    for i in range(N):
        if colats[i] < c:
            Good.append(colats[i])
    sel_colats = np.array(Good)
    Sp2 = np.sum(sel_colats**2) / (N - 1.)
    return '%7.1f' % (np.sqrt(Sp2))


for c in range(90, 20, -5):
    print(c, Scalc_w_cutoff(Antarctic_combo, c))

# Well the values of Sp with different cutoff values just look arbitrary to me. How am I supposed to know what the cutoff should be? Seems like that question puts the cart before the horse.

# ## Problem 3e

# Ok. now we repeat this with the data in Chapter_14/hawaii.txt

hw_site_means = pd.read_csv('Chapter_14/Problem_3/hawaii.txt', sep='\t', header=0)
hw_site_means.head()

# It looks like the columns we want are model_vgp_lon and model_vgp_lat. Here's another way to flip the reverse VGPs

vgp_block = hw_site_means[['model_vgp_lon', 'model_vgp_lat']].values
hawaii_combo = np.array(pmag.flip(vgp_block, combine=True)).transpose()
print(Scalc(hawaii_combo))


# Wow. so there is a BIG latitudinal dependence. Now let's calculate Model G for the latitudes at Hawaii and Antarctica.

def ModelG(lat):
    """Model G of paleosecular variation: S^2 = (a*lat)^2 + b^2."""
    a, b = 0.26, 11.9
    S2 = (a * lat)**2 + b**2
    return np.sqrt(S2)


antarctica_lat = np.average(site_means['lat'])
hawaii_lat = np.average(hw_site_means['model_lat'])
print(antarctica_lat, ModelG(antarctica_lat))
print(hawaii_lat, ModelG(hawaii_lat))

# So the values we get are much larger than Model G. But what about using a cutoff of, say 45$^{\circ}$?

print(Scalc_w_cutoff(Antarctic_combo, 45))
print(Scalc_w_cutoff(hawaii_combo, 45))

# So, Model G works well for data with a 45$^{\circ}$ cutoff. And now just to show off:

lats = range(100)
Ss = []
for lat in lats:
    Ss.append(ModelG(lat))
plt.plot(lats, Ss);

# ## Problem 4a:

# First we will generate 100 directions from the function **ipmag.tk03()** at a latitude of 20$^{\circ}$.

help(ipmag.tk03)

hawaii_tk03 = np.array(ipmag.tk03(lat=20))  # get a nested array of vectors
decs = hawaii_tk03.transpose()[0]
incs = hawaii_tk03.transpose()[1]
ipmag.plot_net(1)
ipmag.plot_di(dec=decs, inc=incs, color='red', edge='black')

# ## Problem 4b:

# Let's find the IGRF value at lat=20, lon=-156.

d, i, f = ipmag.igrf([2010, 0, 20, -156])
print(d, i)

# Now let's find the GAD inclination at this latitude with pmag.pinc

GAD_inc = pmag.pinc(20)
print(GAD_inc)

# and rotate the hawaii.tk03 data to this expected GAD direction using **pmag.dodirot_V**

help(pmag.dodirot_V)

tk03_DIs = np.column_stack((decs, incs))
tk03_rot = pmag.dodirot_V(tk03_DIs, 0, GAD_inc)

# And plot them.

ipmag.plot_net(1)
ipmag.plot_di(di_block=tk03_rot, color='blue', edge='black')

# Now we do the same for the Hawaiian data we extracted from hawaii.txt. But first we have to save them to a file.

hw_DIs = hw_site_means[['dec', 'inc']].values
hw_rot = pmag.dodirot_V(hw_DIs, 0, GAD_inc)
ipmag.plot_net(1)
ipmag.plot_di(di_block=hw_rot, color='red', edge='black')

# The data from Hawaii are dual polarity and apparently have a different mean direction from the GAD field. They also look stretched in a weird way and I'm beginning to think that some part of Hawaii is tilted.... Maybe a project for someone in here.

# ## Problem 4c

# And now let's calculate the eigenparameters for the Hawaii data set.

help(pmag.doprinc)

pmag.doprinc(hw_DIs)

# And bingo! The average inclination is about 28$^{\circ}$, not 36! This is what you would get if there is a non-zero g$^0_2$ term.
data_files/notebooks/Essentials/essentials_ch_14.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/strangelycutlemon/DS-Unit-2-Regression-Classification/blob/master/module3/assignment_regression_classification_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="o9eSnDYhUGD7" colab_type="code" colab={}
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
    # Install required python packages:
    # category_encoders, version >= 2.0
    # pandas-profiling, version >= 2.0
    # plotly, version >= 4.0
    # !pip install --upgrade category_encoders pandas-profiling plotly

    # Pull files from Github repo
    os.chdir('/content')
    # !git init .
    # !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
    # !git pull origin master

    # Change into directory for module
    os.chdir('module3')

# + id="ipBYS77PUwNR" colab_type="code" colab={}
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')

# + id="QJBD4ruICm1m" colab_type="code" colab={}
import pandas as pd
import pandas_profiling

# Read New York City property sales data
df = pd.read_csv('../data/NYC_Citywide_Rolling_Calendar_Sales.csv')

# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# SALE_PRICE was read as strings.
# Remove symbols, convert to integer
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$', '')
    .str.replace('-', '')
    .str.replace(',', '')
    .astype(int)
)

# + [markdown] id="7IXUfiQ2UKj6" colab_type="text"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Regression & Classification, Module 3
#
# ## Assignment
#
# We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
#
# But not just for condos in Tribeca...
#
# Instead, predict property sales prices for **One Family Dwellings** (`BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'`) using a subset of the data where the **sale price was more than \$100 thousand and less than $2 million.**
#
# The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
#
#
# - [X] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
# - [X] Do exploratory visualizations with Seaborn.
# - [ ] Do one-hot encoding of categorical features.
# - [ ] Do feature selection with `SelectKBest`.
# - [ ] Fit a linear regression model with multiple features.
# - [ ] Get mean absolute error for the test set.
# - [ ] As always, commit your notebook to your fork of the GitHub repo.
#
#
# ## Stretch Goals
# - [ ] Add your own stretch goal(s) !
# - [ ] Try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html) instead of Linear Regression, especially if your errors blow up! Watch [<NAME>'s 9 minute video on Ridge Regression](https://www.youtube.com/watch?v=XK5jkedy17w) to learn more.
# - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).
# - [ ] Learn more about feature selection:
#     - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
#     - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
#     - [mlxtend](http://rasbt.github.io/mlxtend/) library
#     - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
#     - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
# - [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
# - [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way (without an excessive amount of formulas or academic pre-requisites).
# (That book is good regardless of whether your cultural worldview is inferential statistics or predictive machine learning)
# - [ ] Read <NAME>'s paper, ["Statistical Modeling: The Two Cultures"](https://projecteuclid.org/download/pdf_1/euclid.ss/1009213726)
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html):
#
# > Pipeline can be used to chain multiple estimators into one. This is useful as there is often a fixed sequence of steps in processing the data, for example feature selection, normalization and classification. Pipeline serves multiple purposes here:
#
# > - **Convenience and encapsulation.** You only have to call fit and predict once on your data to fit a whole sequence of estimators.
# > - **Joint parameter selection.** You can grid search over parameters of all estimators in the pipeline at once.
# > - **Safety.** Pipelines help avoid leaking statistics from your test data into the trained model in cross-validation, by ensuring that the same samples are used to train the transformers and predictors.

# + id="XHklVaVc2E8A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="bd185c29-b173-4b38-c687-44dca5ce47c5"
df.SALE_PRICE.tail()

# + id="tpb2-lbQ1HIJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="912134bb-f885-49ec-bb3e-1d0b30b844cb"
# train test split
df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'], infer_datetime_format=True)
df['SALE_DATE'].describe()

# + id="BA5x-FEu3nHU" colab_type="code" colab={}
# Keep one-family dwellings sold for $100k–$2M
df = df[(df['SALE_PRICE'] >= 100000) &
        (df['SALE_PRICE'] < 2000000) &
        (df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS')]

# + id="DCvbD63h19xr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="7e687629-6370-4d42-90c4-230561e42d86"
# Jan–Mar to train, Apr onward to test
train = df[df['SALE_DATE'].dt.month < 4]
test = df[df['SALE_DATE'].dt.month >= 4]
train.SALE_DATE.describe()

# + id="kjRdz1g397n9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="f1602092-77bb-4c5c-c39e-6ffa06176996"
train.dtypes

# + id="kbBoowRy4P9_" colab_type="code" colab={}
import seaborn as sns

# + id="AiHaNFhI9eRN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e5f01a76-1f96-4176-8719-9ffe0dfb847e"
# Bar plot of SALE_PRICE against each low-cardinality column
for i in train.columns:
    if train[i].nunique() < 20:
        sns.catplot(x=i, y='SALE_PRICE', data=train, kind='bar', color='grey')
    else:
        pass

# + id="ciUtGqzgOpJE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9cdcec70-1c57-4880-dc93-402e409d867d"
train['TAX_CLASS_AT_TIME_OF_SALE'].value_counts().to_list()

# + id="r-pEyeuZ9i9g" colab_type="code" colab={}
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
le = LabelEncoder()
ohe = OneHotEncoder()
ohe.fit(train[['TAX_CLASS_AT_PRESENT']]) list(ohe.categories_) TAX_CLASS_AT_PRESENT_ENCODED = pd.DataFrame(ohe.transform(df[['TAX_CLASS_AT_PRESENT']]).toarray(), columns=['1', 'D1']) # + id="YjR250rRBH2r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="dbd68a3d-9a3c-414b-a4a1-c03fdfc19b0b" TAX_CLASS_AT_PRESENT_ENCODED.head() # + id="l3YvW-I4VT70" colab_type="code" colab={} # for col in objects.columns: # ohe.fit(train[[col]])) # list(ohe.categories_)[:5] # + id="crGWp_gKSjNb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="024cbf67-1ad3-4f0f-c8ef-69b480f69841" objects = df.select_dtypes(include=[object]) objects.columns # + id="Yuh873-VWROW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="27110e05-d9fd-4c8c-b062-ccae253f80f5" import category_encoders as ce encoder = ce.OneHotEncoder(use_cat_names=True) encoder.fit(df['TAX_CLASS_AT_TIME_OF_SALE'].astype(str)) # + id="tmJjzzN2ZHDC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="211e3a9d-4e02-4cb5-8855-93664e43afcd" df2 = df df2['TAX_CLASS_AT_PRESENT'] = encoder.fit_transform(df2['TAX_CLASS_AT_PRESENT'].astype(str)) df2['TAX_CLASS_AT_PRESENT'].value_counts() # Why does this do nothing??? # + id="Wfb7XapvaQDR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="18d97676-4c82-4697-de0f-3cf64290f9c6" df.head() # + id="hcBTpWVhafvh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="d0beceaa-38df-41ef-eb36-2a193ea5dcd8" df2.head() # + id="HjgjXJ6OZWs_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="353448dd-fb54-4243-ccab-7322aeb8b603" for col in objects.columns: print(col) df2[col] = encoder.fit_transform(df2[col].astype(str)) df2.head() # + id="-g-d5LraZ7f9" colab_type="code" colab={}
module3/assignment_regression_classification_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Making big cat states using even-parity projectors # Author: <NAME> # In this tutorial, we numerically simulate the protocol proposed in [arXiv:1908.10314](https://arxiv.org/abs/1908.10314) for engineering superpositions of coherent states. We study how to make big cat states $|\text{cat}\rangle \sim |\alpha \rangle + |-\alpha \rangle$ with $|\alpha|^2 = 10$. # + import numpy as np from qutip import wigner, Qobj, wigner_cmap import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib import cm # %matplotlib inline # %config InlineBackend.figure_formats=['svg'] import strawberryfields as sf from strawberryfields.ops import * from thewalrus.quantum import state_vector, density_matrix # - # ## Ideal preparation # Here we setup some basic parameters, like the value of the photon-number-resolving detectors we will use to herald and the parameters of the different Gaussian unitaries. Lambda = 0.9 # Lambda is a squeezing parameter in [0,1) r = np.arctanh(Lambda) catSize = 10 # One obtains a better fidelity when this number is an integer (see paper for explanation) alpha = np.sqrt(catSize)*Lambda # Coherent state amplitude is chosen to compensate finite squeezing, Lambda < 1 detOutcome = int(round(catSize)) m0 = detOutcome m1 = detOutcome print(m0,m1) # Now we setup a 3-mode quantum circuit in Strawberry Fields and obtain the covariance matrix and vector of means of the Gaussian state. 
nmodes = 3 prog = sf.Program(nmodes) eng = sf.Engine("gaussian") with prog.context as q: S2gate(r) | (q[1],q[2]) Dgate(1j*alpha) | q[0] BSgate() | (q[0],q[1]) state = eng.run(prog).state mu = state.means() cov = state.cov() # Here we use the sf circuit drawer and standard linux utilities # to generate an svg representing the circuit file, _ = prog.draw_circuit() filepdf = file[0:-3]+"pdf" filepdf = filepdf.replace("circuit_tex/","") filecrop = filepdf.replace(".pdf","-crop.pdf") name = "cat_circuit.svg" # !pdflatex $file > /dev/null 2>&1 # !pdfcrop $filepdf > /dev/null 2>&1 # !pdf2svg $filecrop $name # Here is a graphical representation of the circuit. It is always assumed that the input is vacuum in all the modes. <br> # ![img](./cat_circuit.svg) # We can now inspect the covariance matrix and vector of means. Note that the vector of means is non-zero since we used a displacement gate. print(np.round(mu,10)) print(np.round(cov,10)) # We now use The Walrus to obtain the Fock representation of the state `psi` that is heralded when modes 0 and 1 are measured to have the value $n=10$. We also calculate the probability of success in heralding in the variable `p_psi`. cutoff = 18 psi = state_vector(mu, cov, post_select={0:m0,1:m1}, normalize=False, cutoff=cutoff) p_psi = np.linalg.norm(psi) psi = psi/p_psi print("The probability of successful heralding is ", np.round(p_psi**2,5)) # We now plot the photon-number distribution of the heralded state. Note that the state only has even photon components. 
plt.bar(np.arange(cutoff),np.abs(psi)**2) plt.xlabel("$i$") plt.ylabel(r"$p_i$") plt.show() # We can now plot the Wigner function of the heralded state, grid = 100 xvec = np.linspace(-7,7,grid) Wp = wigner(Qobj(psi), xvec, xvec) wmap = wigner_cmap(Wp) sc1 = np.max(Wp) nrm = mpl.colors.Normalize(-sc1, sc1) fig, axes = plt.subplots(1, 1, figsize=(5, 4)) plt1 = axes.contourf(xvec, xvec, Wp, 60, cmap=cm.RdBu, norm=nrm) axes.contour(xvec, xvec, Wp, 60, cmap=cm.RdBu, norm=nrm) axes.set_title("Wigner function of the heralded state"); cb1 = fig.colorbar(plt1, ax=axes) fig.tight_layout() plt.show() # and a cut of the Wigner function along $p=0$. plt.plot(xvec, Wp[:,grid//2]) plt.title(r"$W(0,p)$") plt.xlabel(r"p") plt.show() # %reload_ext version_information # %version_information qutip, strawberryfields, thewalrus
docs/gallery/cat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analyzing the MSTIS simulation # # Included in this notebook: # # * Opening files for analysis # * Rates, fluxes, total crossing probabilities, and condition transition probabilities # * Per-ensemble properties such as path length distributions and interface crossing probabilities # * Move scheme analysis # * Replica exchange analysis # * Replica move history tree visualization # * Replaying the simulation # * MORE TO COME! Like free energy projections, path density plots, and more # # NOTE: This notebook uses our old analysis approach. See the "new analysis" appendix notebook for details on how to customize analysis. from __future__ import print_function # If our large test file is available, use it. Otherwise, use file generated # from toy_mstis_2_run.ipynb. This is so the notebook can be used in testing. import os test_file = "../toy_mstis_1k_OPS1.nc" filename = test_file if os.path.isfile(test_file) else "mstis.nc" print("Using file `"+ filename + "` for analysis") # %matplotlib inline import matplotlib.pyplot as plt import openpathsampling as paths import numpy as np # %%time storage = paths.Storage(filename, mode='r') # + run_control={"marked": false} # the following works with the old file we use in testing; the better way is: # mstis = storage.networks['mstis'] # when objects are named, use the name mstis = storage.networks.first # - # ## Reaction rates # # TIS methods are especially good at determining reaction rates, and OPS makes it extremely easy to obtain the rate from a TIS network. # # Note that, although you can get the rate directly, it is very important to look at other results of the sampling (illustrated in this notebook and in notebooks referred to herein) in order to check the validity of the rates you obtain. 
# By default, the built-in analysis calculates histograms the maximum value of some order parameter and the pathlength of every sampled ensemble. You can add other things to this list as well, but you must always specify histogram parameters for these two. The pathlength is in units of frames. mstis.hist_args['max_lambda'] = { 'bin_width' : 0.05, 'bin_range' : (0.0, 0.5) } mstis.hist_args['pathlength'] = { 'bin_width' : 5, 'bin_range' : (0, 150) } # %%time mstis.rate_matrix(storage.steps) # The self-rates (the rate of returning the to initial state) are undefined, and return not-a-number. # # The rate is calculated according to the formula: # # $$k_{AB} = \phi_{A,0} P(B|\lambda_m) \prod_{i=0}^{m-1} P(\lambda_{i+1} | \lambda_i)$$ # # where $\phi_{A,0}$ is the flux from state A through its innermost interface, $P(B|\lambda_m)$ is the conditional transition probability (the probability that a path which crosses the interface at $\lambda_m$ ends in state B), and $\prod_{i=0}^{m-1} P(\lambda_{i+1} | \lambda_i)$ is the total crossing probability. We can look at each of these terms individually. 
# ### Total crossing probability stateA = storage.volumes["A"] stateB = storage.volumes["B"] stateC = storage.volumes["C"] # + tcp_AB = mstis.transitions[(stateA, stateB)].tcp tcp_AC = mstis.transitions[(stateA, stateC)].tcp tcp_BC = mstis.transitions[(stateB, stateC)].tcp tcp_BA = mstis.transitions[(stateB, stateA)].tcp tcp_CA = mstis.transitions[(stateC, stateA)].tcp tcp_CB = mstis.transitions[(stateC, stateB)].tcp plt.plot(tcp_AB.x, tcp_AB, '-r') plt.plot(tcp_CA.x, tcp_CA, '-k') plt.plot(tcp_BC.x, tcp_BC, '-b') plt.plot(tcp_AC.x, tcp_AC, '-g') # same as tcp_AB in MSTIS # - # We normally look at these on a log scale: plt.plot(tcp_AB.x, np.log(tcp_AB), '-r') plt.plot(tcp_CA.x, np.log(tcp_CA), '-k') plt.plot(tcp_BC.x, np.log(tcp_BC), '-b') plt.xlim(0.0, 1.0) # Now, in case you want to know the total crossing probabability at each interface (for example, to use as a bias in an SRTIS calculation): # + # TODO: MOVE THESE TO A METHOD INSIDE THE CODE; MAKE THEM WORK WITH NEW ANALYSIS # - import pandas as pd def crossing_probability_table(transition): tcp = transition.tcp interface_lambdas = transition.interfaces.lambdas values = [tcp(x) for x in interface_lambdas] return pd.Series(values, index=interface_lambdas, name=transition.name) def outer_crossing_probability(transition): tcp = transition.tcp interface_outer_lambda = transition.interfaces.lambdas[-1] return tcp(interface_outer_lambda) crossing_probability_table(mstis.from_state[stateA]) outer_crossing_probability(mstis.from_state[stateA]) tcp_AB(mstis.from_state[stateA].interfaces.lambdas[-1]) tcp_A = mstis.from_state[stateA].tcp # ### Flux # # Here we also calculate the flux contribution to each transition. 
The flux is calculated based on flux_dict = { (transition.stateA, transition.interfaces[0]): transition._flux for transition in mstis.transitions.values() } flux_dict paths.analysis.tis.flux_matrix_pd(flux_dict) # ### Conditional transition probability # + state_names = [s.name for s in mstis.states] outer_ctp_matrix = pd.DataFrame(columns=state_names, index=state_names) for state_pair in mstis.transitions: transition = mstis.transitions[state_pair] outer_ctp_matrix.at[state_pair[0].name, state_pair[1].name] = transition.ctp[transition.ensembles[-1]] outer_ctp_matrix # + state_pair_names = {t: "{} => {}".format(t[0].name, t[1].name) for t in mstis.transitions} ctp_by_interface = pd.DataFrame(index=state_pair_names.values()) for state_pair in mstis.transitions: transition = mstis.transitions[state_pair] for ensemble_i in range(len(transition.ensembles)): state_pair_name = state_pair_names[transition.stateA, transition.stateB] ctp_by_interface.at[state_pair_name, ensemble_i] = transition.conditional_transition_probability( storage.steps, transition.ensembles[ensemble_i] ) ctp_by_interface # - # ## Path ensemble properties hists_A = mstis.transitions[(stateA, stateB)].histograms hists_B = mstis.transitions[(stateB, stateC)].histograms hists_C = mstis.transitions[(stateC, stateB)].histograms # ### Interface crossing probabilities # # We obtain the total crossing probability, shown above, by combining the individual crossing probabilities of hists = {'A': hists_A, 'B': hists_B, 'C': hists_C} plot_style = {'A': '-r', 'B': '-b', 'C': '-k'} for hist in [hists_A, hists_B, hists_C]: for ens in hist['max_lambda']: normalized = hist['max_lambda'][ens].normalized() plt.plot(normalized.x, normalized) # + # add visualization of the sum # - for hist_type in hists: hist = hists[hist_type] for ens in hist['max_lambda']: reverse_cumulative = hist['max_lambda'][ens].reverse_cumulative() plt.plot(reverse_cumulative.x, reverse_cumulative, plot_style[hist_type]) plt.xlim(0.0, 1.0) for 
hist_type in hists: hist = hists[hist_type] for ens in hist['max_lambda']: reverse_cumulative = hist['max_lambda'][ens].reverse_cumulative() plt.plot(reverse_cumulative.x, np.log(reverse_cumulative), plot_style[hist_type]) plt.xlim(0.0, 1.0) # ### Path length histograms for hist in [hists_A, hists_B, hists_C]: for ens in hist['pathlength']: normalized = hist['pathlength'][ens].normalized() plt.plot(normalized.x, normalized) for ens in hists_A['pathlength']: normalized = hists_A['pathlength'][ens].normalized() plt.plot(normalized.x, normalized) # ## Sampling properties # # The properties we illustrated above were properties of the path ensembles. If your path ensembles are sufficiently well-sampled, these will never depend on how you sample them. # # But to figure out whether you've done a good job of sampling, you often want to look at properties related to the sampling process. OPS also makes these very easy. # ### Move scheme analysis scheme = storage.schemes[0] scheme.move_summary(storage.steps) scheme.move_summary(movers='shooting') scheme.move_summary(movers='minus') scheme.move_summary(movers='repex') scheme.move_summary(movers='pathreversal') # ### Replica exchange sampling # # See the notebook `repex_networks.ipynb` for more details on tools to study the convergence of replica exchange. However, a few simple examples are shown here. All of these are analyzed with a separate object, `ReplicaNetwork`. repx_net = paths.ReplicaNetwork(scheme, storage.steps) # #### Replica exchange mixing matrix repx_net.mixing_matrix() # #### Replica exchange graph # # The mixing matrix tells a story of how well various interfaces are connected to other interfaces. The replica exchange graph is essentially a visualization of the mixing matrix (actually, of the transition matrix -- the mixing matrix is a symmetrized version of the transition matrix). # # Note: We're still developing better layout tools to visualize these. 
repxG = paths.ReplicaNetworkGraph(repx_net) repxG.draw('spring') # #### Replica exchange flow # # Replica flow is defined as ***TODO*** # # Flow is designed for calculations where the replica exchange graph is linear, which ours clearly is not. However, we can define the flow over a subset of the interfaces. # ### Replica move history tree import openpathsampling.visualize as vis from IPython.display import SVG # + tree = vis.PathTree( storage.steps[0:500], vis.ReplicaEvolution(replica=2, accepted=True) ) SVG(tree.svg()) # - decorrelated = tree.generator.decorrelated print("We have " + str(len(decorrelated)) + " decorrelated trajectories.") # ### Visualizing trajectories # we use the %run magic because this isn't in a package # %run ../resources/toy_plot_helpers.py background = ToyPlot() background.contour_range = np.arange(-1.5, 1.0, 0.1) background.add_pes(storage.engines[0].pes) xval = paths.FunctionCV("xval", lambda snap : snap.xyz[0][0]) yval = paths.FunctionCV("yval", lambda snap : snap.xyz[0][1]) visualizer = paths.StepVisualizer2D(mstis, xval, yval, [-1.0, 1.0], [-1.0, 1.0]) visualizer.background = background.plot() visualizer.draw_samples(list(tree.samples)) # NBVAL_SKIP # The skip directive tells our test runner not to run this cell import time max_step = 10 for step in storage.steps[0:max_step]: visualizer.draw_ipynb(step) time.sleep(0.1) # ## Histogramming data (TODO)
examples/toy_model_mstis/toy_mstis_3_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# <center>*Copyright <NAME> 2017*</center>
# <center>*For more information, visit us at www.pieriandata.com*</center>

# # Rolling and Expanding
#
# A very common process with time series is to create data based off of a
# rolling mean (**moving average**). Let's show you how to do this easily
# with pandas!

import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Best way to read in data with time series index!
# parse_dates + index_col gives us a DatetimeIndex, which all the
# rolling/expanding plots below rely on.
df = pd.read_csv('time_data/walmart_stock.csv', index_col='Date', parse_dates=True)

df.head(10)

df['Open'].plot(figsize=(16, 6))

# Now let's add in a rolling mean! This rolling method provides row entries,
# where every entry is then representative of the window.

# 7 day rolling mean
df.rolling(7).mean().head(20)

df['Open'].plot()
df.rolling(window=30).mean()['Close'].plot()

# Easiest way to add a legend is to make this rolling value a new column,
# then pandas does it automatically!

# NOTE: the original notebook assigned this column twice; once is enough.
df['Close: 30 Day Mean'] = df['Close'].rolling(window=30).mean()
df.tail()

df[['Close', 'Close: 30 Day Mean']].plot(figsize=(16, 6))

# ## expanding
#
# Now what if you want to take into account everything from the start of the
# time series as a rolling value? For instance, not just take into account a
# period of 7 days, or monthly rolling average, but instead, take into account
# everything since the beginning of the time series, continuously:

# Optional: specify a minimum number of periods.
# Each point in the plot is the mean of all previous days' values plus the
# current one.
df['Close'].expanding(min_periods=1).mean().plot(figsize=(16, 6))

# ## Bollinger Bands
#
# We will talk a lot more about financial analysis plots and technical
# indicators, but here is one worth mentioning!
#
# More info : http://www.investopedia.com/terms/b/bollingerbands.asp
#
# *Developed by <NAME>, Bollinger Bands® are volatility bands placed above and
# below a moving average. Volatility is based on the standard deviation, which
# changes as volatility increases and decreases. The bands automatically widen
# when volatility increases and narrow when volatility decreases. This dynamic
# nature of Bollinger Bands also means they can be used on different
# securities with the standard settings. For signals, Bollinger Bands can be
# used to identify Tops and Bottoms or to determine the strength of the
# trend.*
#
# *Bollinger Bands reflect direction with the 20-period SMA and volatility
# with the upper/lower bands. As such, they can be used to determine if prices
# are relatively high or low. According to Bollinger, the bands should contain
# 88-89% of price action, which makes a move outside the bands significant.
# Technically, prices are relatively high when above the upper band and
# relatively low when below the lower band. However, relatively high should
# not be regarded as bearish or as a sell signal. Likewise, relatively low
# should not be considered bullish or as a buy signal. Prices are high or low
# for a reason. As with other indicators, Bollinger Bands are not meant to be
# used as a stand alone tool.*

# +
# Compute the 20-day rolling statistics once and reuse them (the original
# cell recomputed the rolling std for both the upper and lower band).
rolling_mean_20 = df['Close'].rolling(window=20).mean()
rolling_std_20 = df['Close'].rolling(window=20).std()

# Close 20 MA
df['Close: 20 Day Mean'] = rolling_mean_20

# Upper = 20MA + 2*std(20)
df['Upper'] = rolling_mean_20 + 2 * rolling_std_20

# Lower = 20MA - 2*std(20)
df['Lower'] = rolling_mean_20 - 2 * rolling_std_20

# Close price together with the three bands
df[['Close', 'Close: 20 Day Mean', 'Upper', 'Lower']].plot(figsize=(16, 6))
# -

df[['Close', 'Close: 20 Day Mean', 'Upper', 'Lower']].tail(200).plot(figsize=(16, 6))

# For expanding operations, it doesn't help very much to visualize this
# against the daily data, but instead it's a good way to get an idea of the
# "stability" of a stock. This idea of stability and volatility is something
# we are going to be exploring heavily in the next project, so let's jump
# straight into it!
05-Pandas-with-Time-Series/Rolling and Expanding + Bollinger.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 405 DQN Reinforcement Learning
#
# View more, visit my tutorial page: https://mofanpy.com/tutorials/
# My Youtube Channel: https://www.youtube.com/user/MorvanZhou
# More about Reinforcement learning:
# https://mofanpy.com/tutorials/machine-learning/reinforcement-learning/
#
# Dependencies:
# * torch: 0.1.11
# * gym: 0.8.1
# * numpy

import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import gym

# Hyper Parameters
BATCH_SIZE = 32
LR = 0.01                   # learning rate
EPSILON = 0.9               # greedy policy
GAMMA = 0.9                 # reward discount
TARGET_REPLACE_ITER = 100   # target update frequency
MEMORY_CAPACITY = 2000
env = gym.make('CartPole-v0')
env = env.unwrapped
N_ACTIONS = env.action_space.n
N_STATES = env.observation_space.shape[0]


class Net(nn.Module):
    """Small MLP mapping a state vector to one Q-value per action."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(N_STATES, 10)
        self.fc1.weight.data.normal_(0, 0.1)   # initialization
        self.out = nn.Linear(10, N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)   # initialization

    def forward(self, x):
        """Return Q-values of shape (batch, N_ACTIONS) for states *x*."""
        x = self.fc1(x)
        x = F.relu(x)
        actions_value = self.out(x)
        return actions_value


class DQN(object):
    """Deep Q-Network agent with a replay buffer and a target network."""

    def __init__(self):
        self.eval_net, self.target_net = Net(), Net()

        self.learn_step_counter = 0     # for target updating
        self.memory_counter = 0         # for storing memory
        # Each row is a transition: (s, a, r, s_) flattened together.
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

    def choose_action(self, x):
        """Epsilon-greedy action selection for a single state *x*."""
        x = Variable(torch.unsqueeze(torch.FloatTensor(x), 0))  # input only one sample
        if np.random.uniform() < EPSILON:   # greedy
            actions_value = self.eval_net.forward(x)
            # BUG FIX: torch.max no longer keeps the reduced dim, so the
            # result of [1].data.numpy() is 1-D; the old [0, 0] indexing
            # raises IndexError on torch >= 0.4.
            action = int(torch.max(actions_value, 1)[1].data.numpy()[0])
        else:   # random
            action = np.random.randint(0, N_ACTIONS)
        return action

    def store_transition(self, s, a, r, s_):
        """Append one transition to the circular replay buffer."""
        transition = np.hstack((s, [a, r], s_))
        # replace the old memory with new memory
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self):
        """Sample a batch from memory and do one Q-learning update step."""
        # target parameter update
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1

        # sample batch transitions
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.memory[sample_index, :]
        b_s = Variable(torch.FloatTensor(b_memory[:, :N_STATES]))
        b_a = Variable(torch.LongTensor(b_memory[:, N_STATES:N_STATES + 1].astype(int)))
        b_r = Variable(torch.FloatTensor(b_memory[:, N_STATES + 1:N_STATES + 2]))
        b_s_ = Variable(torch.FloatTensor(b_memory[:, -N_STATES:]))

        # q_eval w.r.t the action in experience
        q_eval = self.eval_net(b_s).gather(1, b_a)      # shape (batch, 1)
        q_next = self.target_net(b_s_).detach()         # detach from graph, don't backpropagate
        # BUG FIX: q_next.max(1)[0] is shape (batch,); without the view it
        # broadcasts against b_r of shape (batch, 1) into (batch, batch).
        q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)  # shape (batch, 1)
        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


dqn = DQN()

# +
print('\nCollecting experience...')
for i_episode in range(400):
    s = env.reset()
    ep_r = 0
    while True:
        env.render()
        a = dqn.choose_action(s)

        # take action
        s_, r, done, info = env.step(a)

        # modify the reward: shape it so the cart stays centered and the
        # pole stays upright, which speeds up learning on CartPole.
        x, x_dot, theta, theta_dot = s_
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
        r = r1 + r2

        dqn.store_transition(s, a, r, s_)

        ep_r += r
        # only start learning once the replay buffer has been filled
        if dqn.memory_counter > MEMORY_CAPACITY:
            dqn.learn()
            if done:
                print('Ep: ', i_episode, '| Ep_r: ', round(ep_r, 2))

        if done:
            break
        s = s_
# -
tutorial-contents-notebooks/405_DQN_Reinforcement_learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import cv2
import os
import pandas as pd


def describe_images(folder):
    """Return a DataFrame describing every image in *folder*.

    Columns: file (path), w (width), h (height), aspect_ratio (height/width).
    """
    desc = []
    for i in os.listdir(folder):
        image = folder + '/' + i
        img = cv2.imread(image)
        height, width, depth = img.shape
        desc.append({'file': image, 'w': width, 'h': height,
                     'aspect_ratio': height / width})
    desc = pd.DataFrame(desc)
    return desc


#w=max(ws) #max width full stub images
#h=max(hs) #max width full stub images

desc = describe_images('images/thumbs')
desc

thumb_desc = describe_images('images/thumbs')


def resize(img, df):
    """Resize *img* to match the model image (row of *df*) whose aspect
    ratio is closest to the input's, preserving the input's aspect ratio."""
    height, width, depth = img.shape
    aspect_ratio = height / width  # actual aspect ratio
    print('original size ---> height: ' + str(height) + ' width: ' + str(width))
    # BUG FIX: the original compared df['aspect_ratio'] against *height*
    # instead of the image's aspect ratio, so an essentially arbitrary
    # model row was selected.
    df = df.iloc[(df['aspect_ratio'] - aspect_ratio).abs().argsort()[:1]]  # closest aspect ratio
    w = df.w.iloc[0]
    h = df.h.iloc[0]
    # cv2.resize takes dsize as (width, height)
    if height > width:
        dimension = (int(h / aspect_ratio), h)
    else:
        dimension = (w, int(w * aspect_ratio))
    # BUG FIX: the original print swapped the width/height labels.
    print('new size ---> height: ' + str(dimension[1]) + ' width: ' + str(dimension[0]))
    return cv2.resize(img, dimension)
    #return dimension


def thumbnail(img, desc):
    """Resize *img* to cover the thumbnail frame described by *desc*, then
    center-crop it to exactly (w, h)."""
    df = desc.copy()
    height, width, depth = img.shape
    print('original image dim---> height:' + str(height) + ' width: ' + str(width))
    aspect_ratio = height / width  # actual aspect ratio
    h = df.loc[0, 'h']  # fixed height
    if width > height:
        # select row with higher aspect_ratio, set new width to this value
        w = df.loc[df['aspect_ratio'].idxmin(), 'w']
        if int(w * aspect_ratio) > h:
            dimension = (w, int(w * aspect_ratio))
        else:
            dimension = (int(h / aspect_ratio), h)
    else:
        # select row with lower aspect_ratio, set new width to this value
        w = df.loc[df['aspect_ratio'].idxmax(), 'w']
        if int(w * aspect_ratio) > h:
            dimension = (w, int(w * aspect_ratio))
        else:
            dimension = (int(h / aspect_ratio), h)
    print(dimension)
    img = cv2.resize(img, dimension)
    height, width, depth = img.shape
    print('resized image dim---> height:' + str(height) + ' width: ' + str(width))
    # center-crop the resized image down to the exact (h, w) frame
    img = img[int(height / 2) - int(h / 2):int(height / 2) + int(h / 2),
              int(width / 2) - int(w / 2):int(width / 2) + int(w / 2)]
    height, width, depth = img.shape
    print('final thumbnail dim---> height:' + str(height) + ' width: ' + str(width))
    return img


# +
#thumbnail(img,thumb_desc)
# -


def show_image(image):
    """Display *image* (a path or an already-loaded array) in a window."""
    if type(image) == str:
        img = cv2.imread(image)
    else:
        img = image
    cv2.imshow("Original", img)
    cv2.waitKey(0)
    cv2.destroyWindow('Original')


path = 'images/fulls'

# +
#show_image('images/<NAME> - Twitter Maps_files/madrid___235K.png')
# -


def caption(img, caption, x, y, cap_color='white'):
    """Draw *caption* onto *img* at relative position (x, y) in [0, 1].

    cap_color selects white or black text; returns the annotated image.
    NOTE: the *caption* parameter shadows the function name; kept for
    backward compatibility with keyword callers.
    """
    height, width, depth = img.shape
    # pick the BGR color for the requested caption color
    if cap_color == 'white':
        color = (255, 255, 255)
    else:
        color = (0, 0, 0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_size = 0.5
    font_color = color
    font_thickness = 1
    text = caption
    x, y = int(width * x), int(height * y)
    img = cv2.putText(img, text, (x, y), font, font_size, font_color,
                      font_thickness, cv2.LINE_AA)
    return img


def batch_resize(source='', target='', thumb_target='', thumb_model='',
                 model='', cap=''):
    """Resize every image in *source*: write a full-size copy (matched
    against *model*) to *target* and a thumbnail (matched against
    *thumb_model*) to *thumb_target*, captioning selected images."""
    desc = describe_images(model)
    thumb_desc = describe_images(thumb_model)
    for i in os.listdir(source):
        image = source + '/' + i
        print('processing image:' + image)
        img = cv2.imread(image)
        print('generating thumbnail')
        thumb = thumbnail(img, thumb_desc)
        print('resizing image')
        img = resize(img, desc)
        # only files whose name contains one of these digits get a caption
        lista = [1, 2, 3, 4]
        y = [x for x in lista if (str(x) in i)]
        print(y)
        if len(y) > 0:
            print('adding caption')
            # images 1 and 3 get white text, the others black
            if y[0] == 1 or y[0] == 3:
                cap_color = 'white'
            else:
                cap_color = 'black'
            img = caption(img, cap, 0.07, 0.95, cap_color=cap_color)
        print('saving file ' + target + '/' + i)
        cv2.imwrite(target + '/' + i, img)
        print('saving file ' + thumb_target + '/' + i)
        cv2.imwrite(thumb_target + '/' + i, thumb)


batch_resize(thumb_target='images/tmp/thumbs', thumb_model='images/thumbs',
             source='images/Francisco Humeres - Eigencities_files',
             target='images/tmp/fulls', model='images/fulls',
             cap='(c) 2015-16 <NAME>.')
eigenCities/convert_images-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Poetry-to-music pipeline (Colab notebook): classifies a poem's sentiment
# with a BERT fine-tune, generates a melody from its scansion/rhyme scheme,
# and has a Magenta music-transformer improvise a piano accompaniment.

# + id="2AJ9ihdg-rDH"
# !pip install git+https://github.com/hyperreality/Poetry-Tools.git
# !pip install MIDIUtil
# !pip install pyrhyme

# + id="my1TJA1JrK5A"
# !pip install transformers
# !pip install datasets
# %tensorflow_version 1.x
# !gsutil -q -m cp -r gs://magentadata/models/music_transformer/primers/* /content/
# !gsutil -q -m cp gs://magentadata/soundfonts/Yamaha-C5-Salamander-JNv5.1.sf2 /content/
# !apt-get update -qq && apt-get install -qq libfluidsynth1 build-essential libasound2-dev libjack-dev
# !pip install -q 'tensorflow-datasets < 4.0.0'
# !pip install -qU google-cloud magenta pyfluidsynth

# + id="a1N1BswOvrP2"
from collections import defaultdict
from collections import OrderedDict  # NOTE(review): unused import
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
from transformers import BertForSequenceClassification  # NOTE(review): unused import
import torch
import numpy as np
import pandas as pd
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset
import tensorflow.compat.v1 as tf
from google.colab import files
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
from magenta.models.score2perf import score2perf
import note_seq
import poetrytools
import pyrhyme
import re
from midiutil.MidiFile import MIDIFile
import spacy
from random import choice, seed, random

# Magenta/tensor2tensor require TF1-style graph execution.
tf.disable_v2_behavior()


# + id="b_PJHr_-wTiH"
class PoemDataset(Dataset):
    """Torch Dataset over poem lines: tokenizes each line with a BERT
    tokenizer and pairs it with its integer sentiment label."""

    def __init__(self, lines, targets, tokenizer, max_len):
        self.lines = lines          # array-like of verse strings
        self.targets = targets      # array-like of integer class labels
        self.tokenizer = tokenizer  # HuggingFace tokenizer
        self.max_len = max_len      # pad/truncate length in tokens

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, item):
        line = str(self.lines[item])
        target = self.targets[item]
        encoding = self.tokenizer.encode_plus(
            line,
            add_special_tokens=True,
            return_token_type_ids=False,
            padding='max_length',
            max_length=self.max_len,
            return_attention_mask=True,
            return_tensors='pt'
        )
        return {
            'line_text': line,
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'targets': torch.tensor(target, dtype=torch.long)
        }


# + id="Dqa6riFxwY2Y"
def create_data_loader(df, tokenizer, max_len, batch_size):
    """Wrap a DataFrame (columns 'verse_text', 'label') in a DataLoader."""
    ds = PoemDataset(
        lines=df.verse_text.to_numpy(),
        targets=df.label.to_numpy(),
        tokenizer=tokenizer,
        max_len=max_len
    )
    return DataLoader(
        ds,
        batch_size=batch_size,
        num_workers=4
    )


# + id="gGzRNLblwJrT"
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
    """Run one training epoch; returns (accuracy, mean loss)."""
    model = model.train()
    losses = []
    correct_predictions = 0
    for d in data_loader:
        input_ids = d['input_ids'].to(device)
        attention_mask = d['attention_mask'].to(device)
        targets = d['targets'].to(device)
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
        )
        _, preds = torch.max(outputs, dim=1)
        loss = loss_fn(outputs, targets)
        correct_predictions += torch.sum(preds == targets)
        losses.append(loss.item())
        loss.backward()
        # NOTE(review): nn.utils.clip_grad_norm is deprecated in favour of
        # clip_grad_norm_ (in-place) in modern torch — verify torch version.
        nn.utils.clip_grad_norm(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return float(correct_predictions) / n_examples, np.mean(losses)


# + id="B25ubgAZwK9i"
def eval_model(model, data_loader, loss_fn, device, n_examples):
    """Evaluate without gradients; returns (accuracy, mean loss)."""
    model = model.eval()
    losses = []
    correct_predictions = 0
    with torch.no_grad():
        for d in data_loader:
            input_ids = d['input_ids'].to(device)
            attention_mask = d["attention_mask"].to(device)
            targets = d["targets"].to(device)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            _, preds = torch.max(outputs, dim=1)
            loss = loss_fn(outputs, targets)
            correct_predictions += torch.sum(preds == targets)
            losses.append(loss.item())
    return float(correct_predictions) / n_examples, np.mean(losses)


# + id="JfpWSrEnwcEg"
class SentimentClassifier(nn.Module):
    """BERT encoder + dropout + linear head for n_classes sentiment labels."""

    def __init__(self, n_classes):
        BERT_MODEL = 'bert-base-uncased'
        BERT_CACHE_PATH = 'bert_cache/'
        super(SentimentClassifier, self).__init__()
        self.bert = BertModel.from_pretrained(BERT_MODEL, cache_dir=BERT_CACHE_PATH)
        # Tuple output (sequence_output, pooled_output) instead of a ModelOutput.
        self.bert.config.return_dict = False
        self.drop = nn.Dropout(p=0.3)
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        # pooled_output is the [CLS] representation.
        _, pooled_output = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        output = self.drop(pooled_output)
        return self.out(output)


# + id="Mv04VdcAxe-X"
def train_model():
    """Fine-tune the sentiment classifier on the 'poem_sentiment' dataset
    and return the trained model (requires a CUDA device)."""
    RANDOM_SEED = 42
    np.random.seed(RANDOM_SEED)
    torch.manual_seed(RANDOM_SEED)
    device = torch.device('cuda:0')
    poem_dataset = load_dataset('poem_sentiment')
    df_train = pd.DataFrame(poem_dataset['train'])
    df_validate = pd.DataFrame(poem_dataset['validation'])
    df_test = pd.DataFrame(poem_dataset['test'])
    MAX_TOKEN_LENGTH = 40
    BATCH_SIZE = 16
    BERT_MODEL = 'bert-base-uncased'
    BERT_CACHE_PATH = 'bert_cache/'
    TOKENIZER_PATH = 'project_cache/'
    btokenizer = BertTokenizer.from_pretrained(BERT_MODEL, cache_dir=TOKENIZER_PATH)
    train_data_loader = create_data_loader(df_train, btokenizer, MAX_TOKEN_LENGTH, BATCH_SIZE)
    val_data_loader = create_data_loader(df_validate, btokenizer, MAX_TOKEN_LENGTH, BATCH_SIZE)
    # NOTE(review): test_data_loader is built but never used.
    test_data_loader = create_data_loader(df_test, btokenizer, MAX_TOKEN_LENGTH, BATCH_SIZE)
    class_names = ['negative', 'positive', 'no_impact', 'mixed']
    model = SentimentClassifier(len(class_names))
    model = model.to(device)
    EPOCHS = 5
    optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
    total_steps = len(train_data_loader) * EPOCHS
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=total_steps
    )
    loss_fn = nn.CrossEntropyLoss().to(device)
    for epoch in range(EPOCHS):
        print(f'Epoch {epoch + 1}/{EPOCHS}')
        print('-' * 10)
        train_acc, train_loss = train_epoch(
            model, train_data_loader, loss_fn, optimizer, device, scheduler, len(df_train)
        )
        print(f'Train loss {train_loss} accuracy {train_acc}')
        val_acc, val_loss = eval_model(
            model, val_data_loader, loss_fn, device, len(df_validate)
        )
        print(f'Val loss {val_loss} accuracy {val_acc}')
        print()
    return model


# + id="8C0q6lT90d9H"
model = train_model()


# + id="b8IeGEGuFoRV"
def poem_classification(poem_path):
    """Classify each non-empty line of the poem at *poem_path* with the
    global `model` and return the majority polarity: 'positive' or 'negative'
    ('no_impact' lines are ignored; ties resolve to 'negative').

    NOTE(review): relies on the module-level `model` trained above.
    """
    RANDOM_SEED = 42
    np.random.seed(RANDOM_SEED)
    torch.manual_seed(RANDOM_SEED)
    device = torch.device('cuda:0')
    MAX_TOKEN_LENGTH = 40
    BERT_MODEL = 'bert-base-uncased'
    BERT_CACHE_PATH = 'bert_cache/'
    TOKENIZER_PATH = 'project_cache/'
    btokenizer = BertTokenizer.from_pretrained(BERT_MODEL, cache_dir=TOKENIZER_PATH)
    class_names = ['negative', 'positive', 'no_impact', 'mixed']
    with open(poem_path) as f:
        poem_text = f.read()
    poem_lines = poem_text.split('\n')
    poem_lines = [l for l in poem_lines if l]
    score = defaultdict(int)
    for test_line in poem_lines:
        encoded_line = btokenizer.encode_plus(
            test_line,
            add_special_tokens=True,
            return_token_type_ids=False,
            padding=True,
            max_length=MAX_TOKEN_LENGTH,
            return_attention_mask=True,
            return_tensors='pt',
        )
        input_ids = encoded_line['input_ids'].to(device)
        attention_mask = encoded_line['attention_mask'].to(device)
        output = model(input_ids, attention_mask)
        _, prediction = torch.max(output, dim=1)
        predicted_class = class_names[prediction]
        if predicted_class != 'no_impact':
            score[predicted_class] += 1
    if score['positive'] > score['negative']:
        return 'positive'
    else:
        return 'negative'


# + id="nKKszq46G6_B"
def choose_note(scale, curr_note, forced=0):
    """Step to a neighbouring note in *scale* (a note -> neighbours dict).

    With forced=0, 70% of the time prefer the second (upward) neighbour when
    one exists; otherwise take the first. forced!=0 always takes the
    preferred branch.
    """
    if forced == 0:
        p = random()
    else:
        p = 0
    if p < 0.7:
        if len(scale[curr_note]) > 1:
            new_note = scale[curr_note][1]
        else:
            new_note = scale[curr_note][0]
    else:
        new_note = scale[curr_note][0]
    return new_note


def create_midi(durations, notes, path, pol):
    """Write (durations, notes) as a single-track MIDI file at *path*.

    durations: strings '8'/'4'/'2' (notes) or 'p4'/'p8' (rests).
    notes: note names keyed into midi_numbers, or '_' for a rest.
    pol: 'positive' -> 120 bpm, otherwise 70 bpm.
    """
    midi_numbers = {'C': 60, 'D': 62, 'E': 64, 'F': 65, 'G': 67, 'A': 69, 'B': 71,
                    'C_2': 72, 'D_2': 74, 'E_2': 76, 'F_2': 77, 'G_2': 79,
                    'A_2': 81, 'B_2': 83}
    # Beat lengths; 'p' entries are pauses (time advances, no note emitted).
    beats = {'8': 0.5, '4': 1, '2': 2, 'p4': 1, 'p8': 0.5}
    track = 0
    channel = 0
    time = 0
    if pol == 'positive':
        tempo = 120
    else:
        tempo = 70
    volume = 100
    midi_song = MIDIFile(1)
    midi_song.addTrackName(track, time, 'Sample')
    midi_song.addTempo(track, time, tempo)
    for d, n in zip(durations, notes):
        if n != '_':
            midi_song.addNote(track, channel, midi_numbers[n], time, beats[d], volume)
        time += beats[d]
    bin_file = open(path, 'wb')
    midi_song.writeFile(bin_file)
    bin_file.close()


def gen_music(poem_text, nlp, a, r, pol):
    """Generate one list of note names per verse.

    poem_text: list of verse strings; nlp: spaCy pipeline;
    a: per-verse scansion (per-word stress strings) — assumed to come from
    poetrytools.scanscion; r: rhyme-scheme id per verse; pol: polarity
    ('positive' walks C major, otherwise A minor).
    """
    seed(42)
    rhymes = {}
    # Note -> [downward, upward] neighbour tables for the random walk.
    c_major = {'C': ['D'], 'D': ['C', 'E'], 'E': ['D', 'F'], 'F': ['D', 'G'],
               'G': ['F', 'A'], 'A': ['G', 'B'], 'B': ['A', 'C'],
               'C_2': ['B', 'D_2'], 'D_2': ['C_2', 'E_2'], 'E_2': ['D_2', 'F_2'],
               'F_2': ['E_2', 'G_2'], 'G_2': ['F_2', 'A_2'],
               'A_2': ['G_2', 'B_2'], 'B_2': ['A_2']}
    a_minor = {'A': ['B'], 'B': ['A', 'C'], 'C': ['B', 'D'], 'D': ['C', 'E'],
               'E': ['D', 'F'], 'F': ['E', 'G'], 'G': ['F', 'A_2'],
               'A_2': ['G', 'B_2'], 'B_2': ['A_2', 'C_2'], 'C_2': ['B_2', 'D_2'],
               'D_2': ['C_2', 'E_2'], 'E_2': ['D_2', 'F_2'],
               'F_2': ['E_2', 'G_2'], 'G_2': ['F_2']}
    notes = []
    i = 0
    if pol == 'positive':
        current_note = 'C'
    else:
        current_note = 'A'
    for sent in poem_text:
        sent_notes = []
        j = 0
        # Reuse the melody of an earlier identical verse (refrain).
        # NOTE(review): this aliases notes[k] (no copy), so the shared list
        # is appended to again below — presumably unintended; verify.
        for k in range(i):
            if poem_text[k] == sent and notes:
                sent_notes = notes[k]
                break
        tokens = nlp(sent)
        # Restart every other verse from the tonic.
        if i % 2 == 0:
            if pol == 'positive':
                current_note = 'C'
            else:
                current_note = 'A'
        for t in tokens:
            if t.pos_ == 'PART' and t.text == '\'s':
                continue
            elif t.is_punct and (t.text == '.' or t.text == '...' or t.text == ';'):
                sent_notes.append('_')  # rest
                continue
            elif t.is_punct and t.text == ',':
                sent_notes.append('_')
                continue
            elif t.is_punct and t.text == '!':
                sent_notes.append(current_note)
                continue
            elif t.is_punct and t.text == '?':
                # Questions force an upward step, then a rest.
                if pol == 'positive':
                    current_note = choose_note(c_major, current_note, forced=1)
                else:
                    current_note = choose_note(a_minor, current_note, forced=1)
                sent_notes.append(current_note)
                sent_notes.append('_')
                continue
            elif t.is_punct:
                continue
            elif len(a[i][j]) == 1:
                # Monosyllable: one note.
                sent_notes.append(current_note)
            elif len(a[i][j]) > 1:
                # One note per syllable, stepping between syllables.
                for _ in a[i][j]:
                    sent_notes.append(current_note)
                    if pol == 'positive':
                        current_note = choose_note(c_major, current_note)
                    else:
                        current_note = choose_note(a_minor, current_note)
            j += 1
            if pol == 'positive':
                current_note = choose_note(c_major, current_note)
            else:
                current_note = choose_note(a_minor, current_note)
        # Land each verse on a chord tone (skip a trailing rest).
        if sent_notes[-1] == '_':
            idx = -2
        else:
            idx = -1
        if pol == 'positive':
            if '2' in sent_notes[-2]:
                sent_notes[idx] = choice(['E_2', 'G_2'])
            else:
                sent_notes[idx] = choice(['E', 'G'])
        else:
            if '2' in sent_notes[-2]:
                sent_notes[idx] = choice(['C_2', 'E_2'])
            else:
                sent_notes[idx] = choice(['C', 'E'])
        # Rhyming verses share their final three notes.
        if sent_notes[-1] != '_':
            if r[i] in rhymes:
                sent_notes[-3:] = rhymes[r[i]]
            else:
                rhymes[r[i]] = sent_notes[-3:]
        i += 1
        notes.append(sent_notes)
    # Resolve the whole poem on the tonic.
    if notes[-1][-1] == '_':
        idx = -2
    else:
        idx = -1
    if pol == 'positive':
        if '2' in notes[-1][idx]:
            notes[-1][idx] = 'C_2'
        else:
            notes[-1][idx] = 'C'
    else:
        if '2' in notes[-1][idx]:
            notes[-1][idx] = 'A_2'
        else:
            notes[-1][idx] = 'A'
    return notes


def get_durations(poem_text, nlp, a):
    """Generate one list of duration symbols per verse, mirroring gen_music:
    stressed syllables -> quarter notes ('4'), unstressed/stopwords ->
    eighths ('8'), punctuation -> pauses ('p4'/'p8'); each verse ends on a
    half note ('2') unless it ends on a pause."""
    notes = []
    i = 0
    for sent in poem_text:
        sent_notes = []
        j = 0
        tokens = nlp(sent)
        for t in tokens:
            if t.pos_ == 'PART' and t.text == '\'s':
                continue
            elif t.is_punct and (t.text == '.' or t.text == '...' or t.text == ';'):
                sent_notes.append('p4')
                continue
            elif t.is_punct and t.text == ',':
                sent_notes.append('p8')
                continue
            elif t.is_punct and t.text == '!':
                sent_notes.append('8')
                continue
            elif t.is_punct and t.text == '?':
                sent_notes.append('8')
                sent_notes.append('p8')
                continue
            elif t.is_punct:
                continue
            elif t.is_stop and len(a[i][j]) == 1:
                sent_notes.append('8')
            elif len(a[i][j]) == 1:
                if a[i][j][0] == '1':
                    sent_notes.append('4')
                else:
                    sent_notes.append('8')
            elif len(a[i][j]) > 1:
                for stress in a[i][j]:
                    if stress == '1':
                        sent_notes.append('4')
                    else:
                        sent_notes.append('8')
            j += 1
        if 'p' not in sent_notes[-1]:
            sent_notes[-1] = '2'
        i += 1
        notes.append(sent_notes)
    return notes


def find_rhyme_scheme(poem_verses):
    """Return a list of integer rhyme-group ids, one per verse, by querying
    RhymeBrain for the last word of each verse (network call per word)."""
    to_rhyme_list = []
    for verse in poem_verses:
        # Strip punctuation, keep the last real word of the verse.
        v = re.sub('[\W_]', ' ', verse).split(' ')
        v = [a for a in v if a.isupper() or len(a) > 1]
        to_rhyme_list.append(v[-1])
    rhyme_dict = {}
    rhyme_finder = pyrhyme.RhymeBrain()
    for word in to_rhyme_list:
        all_rhymes = rhyme_finder.rhyming_list(word, lang='en')
        all_rhymes = [r.word for r in all_rhymes]
        rhyme_dict[word] = all_rhymes
    verse_dict = {}
    idx = 0
    verse_number = 0
    for word in to_rhyme_list:
        found = 0
        # Join an existing rhyme group if this word rhymes either way.
        for k in verse_dict:
            to_find = verse_dict[k][0]
            rhyme_num = verse_dict[k][1]
            if to_find in rhyme_dict[word] or word in rhyme_dict[to_find]:
                verse_dict[verse_number] = (word, rhyme_num)
                found = 1
                break
        if found == 0:
            verse_dict[verse_number] = (word, idx)
            idx += 1
        verse_number += 1
    rhyme_scheme = [verse_dict[k][1] for k in verse_dict]
    return rhyme_scheme


# + id="nkONBotTWp_Z"
def create_song(poem_path, polarity, song_path):
    """End-to-end: read the poem at *poem_path*, derive scansion, rhyme
    scheme, durations and notes, and write the melody MIDI to *song_path*."""
    with open(poem_path) as f:
        poem_text = f.read()
    poem = poetrytools.tokenize(poem_text)
    s = poetrytools.scanscion(poem)
    s = [v for v in s if v]  # drop empty verses
    verses = poem_text.split('\n')
    verses = [v for v in verses if v]
    r = find_rhyme_scheme(verses)
    nlp = spacy.load('en_core_web_sm')
    durations = get_durations(verses, nlp, s)
    notes = gen_music(verses, nlp, s, r, polarity)
    # Flatten the per-verse lists into one track.
    durations = [d for sub_d in durations for d in sub_d]
    notes = [n for sub_n in notes for n in sub_n]
    create_midi(durations, notes, song_path, polarity)


# + id="PDNzPZC0AaI1"
SF2_PATH = '/content/Yamaha-C5-Salamander-JNv5.1.sf2'
SAMPLE_RATE = 16000


# Upload a MIDI file and convert to NoteSequence.
def upload_midi(song_path):
    with open(song_path, 'rb') as f:
        data = f.read()
    return note_seq.midi_to_note_sequence(data)


# Decode a list of IDs, truncating at the end-of-sequence marker.
def decode(ids, encoder):
    ids = list(ids)
    if text_encoder.EOS_ID in ids:
        ids = ids[:ids.index(text_encoder.EOS_ID)]
    return encoder.decode(ids)


# + id="6X5TSmRnAe97"
model_name = 'transformer'
hparams_set = 'transformer_tpu'
ckpt_path = 'gs://magentadata/models/music_transformer/checkpoints/melody_conditioned_model_16.ckpt'


class MelodyToPianoPerformanceProblem(score2perf.AbsoluteMelody2PerfProblem):
    """tensor2tensor problem: melody in, piano performance out."""

    @property
    def add_eos_symbol(self):
        return True


problem = MelodyToPianoPerformanceProblem()
melody_conditioned_encoders = problem.get_feature_encoders()

# Set up HParams.
hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
trainer_lib.add_problem_hparams(hparams, problem)
hparams.num_hidden_layers = 16
hparams.sampling_method = 'random'

# Set up decoding HParams.
decode_hparams = decoding.decode_hparams()
decode_hparams.alpha = 0.0
decode_hparams.beam_size = 1

# Create Estimator.
run_config = trainer_lib.create_run_config(hparams)
estimator = trainer_lib.create_estimator(
    model_name, hparams, run_config,
    decode_hparams=decode_hparams)

# These values will be changed by the following cell (read via globals by
# input_generator while the predict() generator stays alive).
inputs = []
decode_length = 0


# Create input generator.
def input_generator():
    global inputs
    while True:
        yield {
            'inputs': np.array([[inputs]], dtype=np.int32),
            'targets': np.zeros([1, 0], dtype=np.int32),
            'decode_length': np.array(decode_length, dtype=np.int32)
        }


# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(input_generator())
melody_conditioned_samples = estimator.predict(
    input_fn, checkpoint_path=ckpt_path)

# "Burn" one sample to force the checkpoint load.
_ = next(melody_conditioned_samples)

# + id="hxN3pyEqysLV"
POEM_PATH = '/content/drive/MyDrive/Poems/SonnetLXXIIAmoretti.txt'
poem_sentiment = poem_classification(POEM_PATH)
print(poem_sentiment)

# + id="3ARetzFWAidd"
event_padding = 2 * [note_seq.MELODY_NO_EVENT]
split_path = POEM_PATH.split('/')
MIDI_FOLDER = '/content/drive/MyDrive/MidiFiles/'
SONG_PATH = MIDI_FOLDER + split_path[-1][:-3] + '.mid'
print(f'Poem sentiment is: {poem_sentiment}')
create_song(POEM_PATH, poem_sentiment, SONG_PATH)
melody_ns = upload_midi(SONG_PATH)
# Keep only the inferred melody instrument, sorted by onset time.
melody_instrument = note_seq.infer_melody_for_sequence(melody_ns)
notes = [note for note in melody_ns.notes if note.instrument == melody_instrument]
del melody_ns.notes[:]
melody_ns.notes.extend(
    sorted(notes, key=lambda note: note.start_time))
# Make the melody monophonic: each note ends where the next begins.
for i in range(len(melody_ns.notes) - 1):
    melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time
inputs = melody_conditioned_encoders['inputs'].encode_note_sequence(
    melody_ns)

# Play and plot the melody.
note_seq.play_sequence(
    melody_ns, synth=note_seq.fluidsynth,
    sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
note_seq.plot_sequence(melody_ns)

# + id="OFMRlmS4AllC"
decode_length = 4096
sample_ids = next(melody_conditioned_samples)['outputs']

# Decode to NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=melody_conditioned_encoders['targets'])
accompaniment_ns = note_seq.midi_file_to_note_sequence(midi_filename)

# Play and plot the generated accompaniment.
note_seq.play_sequence(
    accompaniment_ns, synth=note_seq.fluidsynth,
    sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
note_seq.plot_sequence(accompaniment_ns)

# + id="IuJ_u4ZWApty"
note_seq.sequence_proto_to_midi_file(
    accompaniment_ns, '/content/drive/MyDrive/SonnetLXXIIAmoretti7.mid')
files.download('/content/drive/MyDrive/SonnetLXXIIAmoretti7.mid')
PoetryMusicGeneration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.naive_bayes import GaussianNB from sklearn.preprocessing import LabelEncoder # - dados = pd.read_csv('E:/Mestrado Matérias/2 MÓDULO/Machine Learning/LISTA 2/Q6.csv') dados # Separandos os dados # + X_dados = dados.iloc[:,1:5].values X_dados # + y_dados = dados.iloc[:,-1].values y_dados # - # Transformando os dados categóricos em numéricos # + label_encoder_tempo = LabelEncoder() label_encoder_temperatura = LabelEncoder() label_encoder_umidade = LabelEncoder() label_encoder_vento = LabelEncoder() # + X_dados[:,0] = label_encoder_tempo.fit_transform(X_dados[:,0]) X_dados[:,1] = label_encoder_temperatura.fit_transform(X_dados[:,1]) X_dados[:,2] = label_encoder_umidade.fit_transform(X_dados[:,2]) X_dados[:,3] = label_encoder_vento.fit_transform(X_dados[:,3]) X_dados # - X_train, X_test, y_train, y_test = train_test_split(X_dados, y_dados, test_size = 0.3, random_state=0) # + naive_dados = GaussianNB() #Distribuição normal #Treinando o algoritmo naive_dados.fit(X_dados,y_dados) #Acurácia do modelos naive_dados.score(X_dados,y_dados) # - # <h5>Previsão de um novo dado # # + #chuvoso(0), quente(2), alta(0) e forte(0) previsao = naive_dados.predict([[0,2,0,0]]) # - previsao # + #Classes naive_dados.classes_ # + #Atributos por classe naive_dados.class_count_ # + #Propriedade a priori naive_dados.class_prior_ # -
Naive Bayes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab_type="code" id="f0F8I8wnm5gX"
import networkx as nx

# NOTE(review): the bare `except:` clauses below also hide real import errors;
# they exist so the notebook self-installs its dependencies on Colab.
try:
    import osmnx as ox
except:
    # osmnx depends on the system package libspatialindex
    # !apt install libspatialindex-dev
    # !pip install osmnx
    import osmnx as ox

try:
    import geopandas as gpd
except:
    # !pip install geopandas
    import geopandas as gpd

try:
    import contextily as ctx
except:
    # install dependencies for contextily
    # !apt install libproj-dev proj-data proj-bin
    # !apt install libgeos-dev
    # !pip install cython
    # !pip install cartopy
    # install contextily
    # !pip install contextily==1.0rc1 --no-use-pep517 --no-cache-dir
    import contextily as ctx

import fiona
from shapely.geometry import Point, LineString, Polygon
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pathlib

# + [markdown] colab_type="text" id="ejhrKBqPng9F"
# # Traveling Salesman Problem
# The canonical Traveling Salesman Problem is stated as:
# > "Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city and returns to the origin city?"
#
# This is generalizable to finding the shortest [Hamiltonian cycle](http://mathworld.wolfram.com/HamiltonianCycle.html) on a fully connected graph (i.e. all nodes can be reached from all other nodes).
#
# This problem is [NP-hard](https://en.wikipedia.org/wiki/P_versus_NP_problem), meaning it is not possible for an algorithm to solve all instances of the problem quickly (i.e. in polynomial time). However, there are many approximate and heuristic approaches which can give reasonable solutions in shorter time.

# + colab_type="code" id="C6JmtPFto13K"
# Download the full drivable street network for NYC from OpenStreetMap
# (network call; can take several minutes).
place_name = 'New York City, NY, United States'
place_roads = ox.graph_from_place(place_name)

# + colab_type="code" id="k5W0RTsawXlE"
# Split the graph into node and edge GeoDataFrames for plotting.
place_roads_nodes, place_roads_edges = ox.graph_to_gdfs(place_roads)

# + colab_type="code" id="Hnp2qXbMss49"
fig = plt.figure(figsize=[10, 10])
ax = fig.add_subplot(1, 1, 1)
place_roads_edges.plot(ax=ax, color=[0.8, 0.8, 0.8], alpha=0.5)

# + [markdown] colab_type="text" id="IoiI3SS1pIIs"
# Let's say you wanted to do a ice cream crawl: you want to visit every ice cream shop in a city. What is the shortest route that you would take that takes you to every ice cream shop in a city and brings you back to your starting point?

# + colab_type="code" id="tAQK62C7pB7V"
place_ice_cream = ox.pois_from_place(place_name, tags={"amenity": "ice_cream"})
# some of the ice cream shops return polygons instead of points, so we need to take their centroids
place_ice_cream = place_ice_cream.to_crs("epsg:3857")  # projecting to Web-Mercator for more accurate centroids
place_ice_cream["geometry"] = place_ice_cream["geometry"].centroid
place_ice_cream = place_ice_cream.to_crs("epsg:4326")  # projecting back to lat/long
place_ice_cream

# + colab_type="code" id="DSEkty3Lr0AX"
# Snap each shop to its nearest road-graph node.
ice_cream_nodes = ox.get_nearest_nodes(place_roads, place_ice_cream.geometry.x, place_ice_cream.geometry.y)
ice_cream_nodes

# + [markdown] colab_type="text" id="7vjZXdo4wwx0"
# ## Exercise
# Plot the locations of the ice cream shops on the map of the roads

# + colab_type="code" id="wUuhBee2w9Bd"


# + [markdown] colab_type="text" id="qF1NSIerw-j4"
# ## Compute shortest path matrix

# + colab_type="code" id="q2c8QobPsq8e"
# Pairwise network distances between all shop nodes: one single-source
# Dijkstra per origin, then read off each destination.
shortest_path_matrix = np.zeros([len(ice_cream_nodes), len(ice_cream_nodes)])
for idx_i, orig in enumerate(ice_cream_nodes):
    shortest_paths = nx.single_source_dijkstra_path_length(place_roads, orig, weight='length')
    for idx_j, dest in enumerate(ice_cream_nodes):
        shortest_path_matrix[idx_i, idx_j] = shortest_paths[dest]
shortest_path_matrix

# + colab_type="code" id="Dx6x4175wv90"
# Dense complete graph over the shops, weighted by network distance.
ice_cream_graph = nx.from_numpy_matrix(shortest_path_matrix, create_using=nx.MultiDiGraph)

# + colab_type="code" id="QXZedXvnzGX3"
# new graph indexes from 0
ice_cream_graph.nodes

# + colab_type="code" id="rUufs51xteVN"
# rename node labels using original labels
ice_cream_graph = nx.relabel_nodes(ice_cream_graph, {k: v for k, v in zip(ice_cream_graph.nodes, ice_cream_nodes)})
ice_cream_graph.nodes

# + [markdown] colab_type="text" id="IkkkXc4YzRX4"
# ## Exercise
# Implement each of the following methods to see how good of a TSP path you can obtain.

# + [markdown] colab_type="code" id="s5iJS8jbyBFu"
# ## Method 1: Random
# Let's start by setting a baseline; how well can we do by starting at a random node and choosing a random node out of the ones remaining each time?
#
# After you find the path, draw it on the map and print its length. (You don't need to draw the actual roads taken, just draw lines between the nodes.)
# -

# ## Method 2: Greedy
# Now, let's try to choose nodes more intelligently: start at a random node again, but instead of choosing a random node each time, always choose the node closest to the current node each time.
#
# Again, draw the path on the map and print its length.

# ## Method 3: Random with 2-opt swapping
#
# You may have noticed that both paths contain a lot of edges that cross each other, which is nonideal. However, there exists an algorithm to remove all the paths that cross each other from a Hamiltonian cycle: the [2-opt](https://en.wikipedia.org/wiki/2-opt) algorithm. We can use that to our advantage here.
#
# Start by generating a random Hamiltonian cycle like in method 1, but this time, use the 2-opt algorithm to optimize it further. Again, draw it on the map and print its length.

# ## Method 4: Open-ended
#
# Although the 2-opt swaps reduce the length of the Hamiltonian cycle by quite a lot, they almost never provide the optimal solution. See if you can use another method to produce a Hamiltonian cycle shorter than the one you got with method 3. Some options to explore include:
#
# - [3-opt](https://en.wikipedia.org/wiki/3-opt)
# - [Multi-fragment algorithm](https://en.wikipedia.org/wiki/Multi-fragment_algorithm) with 2- or 3-opt swapping
# - [Simulated annealing](https://en.wikipedia.org/wiki/Simulated_annealing)
#
# The [TSP Wikipedia page](https://en.wikipedia.org/wiki/Travelling_salesman_problem) has many other algorithms that could be of use to you as well.
#
.ipynb_checkpoints/07_Graph_Optimization_Problems_TSP-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Nodes and References Implementation of a Tree # # In this notebook is the code corresponding to the lecture for implementing the representation of a Tree as a class with nodes and references! class BinaryTree(object): def __init__(self, obj): self.key = obj self.left_node = None self.right_node = None def insertLeft(self, newObj): t = BinaryTree(newObj) if not self.left_node: self.left_node = t else: t.left_node = self.left_node self.left_node = t def insertRight(self, newObj): t = BinaryTree(newObj) if not self.right_node: self.right_node = t else: t.right_node = self.right_node self.right_node = t def getRightChild(self): return self.right_node def getLeftChild(self): return self.left_node def setRootVal(self,obj): self.key = obj def getRootVal(self): return self.key # We can see some examples of creating a tree and assigning children. Note that some outputs are Trees themselves! # + jupyter={"outputs_hidden": false} from __future__ import print_function r = BinaryTree('a') print(r.getRootVal()) print(r.getLeftChild()) r.insertLeft('b') print(r.getLeftChild()) print(r.getLeftChild().getRootVal()) r.insertRight('c') print(r.getRightChild()) print(r.getRightChild().getRootVal()) r.getRightChild().setRootVal('hello') print(r.getRightChild().getRootVal()) # -
Trees Solved/Tree Representation Implementation (Nodes and References).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Name # # Data preparation by deleting a cluster in Cloud Dataproc # # # Label # Cloud Dataproc, cluster, GCP, Cloud Storage, Kubeflow, Pipeline # # # # Summary # A Kubeflow Pipeline component to delete a cluster in Cloud Dataproc. # # ## Intended use # Use this component at the start of a Kubeflow Pipeline to delete a temporary Cloud Dataproc # cluster to run Cloud Dataproc jobs as steps in the pipeline. This component is usually # used with an [exit handler](https://github.com/kubeflow/pipelines/blob/master/samples/core/exit_handler/exit_handler.py) to run at the end of a pipeline. # # # ## Runtime arguments # | Argument | Description | Optional | Data type | Accepted values | Default | # |----------|-------------|----------|-----------|-----------------|---------| # | project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | | # | region | The Cloud Dataproc region in which to handle the request. | No | GCPRegion | | | # | name | The name of the cluster to delete. | No | String | | | # | wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 | # # # ## Cautions & requirements # To use the component, you must: # * Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project). # * The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details. # * Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project. 
# # ## Detailed description # This component deletes a Dataproc cluster by using [Dataproc delete cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/delete). # # Follow these steps to use the component in a pipeline: # 1. Install the Kubeflow Pipeline SDK: # + # %%capture --no-stderr # !pip3 install kfp --upgrade # - # 2. Load the component using KFP SDK # + import kfp.components as comp dataproc_delete_cluster_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/1.1.0-alpha.1/components/gcp/dataproc/delete_cluster/component.yaml') help(dataproc_delete_cluster_op) # - # ### Sample # # Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template. # # #### Prerequisites # # [Create a Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) before running the sample code. # # #### Set sample parameters # + tags=["parameters"] PROJECT_ID = '<Please put your project ID here>' CLUSTER_NAME = '<Please put your existing cluster name here>' REGION = 'us-central1' EXPERIMENT_NAME = 'Dataproc - Delete Cluster' # - # #### Example pipeline that uses the component import kfp.dsl as dsl import json @dsl.pipeline( name='Dataproc delete cluster pipeline', description='Dataproc delete cluster pipeline' ) def dataproc_delete_cluster_pipeline( project_id = PROJECT_ID, region = REGION, name = CLUSTER_NAME ): dataproc_delete_cluster_op( project_id=project_id, region=region, name=name) # #### Compile the pipeline pipeline_func = dataproc_delete_cluster_pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename) # #### Submit the pipeline for execution # + #Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() 
experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) # - # ## References # # * [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_delete_cluster.py) # * [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile) # * [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/delete_cluster/sample.ipynb) # * [Dataproc delete cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/delete) # # # ## License # By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
components/gcp/dataproc/delete_cluster/sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_pytorch_p36 # language: python # name: conda_pytorch_p36 # --- # # ResNet18 from ASSETS19 - 3 Classes - ROI Crops # Using Project Sidewalk team's model and try to train on top of it # # Adapted from fast.ai's Deep Learning for Coders Notebook: `https://github.com/fastai/course-v3/blob/master/nbs/dl1/lesson1-pets.ipynb` and the AWS version # %reload_ext autoreload # %autoreload 2 # %matplotlib inline from fastai.vision import * from fastai.metrics import error_rate bs = 64 # ## Looking at the ROI Images Data path = Path(r'/home/ec2-user/SageMaker/classify-streetview/roi-images') path path.ls() # Transforms applied tfms = get_transforms(do_flip=False) # + #classes_list = ['0_missing', '1_null', '2_present'] # https://docs.fast.ai/vision.data.html#ImageDataBunch.from_folder data = ImageDataBunch.from_folder(path, ds_tfms = tfms, size = 224, bs=bs).normalize() data.classes # - data.show_batch(rows=2, figsize=(7, 7)) print(data.classes) len(data.classes),data.c # ## Training with ResNet18 Trained on Other Cities learn = cnn_learner(data, models.resnet18, metrics=error_rate) learn.model # + SAGEMAKER_PATH = r'/home/ec2-user/SageMaker' MODEL_PATH = os.path.join(SAGEMAKER_PATH, r'classify-streetview/sidewalk-cv-assets19/model/20e_slid_win_no_feats_r18') print(os.path.exists(MODEL_PATH)) learn.load(MODEL_PATH) # - # Just train the fully connected layers for determining the class learn.fit_one_cycle(4) learn.save('resnet18-transfer') # # Write out the Test Results # https://docs.fast.ai/tutorial.inference.html preds,y = learn.get_preds() df_preds = pd.DataFrame(preds.tolist()) df_preds.columns = ['0_missing', '1_null', '2_present'] df_preds['y'] = pd.Series(y.tolist()) filepaths = list(data.items) df_preds = filepaths df_preds.head() df_preds.head() df_preds.to_csv('3class-probs-with-y.csv', index = 
False) # + num = len(learn.data.test_ds) for i in range(num): filename = str(learn.data.test_ds.items[i]).split('/')[-1] learn.predict(learn.data.test_ds[i][0]) # - # ## Results # + interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(data.valid_ds)==len(losses)==len(idxs) # - interp.plot_confusion_matrix(figsize=(4,4), dpi=60) # Worst Predictions interp.plot_top_losses(12, figsize=(15,11)) # Look at the best predictions! interp.plot_top_losses(12, largest = False, figsize=(15,11)) # ## Unfreezing, fine-tuning, and learning rates # Since our model is working as we expect it to, we will *unfreeze* our model and train some more. learn.lr_find() learn.recorder.plot() learn.unfreeze() learn.fit_one_cycle(4, max_lr=slice(1e-5,1e-3)) interp_unfreeze = ClassificationInterpretation.from_learner(learn) interp_unfreeze.plot_confusion_matrix(figsize=(4,4), dpi=60) interp_unfreeze.plot_top_losses(12, figsize=(15,11)) interp_unfreeze.plot_top_losses(12, largest = False, figsize=(15,11))
model3-pretrained-attempt-train/2020-04-04-AttemptTraining.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.3.10 # language: julia # name: julia-0.3 # --- # Let us see how we can process the textual information to create a vector representation, also known as word embeddings or word vectors, which can be used as an input to a neural network. # # <h2 class="section-heading">One-Hot Vector</h2> # # This is the most simplest one where for each word we create a vector of length equal to the size of the vocabulary, $R^{\left\|V\right\|}$. We fill the vector with $1$ at the index of the word, rest all $0s$. # # $$W^{apple} = # \begin{bmatrix} # 1 \\ # \vdots \\ # \vdots \\ # \vdots \\ # 0 \\ # \end{bmatrix} # W^{banana} = # \begin{bmatrix} # 0 \\ # 1 \\ # \vdots \\ # \vdots \\ # 0 \\ # \end{bmatrix} # W^{king} = # \begin{bmatrix} # 0 \\ # \vdots \\ # 1 \\ # \vdots \\ # 0 \\ # \end{bmatrix} # W^{queen} = # \begin{bmatrix} # 0 \\ # \vdots \\ # \vdots \\ # 1 \\ # 0 \\ # \end{bmatrix}$$ # # All these vectors are independent to each other. Hence this representation doesn't encodes any relationship between words: # # $$(W^{apple})^TW^{banana}=(W^{king})^TW^{queen}=0$$ # # Also, each vector would be very sparse. Hence this approach requires large space to encode all our words in the vector form. # # <blockquote> # You shall know a word by the company it keeps (<NAME>. 1957:11) # <p align="right">- <a href="https://en.wikipedia.org/wiki/John_Rupert_Firth">Wikipedia</a></p> # </blockquote> # # <h2 class="section-heading">Word-Document Matrix</h2> # # In this approach, we create a matrix where a column represents a document and a row represents the frequency of a word in the document. This matrix scales with the number of documents ($D$). The matrix size would be $R^{\left\|D*V\right\|}$ where $V$ is the size of the vocabulary. 
#
# <h2 class="section-heading">Word-Word Matrix</h2>
#
# In this case, we build a co-occurrence matrix where both columns and rows represent words from the vocabulary. The benefit of building this matrix is that the co-occurrence value of the words which are highly likely to come together in a sentence will always be high as compared to the words which rarely come together. Hence we should be fine once we have a decent-sized dataset or say documents. Also, the size of the matrix depends now on the size of the vocabulary, $R^{\left\|V*V\right\|}$.
#
# The beauty of the last two approaches is that we can apply [Singular-Value-Decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) (SVD) on the matrix and further reduce the dimensionality. Let us see an example on the Word-Word matrix.
#
# Consider our data to have the following 3 sentences:
#
# - I enjoy driving.
# - I like banana.
# - I like reading.
#
# The co-occurrence matrix will look like:
#
# $$X =
# \begin{array}{c|lcr}
# words & \text{I} & \text{enjoy} & \text{driving} & \text{like} & \text{banana} & \text{reading} &\text{.}\\
# \hline
# \text{I} & 0 & 1 & 0 & 2 & 0 & 0 & 0 \\
# \text{enjoy} & 1 & 0 & 1 & 0 & 0 & 0 & 0 \\
# \text{driving} & 0 & 1 & 0 & 0 & 0 & 0 & 1 \\
# \text{like} & 2 & 0 & 0 & 0 & 1 & 1 & 0 \\
# \text{banana} & 0 & 0 & 0 & 1 & 0 & 0 & 1 \\
# \text{reading} & 0 & 0 & 0 & 1 & 0 & 0 & 1 \\
# \text{.} & 0 & 0 & 1 & 0 & 1 & 1 & 0 \\
# \end{array}
# $$

# Vocabulary in fixed order; row/column i of X corresponds to words[i].
words = ["I" "enjoy" "driving" "like" "banana" "reading" "."];

# Symmetric 7x7 co-occurrence counts for the three example sentences.
# FIX: every row now ends with an explicit `;` row separator — the original
# literal relied on a bare line break between rows 6 and 7, which becomes a
# dimension-mismatch error if the literal is ever reflowed onto one line.
X = [0 1 0 2 0 0 0;
     1 0 1 0 0 0 0;
     0 1 0 0 0 0 1;
     2 0 0 0 1 1 0;
     0 0 0 1 0 0 1;
     0 0 0 1 0 0 1;
     0 0 1 0 1 1 0];

# In [Julia](http://julia.readthedocs.org/en/latest/stdlib/linalg/), applying SVD on our matrix $X$ will give us $U$, $S$ and $V$ where:
#
# <center>$$A == U*diagm(S)*V^T$$</center>

U,S,V = svd(X);

U

S

V

# >"A useful rule of thumb is to retain enough singular values to make up
# >90% of the energy in Σ. That is, the sum of the squares of the retained
# >singular values should be at least 90% of the sum of the squares of all the
# >singular values." - <NAME>
#
# S matrix is the $\sum$, hence the total energy here is:

totEnergy = sum(S.^2)

# Cumulative fraction of energy captured by the first i singular values.
energy = zeros(length(S));
energy[1] = S[1]^2/totEnergy;
for i=2:length(S)
    energy[i] = energy[i-1]+(S[i]^2/totEnergy);
end
energy

using PyPlot

plot(1:length(energy), energy)
xlabel("Dimensions")
ylabel("% Energy Retained")
grid("on")

# Looking at the plot we can determine that keeping 4 dimensions is good enough for us rather than all 7 (X is 7x7, so there are 7 singular values). We can also print/plot the words based on the first two columns of $U$ corresponding to the two biggest singular values.

# NOTE(review): this truncates the *columns of X* and re-runs SVD, rather
# than truncating the SVD factors of X directly (U[:,1:4]*diagm(S[1:4])*V[:,1:4]');
# kept as-is to preserve the original demonstration — confirm intent.
Y = X[:,1:4]

U,S,V = svd(Y);

U

for w=1:length(words)
    plt.text(U[w,1], U[w,2], words[w]);
end
plt.xlim((minimum(U[:,1])-1, maximum(U[:,1])+1));
plt.ylim((minimum(U[:,2])-1, maximum(U[:,2])+1));

# In the coming posts, I'll write about more interesting ways of generating word vectors.

# <h2 class="section-heading"> References: </h2>
#
# - [From Frequency to Meaning: Vector Space Models of Semantics](http://arxiv.org/abs/1003.1141)
# - [Efficient Estimation of Word Representations in Vector Space](http://arxiv.org/abs/1301.3781)
# - [Singular Value Decomposition Tutorial PDF](https://www.ling.ohio-state.edu/~kbaker/pubs/Singular_Value_Decomposition_Tutorial.pdf)
# - [Dimensionality Reduction](http://infolab.stanford.edu/~ullman/mmds/ch11.pdf)
notebooks/wordVec_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="WBR-Vd1FcBF1" # # 自然数分割問題を計算する # # 自然数分割問題とは、ある自然数の集合を2つのグループA, Bに分割し、それぞれのグループ内の自然数の和が同じになるような分割方法を探す問題です。 # これをwildqatを使用して解いてみます。 # # # wildqatがインストールされていない場合は、環境に併せて以下のようにインストールしてください。 # # # + colab={} colab_type="code" id="pMkrUpQgb5yC" # !pip3 install wildqat # + [markdown] colab_type="text" id="VWDd6GlHpnQb" # 必要なライブラリをimportし、wildqatオブジェクトをインスタンス化します。 # + colab={} colab_type="code" id="Giyn1-GNcIZ0" import wildqat as wq import numpy as np a = wq.opt() # + [markdown] colab_type="text" id="V1qLi57eeO8l" # # 解きたい問題のQUBOマトリクスを作成します。 # N個の自然数の$i$番目の自然数を$n_i$とし、その自然数がどちらのグループに属するかを$q_i$で表します。 # $n_i$がグループAに属する時には # $q_i=1$、グループBに属する時には$q_i=0$とします。 # ここで、2つのグループ内のそれぞれの和が等しい時に最小となるようなコスト関数$E$を考えます。 # # この場合、 # #  $E=\{\sum_{i=1}^{N}n_i*(2q_i-1)\}^2$ # # とすれば、自然数$n_i$がグループAに属するとき$2q_i-1=1$、グループBに属するとき$2q_i-1=-1$ # になりますので、グループAとグループBに属する自然数の和が等しいときに # $E=0$になり、異なると$E>0$になります。 # # 展開すると、 # #  $E=(\sum_{i=1}^{N}2n_iq_i)^2 - 2(\sum_{i=1}^{N}2n_iq_i)(\sum_{j=1}^{N}n_j) + (\sum_{i=1}^{N}n_i)^2$ # # コスト関数Eは最小化すれば良いので、最後の定数項は要らなくなります。またコスト関数は大きさのみ関係あるので、全体を4で割って # #  $E=(\sum_{i=1}^{N}n_iq_i)^2 - \sum_{i=1}^{N}n_iq_i\sum_{j=1}^{N}n_j$ # # また、$q_i=1$または$q_i=0$のとき、$q_i^2=q_i$です。また、$\sum_{j=1}^N{n_j}$ はnの総和で定数ですので、 # $n_{sum}$とします。さらに展開して整理すると</br> # #  $E=\sum_{i=1}^{N}(n_i^2 - n_{sum}n_i)q_i +2 \sum_{i < j}n_in_jq_iq_j$ # # これを行列表記すると下記のようになります。 # #  $qubo = \left[\begin{array}{rrrrr}n_1^2 - n_{sum}n_1 & 2n_1n_2 & 2n_1n_3 & 2n_1n_4 & ...\\ 0 & n_2^2 - n_{sum}n_2 & 2n_2n_3& 2n_2n_4 &...\\ 0 & 0 & n_3^2 - n_{sum}n_3 & 2n_3n_4 & ...\\ 0 & 0 & 0 & n_4^2 - n_{sum}n_4 & ...\\ ... & ... & ... & ... &... 
\end{array} \right]$ # # これをpythonプログラムで書き、シミュレータを実行して結果を得ます。 # + colab={} colab_type="code" id="1wqDGvAheHnd" numbers = np.array([3,2,6,9,2,5,7,3,3,6,7,3,5,3,2,2,2,6,8,4,6,3,3,6,4,3,3,2,2,5,8,9]) a.qubo = np.zeros((numbers.size,numbers.size)) for i in range(numbers.size): for j in range(numbers.size): if i == j: a.qubo[i][i]=numbers[i]**2-numbers.sum()*numbers[i] elif i<j: a.qubo[i][j]=2*numbers[i] * numbers[j] print(a.qubo.size) answer = a.sa() # + [markdown] colab_type="text" id="MJuEqL6VRptU" # 得られた結果を表示してみます。 # 自然数が2つのグループに分けられ、和が等しくなっています。 # + colab={} colab_type="code" id="<KEY>" group1_string = "" group2_string = "" group1_sum = 0 group2_sum = 0 for i in range(numbers.size): if answer[i] == 0: group1_string+= '+' + str(numbers[i]) group1_sum+=numbers[i] else: group2_string+= '+' + str(numbers[i]) group2_sum+=numbers[i] print(group1_string[1:],"=",group1_sum) print(group2_string[1:],"=",group1_sum) # -
examples_ja/tutorial003_numberpartitioning_ja.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import base64 from __future__ import print_function # py 2.7 compat. from IPython.html import widgets # Widget definitions. from IPython.utils.traitlets import Unicode # Traitlet needed to add synced attributes to the widget. # This is a custom widget that allows the user to upload file data to the notebook server. The file data is sent via a statefull `value` attribute of the widget. The widget has an upload failed event that fires in the front-end and is echoed to the back-end using a custom msg. class FileWidget(widgets.DOMWidget): _view_name = Unicode('FilePickerView', sync=True) value = Unicode(sync=True) filename = Unicode(sync=True) def __init__(self, **kwargs): """Constructor""" widgets.DOMWidget.__init__(self, **kwargs) # Call the base. # Allow the user to register error callbacks with the following signatures: # callback() # callback(sender) self.errors = widgets.CallbackDispatcher(accepted_nargs=[0, 1]) # Listen for custom msgs self.on_msg(self._handle_custom_msg) def _handle_custom_msg(self, content): """Handle a msg from the front-end. Parameters ---------- content: dict Content of the msg.""" if 'event' in content and content['event'] == 'error': self.errors() self.errors(self) # + language="javascript" # # require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){ # # var FilePickerView = widget.DOMWidgetView.extend({ # render: function(){ # // Render the view. # this.setElement($('<input />') # .attr('type', 'file')); # }, # # events: { # // List of events and their handlers. # 'change': 'handle_file_change', # }, # # handle_file_change: function(evt) { # // Handle when the user has changed the file. # # // Retrieve the first (and only!) 
File from the FileList object # var file = evt.target.files[0]; # if (file) { # # // Read the file's textual content and set value to those contents. # var that = this; # var file_reader = new FileReader(); # file_reader.onload = function(e) { # that.model.set('value', e.target.result); # that.touch(); # } # file_reader.readAsText(file); # } else { # # // The file couldn't be opened. Send an error msg to the # // back-end. # this.send({ 'event': 'error' }); # } # # // Set the filename of the file. # this.model.set('filename', file.name); # this.touch(); # }, # }); # # // Register the DatePickerView with the widget manager. # manager.WidgetManager.register_widget_view('FilePickerView', FilePickerView); # }); # - # The following shows how the file widget can be used. # + file_widget = FileWidget() # Register an event to echo the filename when it has been changed. def file_loading(): print("Loading %s" % file_widget.filename) file_widget.on_trait_change(file_loading, 'filename') # Register an event to echo the filename and contents when a file # has been uploaded. def file_loaded(): print("Loaded, file contents: %s" % file_widget.value) file_widget.on_trait_change(file_loaded, 'value') # Register an event to print an error message when a file could not # be opened. Since the error messages are not handled through # traitlets but instead handled through custom msgs, the registration # of the handler is different than the two examples above. Instead # the API provided by the CallbackDispatcher must be used. def file_failed(): print("Could not load file contents of %s" % file_widget.filename) file_widget.errors.register_callback(file_failed) file_widget
notebooks/1 - IPython Notebook Examples/IPython Project Examples/Interactive Widgets/File Upload Widget.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- library(fpp2) library(gridExtra) # + # 4. 확률 과정 Z_t = 1 + 0.9 * Z_(t - 1) + ε_t (t = 1, 2, ..., 100) 으로부터 시계열자료를 생성한 후 다음을 수행하라. 단, # Z_0 = 10의 값을 주고 오차항 ε_t은 WN (white noise) N(0,1)이라고 가정한다. set.seed(1) zt <- ts(numeric(100)) ; zt[1] = 10 ; print(zt) e <- rnorm(100) for(t in 2:100){ zt[t] <- 1 + 0.9 * zt[t - 1] + e[t] } print(head(zt, 21)) # + # (a) {Z_t}의 시계열그림을 그려라 autoplot(zt) # + # (b) {Z_t}에 대한 표본상관계수(ACF) ρ_k, (k = 0,1, … ,10)를 구하라. correlation <- Acf(zt, lag.max = 10, type=c("correlation"), plot = F) correlation # + # (c) {Z_t}에 대한 부분표본상관계수(PACF)를 구하라. partial <- Acf(zt, lag.max = 10, type=c("partial"), plot = F) partial Pacf(zt, lag.max = 10, plot = F) # - p1 <- ggAcf(zt, type = c("correlation")) p2 <- ggPacf(zt) grid.arrange(p1, p2, layout_matrix = rbind( c(1), c(2))) # + # (d) Z_t, Z_(t - 1) 의 산점도를 그리고, 이 산점도와 ρ1의 관계를 설명하라. zt_1 <- ts(numeric(99)) for(t in 2:100){ zt_1[t - 1] <- zt[t] } nzt = zt[1:99] nzt_1 = as.numeric(zt_1) df_z <- data.frame(nzt, nzt_1) summary(lm(nzt_1 ~ nzt)) cat("\nρ1 : ", round(correlation$acf[2], 5)) # - ggplot(df_z, aes(nzt, nzt_1)) + geom_point() + stat_smooth(method = lm, level = 0.95) # + # (e) Z_t, Z_(t - 2) 의 산점도를 그리고, 이 산점도와 ρ2의 관계를 설명하라. zt_2 <- ts(numeric(98)) for(t in 3:100){ zt_2[t - 2] <- zt[t] } nzt = zt[1:98] nzt_2 = as.numeric(zt_2) df_z <- data.frame(nzt, nzt_2) summary(lm(nzt_2 ~ nzt)) cat("\nρ2 : ", round(correlation$acf[3], 5)) # - ggplot(df_z, aes(nzt, nzt_2)) + geom_point() + stat_smooth(method = lm, level = 0.95) # + correlation <- Acf(zt, lag.max = 10, type=c("correlation"), plot = F) cat("\nρ1 : ", correlation$acf[2]) cat("\nρ2 : ", correlation$acf[3]) autoplot(zt, series = 'zt') + autolayer(zt_1) + autolayer(zt_2)
2nd semester/02.TimeSeries/HW3/HW(3-4).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # SWISS-MODEL - Downloading homology models # # This notebook gives a tutorial of the **SWISSMODEL object**, which is a simple class to parse and download models from the SWISS-MODEL homology model repository. # # <div class="alert alert-info"> # # **Input:** Path to SWISS-MODEL metadata directory, UniProt accession(s) # # </div> # # <div class="alert alert-info"> # # **Output:** Downloaded SWISS-MODEL models # # </div> # # ## Instructions # 1. Download the metadata for your organism of interest at SWISS-MODEL from here: https://swissmodel.expasy.org/repository, under the column "Download Metadata (Models and structures)" # 1. Extract that directory, which will then create a SWISS-MODEL_Repository folder) # 1. Create a new SWISSMODEL object, pointing it to that folder # 1. Download models for a specified UniProt accession from ssbio.databases.swissmodel import SWISSMODEL my_swissmodels = SWISSMODEL('/tmp/SWISS-MODEL_Repository/') # This gives you a list of all UniProt accession numbers with at least one model my_swissmodels.uniprots_modeled[:10] # This gives you a list of all models available for a UniProt entry my_swissmodels.get_models(uniprot_acc='Q9I1D5') # This downloads all models available for a UniProt entry my_swissmodels.download_models(uniprot_acc='Q9I1D5', outdir='/tmp/', force_rerun=False)
docs/notebooks/SWISS-MODEL - Downloading homology models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 9 # # The result will be evaluated from a report in Jupyter, which must be found in a public GitHub repository. The project must be carried out in the groups assigned in class. Use clear and rigorous procedures. Due date: July 20, 2021, 11:59 pm, through Bloque Neón + (Upload repository link) # # ## Car Price Prediction # # Predict if the price of a car is low or high # + # %matplotlib inline import pandas as pd data = pd.read_csv('../datasets/dataTrain_carListings.zip') data = data.loc[data['Model'].str.contains('Camry')].drop(['Make', 'State'], axis=1) data = data.join(pd.get_dummies(data['Model'], prefix='M')) data['HighPrice'] = (data['Price'] > data['Price'].mean()).astype(int) data = data.drop(['Model', 'Price'], axis=1) data.head() # - data.shape y = data['HighPrice'] X = data.drop(['HighPrice'], axis=1) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) # # Exercise 8.1 # # Estimate a Decision Tree Classifier Manually using the code created in the decision trees notebook. # # Evaluate the accuracy on the testing set # # Exercise 8.2 # # Estimate a Bagging of 10 Decision Tree Classifiers Manually using the code created in bagging notebook. 
# # Evaluate the accuracy on the testing set # # Exercise 8.3 # # Compare the impact in the results by varing the parameter max_features # # Evaluate the accuracy on the testing set # # Exercise 8.4 # # Estimate a Bagging of 10 Decision Tree Classifiers with `max_features = log(n_features)` # # Evaluate the accuracy on the testing set # # Exercise 8.5 # # Using sklearn, train a RandomForestClassifier # # Evaluate the accuracy on the testing set # # Exercise 8.6 # # Find the best parameters of the RandomForestClassifier (max_depth, max_features, n_estimators) # # Evaluate the accuracy on the testing set # # Exercise 8.7 # # Using xgboost train an XGBClassifier # # Evaluate the accuracy on the testing set # # Exercise 8.8 # # Modify the parameters learning rate, gamma, colsample_bytree. Explain what each parameter means. # # Evaluate the accuracy on the testing set
Exercises/E8-RandomForests_Boosting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import xarray as xr import matplotlib.patches as patches import cartopy import cartopy.crs as ccrs from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import cartopy.feature as cfeature import cmocean.cm as cmo import matplotlib.pyplot as plt # %matplotlib inline from mpl_toolkits.axes_grid1 import make_axes_locatable # - # Open and briefly analyze an example of synthetic-aperture radar (SAR) data, which is a form of radar used to create two or three dimensional reconstructions of objects. As part of the SUNRISE grant I am on with Rob, we are interested in using SAR data to identify submesoscale eddies in the northern Gulf of Mexico. SAR data can be particulary useful for this because some of the premium products contain very high spatial resolution compared to alternatives like satillite altimetry. # The data structure can be quite complicated - and the example shown is Level 2 Sentinal 1 downloaded from https://search.asf.alaska.edu/#/. You will have to make an account to download the data, but it only takes a minute. Level 2 means the data contain derived geophysical variables from processed (and calibrated) data; see https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-1-sar/products-algorithms/level-2-algorithms/formatting for more information on the formatting and https://sentinel.esa.int/web/sentinel/user-guides/sentinel-1-sar/definitions for definitions. # # The data consists of three components: Ocean Swell Spectra, Ocean Wind Field, and Radial Surface Velocity. Each of these components is on a different spatial grid - further complicating things. ds = xr.open_dataset('s1a-iw-ocn-vv-20200913t001819-20200913t001844-034333-03FDDC-001.nc') ds # Plot the radial velocity as an example. 
It is defined as the 'ground range gridded difference between the measured Level-2 Doppler grid and the Level-1 calculated geometrical Doppler. The RVL component provides continuity of the ASAR Doppler grid. The RVL estimates are produced on a ground-range grid'. https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-1-sar/products-algorithms/level-2/products/surface-radial-velocity-component ds.rvlRadVel # The dimensions 'rvlRadVel' and 'rvlRaSize' can be thought of as the X and Y coordinates, with rvlSwath containing the different grids that the velocity is measured on. Here, let's plot the second one because it doesn't touch land, therefore containing no NaNs, which will make things easier when plotting ds.rvlRadVel[:,:,1].plot() # We can make this plot better. Let's clean it up using cartopy to provide some geographical context. lon = ds.rvlLon[:,:,1].data lat = ds.rvlLat[:,:,1].data vel = ds.rvlRadVel[:,:,1].values # + fig = plt.figure(figsize=(15,6)) ax = fig.add_axes([0.06, 0.01, 0.93, 0.95], projection=ccrs.Mercator()) mappable = ax.pcolormesh(lon, lat, vel, cmap = cmo.amp, vmin = -1.5, vmax = 0, transform = ccrs.PlateCarree()) land_10m = cfeature.NaturalEarthFeature('physical', 'land', '10m', edgecolor='face', facecolor=cfeature.COLORS['land']) states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='10m', facecolor='none') gl = ax.gridlines(linewidth=0.4, color='black', alpha=0.5, linestyle='-', draw_labels=True) ax.set_extent([-98, -90, 27, 30.5], ccrs.PlateCarree()) ax.add_feature(land_10m, facecolor='0.8') ax.add_feature(states_provinces, edgecolor='0.2') ax.add_feature(cfeature.LAND) ax.add_feature(cfeature.COASTLINE) ax.add_feature(cfeature.RIVERS) ax.add_feature(cfeature.BORDERS, linestyle='-', edgecolor='0.2') ax.set_aspect('auto') cax = fig.add_axes([0.09, 0.91, 0.32, 0.02]) cb = fig.colorbar(mappable, cax=cax, orientation='horizontal') cb.set_label(r'Radial Velocity [ms$^{-1}$]', 
fontsize=18, color='0.2') cb.ax.tick_params(labelsize=18, length=2, color='0.2', labelcolor='0.2') ax.tick_params(axis='y', labelsize=18) gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER gl.right_labels = False gl.top_labels = False ax.set_title('Sentinel-1 SAR Radial Velocity: 09/13/20') plt.rcParams.update({'font.size': 18}) # - # Let's look at another variable: the wind velocity measured from the SAR backscatter. It is a bit unclear what the reference height is but standard practice is 10 m above the surface ds.owiWindSpeed lon = ds.owiLon.data lat = ds.owiLat.data windspd = ds.owiWindSpeed.values # + fig = plt.figure(figsize=(15,6)) ax = fig.add_axes([0.06, 0.01, 0.93, 0.95], projection=ccrs.Mercator()) mappable = ax.pcolormesh(lon, lat, windspd, cmap = cmo.thermal, transform = ccrs.PlateCarree()) gl = ax.gridlines(linewidth=0.4, color='black', alpha=0.5, linestyle='-', draw_labels=True) ax.set_extent([-98, -90, 27, 30.5], ccrs.PlateCarree()) ax.add_feature(land_10m, facecolor='0.8') ax.add_feature(states_provinces, edgecolor='0.2') ax.add_feature(cfeature.LAND) ax.add_feature(cfeature.COASTLINE) ax.add_feature(cfeature.RIVERS) ax.add_feature(cfeature.BORDERS, linestyle='-', edgecolor='0.2') ax.set_aspect('auto') cax = fig.add_axes([0.09, 0.91, 0.32, 0.02]) cb = fig.colorbar(mappable, cax=cax, orientation='horizontal') cb.set_label(r'Wind Speed [ms$^{-1}$]', fontsize=18, color='0.2') cb.ax.tick_params(labelsize=18, length=2, color='0.2', labelcolor='0.2') ax.tick_params(axis='y', labelsize=18) gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER gl.right_labels = False gl.top_labels = False ax.set_title('Sentinel-1 SAR Wind Speed: 09/13/20') plt.rcParams.update({'font.size': 18}) # - # Basic statistics for the velocity fields are available (standard deviation), although we could easily derive them as well. This wraps up the introduction to opening ocean SAR data.
HW1/Synthetic_aperture_radar/HW1_SAR_Schlichting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import cv2 import pandas as pd import math import numpy as np import warnings warnings.filterwarnings("ignore") GAME_STATE_FILE_NAME = "game" GAME_STATE_FILE_EXT = ".csv" GAMES_DIR = "games/" PROCESSED_GAMES_DIR = "processed_games/" MODEL_NAME = "2048_model.h5" MOVES = ["UP", "DOWN", "LEFT", "RIGHT"] MOVE_COL_NAME = "MOVE" FILE_HEADER = [""] N_SIZE = 4 N_FILES = len(os.listdir(GAMES_DIR)) SHOW_GRAPHS = False MAX_CELL_VALUE_THRESHOLD = 10 CAP_MAX_CELL_VALUE = True CELL_VALUE_RELATIVE_NORMALIZATION = True DATA_AUGMENTATION = False def load_data(file, direc=GAMES_DIR, header=True): csv_path = os.path.join(direc, file) if header: return pd.read_csv(csv_path) else: return pd.read_csv(csv_path, header=None) # - def extract_move(row): move = np.int16(row[len(row) - 1]) row = row[:len(row) - 1] return move, row def normalize_row(row): row = np.float16(row) row = np.add(row, 1) row = np.log2(row) row = np.int16(row) if CELL_VALUE_RELATIVE_NORMALIZATION: min_val = 100 for val in row: if val == 0: continue if min_val > val: min_val = val if min_val > 0: row = np.subtract(row, min_val - 1) row_max = np.max(row) row = row / row_max row = np.round(row, 6) return row # + def get_stringy_row(row): return str(row) def update_record(record_dict, row, move): row_str = get_stringy_row(row) if record_dict.__contains__(row_str): moves = record_dict[row_str] else: moves = np.array([0,0,0,0]) moves[move] = moves[move] + 1 record_dict[row_str] = moves return record_dict # + def get_rot_template(): rot_move_map = {} # Moves: UDLR -> 0123 # Movement: 0->2, 2->1, 1->3, 3->0 rot_move_map[0] = 2 rot_move_map[2] = 1 rot_move_map[1] = 3 rot_move_map[3] = 0 return rot_move_map def augment_data(row, move): rot_move_map = get_rot_template() row_2d = row.reshape(4,4) 
aug_row_arr = [] moves_arr = [] if DATA_AUGMENTATION: for n in range(len(rot_move_map)): row_2d = np.rot90(row_2d, 1) row = np.array(row_2d.flatten()) move = rot_move_map[move] row = np.append(row, move) aug_row_arr.append(row) else: row = np.append(row, move) aug_row_arr.append(row) return aug_row_arr # + record_dict = {} for n_file in range(N_FILES): filename = GAME_STATE_FILE_NAME + str(n_file) + GAME_STATE_FILE_EXT data = load_data(file=filename, direc=GAMES_DIR) data_col = data.columns data = np.float32(data.values) for n_row in range(len(data)): row = data[n_row] move, row = extract_move(row) if CAP_MAX_CELL_VALUE and np.max(row) > (2 ** MAX_CELL_VALUE_THRESHOLD): break record_dict = update_record(record_dict, row, move) polluted_counter = 0 total_counter = 0 for row_str in record_dict.keys(): record = record_dict[row_str] is_polluted = (np.max(record) / np.sum(record)) < 1 total_counter = total_counter + np.sum(record) if is_polluted: polluted_counter = polluted_counter + np.sum(record) print("Pollution ratio: ", (polluted_counter / total_counter)) # + total_moves = 0 dropped_counter = 0 for n_file in range(N_FILES): filename = GAME_STATE_FILE_NAME + str(n_file) + GAME_STATE_FILE_EXT data = load_data(file=filename, direc=GAMES_DIR) data_col = data.columns data = np.float32(data.values) augmented_data = [] for n_row in range(len(data)): row = data[n_row] move, row = extract_move(row) if CAP_MAX_CELL_VALUE and np.max(row) > (2**MAX_CELL_VALUE_THRESHOLD): break row_str = get_stringy_row(row) if record_dict.__contains__(row_str): moves_count = record_dict[row_str] move = np.argmax(moves_count) row = normalize_row(row) aug_row_arr = augment_data(row, move) augmented_data.extend(aug_row_arr) augmented_data = np.array(augmented_data) dropped_counter = dropped_counter + len(data) - len(augmented_data) data = augmented_data total_moves = total_moves + len(data) processed_data = pd.DataFrame(data, columns=data_col) processed_data.to_csv(PROCESSED_GAMES_DIR + 
GAME_STATE_FILE_NAME + str(n_file) + GAME_STATE_FILE_EXT, sep=',', encoding='utf-8', index=False) print("Processing data for game " + str(n_file)) if SHOW_GRAPHS: import matplotlib.pyplot as plt print("Graphs for game", n_file, ":\n") get_ipython().run_line_magic('matplotlib', 'inline') processed_data.hist(bins=50, figsize=(20,15)) plt.show() import matplotlib.pyplot as plt # + total_counter = 0 polluted_counter = 0 for row_str in record_dict.keys(): total_counter = total_counter + 1 record = record_dict[row_str] is_polluted = (np.max(record) / np.sum(record)) < 1 if is_polluted: polluted_counter = polluted_counter + 1 print("Polluted Records: ", record) print("total_moves", total_moves) print("total_counter", total_counter) print("polluted_counter", polluted_counter) print("dropped_counter",dropped_counter) # -
data-processing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Домашнее задание №2 (курс "Практикум по программированию на языке Python") # ### Тема: Объектно-ориентированное программирование на языке Python. # # #### Преподаватель: <NAME> (<EMAIL>) # # **Выдана**: 20 марта 2021 # # **Дедлайн**: 21:00 04 апреля 2021 # # **Среда выполнения**: Jupyter Notebook (Python 3.7) # # #### Правила: # # Результат выполнения задания - Jupyter Notebook с кодом и подробными ответами в случае теоретических вопросов. __Максимальное число баллов за задание - 20__. # # Все ячейки должны быть "выполненными", при этом результат должен воспроизводиться при проверке (на Python 3.7). Если какой-то код не был запущен или отрабатывает с ошибками, то пункт не засчитывается. Задание, сданное после дедлайна, _не принимается_. Можно отправить недоделанное задание, выполненные пункты будут оценены. # # Готовое задание отправляется на почту преподавателя. # # Задание выполняется самостоятельно. Если какие-то студенты будут уличены в списывании, все они автоматически получат за эту работу 0 баллов. Если вы нашли в Интернете какой-то специфичный код, который собираетесь заимствовать, обязательно укажите это в задании - наверняка вы не единственный, кто найдёт и использует эту информацию. # # Удалять фрагменты формулировок заданий запрещается. # #### Постановка задачи: # # - В данной работе нужно # - ответить на ряд теоретических вопросов; # - решить набор задач, проверяющих владение ООП-инструментами языка; # - решить задачу на проектирование кода. # - Ответы на теоретические вопросы должны быть полными и обоснованными. # - Каждая задача представляет собой написание функции или класса, а также набора тестов, проверяющих работу решения в общих и крайних случаях. 
# - Отсутствие тестов автоматически уменьшает количество баллов за задание как минимум в два раза, некачественные тесты также будут штрафоваться. # - Даже если это не указано явно в требованиях, код должен быть по возможности неизбыточным, работать с разумной сложностью и объёмом потребялемой памяти, проверяющие могут снизить балл за задание, выполненное без учёта этого требования. # - Результирующий код должен быть читаемым, с единой системой отступов и адеквантными названиями переменных, проверяющие могут снизить балл за задание, выполненное без учёта этого требования. # __Задание 1 (2 балла):__ Дайте подробные ответы на следующие вопросы: # # 1. В чём смысл инкапсуляции? Приведите пример конкретной ситуации в коде, в которой нарушение инкапсуляции приводит к проблемам. # 2. Какой метод называется статическим? Что такое параметр `self`? # 3. В чём отличия методов `__new__` и `__init__`? # 4. Какие виды отношений классов вы знаете? Для каждого приведите примеры. Укажите взаимные различия. # 5. Зачем нужны фабрики? Опишите смысл использования фабричного метода, фабрики и абстрактной фабрики, а также их взаимные отличия. # __Задание 2 (1 балл):__ Опишите класс комплексных чисел. У пользователя должна быть возможность создать его объект на основе числа и в алгебраической форме, и в полярной. Класс должен поддерживать основные математические операции (+, -, \*, /) за счет перегрузки соответствующих магических методов. Также он должен поддерживать возможность получить число в алгебраической и полярной форме. Допускается использование модуля `math`. class ComplexNumber: pass # __Задание 3 (2 балла):__ Опишите класс для векторов в N-мерном пространстве. В качестве основы используйте список значений координат вектора, задаваемый `list`. Обеспечьте поддержку следующих операций: сложение, вычитание (с созданием нового вектора-результата), скалярное произведение, косинус угла, евклидова норма. 
Все операции, которые можно перегрузить с помощью магических методов, должны быть реализованы именно через них. Класс должен производить проверку консистентности аргументов для каждой операции и в случаях ошибок выбрасывать исключение `ValueError` с исчерпывающим объяснением ошибки. class Vector: def __init__(self, vector_values_list): pass # __Задание 4 (2 балл):__ Опишите декоратор, который принимает на вход функцию и при каждом её вызове печатает строку "This function was called N times", где N - число раз, которое это функция была вызвана на текущий момент (пока функция существует как объект, это число, очевидно, может только неубывать). def calls_counter(func): pass # __Задание 5 (3 балла):__ Опишите декоратор класса, который принимает на вход другой класс и снабжает декорируемый класс всеми атрибутами входного класса, названия которых НЕ начинаются с "\_". В случае конфликтов имён импортируемый атрибут должен получить имя с суффиксом "\_new". def copy_class_attrs(cls): pass # __Задание 6 (5 баллов):__ Опишите класс для хранения двумерных числовых матриц на основе списков. Реализуйте поддержку индексирования, итерирования по столбцам и строкам, по-элементные математические операции (с помощью магических методов), операцию умножения матрицы (как метод `dot` класса), транспонирование, поиска следа матрицы, а также поиск значения её определителя, если он существует, в противном случае соответствующий метод должен выводить сообщение об ошибке и возвращать `None`. # # Матрицу должно быть возможным создать из списка (в этом случае у неё будет одна строка), списка списков, или же передав явно три числа: число строк, число столбцов и значение по-умолчанию (которое можно не задавать, в этом случае оно принимается равным нулю). Все операции должны проверять корректность входных данных и выбрасывать исключение с информативным сообщением в случае ошибки. 
# # Матрица должна поддерживать методы сохранения на диск в текстовом и бинарном файле и методы обратной загрузки с диска для обоих вариантов. Также она должна поддерживать метод полного копирования. Обе процедуры должны быть реализованы с помощью шаблона "примесь" (Mixin), т.е. указанные функциональности должны быть описаны в специализированных классах. # # В реализации математических операций запрещается пользоваться любыми функциями, требующими использования оператора `import`. class Matrix: pass # __Задание 7 (5 баллов):__ Ставится задача расчета стоимости чашки кофе. Опишите классы нескольких типов кофе (латте, капучино, американо), а также классы добавок к кофе (сахар, сливки, кардамон, пенка, сироп). Используйте шаблон "декоратор". Каждый класс должен характеризоваться методом вычисления стоимости чашки `calculate_cost`. Пользователь должен иметь возможность комбинировать любое число добавок с выбранным кофе и получить на выходе общую стоимость: # # ``` # cream(sugar(latte())).calculate_cost() # ``` # # Первым элементом чашки всегда должен быть сам кофе, а не добавка, в противном случае при попытке создания чашки должно выбрасываться исключение: # # ``` # cream(latte(sugar())).calculate_cost() -> exception # ``` # # Кофе может встречаться в чашке только один раз, в противном случае при попытке создания чашки должно выбрасываться исключение: # # ``` # cappuccino(sugar(latte())).calculate_cost() -> exception # ``` # # Добавки могут включаться в чашку в любом количестве и порядке. # Добавление новых типов кофе и добавок не должно требовать изменения существующего кода.
tasks/02-design.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h3>Team:<br> <NAME>, <br>Vikrant</h3>

# # Code for Article Download from NYT

# #### A. Import Required Libraries

import requests
from bs4 import BeautifulSoup
import time
import json
import re
import os

# #### B. Define Required Functions to:
# ##### 1. Search NYT for given Query, API Key and Date Range
# ##### 2. Download Articles and Save them at Disk (~/Data/NYT)


def getAPIkey(file='./data/nyt_api.key'):
    """
    Get New York Times API key from a file.

    Parameters
    ----------
    file : str
        Full designated path of the key file.

    Returns
    -------
    str or None
        String of API key, or None if the file could not be read.
    """
    try:
        with open(file) as fp:
            # strip() drops the trailing newline most editors append
            return fp.read().strip()
    except Exception as e:
        # best-effort: report the problem and fall through to implicit None
        print(e)


def searchNYTimes(api_key='', query='', fq='', fields='', sort='',
                  begin_date='YYYYMMDD', end_date='YYYYMMDD', page=-1):
    """
    Search New York Times for articles through the Article Search API.

    Parameters
    ----------
    api_key : str
        NYTimes API key string.
    query : str
        Search query term. Search is performed on the article body,
        headline and byline.
    fq : str
        Filtered search query using standard Lucene syntax.
    fields : str
        Comma-delimited list of fields (no limit) that limits the fields
        returned in the search results.
    sort : str
        'newest' or 'oldest'; by default results are sorted by relevance
        to the query term.
    begin_date, end_date : str
        Format YYYYMMDD. Restrict responses to results with publication
        dates on/after (begin) or on/before (end) the given date. The
        literal 'YYYYMMDD' means "not set".
    page : int
        Zero-based page of 10 results (page=0 -> records 0-9, page=1 ->
        records 10-19). Negative means "not set".

    Returns
    -------
    dict or None
        Dictionary representation of the JSON response, or None on error.
    """
    # hardcoded link to article search api json object
    api_search_url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'

    if len(query) < 1:
        print('Query string is empty')

    fl_items = ['web_url', 'snippet', 'lead_paragraph', 'abstract', 'print_page',
                'blog', 'source', 'multimedia', 'headline', 'keywords', 'pub_date',
                'document_type', 'news_desk', 'byline', 'type_of_material', '_id',
                'word_count']

    search_param = {'api-key': api_key, 'q': query}

    if len(fq) > 0:
        search_param['fq'] = fq

    if len(fields) > 0:
        # BUG FIX: set(fields) on a comma-delimited *string* produced a set of
        # single characters, so the issubset() validation never checked field
        # names. Split on commas to validate the actual names.
        if set(f.strip() for f in fields.split(',')).issubset(fl_items):
            search_param['fl'] = fields
        else:
            print('Enter valid field values')
            return None

    if len(sort) > 0:
        # BUG FIX: original used the bitwise operator (sort == 'newest' |
        # sort == 'oldest'), which raises TypeError for strings; use `or`.
        if sort == 'newest' or sort == 'oldest':
            search_param['sort'] = sort

    if begin_date != 'YYYYMMDD':
        # BUG FIX: `&` binds tighter than comparisons, so the original checks
        # were meaningless; also the day slice was [6:9] instead of [6:8]
        # for a YYYYMMDD string.
        if 0 < int(begin_date[4:6]) <= 12 and 0 < int(begin_date[6:8]) <= 31:
            search_param['begin_date'] = begin_date

    if end_date != 'YYYYMMDD':
        # BUG FIX: the original validated begin_date here instead of end_date.
        if 0 < int(end_date[4:6]) <= 12 and 0 < int(end_date[6:8]) <= 31:
            search_param['end_date'] = end_date

    if page >= 0:
        # print('page is {}'.format(page))
        search_param['page'] = page

    # BUG FIX: response_json was unbound (NameError at the check below) when
    # the request raised; initialise it so the trailing check is safe.
    response_json = None
    try:
        # print('search params: {}'.format(search_param))
        resp = requests.get(url=api_search_url, params=search_param)
        # print(resp.text)
        # print(resp.status_code)
        response_json = resp.json()
        resp.close()
    except Exception as e:
        print(e)

    if response_json is not None:
        return response_json

# + class
NYTapiResponseWrapper: """Python Wrapper class for the json object returned by NYtimes API""" def __init__(self, response_json = {}): """ Constructor of Wrapper class Parameters ---------- response_json : dict response json object from API """ if len(response_json.keys()) > 0: self.status = response_json['status'] self.copyright = response_json['copyright'] self._response = response_json['response'] self._parseResponse(self._response) def parseJSON(self, response_json = {} ): """ Parser function to parse json object if Wrapper is not initialized with a json Parameters ---------- response_json : dict response json object from API """ self.status = response_json['status'] self.copyright = response_json['copyright'] self._response = response_json['response'] self._parseResponse(self._response) def _parseResponse(self, response): self._docs = response['docs'] self._meta = response['meta'] self._parseDocs(self._docs) def _parseDocs(self, docs): self.docs = [] i = 0 for doc_item in docs: # print(i) i += 1 self.docs.append(Doc(doc = doc_item)) # - class Doc: def __init__(self, doc = {}): self._id = doc['_id'] self.blog = doc['blog'] self.document_type = doc['document_type'] self.headline = doc['headline'] self.keywords = doc['keywords'] self.multimedia = doc['multimedia'] self.score = doc['score'] self.snippet = doc['snippet'] self.type_of_material = doc['type_of_material'] self.web_url = doc['web_url'] self.word_count = doc['word_count'] # + def getPageByURL(URL = ''): try: resp = requests.get(url=URL) soup = BeautifulSoup(resp.text, 'html.parser') resp.close() return soup except Exception as e: print(e) # - def saveArticleText(headline, textParasSoup, filename): try: with open(filename, 'w') as fp: fp.write(headline) for para in textParasSoup: fp.write(para.text) except Exception as e: print(e) # + def getArticlesInMass(api_key='', query='', fq='', fields='', sort='', begin_date='YYYYMMDD', end_date='YYYYMMDD', page_count = 1, write_to_file=False, 
filename='./data/Articles/ArticleList.json'): """ Get multiple pageset of articles instead of 1 Parameters ---------- page_count : int number of pageset giving 10 articles for each count, i.e. page_count of i will give i*10 articles write_to_file : boolean Write obtained article list to a file filename : str Name of file if write_to_file is True Remainig params are same as searchNYTimes function Returns ------- list list of all the articles obtained from API """ article_list = [] for page in range(0,page_count): resp = searchNYTimes(api_key=api_key, query=query, fq=fq, fields=fields,sort=sort, begin_date=begin_date,end_date=end_date, page=page) resp_ob = NYTapiResponseWrapper(resp) if len(resp_ob.docs) <= 0 : break # print(resp_ob._meta['offset']) for doc_item in resp_ob.docs: article = {'id':doc_item._id,'headline':doc_item.headline['main'], 'url':doc_item.web_url, 'downloaded':'N'} article_list.append(article) time.sleep(1) if write_to_file: with open(filename, 'w') as file: json.dump(article_list, file) return article_list # - def groupByCategories(article_list=[]): """ Group articles into article categories Category is obtained from the http link in below format: 1(https://www.nytimes.com/2018/05/04/) 2(movies) 3(/sandra-bullock-mindy-kaling-oceans-8.html) 2 gives the category Parameters ---------- article_list : list List of Articles Returns ------- dict Dictionary of articles where key is category and value is list of articles """ groupedArticles = {} for article in article_list: url = article['url'] print(url) # split the url into 3 parts # eg. 
1(https://www.nytimes.com/2018/05/04/) 2(movies) 3(/sandra-bullock-mindy-kaling-oceans-8.html) reg_ex = "([a-zA-Z0-9\.\-_/:]*/[0-9]{4}/[0-9]{2}/[0-9]{2}/)([a-zA-Z0-9]+)(/[a-zA-Z\-\.]*)" if len(re.split(reg_ex, url)) < 3: continue category = re.split(reg_ex, url)[2] if not category in groupedArticles: groupedArticles[category] = [article] else: groupedArticles[category].append(article) return groupedArticles def getArticleListByCategory(category='business', api_key='', begin_date='YYYYMMDD', end_date='YYYYMMDD', page_count = 0): """ Returns article of a specific category """ return getArticlesInMass(api_key=api_key, fq='web_url:*'+category+'*',page_count=page_count) # + def downloadAllArticles(article_list, grouped=False, parentDirectory='./data/NYT-articles/'): """ Download all the articles in the List Parameters ---------- article_list : list/dict A list or dict of articles grouped : boolean if grouped is True a dictionary of grouped articles is expceted in article_list else a list of articles parentDirectory : str Path of parent directory where articles files are saved by article id file name """ if len(article_list) <1: return None if not grouped: if type(article_list) == list: total_articles_written = _downloadArticles(article_list, directory = parentDirectory) else: print('Error: List Expected '+type(article_list).__name__+' found') return None else: if type(article_list) == dict: total_articles_written = 0 for category in article_list.keys(): articles_written = _downloadArticles(article_list[category], directory=parentDirectory + category+'/') total_articles_written += articles_written else: print('Error: Dict expected '+type(article_list).__name__+' found') return None return total_articles_written # - def _downloadArticles(article_list=[], directory='./'): if len(article_list) <1: return None if not os.path.exists(directory): os.makedirs(directory) articles_written = 0 for article in article_list: article_soup = getPageByURL(URL = article['url']) # 
print(article['id'], article['url']) paras = article_soup.find_all('p') article_text = '' for para in paras: if 'class' in para.attrs: p_class = ' '.join(para.attrs['class']) if 'css-1' in p_class and ' e2' in p_class or 'story' in p_class: article_text += para.text +'\n' try: with open(directory + article['id'], 'w', encoding='utf-8') as file: file.write(article['headline']+ '\n') file.write(article_text) time.sleep(1) articles_written += 1 except Exception as e: print(e) finally: # future: write the article list to file once done or occurance of an excecption pass return articles_written # #### C. Download articles for categories "Business", "Sports", "Politics" and "Art" # + #article_business = getArticleListByCategory(category='business', api_key='<KEY>', begin_date='20180101', # end_date='20180505', page_count = 101) business = getArticleListByCategory(category='business', api_key='e4dc65f5fd794792895d12c9554efe04', begin_date='20180101', end_date='20180509', page_count = 10) art = getArticleListByCategory(category='art', api_key='e4dc65f5fd794792895d12c9554efe04', begin_date='20180101', end_date='20180509', page_count = 10) # - politics = getArticleListByCategory(category='politics', api_key='e4dc65f5fd794792895d12c9554efe04', begin_date='20180101', end_date='20180509', page_count = 10) sports = getArticleListByCategory(category='sports', api_key='e4dc65f5fd794792895d12c9554efe04', begin_date='20180101', end_date='20180509', page_count = 10) artCatFiles = downloadAllArticles(musicGrp, grouped=False, parentDirectory='./data/NYT-articles/')musicCatFiles = downloadAllArticles(musicGrp, grouped=True, parentDirectory='./data/NYT-articles/') businessCatFiles = downloadAllArticles(businessGrp, grouped=False, parentDirectory='./data/NYT-articles/') sportsCatFiles = downloadAllArticles(sportsGrp, grouped=False, parentDirectory='./data/NYT-articles/') politicsCatFiles = downloadAllArticles(politics, grouped=False, parentDirectory='./data/NYT-articles/') # #### D. 
Follow up code: # ##### Text Classification Using PySpark.ipynb
DataDownloadNYT.ipynb
# # MNIST-Neural Network-Two Hidden Layers with Variable Optimizers
forward self.loss(x, t) # backward din = 1 din = self.lastLayer.backward(din) layers = list(self.layers.values()) layers.reverse() for layer in layers: din = layer.backward(din) grads = {} grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db grads['W3'], grads['b3'] = self.layers['Affine3'].dW, self.layers['Affine3'].db return grads def learning(self, learning_rate, x_batch, t_batch, optimizer): grads = self.backpropagation_gradient(x_batch, t_batch) optimizer.update(self.params, grads) # - # ## Learning and Validation # + data = mnist_data("/Users/yhhan/git/aiclass/0.Professor/data/MNIST_data/.") (img_train, label_train), (img_validation, label_validation), (img_test, label_test) = data.load_mnist(flatten=True, normalize=True, one_hot_label=True) optimizers = {} optimizers['SGD'] = SGD() optimizers['Momentum'] = Momentum() optimizers['Nesterov'] = Nesterov() optimizers['AdaGrad'] = AdaGrad() optimizers['RMSprop'] = RMSprop() optimizers['Adam'] = Adam() num_epochs = 50 train_size = img_train.shape[0] batch_size = 1000 learning_rate = 0.1 networks = {} train_errors = {} validation_errors = {} test_accuracy_values = {} max_test_accuracy_epoch = {} max_test_accuracy_value = {} for key in optimizers.keys(): networks[key] = TwoLayerNet(input_size=784, hidden_layer1_size=128, hidden_layer2_size=128, output_size=10) train_errors[key] = [] validation_errors[key] = [] test_accuracy_values[key] = [] max_test_accuracy_epoch[key] = 0 max_test_accuracy_value[key] = 0.0 num_batch = math.ceil(train_size / batch_size) epoch_list = [] for i in range(num_epochs): epoch_list.append(i) for key in optimizers.keys(): for k in range(num_batch): x_batch = img_train[k * batch_size : k * batch_size + batch_size] t_batch = label_train[k * batch_size : k * batch_size + batch_size] networks[key].learning(learning_rate, x_batch, t_batch, optimizers[key]) train_loss = 
networks[key].loss(x_batch, t_batch) train_errors[key].append(train_loss) validation_loss = networks[key].loss(img_validation, label_validation) validation_errors[key].append(validation_loss) test_accuracy = networks[key].accuracy(img_test, label_test) test_accuracy_values[key].append(test_accuracy) if test_accuracy > max_test_accuracy_value[key]: max_test_accuracy_epoch[key] = i max_test_accuracy_value[key] = test_accuracy print("{0:8s}-Epoch:{1:3d}, Train Err.:{2:7.5f}, Validation Err.:{3:7.5f}, Test Accuracy:{4:7.5f}, Max Test Accuracy:{5:7.5f}".format( key, i, train_loss, validation_loss, test_accuracy, max_test_accuracy_value[key] )) print() # + markers = {"SGD": "o", "Momentum": "x", "Nesterov": "s", "AdaGrad": "o", "RMSprop": "x", "Adam": "s"} f, axarr = plt.subplots(2, 2, figsize=(15,10)) for key in optimizers.keys(): axarr[0, 0].plot(epoch_list[1:], train_errors[key][1:], marker=markers[key], markevery=2, label=key) axarr[0, 0].set_ylabel('Train - Total Error') axarr[0, 0].set_xlabel('Epochs') axarr[0, 0].grid(True) axarr[0, 0].set_title('Train Error') axarr[0, 0].legend(loc='upper left') for key in optimizers.keys(): axarr[0, 1].plot(epoch_list[1:], validation_errors[key][1:], marker=markers[key], markevery=2, label=key) axarr[0, 1].set_ylabel('Validation - Total Error') axarr[0, 1].set_xlabel('Epochs') axarr[0, 1].grid(True) axarr[0, 1].set_title('Validation Error') axarr[0, 1].legend(loc='upper left') for key in optimizers.keys(): axarr[1, 0].plot(epoch_list[1:], train_errors[key][1:], marker=markers[key], markevery=2, label=key) axarr[1, 0].set_ylabel('Train - Total Error') axarr[1, 0].set_xlabel('Epochs') axarr[1, 0].grid(True) axarr[1, 0].set_ylim(0, 0.3) axarr[1, 0].set_title('Train Error (0.00 ~ 0.30)') axarr[1, 0].legend(loc='upper left') for key in optimizers.keys(): axarr[1, 1].plot(epoch_list[1:], validation_errors[key][1:], marker=markers[key], markevery=2, label=key) axarr[1, 1].set_ylabel('Validation - Total Error') axarr[1, 
1].set_xlabel('Epochs') axarr[1, 1].grid(True) axarr[1, 1].set_ylim(0, 0.3) axarr[1, 1].set_title('Validation Error (0.00 ~ 0.30)') axarr[1, 1].legend(loc='upper left') f.subplots_adjust(hspace=0.3) plt.show() # + f, axarr = plt.subplots(2, 1, figsize=(15,10)) for key in optimizers.keys(): axarr[0].plot(epoch_list[1:], test_accuracy_values[key][1:], marker=markers[key], markevery=1, label=key) axarr[0].set_ylabel('Test Accuracy') axarr[0].set_xlabel('Epochs') axarr[0].grid(True) axarr[0].set_title('Test Accuracy') axarr[0].legend(loc='upper left') for key in optimizers.keys(): axarr[1].plot(epoch_list[1:], test_accuracy_values[key][1:], marker=markers[key], markevery=1, label=key) axarr[1].set_ylabel('Test Accuracy') axarr[1].set_xlabel('Epochs') axarr[1].grid(True) axarr[1].set_ylim(0.9, 1.0) axarr[1].set_title('Test Accuracy (0.9 ~ 1.0)') axarr[1].legend(loc='upper left') f.subplots_adjust(hspace=0.3) plt.show() # - for key in optimizers.keys(): print("{0:26s} - Epoch:{1:3d}, Max Test Accuracy: {2:7.5f}".format(key, max_test_accuracy_epoch[key], max_test_accuracy_value[key]))
1.DeepLearning/03.Optimizers/mnist_two_hidden_layers_optimizers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/martin-fabbri/colab-notebooks/blob/master/causal-inference/dowhy_conditional_treatment_effects_01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="y7r2UZ4tOpP8" # # Conditional Average Treatment Effect (CATE) # # This notebook leverages EconML's CATE estimation using different methods. # # We will follow the typical doWhy workflow: model, identify, estimate, and refute. # + [markdown] colab_type="text" id="0kc_hFKpOoIJ" # ## Setup Dependencies # + colab={"base_uri": "https://localhost:8080/", "height": 356} colab_type="code" id="RRpIyhddNSYH" outputId="960ce220-f373-463d-b805-36e2406ada45" # # !sudo apt install graphviz libgraphviz-dev graphviz-dev pkg-config -y -q # # !pip install -q pygraphviz --install-option="--include-path=/usr/include/graphviz" \ # # --install-option="--library-path=/usr/lib/graphviz/" # # !pip install -q dowhy # + colab={} colab_type="code" id="IVCkLoOsNioS" import numpy as np import pandas as pd import logging import dowhy import dowhy.datasets from IPython.display import Image from IPython.display import display from dowhy import CausalModel import econml import warnings warnings.filterwarnings('ignore') print(dowhy.__version__) # + colab={} colab_type="code" id="jwd2mm1_NilY" data = dowhy.datasets.linear_dataset(10, num_common_causes=4, num_samples=10000, num_instruments=2, num_effect_modifiers=2, num_treatments=1, treatment_is_binary=False) df = data['df'] df.head() # + colab={} colab_type="code" id="rtFwEfD5NiiN" model = CausalModel(data=data['df'], treatment=data['treatment_name'], outcome=data['outcome_name'], 
graph=data['gml_graph']) # + colab={} colab_type="code" id="ZdW8dVdWNifL" model.view_model() display(Image(filename='causal_model.png')) # + colab={} colab_type="code" id="ZnoNMZBBNicS" identified_estimand = model.identify_effect() print(identified_estimand) # + colab={} colab_type="code" id="s0pMIU37NiZb" linear_estimate = model.estimate_effect(identified_estimand, method_name='backdoor.linear_regression', control_value=0, treatment_value=1) print(linear_estimate) # + from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LassoCV from sklearn.ensemble import GradientBoostingRegressor dml_estimate = model.estimate_effect(identified_estimand, method_name="backdoor.econml.dml.DMLCateEstimator", control_value = 0, treatment_value = 1, target_units = lambda df: df["X0"]>1, # condition used for CATE confidence_intervals=False, method_params={"init_params":{'model_y':GradientBoostingRegressor(), 'model_t': GradientBoostingRegressor(), "model_final":LassoCV(), 'featurizer':PolynomialFeatures(degree=1, include_bias=True)}, "fit_params":{}}) print(dml_estimate) # - print("True causal estimate is", data["ate"]) dml_estimate = model.estimate_effect(identified_estimand, method_name="backdoor.econml.dml.DMLCateEstimator", control_value = 0, treatment_value = 1, target_units = 1, # condition used for CATE confidence_intervals=False, method_params={"init_params":{'model_y':GradientBoostingRegressor(), 'model_t': GradientBoostingRegressor(), "model_final":LassoCV(), 'featurizer':PolynomialFeatures(degree=1, include_bias=True)}, "fit_params":{}}) print(dml_estimate) # + from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LassoCV from sklearn.ensemble import GradientBoostingRegressor dml_estimate = model.estimate_effect(identified_estimand, method_name="backdoor.econml.dml.DMLCateEstimator", target_units = lambda df: df["X0"]>1, confidence_intervals=True, 
method_params={"init_params":{'model_y':GradientBoostingRegressor(), 'model_t': GradientBoostingRegressor(), "model_final":LassoCV(), 'featurizer':PolynomialFeatures(degree=1, include_bias=True)}, "fit_params":{ 'inference': 'bootstrap', } }) print(dml_estimate) print(dml_estimate.cate_estimates[:10]) print(dml_estimate.effect_intervals) # - test_cols= data['effect_modifier_names'] # only need effect modifiers' values test_arr = [np.random.uniform(0,1, 10) for _ in range(len(test_cols))] # all variables are sampled uniformly, sample of 10 test_df = pd.DataFrame(np.array(test_arr).transpose(), columns=test_cols) dml_estimate = model.estimate_effect(identified_estimand, method_name="backdoor.econml.dml.DMLCateEstimator", target_units = test_df, confidence_intervals=False, method_params={"init_params":{'model_y':GradientBoostingRegressor(), 'model_t': GradientBoostingRegressor(), "model_final":LassoCV(), 'featurizer':PolynomialFeatures(degree=1, include_bias=True)}, "fit_params":{} }) print(dml_estimate.cate_estimates) print(dml_estimate._estimator_object) dml_estimate
causal-inference/dowhy_conditional_treatment_effects_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## # Sliding Window # # Feb 2019 # # # import matplotlib.patches as patches # import seaborn as sns # import copy # import torchvision # from IPython.display import display # to display images # from PIL import Image, ImageDraw # import numpy as np # # import torch # import torch.nn as nn # from skimage import io # import math # # from torch.utils.data import Dataset, DataLoader # import torch.nn.functional as F # # import cv2 # import matplotlib.pyplot as plt # # import time # import os # # from IPython.display import Audio # # Running main! # + import matplotlib.patches as patches import seaborn as sns import copy import torchvision from PIL import Image, ImageDraw import numpy as np import torch import torch.nn as nn from skimage import io import math from torch.utils.data import Dataset, DataLoader import torch.nn.functional as F import cv2 import matplotlib.pyplot as plt import time import os # from IPython.display import Audio # from IPython.display import display # to display images # + # Make dataset ------------------------------------------------------- IMG_X, IMG_Y = 200, 200 # length and width of blocks (fixed for now) block_l, block_w = 20, 30 def makeRectangle(l, w, theta, offset=(0, 0)): c, s = math.cos(theta), math.sin(theta) rectCoords = [(l / 2.0, w / 2.0), (l / 2.0, -w / 2.0), (-l / 2.0, -w / 2.0), (-l / 2.0, w / 2.0), ] return [(c * x - s * y + offset[0], s * x + c * y + offset[1]) for (x, y) in rectCoords] # ---- Make depth images --- def make_dataset(dirname, num_images): true_coords = [] newpath = "./" + dirname if not os.path.exists(newpath): os.makedirs(newpath) print(newpath) for i in range(num_images): # orient = 0 # degrees img = Image.new("RGB", (IMG_X, IMG_Y), "black") # block_l and _w offset so blocks don't run off 
edge of image rand_x = int(np.random.rand() * (IMG_X - 2 * block_l)) + block_l rand_y = int(np.random.rand() * (IMG_Y - 2 * block_w)) + block_w orient = int(np.random.rand() * 180) # .random() is range [0.0, 1.0). orient = math.radians(orient) # math.cos takes radians! # switch to degrees so that we normalize between the x,y,orient for MSELoss true_coords.append(np.array((rand_x, rand_y, orient))) rect_vertices = makeRectangle( block_l, block_w, orient, offset=(rand_x, rand_y)) idraw = ImageDraw.Draw(img) idraw.polygon(rect_vertices, fill="white") img.save(newpath + "/rect" + str(i) + ".png") return true_coords # NOTE Define size of dataset train_truth = make_dataset("data", 5000) # print(len(train_truth)) test_truth = make_dataset("./data/test", 300) np.save("train_truth.npy", train_truth) np.save("test_truth.npy", test_truth) train_truth = np.load("train_truth.npy") test_truth = np.load("test_truth.npy") # loading the training and testing data # - # Define Dataloader ------------------------------------------------------- class RectDepthImgsDataset(Dataset): """Artificially generated depth images dataset""" def __init__(self, img_dir, coords, transform=None): """ """ self.img_dir = img_dir self.true_coords = coords self.transform = transform STEPSIZE = 50 # todo make this input arguments WINDOWSIZE = (100, 100) MARGIN_PX = 10 self.step = STEPSIZE self.cropSize = WINDOWSIZE self.detectMargin = MARGIN_PX def __len__(self): # print('true coord len', len(self.true_coords)) return len(self.true_coords) def __getitem__(self, idx): # image = self.images[idx] image = io.imread(self.img_dir + "/rect" + str(idx) + ".png", as_gray=True) # image = torch.FloatTensor(image).permute(2, 0, 1) # PIL and torch expect difft orders coords = torch.FloatTensor(self.true_coords[idx]) if self.transform: image = self.transform(image) crops, labels, cropCoords = self.makeCrops( image, self.step, self.cropSize, coords, self.detectMargin) sample = image, torch.FloatTensor( labels), 
torch.FloatTensor(cropCoords) return sample def makeCrops(self, image, stepSize, windowSize, rectCenter, detectMargin): """ Returns image crops, as well as T/F for those crops """ crops = [] c_x, c_y, theta = rectCenter margin = detectMargin hasRects = [] rectCoords = [] for y in range(0, image.shape[0] - windowSize[0] + 1, stepSize): for x in range(0, image.shape[1] - windowSize[1] + 1, stepSize): end_x, end_y = x + windowSize[1], y + windowSize[0] hasRect = (x + margin < c_x < end_x - margin) and ( y + margin < c_y < end_y - margin) crops.append(image[y:end_y, x:end_x]) hasRects.append(hasRect) if hasRect: rectCoords.append((c_x, c_y, theta)) else: # NOTE: Return empty label, when not hasRect rectCoords.append((0, 0, 0)) # print('length of truths in makeCrops', len(truths)) return crops, hasRects, rectCoords # + # --- Define Nets ------------------------------------------------------- class regrNet(nn.Module): def __init__(self, cropSize, numOutputs): """ We need the image width and height to determine CNN layer sizes """ super(regrNet, self).__init__() _pool = 2 _stride = 5 _outputlayers = 16 STEPSIZE = 50 # todo make this input arguments self.step = STEPSIZE self.cropSize = cropSize self.numOutputs = numOutputs def _calc(val): # use to calculate layer sizes layer_size = (val - (_stride - 1)) / _pool return layer_size self._const = _calc(_calc(self.cropSize[0])) self._const *= _calc(_calc(self.cropSize[1])) self._const *= _outputlayers self._const = int(self._const) # --- LOCATION OF RECTANGLE # NOTE: only one channel for now (black/white) self.conv1 = nn.Conv2d(1, 6, _stride).to(device) self.pool = nn.MaxPool2d(_pool, _pool).to(device) self.conv2 = nn.Conv2d(6, _outputlayers, _stride).to(device) self.fc1 = nn.Linear(self._const, 120).to(device) self.fc2 = nn.Linear(120, 84).to(device) self.fc3 = nn.Linear(84, self.numOutputs).to(device) def forward(self, crops): """ Forward propogation : param image: images, a tensor of dimensions(N, 3, IMG_X, IMG_Y) : return: 
class classifNet(nn.Module):
    """Sliding-window classifier: for each fixed window ("crop") of the input
    image, predict whether that window contains the rectangle.

    The image is cut into ``numCrops`` overlapping windows (step 50 px,
    window 100x100 px); the stack of windows is treated as the channel
    dimension of a small conv net that outputs one sigmoid score per window.
    """

    # CIFAR is 32x32x3, MNIST is 28x28x1)
    def __init__(self, IMG_X, IMG_Y):
        """We need the image width and height to determine CNN layer sizes.

        :param IMG_X: input image width in pixels
        :param IMG_Y: input image height in pixels
        """
        super(classifNet, self).__init__()
        self._imgx = IMG_X
        self._imgy = IMG_Y
        _pool = 2
        _stride = 5  # conv kernel size (not the conv stride)
        _outputlayers = 16
        STEPSIZE = 50  # todo make this input arguments
        WINDOWSIZE = (100, 100)
        self.step = STEPSIZE
        self.cropSize = WINDOWSIZE
        self.numCrops = 0  # T/F for now
        # calculate number of crops that fit in the image
        for x in range(0, IMG_Y - WINDOWSIZE[0] + 1, STEPSIZE):
            for y in range(0, IMG_X - WINDOWSIZE[1] + 1, STEPSIZE):
                self.numCrops += 1

        def _calc(val):
            # spatial size after one conv (kernel=_stride, stride 1) + one pool
            layer_size = (val - (_stride - 1)) / _pool
            return layer_size

        # flattened feature size after two conv+pool stages
        self._const = _calc(_calc(self.cropSize[0]))
        self._const *= _calc(_calc(self.cropSize[1]))
        self._const *= _outputlayers
        self._const = int(self._const)

        # --- CLASSIFICATION OF WINDOWS
        # The stack of crops is fed as input channels (one channel per crop).
        self.conv1 = nn.Conv2d(self.numCrops, 6, _stride).to(device)
        self.pool = nn.MaxPool2d(_pool, _pool).to(device)
        self.conv2 = nn.Conv2d(6, _outputlayers, _stride).to(device)
        self.fc1 = nn.Linear(self._const, 120).to(device)
        self.fc2 = nn.Linear(120, 84).to(device)
        self.fc3 = nn.Linear(84, self.numCrops).to(device)
        self.sigmoid = nn.Sigmoid()
        # TODO: batch normalization (layer is created but not yet used in forward)
        # BUG FIX: nn.BatchNorm2d requires num_features; calling it with no
        # arguments raised TypeError as soon as classifNet was constructed.
        self.bn = nn.BatchNorm2d(_outputlayers)

    def forward(self, x):
        """Forward propagation.

        :param x: batch of single-channel images — assumed shape
            (N, IMG_Y, IMG_X) from makeCrops' 2-D indexing; TODO confirm
        :return: (per-window sigmoid scores, stacked crops, crop origins)
        """
        # TODO: presumably by doing this i lose some of the multithread goodness
        x = x.to(device)
        batch_images = x
        all_crops = []
        for img in batch_images:
            crops, cropCoords = self.makeCrops(img, self.step, self.cropSize)
            all_crops.append(crops)
        all_crops = torch.stack(all_crops)
        # one channel per window: (N, numCrops, win_h, win_w)
        feats = all_crops.view(-1, self.numCrops,
                               self.cropSize[0], self.cropSize[1]).to(device)
        # CLASSIFICATION of the windows
        c_crops = self.pool(F.relu((self.conv1(feats))))
        c_crops = self.pool(F.relu(self.conv2(c_crops)))
        c_crops = c_crops.view(-1, self._const)
        c_crops = F.relu(self.fc1(c_crops))
        c_crops = F.relu(self.fc2(c_crops))
        c_crops = self.fc3(c_crops)
        c_crops = self.sigmoid(c_crops)
        containsObj = c_crops
        # NOTE(review): cropCoords comes from the *last* image of the batch;
        # the window grid is identical for every image, so this is safe.
        return containsObj, all_crops, cropCoords

    def makeCrops(self, image, stepSize, windowSize):
        """Cut one image into a regular grid of windows.

        :return: (stacked crops, tensor of (y, x) top-left corners)
        """
        image = image.type(torch.FloatTensor).to(device)
        crops = []
        cropCoords = []
        # TODO: look into ordering, why it's y,x !
        # Row-major scan: y (rows) is the outer loop, x (columns) the inner.
        for y in range(0, image.shape[0] - windowSize[0] + 1, stepSize):
            for x in range(0, image.shape[1] - windowSize[1] + 1, stepSize):
                end_x, end_y = x + windowSize[1], y + windowSize[0]
                crops.append(image[y:end_y, x:end_x])
                cropCoords.append(torch.FloatTensor((y, x)))
        crops = torch.stack(crops)
        cropCoords = torch.stack(cropCoords)
        return crops, cropCoords
class AverageMeter(object):
    """Running-average tracker for a scalar metric.

    Tracks the most recent value (``val``), the running ``sum``, the number
    of samples ``count``, and their mean ``avg``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint_small(
        epoch, model1, model2, optimizer1, optimizer2):
    """
    Save a minimal model checkpoint (no early-stopping bookkeeping).

    :param epoch: epoch number
    :param model1: classification model
    :param model2: regression model
    :param optimizer1: optimizer for model1
    :param optimizer2: optimizer for model2
    """
    # BUG FIX: the original body referenced `epochs_since_improvement` and
    # `is_best`, neither of which is a parameter or otherwise in scope, so
    # every call raised NameError.  This "small" variant now saves only what
    # it is given; use save_checkpoint() for the best-loss bookkeeping.
    state = {
        "epoch": epoch,
        "model1": model1,
        "model2": model2,
        "optimizer1": optimizer1,
        "optimizer2": optimizer2,
    }
    filename = "checkpoint_v2sliding.pth.tar"
    torch.save(state, filename)
time data_time = AverageMeter() # data loading time losses = AverageMeter() # loss losses2 = AverageMeter() # loss start = time.time() for i_batch, (images, labels, coords) in enumerate(train_loader): data_time.update(time.time() - start) images = images.to(device) labels = labels.to(device) coords = coords.to(device) # CLASSIFICATION # Forward pass # predicted_class, predicted_locs = model(images) predicted_class, all_crops, cropCoords = c_model(images) all_crops = all_crops.to(device) cropCoords = cropCoords.to(device) # print('size of predicted vs labels', # predicted_class, labels) # print('size of predicted vs labels', # predicted_class.size(), labels.size()) loss1 = classifCriterion(predicted_class, labels) optimizer1.zero_grad() loss1.backward() # Update model optimizer1.step() # REGRESSION # Forward pass # all crops is of size (batchsize, numcrops, x, y) # we'll do it batchsize x 1 crop at a time... # coords = batchsize, numcrops, x,y, theta for i in range(9): # print('!-- ', all_crops.size()) batchcrop = all_crops[:, i, :, :] batchcrop.unsqueeze_(1) # print('!-- ', batchcrop.size()) # print('!--', cropCoords.size()) offset = cropCoords[i] # pad with column of zeros - don't touch the theta # print('!--', offset.size()) offset = offset.repeat(all_crops.size(0), 1) offset = torch.cat((offset, torch.zeros((all_crops.size(0), 1)).to(device)), dim=1) center_truth = coords[:, i, :] center_est = r_model(batchcrop).to(device) # print('!-- ', center_est) # print('!-- ', offset) center_est = center_est + offset loss2 = regrCriterion(center_truth, center_est) optimizer2.zero_grad() loss2.backward() optimizer2.step() losses2.update(loss2.item()) losses.update(loss1.item()) batch_time.update(time.time() - start) start = time.time() # Print status if i_batch % print_freq == 0: print( "Epoch: [{0}][{1}/{2}]\t" "Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" "Loss {loss.val:.4f} ({loss.avg:.4f})\t".format( epoch, i_batch, len(train_loader), 
batch_time=batch_time, loss=losses, ), "RLoss {loss.val:.4f} ({loss.avg:.4f})\t".format( epoch, i_batch, len(train_loader), batch_time=batch_time, loss=losses2, ), ) # free some memory since their histories may be stored del predicted_class, images, labels, coords # ---------------------------------- def validate(val_loader, c_model, r_model, c_criterion, r_criterion): """ One epoch's validation. : param val_loader: DataLoader for validation data : param model: model : param criterion: MultiBox loss : return: average validation loss """ c_model.eval() # eval mode disables dropout r_model.eval() # eval mode disables dropout batch_time = AverageMeter() losses = AverageMeter() losses2 = AverageMeter() start = time.time() # Prohibit gradient computation explicity because I had some problems with memory with torch.no_grad(): # Batches for i_batch, (images, labels, coords) in enumerate(train_loader): # Move to default device images = images.to(device) labels = labels.to(device) coords = coords.to(device) # CLASSIFICATION Eval predicted_class, all_crops, cropCoords = c_model(images) loss1 = c_criterion(predicted_class, labels) all_crops = all_crops.to(device) cropCoords = cropCoords.to(device) # REGRESSION Eval for i in range(9): batchcrop = all_crops[:, i, :, :] batchcrop.unsqueeze_(1) offset = cropCoords[i] offset = offset.repeat(all_crops.size(0), 1) offset = torch.cat((offset, torch.zeros((all_crops.size(0), 1)).to(device)), dim=1) center_truth = coords[:, i, :] center_est = r_model(batchcrop).to(device) center_est = center_est + offset loss2 = regrCriterion(center_truth, center_est) losses2.update(loss2.item()) losses.update(loss1.item()) batch_time.update(time.time() - start) start = time.time() # Print status if i_batch % print_freq == 0: print( "[{0}/{1}]\t" "Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" "Loss {loss.val:.4f} ({loss.avg:.4f})\t".format(i_batch, len(val_loader), batch_time=batch_time, loss=losses), "Regr Loss {loss.val:.4f} 
({loss.avg:.4f})\t".format(i_batch, len(val_loader), batch_time=batch_time, loss=losses2) ) print("\n * LOSS - {loss.avg:.3f}\n".format(loss=losses)) print(" * REGR LOSS - {loss.avg:.3f}\n".format(loss=losses2)) return losses.avg, losses2.avg # In[9]: # + # -- Load data ------------------------------------------------------- batch_size = 15 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("CUDA available? device: ", device) # Dataset is depth images of rectangular blocks train_dataset = RectDepthImgsDataset(img_dir="./data", coords=train_truth) # Data loader train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_dataset = RectDepthImgsDataset(img_dir="./data/test", coords=test_truth) # Data loader test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True) # -- Hyperparamaters ------------------------- num_epochs = 80 # number of epochs to run without early-stopping learning_rate = 0.001 start_epoch = 0 # start at this epoch # number of epochs since there was an improvement in the validation metric epochs_since_improvement = 0 best_loss = 1000.0 # assume a high loss at first workers = 4 # number of workers for loading data in the DataLoader classifModel = classifNet(IMG_X, IMG_Y) classifModel = classifModel.to(device) regrModel = regrNet((100, 100), 3) # crop size in pixels; output x,y, theta regrModel = regrModel.to(device) # criterion = nn.BCELoss() classifCriterion = nn.BCELoss() regrCriterion = nn.MSELoss() #regrCriterion = nn.SmoothL1Loss() optimizer1 = torch.optim.Adam(classifModel.parameters(), lr=learning_rate) optimizer2 = torch.optim.Adam(regrModel.parameters(), lr=learning_rate) print_freq = 25 # print training or validation status every __ batches # + # -- Hyperparamaters ------------------------- def main(): """ Training and validation. 
""" global epochs_since_improvement, start_epoch, best_loss, epoch, checkpoint print("Training model now...") # -- Begin training ------------------------- for epoch in range(num_epochs): train( train_loader=train_loader, c_model=classifModel, r_model=regrModel, classifCriterion=classifCriterion, regrCriterion=regrCriterion, optimizer1=optimizer1, optimizer2=optimizer2, epoch=epoch, ) # Save checkpoint save_checkpoint_small(epoch, classifModel, regrModel, optimizer1, optimizer2) # One epoch's validation val_loss, regr_loss = validate(val_loader=test_loader, c_model=classifModel, r_model=regrModel, c_criterion=classifCriterion, r_criterion=regrCriterion) # Did validation loss improve? is_best = val_loss < best_loss best_loss = min(val_loss, best_loss) if not is_best: epochs_since_improvement += 1 print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,)) else: epochs_since_improvement = 0 # Save checkpoint save_checkpoint(epoch, epochs_since_improvement, classifModel, regrModel, optimizer1, optimizer2, val_loss, regr_loss, best_loss, is_best) # save_checkpoint(epoch, epochs_since_improvement, classifModel, optimizer1, # val_loss, best_loss, is_best)) # + main() # alert when training is done sound_file = '/home/rui/Downloads/newyear.ogg' Audio(sound_file, autoplay=True) # + print('All Ready!') filename = "checkpoint_v2sliding.pth.tar" checkpoint = torch.load(filename) start_epoch = checkpoint['epoch'] + 1 best_loss = checkpoint['best_loss'] print('\nLoaded checkpoint from epoch %d. 
def makeCrops(image, stepSize, windowSize, true_center):
    """Slide a window over ``image`` and label each crop.

    :param image: 2-D image tensor (rows are y, columns are x)
    :param stepSize: stride of the sliding window in pixels
    :param windowSize: (height, width) of each window
    :param true_center: (c_x, c_y, orientation) of the rectangle
    :return: (stacked crops, list of bools — True where the rectangle centre
        lies inside the crop by at least the margin)
    """
    image = image.type(torch.FloatTensor)
    c_x, c_y, orient = true_center
    margin = 10  # centre must be this far inside the window edges to count
    crop_list = []
    flags = []
    # Row-major scan: y (rows) is the outer loop, so the window slides
    # horizontally first.
    for top in range(0, image.shape[0] - windowSize[0] + 1, stepSize):
        bottom = top + windowSize[0]
        for left in range(0, image.shape[1] - windowSize[1] + 1, stepSize):
            right = left + windowSize[1]
            inside_x = left + margin < c_x < right - margin
            inside_y = top + margin < c_y < bottom - margin
            flags.append(inside_x and inside_y)
            crop_list.append(image[top:bottom, left:right])
    crops = torch.stack(crop_list)
    print("shape of crops", crops.shape)
    return crops, flags
size of ", labels.size()[0], 'is: ', loss1, loss2) #print(labels) print('!-- guesses size', guesses.size()) print('!-- labels size', labels.size()) #print(torch.round(predicted_class)) predicted_coords = guesses.view(-1, 9, 3) #print(predicted_class) #print(predicted_coords) print('!-- guesses size', predicted_coords.size()) print(cropCoords) # - # + # -- Plot windows and print labels ----------------------------------- n = 2 imgSample = images[n] crops, __ = makeCrops(imgSample, 50, (100, 100), (x,y, theta)) # -- Show original image, and the sliding windo crops ------- plt.imshow(imgSample) # --------------------------------------------------- foo_label = labels[n] foo_coord = coords[n] predicted_locs = predicted_coords.view(-1, coords.size(1), coords.size(2)) foo_coord_est = predicted_locs[n] # 3 per window foo_label_est = predicted_class[n] print('!-- coords', foo_coord) max_idx = torch.argmax(foo_coord[:,0]) x, y, theta = foo_coord[max_idx] est_max_idx = torch.argmax(foo_coord_est[:,0]) x_est, y_est, theta_est = foo_coord_est[est_max_idx] # -- Print x,y for one result ------- # -- Print window t/f for one result ------- print("!-- FOR N = ", n) print("y (crops) \n\t", [int(l) for l in foo_label]) print("!-- yhat (crops) \n\t", [int(np.round(p,0)) for p in foo_label_est] ) # ------------------------------------------------- sns.set(rc={"figure.figsize": (8, 6)}) print("\n\nFOR N = ", n) print("!-- center y \n\t", [float(zed) for zed in (x, y, theta)]) print("!-- center y est \n\t ", [float(zed) for zed in (x_est, y_est, theta_est)]) print(foo_coord_est) fig, axess = plt.subplots(3,3, figsize=(10,10)) axess = np.array(axess).flatten()#order='F') #'F' means to flatten in column-major (Fortran- style) order. 
for i in range(9): axess[i].imshow(crops[i]) axess[i].set_ylabel('# '+str(i)) plt.suptitle('Window') plt.show() #for (i, crop) in enumerate(crops): # print("1-index number of window: ", i+1, 'x', x, 'y', y, 'has rectangle?', hasRect) #plt.figure() #plt.suptitle("numero: %d" % (i)) #plt.imshow(crop) # + #print("!-- yhat \n\t", [int(round(o, 0)) for o in outputs[n].cpu().numpy()]) print("!-- ") print('True locations, defined for each crop\n', foo_coord) print("!-- ") print('Full predicted locations (3 per crop)\n', foo_coord_est) #print("\n ------ x,y center + orient: ", coords[n], "\n")) print(np.argwhere(foo_coord_est > 0) ) # -- Print outputs for multiple results ------- for ix in range(5): print('\nSAMPLE ', ix) print("!-- y (crops) \n\t", [int(l) for l in labels[ix]]) print("!-- yhat (crops) \n\t", [int(np.round(p, 0)) for p in predicted_class[ix]] ) # -- Main --------------------------------------------- #if __name__ == '__main__': #main() # -
rcnn_depth/Sliding Window v4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![pythonista.io](imagenes/pythonista.png)](https://pythonista.io) # # Mixins. # Los mixins son una coleccion de clases que contienen métodos, los cuales pueden ser intercambiables. Aprovechan la herencia múltiple para conformar clases modulares. # # Por lo general estas clases contienen métodos que corresponden a bibliotecas que no sobrescriben a las otras superclases. # # El uso de mixins es muy controvertido e incluso algunos lo consideran un "antipatrón", pero aún así es aceptable en Python. class AutoRobot: def conversion(self): return 'Soy un felino.' class LeonNegro(AutoRobot): def opera_cabeza(self): return 'Jeje. Soy el más importante' class LeonRojo(AutoRobot): def opera_brazo_derecho(self): return 'Operando brazo derecho' class LeonVerde(AutoRobot): def opera_brazo_izquierdo(self): return 'Operando brazo izquierdo' class LeonAzul(AutoRobot): def opera_pierna_derecha(self): return 'Operando pierna derecha.' class LeonAmarillo(AutoRobot): def opera_pierna_izquierda(self): return 'Quería ser la cabeza.' class Voltron(LeonNegro, LeonRojo, LeonAzul, LeonVerde, LeonAmarillo): pass help(Voltron) ensamblar = Voltron() ensamblar.conversion() ensamblar.opera_pierna_izquierda() ensamblar.opera_brazo_izquierdo() # <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p> # <p style="text-align: center">&copy; <NAME>. 2019.</p>
09_mixins.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # AsteroGaP: A Guide
#
# Dear user/reader,
#
# I have so much to tell you. In this notebook I will be going through and explaining how to use our code, and how the code itself functions. I'll try my best to explain everything as clearly as possible and delve into details, so feel free to skip around if there are parts that interest you more than others.
#
# But first, a quick look at what motivated us to develop this code.
#
# ## Motivation
#
# The motivation for this code stems from a drive to incorporate new computational methods with new astronomical problems.
#
# Asteroids are leftover remnants from the creation of the solar system. As they orbit the Sun, they also rotate along an axis. This rotation makes it so that the light reflected and measured off an asteroid will vary with time, meaning if we monitor the asteroid for long enough, we will be able to measure a complete rotation and figure out what the asteroid's rotational period is (an example lightcurve is plotted in the cell below). This rotational period tells us a lot about what the asteroid might be made of and how we can expect it to behave in the future.
#
# Traditionally, these rotational periods were measured by training a telescope on an asteroid for several hours, and observing as frequently as possible so that you could see the lightcurve.
# + # import packages to run import matplotlib.pyplot as plt import pandas as pd import numpy as np # find a way to include or fetch this data filename = "../../../CometGPs/data/paper_plots/3200/3200_lc_49627_to_49787.txt" data = pd.read_csv(filename, delim_whitespace=True, header=None) time = data[0] flux = data[1] x = 1440 # 12 hours, every index is 30 seconds plt.plot(time[0: x], flux[0: x]) plt.xlabel("Time (%.1f hours)"%(x*0.5/60)) plt.ylabel("Flux") plt.title("Phaethon 3200") # - # Currently (as of 2020) we are seeing a slew of new survey telescopes being developed (e.g. LSST), intended for all-sky coverage. While these telescopes are great for developing time-series data, their expansive field makes it so that the same objects are only photographed every couple of nights. Rather than looking like the asteroid above, it would instead look sparse, like the following plot, if it was only observed every 12 hours for a month. # + x = 1440*2*30 # 30 days plt.plot(time[0: x: 2880], flux[0: x: 2880], "o") plt.xlabel("Time (%.1f hours)"%(x*0.5/60)) plt.ylabel("Flux") plt.title("Phaethon 3200") plt.show() # - # Visually, it's a nearly impossible to guess what the rotational period should be, and even Lomb-Scargle Periodograms sometimes have a hard time determining what the period might be if the lightcurve is sparse enough. # # It was with these sparse incoming datasets in mind that we decided to develop this method, lovingly named AsteroGaP for **Astero**id **Ga**ussian **P**rocesses. # ## Gaussian Processes # # You might at this point be wondering, what are Gaussian Processes (GP)? A Gaussian Process (also known as Gaussian Process Regression) is a generative kernel-based framework for solving regression problems \citep{Roberts2012}. Rather than modeling the data points directly as in standard linear regression, a Gaussian process models the \textit{covariance} between data points. 
def sample_data(x_possible, y_possible, yerr_amp, random_n=10):
    """Draw a random sparse sample from a dense light curve.

    :param x_possible: dense grid of x (time) values
    :param y_possible: corresponding dense y (flux) values
    :param yerr_amp: uncertainty scale; each point gets yerr = yerr_amp * 1%
    :param random_n: number of points to sample
    :return: (x, y, yerr) arrays of length ``random_n``; y is the sampled
        curve value plus Gaussian noise at the ``yerr`` scale
    """
    n_grid = y_possible.size
    # Pick random_n distinct grid indices, kept in time order.
    chosen = np.random.choice(n_grid, random_n, replace=False)
    chosen.sort()
    x = x_possible[chosen]
    clean_y = y_possible[chosen]
    # 1% of yerr_amp as the per-point uncertainty.
    yerr = yerr_amp * 0.01 * np.ones_like(x)
    # Perturb the clean values with Gaussian noise at the yerr scale.
    y = clean_y + yerr * np.random.randn(len(x))
    return (x, y, yerr)
For the last cell, I have set up a periodic lightcurve in **blue** that repeats every 3 units (the kernel takes the logarithm of the period, hence the log(3) notation), with a gamma of 10, and an amplitude (amp) of 1. Feel free to test out different values for gamma, the period, and the amplitude to get a feel for how the lightcurve changes. # # The number **n** in the bar above is the number of **black** observations/samples of the blue lightcurve we are taking. Once we have n number of randomly selected observations (time/x, flux/y, and flux error), we can then feed those samples into a Gaussian Process and see how the model would generate a fit given what it knowns. # # As you can hopefully see, with few points, the GP model is only certain with its fit in specific areas (small red shading), and is very uncertain about what happens in other parts of the period (large red shading). The red line is what the predicted fit is, and the shading is the uncertainty. # # If we increase the number of points, the GP model has more information and can make better estimates about what is going in with the lightcurve, reducing the uncertainty. # # Even with just a few observations (~10-20), if the lightcurve has been broadly and evenly sampled, the GP model is able to produce a really great fit for the lightcurve data. # # ## Kernels # # The main component driving this Gaussian Process model is the kernel, which is a matrix telling the model how the different data points should relate to one another. # # We have specified an Exponential Sine Squared kernel, which is significant because this kernel expects data to repeat after a certain amount of time, like a period. 
# + # %matplotlib notebook import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import seaborn as sns sns.set_style("white") sns.set_palette('viridis') import numpy as np import pandas as pd import george from george import kernels # + amp = 1.0 log_period = np.log(3.0) gamma = [0.1, 1.0, 20.0] fig = plt.figure(figsize=(10,10)) gs = gridspec.GridSpec(2, 2, figure=fig) ax1 = plt.subplot(gs[0, :]) ax2 = plt.subplot(gs[1,0]) ax3 = plt.subplot(gs[1,1]) x = np.linspace(-10, 10, 500) for i, g in enumerate(gamma): kernel = amp * kernels.ExpSine2Kernel(gamma=g, log_period=log_period) kernel_vals = kernel.get_value(np.atleast_2d(x).T) ax2.plot(x, kernel_vals[0], color=sns.color_palette("viridis", 9)[4*i], label=r"$\gamma = %2.f$"%g) ax2.set_xlim(x[0], x[-1]) if i == 1: ax3.imshow(kernel_vals, cmap="viridis", interpolation="nearest") ax3.set_xticklabels([]) ax3.set_yticklabels([]); ax3.set_title("2D representation of the covariance function") np.random.seed(1234) gp = george.GP(kernel) gp.compute(x) y = gp.sample(size=1) ax1.plot(x, y.T+i, lw=1.5, alpha=1.0, color=sns.color_palette("viridis", 9)[4*i], label=r"$\gamma = %2.f$"%g) ax1.set_xlim(-5, 5); ax1.set_xlabel("x") ax1.set_ylabel("y") ax1.set_title("Example realizations of the Gaussian Process") ax1.legend(loc=2) ax2.set_xlabel("x") ax2.set_ylabel("Covariance between data points") ax2.set_title("1D representation of the covariance function") ax3.set_xlabel("x") ax3.set_ylabel("x") plt.tight_layout() #plt.savefig("../paper/fig1.pdf", format='pdf') # - # ### Period # # In the image above, we can visualize such a kernel by plotting the distances (which corresponds to time in our data) between data points (x) versus how high their correlation should be. 
For data points close to one another time wise, we expect their values to be similar, with this similarity getting lower the further away the data points being compared are (take a look at how fast the correlation for the green curve drops in the 1D plot above). The key to this kernel, however, is that after some amount of time, the **Period**, this correlation will jump back up again, and we expect data points to have similar values once again. # # ### Gamma # # The other parameter being visualized here is **Gamma**, often referred to as the inverse of the length-scale. Gamma is simply a measurement of how much variability we expect to see within each period, and how fast we can expect data points to cease correlating with each other. A low value of Gamma means that the intra-period variability should be minor and that data points should remain highly correlated with one another, where as a high Gamma value means that there is a lot of changes happening between each period and data points are not very correlated. # # Take a look at the upper plot. We can see 3 different curves modeled, each with the same period but a different value for Gamma. The curve with a Gamma of 0 remains fairly smooth while the curve with a Gamma of 20 varies a lot within each period. The data points from the smooth plot are all pretty similar and are highly correlated. This can be visualized in the lower-left plot, where the purple line shows the correlation for all the data points. Alternatively, the green line shows the correlation for the curve with a high Gamma value, and the correlation essentially drops to 0 inbetween every period. This just means that, even though the curve is expected to repeat itself, there's essentially no telling how a one section of the curve will behave if we only have data for another section. # # ### Amplitude (periodic) # # The **Amplitude** parameter is another part of the kernel but all it really does is scale the height of the curves. 
A big difference in overall magnitudes for a lightcurve would indicate a large amplitude value, and vice versa. # # ### Mean # # The last parameter assigned to the kernel is the **mean** value. This just determines around which values the fit should be. This mean can either be set to a single value, or another function, but in our case, since we are working with data that has had the phase-angle effect from the sun removed, we can just assume our fits should fall around a single value (often 0 or 1 depending on how the data was normalized) # # ## Additional Kernel # # So it's clear that we expect lightcurves from asteroids to appear periodic, but because of the changing nature of an asteroid's angle to the sun, even though we correct for any major changes in the magnitude, we still expect to see minor changes in the profile in the lightcurve. And indeed, this can be seen even with simulated data after just a couple of months. # # The plot below shows what Phaethon's lightcurve looks like at one time versus 2 months later. As you can hopefully see, although the period has remained the same, the lightcurve itself has changed quite a bit. # # # # + # %matplotlib inline x = 1440 # 12 hours, every index is 30 seconds plt.plot(time[0: x], flux[0: x]) plt.plot(time[230: 230+x], flux[172800: 172800+x], "orange",label="2 months later\n (Shifted for better comparison)") plt.xlabel("Time (%.1f hours)"%(x*0.5/60)) plt.ylabel("Flux") plt.title("Phaethon 3200") plt.legend() # - # We wanted to account for this change in the lightcurve in our kernel since that will be used to analyze this data are expected to span many months if not years, and we were worried that a ridgid periodic kernel would not be able to adapt to these subtle changes. We modified our kernel by multiply our basic Exponential Sine Squared kernel with an Exponential Squared kernel. This kernel has been plotted below. 
# + amp_1 = 10.0 metric = 20 amp_2 = 5.0 log_period = np.log(3.0) gamma = [0.1, 1.0, 20.0] fig = plt.figure(figsize=(10,10)) gs = gridspec.GridSpec(2, 2, figure=fig) ax1 = plt.subplot(gs[0, :]) ax2 = plt.subplot(gs[1,0]) ax3 = plt.subplot(gs[1,1]) x = np.linspace(-10, 10, 500) for i, g in enumerate(gamma): k1 = amp_1 * kernels.ExpSquaredKernel(metric=metric) #k2 = amp_2 * kernels.ExpSine2Kernel(gamma=g, log_period=log_period) kernel = k1#*k2 #or k1*k2 kernel_vals = kernel.get_value(np.atleast_2d(x).T) ax2.plot(x, kernel_vals[0], color=sns.color_palette("viridis", 9)[4*i], label=r"$\gamma = %2.f$"%g) ax2.set_xlim(x[0], x[-1]) if i == 1: ax3.imshow(kernel_vals, cmap="viridis", interpolation="nearest") ax3.set_xticklabels([]) ax3.set_yticklabels([]); ax3.set_title("2D representation of the covariance function") np.random.seed(1234) gp = george.GP(kernel) gp.compute(x) y = gp.sample(size=1) ax1.plot(x, y.T+i, lw=1.5, alpha=1.0, color=sns.color_palette("viridis", 9)[4*i], label=r"$\gamma = %2.f$"%g) ax1.set_xlim(-5, 5); ax1.set_xlabel("x") ax1.set_ylabel("y") ax1.set_title("Example realizations of the Gaussian Process") ax1.legend(loc=2) ax2.set_xlabel("x") ax2.set_ylabel("Covariance between data points") ax2.set_title("1D representation of the covariance function") ax3.set_xlabel("x") ax3.set_ylabel("x") plt.tight_layout() # - # As you can see, the correlation visualized for this kernel just drops off with distance. This is the most common sort of correlation we see in most statistical analyses, and it's really just saying that stuff nearby in one regard (be it time or space) is expected to be similar to other ways too (in our situation, magnitudes!). # # Now alone this kernel wouldn't be very helpful in our asteroid analysis, but when we multiply the two kernels together, an interesting correlation emerges. 
# + amp_1 = 1 metric = 10 amp_2 = 5.0 log_period = np.log(3.0) gamma = [0.1, 1.0, 20.0] fig = plt.figure(figsize=(10,10)) gs = gridspec.GridSpec(2, 2, figure=fig) ax1 = plt.subplot(gs[0, :]) ax2 = plt.subplot(gs[1,0]) ax3 = plt.subplot(gs[1,1]) x = np.linspace(-10, 10, 500) for i, g in enumerate(gamma): k1 = amp_1 * kernels.ExpSquaredKernel(metric=metric) k2 = amp_2 * kernels.ExpSine2Kernel(gamma=g, log_period=log_period) kernel = k1*k2 #or k1*k2 kernel_vals = kernel.get_value(np.atleast_2d(x).T) ax2.plot(x, kernel_vals[0], color=sns.color_palette("viridis", 9)[4*i], label=r"$\gamma = %2.f$"%g) ax2.set_xlim(x[0], x[-1]) if i == 1: ax3.imshow(kernel_vals, cmap="viridis", interpolation="nearest") ax3.set_xticklabels([]) ax3.set_yticklabels([]); ax3.set_title("2D representation of the covariance function") np.random.seed(1234) gp = george.GP(kernel) gp.compute(x) y = gp.sample(size=1) ax1.plot(x, y.T+i, lw=1.5, alpha=1.0, color=sns.color_palette("viridis", 9)[4*i], label=r"$\gamma = %2.f$"%g) ax1.set_xlim(-5, 5); ax1.set_xlabel("x") ax1.set_ylabel("y") ax1.set_title("Example realizations of the Gaussian Process") ax1.legend(loc=2) ax2.set_xlabel("x") ax2.set_ylabel("Covariance between data points") ax2.set_title("1D representation of the covariance function") ax3.set_xlabel("x") ax3.set_ylabel("x") plt.tight_layout() # - # This plot is a visualization of both kernels (specifically multiplied, not added). Although the effects are a little extreme in comparison to what we expect from our other data, we can see in the top figure that the lightcurve repeats and remains periodic, but gradually evolves over time. The correlation between the data points nearby are still periodic, but the expectation that such a pristine periodicity remains the same forever has been removed, which is exactly what we were hoping for. # # Now just to clarify, this doesn't mean that the kernel expects the lightcurve to stop being periodic as some point. 
It just means that the expectation for the lightcurve to return to exactly the same value every cycle has been lessened, meaning that the kernel is better able to fit lightcurves with data sets captured over longer time frames. Exciting stuff!
#
# ### Metric
#
# Now just as before, I want to quickly clarify what the parameters attributed to this kernel mean. The **Metric** parameter is a bit like Gamma, in that it determines how fast the correlation between data points drops off. A larger metric value means a longer time before the correlation falls to 0, and vice versa.
#
# ### Amplitude (long)
#
# The **Amplitude** here is attributed to how much the kernel varies over time.
#
# ### NOTE!
#
# In order to avoid any confusion with the previously-mentioned amplitude parameter, I will always refer to this amplitude as **amplitude (long)** since it is responsible for the long-term adaptations in the kernel, while the other amplitude is referred to as **amplitude (periodic)** since that amplitude determines the height of the periodic kernel. Hopefully this distinction should lessen any confusion in the future.
#
# This is most of what I can offer as an explanation for the kernels. We use the package **george** to set all of this up. A lot of great and more technical documentation can be found over on the website for george (https://george.readthedocs.io/en/latest/)
docs/source/AsteroGaP_Example_Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from gs_quant.session import GsSession, Environment
from gs_quant.data import Dataset
from datetime import date
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns
import pandas as pd

# NOTE(review): credentials should be loaded from environment variables or a
# secrets store rather than committed to source - confirm this key is rotated.
GsSession.use(client_id='8870cdcf26db4484beeef2b2166e1f15', client_secret='<KEY>')

who_dataset = Dataset('COVID19_COUNTRY_DAILY_WHO')


def getDataSet(countryId):
    """Fetch WHO daily COVID-19 data for one country and append a
    day-over-day rate-of-change column for new confirmed cases.

    countryId: country code understood by the dataset (e.g. 'US', 'GB').
    Returns the dataset DataFrame with an extra 'rateOfChange' column,
    where the first row (no previous day) is filled with 0.
    """
    data_frame = who_dataset.get_data(countryId=countryId, start=date(2019, 1, 1))
    # (today - yesterday) / yesterday, computed once. The original recomputed
    # this identical vectorized assignment inside a loop over every row
    # (the loop index was never used), doing the same work len(df)-1 times.
    prev = data_frame['newConfirmed'].shift()
    data_frame['rateOfChange'] = ((data_frame['newConfirmed'] - prev) / prev).fillna(0)
    return data_frame


data_frame_US = getDataSet('US')
data_frame_UK = getDataSet('GB')
data_frame_BR = getDataSet('BR')
data_frame_NG = getDataSet('NG')
data_frame_NZ = getDataSet('NZ')
data_frame_IN = getDataSet('IN')

df = pd.concat([data_frame_US, data_frame_UK, data_frame_IN, data_frame_BR, data_frame_NG, data_frame_NZ])

# One rate-of-change chart per country.
for country in df['countryId'].unique():
    fig = px.line(df[df['countryId'] == country], y='rateOfChange', title=country)
    fig.show()

# +
# All countries overlaid on a single chart for comparison.
fig = px.line(df, y='rateOfChange', color='countryId')
fig.show()
GS_Hackathon_Snehal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Coroutines # What is a coroutine? # # The word co actually comes from **cooperative**. # # A coroutine is a generalization of subroutines (think functions), focused on **cooperation** between routines. # # If you have some concepts of multi-threading, this is similar in some ways. But whereas in multi-threaded applications the **operating system** usually decides when to suspend one thread and resume another, without asking permission, so-called **preemptive** multitasking, here we have routines that voluntarily yield control to something else - hence the term **cooperative**. # # We actually have all the tools we need to start looking at this. # # It is the `yield` statement we studied in the last section on generator functions. # # Let's dig a little further to truly understand what coroutines are and how they can be used. # We'll need to first define quickly what a queue is. # # It is a collection where items are added to the back of the queue, and items are removed from the end of the queue. So, very similar to a queue in a supermarket - you join the queue at the back of the queue, and the person in the front of the queue is the first one to leave the queue and go to the checkout counter. # # This is also called a First-In First-Out data structure. 
# #
# # (For comparison, you also have a **stack**, which is like a stack of pancakes - the last cooked pancake is placed on top of the stack of pancakes (called a **push**), and it's the first one you take from the stack and eat (called a **pop**) - so that is called First-In Last-Out)
# #
# # We can just use a simple list to act as a queue, but lists are not particularly efficient when adding elements to the beginning of the list - they are fine for adding elements to the end, but less so at inserting elements, including at the front.
# #
# # So, instead of using a list, let's just use a more efficient data structure for our queue.
# #
# # The `queue` module has some queue implementations, including some very specialized ones. In Python 3.7, it also has the `SimpleQueue` class that is more lightweight.
# #
# # In this case though, I'm going to use the `deque` class (double-ended queue) from the `collections` module - it is very efficient at adding and removing elements from both the start and the end of the queue - so it's very general purpose and widely used. The `queue` implementations are more specialized and have several features useful for multi-tasking that we won't actually need.

from collections import deque

# We can specify a maximum size for the queue when we create it - this allows us to limit the number of items in the queue.
# # We can then add and remove items by using the methods: # * `append`: appends an element to the right of the queue # * `appendleft`: appends an element to the left of the queue # * `pop`: remove and return the element at the very right of the queue # * `popleft`: remove and return the element at the very left of the queue # # (Note that I'm avoiding calling it the start and end of the queue, because what you consider the start/end of the queue might depend on how you are using it) # Let's just try it out to make sure we're comfortable with it: dq = deque([1, 2, 3, 4, 5]) dq dq.append(100) dq dq dq.appendleft(-10) dq dq.pop() dq dq.popleft() dq # We can create a capped queue: dq = deque([1, 2, 3, 4], maxlen=5) dq.append(100) dq dq.append(200) dq dq.append(300) dq # As you can see the first item (`2`) was automatically discarded from the left of the queue when we added `300` to the right. # We can also find the number of elements in the queue by using the `len()` function: len(dq) # as well as query the `maxlen`: dq.maxlen # There are more methods, but these will do for now. # Now let's create an empty queue, and write two functions - one that will add elements to the queue, and one that will consume elements from the queue: def produce_elements(dq): for i in range(1, 36): dq.appendleft(i) def consume_elements(dq): while len(dq) > 0: item = dq.pop() print('processing item', item) # Now we can use them as follows: def coordinator(): dq = deque() producer = produce_elements(dq) consume_elements(dq) coordinator() # But suppose now that the `produce_elements` function is reading a ton of data from somewhere (maybe an API call that returns course ratings on some Python course :-) ). # # The goal is to process these after some time, and not wait until all the items have been added to the queue - maybe the incoming stream is infinite even. 
# # In that case, we want to "pause" adding elements to the queue, process (consume) those items, then once they've all been processed we want to resume adding elements, and rinse and repeat. # We'll use a capped `deque`, and change our producer and consumers slightly, so that each one does it's work, the yields control back to the caller once it's done with its work - the producer adding elements to the queue, and the consumer removing and processing elements from the queue: # + def produce_elements(dq, n): for i in range(1, n): dq.appendleft(i) if len(dq) == dq.maxlen: print('queue full - yielding control') yield def consume_elements(dq): while True: while len(dq) > 0: print('processing ', dq.pop()) print('queue empty - yielding control') yield def coordinator(): dq = deque(maxlen=10) producer = produce_elements(dq, 36) consumer = consume_elements(dq) while True: try: print('producing...') next(producer) except StopIteration: # producer finished break finally: print('consuming...') next(consumer) # - coordinator() # Notice a **really important** point here - the producer and consumer generator functions do not use `yield` for iteration purposes - they are simply using `yield` to suspend themselves and cooperatively hand control back to the caller - our coordinator function in this case. # # The generators used `yield` to cooperatively suspend themselves and yield control back to the caller. # # Similarly, we are not using `next` for iteration purposes, but more for starting and resuming the generators. # # This is a fundamentally different idea than using `yield` to implement iterators, and forms the basis for the idea of using generators as coroutines. # ### Timings using Lists and Deques for Queues # Let's see some timing differences between `lists` and `deques` when inserting and popping elements. We'll compare this with appending elements to a `list` as well. 
from timeit import timeit

# +
list_size = 10_000

def append_to_list(n=list_size):
    """Append n integers to the end of a list (the fast path for lists)."""
    lst = []
    for i in range(n):
        lst.append(i)

def insert_front_of_list(n=list_size):
    """Insert n integers at the front of a list - O(n) shift per insert."""
    lst = []
    for i in range(n):
        lst.insert(0, i)

lst = [i for i in range(list_size)]
def pop_from_list(lst=lst):
    # NOTE(review): the default argument binds this one shared list, so only
    # the first timed call actually pops anything - every later call sees an
    # empty list and does no work. Kept as-is to preserve the published
    # timings, but confirm this is the intended benchmark.
    for _ in range(len(lst)):
        lst.pop()

lst = [i for i in range(list_size)]
def pop_from_front_of_list(lst=lst):
    # Same caveat as pop_from_list: the shared list is emptied by the first call.
    for _ in range(len(lst)):
        lst.pop(0)
# -

# Let's time those out:

timeit('append_to_list()', globals=globals(), number=1_000)

timeit('insert_front_of_list()', globals=globals(), number=1_000)

timeit('pop_from_list()', globals=globals(), number=1_000)

timeit('pop_from_front_of_list()', globals=globals(), number=1_000)

# As you can see, inserting elements at the front of the list is not very efficient compared to the end of the list. So lists are OK to use as stacks, but not as queues.
#
# The standard library's `deque` is efficient at adding/removing items from both the start and end of the collection:

from collections import deque

# +
list_size = 10_000

def append_to_deque(n=list_size):
    """Append n integers to the right end of a deque - O(1) per append."""
    dq = deque()
    for i in range(n):
        dq.append(i)

def insert_front_of_deque(n=list_size):
    """Append n integers to the left end of a deque - also O(1) per append."""
    dq = deque()
    for i in range(n):
        dq.appendleft(i)

dq = deque(i for i in range(list_size))
def pop_from_deque(dq=dq):
    # BUG FIX: this originally looped over range(len(lst)), accidentally
    # reusing the list from the earlier cell (already emptied by then), so the
    # deque was never popped and the timing measured nothing but call
    # overhead. Use the deque's own length.
    for _ in range(len(dq)):
        dq.pop()

dq = deque(i for i in range(list_size))
def pop_from_front_of_deque(dq=dq):
    # BUG FIX: same as pop_from_deque - was range(len(lst)).
    for _ in range(len(dq)):
        dq.popleft()
# -

timeit('append_to_deque()', globals=globals(), number=1_000)

timeit('insert_front_of_deque()', globals=globals(), number=1_000)

timeit('pop_from_deque()', globals=globals(), number=1_000)

timeit('pop_from_front_of_deque()', globals=globals(), number=1_000)
python-tuts/1-intermediate/06 - Generator Based Co-routines/01 - Coroutines.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: MFE Environment # language: python # name: mfe_env # --- # + import warnings warnings.simplefilter(action='ignore', category=FutureWarning) import pandas as pd import numpy as np from scipy import optimize import re import copy import logging import sys from datetime import datetime, timedelta import matplotlib.pyplot as plt import seaborn as sns from matplotlib.ticker import StrMethodFormatter sns.set() # a little hacky, but works if you don't want to actually install the # custom packages sys.path.append('../') from uniswapv3_simulator.pool import Uniswapv3Pool from uniswapv3_simulator.utils import * from uniswapv3_simulator.math import tick_to_sqrt_price, sqrt_price_to_tick from utils import amount_to_float # + POOL = 'USDC-WETH-500' NUMERAIRE_TOKEN = 0 MAX_DATE = '2022-01-28' SAVE_FREQ = 'D' # change to 'H' if you want hourly pool snapshots SAVED_POOLS = f'./saved-pools/{POOL}.pickle' SAVED_DAILY_RETURNS = f'./saved-daily-returns/{POOL}.pickle' SAVED_WEEKLY_RETURNS = f'./saved-weekly-returns/{POOL}.pickle' timestamp = datetime.now().strftime('%y%m%d%H%M%S') logging.basicConfig(level=logging.INFO, filename=f'./logs/{POOL}_{timestamp}.log') logging.getLogger('uniswap-v3').setLevel(logging.INFO) logging.getLogger('uniswap-v3.utils').setLevel(logging.DEBUG) # - data = pd.read_pickle('../data/pool_data_clean.pickle') swaps = data[POOL]['swaps'] liquidity = data[POOL]['liquidity'] swaps.info() swaps.head() liquidity.info() liquidity.head() adds = liquidity.loc[liquidity['liquidity_event'] == 'ADD_LIQUIDITY', :].copy() adds = adds.sort_values('txn_time').reset_index(drop=True) adds.head() first_add_hash = adds.at[0, 'tx_hash'] print(f'First liquidity add hash: {first_add_hash}') # + # from https://etherscan.io/tx/0x125e0b641d4a4b08806bf52c0c6757648c9963bcda8681e4f996f09e00d4c2cc#eventlog 
liquidity_delta = amount_to_float('345073104699360', 18) # belive all liquidity amounts use 18 decimals assert liquidity_delta == adds.at[0, 'liquidity'] token0 = adds.at[0, 'token_0_amount'] token1 = adds.at[0, 'token_1_amount'] tick_lower = adds.at[0, 'price_tick_lower'] tick_upper = adds.at[0, 'price_tick_upper'] token0_decimals = adds.at[0, 'contract_decimals_token_0'] token1_decimals = adds.at[0, 'contract_decimals_token_1'] init_price = pool_init_price(token0, token1, tick_upper, tick_lower, liquidity_delta, token0_decimals, token1_decimals) token0_symb = liquidity.at[0, 'contract_ticker_symbol_token_0'] token1_symb = liquidity.at[0, 'contract_ticker_symbol_token_1'] print(f'Pool initial price ({token1_symb}/{token0_symb}): {init_price:,.12e}') print(f'Pool initial price ({token0_symb}/{token1_symb}): {1 / init_price:,.12e}') # - sqrt_price_x96 = 1.350174849792634181862360983626536e+33 etherscan_price = sqrt_price_x96 ** 2 / 2 ** 192 print(f"Calculated initial price: {init_price:.12e}") print(f"Price per Etherscan: {etherscan_price:.12e}") all_txn = organize_txns(liquidity, swaps, max_date=MAX_DATE) all_txn CHECKS_ON = False # need to think about appropriate error tolerances # TODO: maybe base these tolerances on the average transaction size? 
TOKEN0_TOLS = {'atol': 1e-12, 'rtol': 1e-8} TOKEN1_TOLS = {'atol': 1e-12, 'rtol': 1e-8} LIQUIDITY_TOLS = {'atol': 1e-8, 'rtol': 1e-5} # there are >1.2M transactions, so we don't show the logging here pool_snapshots, tx_results = run_historical_pool( init_price, all_txn, liquidity, swaps, save_freq=SAVE_FREQ, position_id='generic_LP', checks_on=CHECKS_ON, verbose=True, token0_tols=TOKEN0_TOLS, token1_tols=TOKEN1_TOLS, liquidity_tols=LIQUIDITY_TOLS ) pd.to_pickle(pool_snapshots, SAVED_POOLS) # + example_day = '2022-01-27 00:00:00' sigma = 0.04 pool = pool_snapshots[example_day] price_bins = np.array( [0] + [pool.price * (1 + i * sigma) for i in range(-10, 11)] + [np.inf] ) fig, ax = plt.subplots(figsize=(20, 8)) pool.plot_liquidity_curve(interval=(0, 0.001), ax=ax) for price in price_bins[1:-1]: if price == pool.price: ax.axvline(price, color='k', label='Pool Price') else: ax.axvline(price, color='r', linestyle='--') ax.legend(loc='upper left') plt.show() # + # %%time # daily returns all_returns = calc_all_returns_per_bin( pool_snapshots, all_txn, liquidity, swaps, freq='D', # pandas frequency codes (can also include anchor) sigma=0.04, numeraire_token=NUMERAIRE_TOKEN ) pd.to_pickle(all_returns, SAVED_DAILY_RETURNS) bin_returns = defaultdict(list) for day, irrs in all_returns.items(): for bin_pos, irr in irrs.items(): bin_no = int(re.findall(r'_bin(\d+)_', bin_pos)[0]) bin_returns[bin_no].append(irr) fig, ax = plt.subplots(figsize=(20, 8)) x_pos, x_labels, y = [], [], [] for i in range(1, 23): x_pos.append(i) x_labels.append(f'{i - 11}$\sigma$') y.append(np.mean(bin_returns[i])) ax.plot(x_pos, y) ax.set_xticks(ticks=np.array(x_pos[:-1]) + 0.5, labels=x_labels[:-1]) ax.set_title('Daily Mean Returns Per Bin') ax.set_xlabel('Bin Edge, as a Function of $\sigma$') ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.2%}')) plt.show() # + # %%time # weekly returns all_returns = calc_all_returns_per_bin( pool_snapshots, all_txn, liquidity, swaps, freq='W', # pandas 
frequency codes (can also include anchor) sigma=0.04, numeraire_token=NUMERAIRE_TOKEN ) pd.to_pickle(all_returns, SAVED_WEEKLY_RETURNS) bin_returns = defaultdict(list) for day, irrs in all_returns.items(): for bin_pos, irr in irrs.items(): bin_no = int(re.findall(r'_bin(\d+)_', bin_pos)[0]) bin_returns[bin_no].append(irr) fig, ax = plt.subplots(figsize=(20, 8)) x_pos, x_labels, y = [], [], [] for i in range(1, 23): x_pos.append(i) x_labels.append(f'{i - 11}$\sigma$') y.append(np.mean(bin_returns[i])) ax.plot(x_pos, y) ax.set_xticks(ticks=np.array(x_pos[:-1]) + 0.5, labels=x_labels[:-1]) ax.set_title('Weekly Mean Returns Per Bin') ax.set_xlabel('Bin Edge, as a Function of $\sigma$') ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.2%}')) plt.show() # + # # %%time # # hourly returns # all_returns = calc_all_returns_per_bin( # pool_snapshots, # all_txn, # liquidity, # swaps, # freq='H', # pandas frequency codes (can also include anchor) # sigma=0.005, # reducing this for this test # numeraire_token=NUMERAIRE_TOKEN # ) # # pd.to_pickle(all_returns, SAVED_WEEKLY_RETURNS) NOT SAVING THIS ONE FOR NOW! # bin_returns = defaultdict(list) # for day, irrs in all_returns.items(): # for bin_pos, irr in irrs.items(): # bin_no = int(re.findall(r'_bin(\d+)_', bin_pos)[0]) # bin_returns[bin_no].append(irr) # fig, ax = plt.subplots(figsize=(20, 8)) # x_pos, x_labels, y = [], [], [] # for i in range(1, 23): # x_pos.append(i) # x_labels.append(f'{i - 11}$\sigma$') # y.append(np.mean(bin_returns[i])) # ax.plot(x_pos, y) # ax.set_xticks(ticks=np.array(x_pos[:-1]) + 0.5, labels=x_labels[:-1]) # ax.set_title('Hourly Mean Returns Per Bin') # ax.set_xlabel('Bin Edge, as a Function of $\sigma$') # ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.2%}')) # plt.show() # -
eda_notebooks/USDC-WETH-500 Pool.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 1.063485, "end_time": "2021-07-22T06:29:41.908055", "exception": false, "start_time": "2021-07-22T06:29:40.844570", "status": "completed"} tags=[] # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # Data visualization # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + [markdown] papermill={"duration": 0.03967, "end_time": "2021-07-22T06:29:41.990396", "exception": false, "start_time": "2021-07-22T06:29:41.950726", "status": "completed"} tags=[] # ## Load training dataset # + papermill={"duration": 0.092361, "end_time": "2021-07-22T06:29:42.123625", "exception": false, "start_time": "2021-07-22T06:29:42.031264", "status": "completed"} tags=[] Df = pd.read_csv("../input/titanic/train.csv") Df.head(10) # + papermill={"duration": 0.050103, "end_time": "2021-07-22T06:29:42.215756", "exception": false, 
"start_time": "2021-07-22T06:29:42.165653", "status": "completed"} tags=[] Df.shape # + papermill={"duration": 0.069055, "end_time": "2021-07-22T06:29:42.326199", "exception": false, "start_time": "2021-07-22T06:29:42.257144", "status": "completed"} tags=[] Df.info() # + [markdown] papermill={"duration": 0.042249, "end_time": "2021-07-22T06:29:42.409895", "exception": false, "start_time": "2021-07-22T06:29:42.367646", "status": "completed"} tags=[] # ### Check for NaN values # + papermill={"duration": 0.054412, "end_time": "2021-07-22T06:29:42.506485", "exception": false, "start_time": "2021-07-22T06:29:42.452073", "status": "completed"} tags=[] Df.isna().sum() # + papermill={"duration": 0.548992, "end_time": "2021-07-22T06:29:43.097346", "exception": false, "start_time": "2021-07-22T06:29:42.548354", "status": "completed"} tags=[] sns.heatmap(Df.isna()) # + papermill={"duration": 0.065349, "end_time": "2021-07-22T06:29:43.206926", "exception": false, "start_time": "2021-07-22T06:29:43.141577", "status": "completed"} tags=[] df = Df.copy() df.drop(axis=1, columns=['PassengerId', 'Name', 'Ticket', 'Cabin'], inplace=True) df.head() # + papermill={"duration": 0.063523, "end_time": "2021-07-22T06:29:43.314278", "exception": false, "start_time": "2021-07-22T06:29:43.250755", "status": "completed"} tags=[] df.groupby(by=df['Sex']).mean() # + papermill={"duration": 0.058082, "end_time": "2021-07-22T06:29:43.415984", "exception": false, "start_time": "2021-07-22T06:29:43.357902", "status": "completed"} tags=[] df['Sex'].value_counts() # + papermill={"duration": 0.053881, "end_time": "2021-07-22T06:29:43.514209", "exception": false, "start_time": "2021-07-22T06:29:43.460328", "status": "completed"} tags=[] df['Age'].mean() # + papermill={"duration": 0.066496, "end_time": "2021-07-22T06:29:43.625441", "exception": false, "start_time": "2021-07-22T06:29:43.558945", "status": "completed"} tags=[] df.groupby(df['Embarked']).mean() # + papermill={"duration": 0.059722, 
"end_time": "2021-07-22T06:29:43.730774", "exception": false, "start_time": "2021-07-22T06:29:43.671052", "status": "completed"} tags=[] df['Fare'].describe() # + papermill={"duration": 0.069172, "end_time": "2021-07-22T06:29:43.845616", "exception": false, "start_time": "2021-07-22T06:29:43.776444", "status": "completed"} tags=[] df2 = df.loc[(df['Sex']=='male'), ['Survived', 'Age', 'Fare']] df2 # + papermill={"duration": 0.0757, "end_time": "2021-07-22T06:29:43.968740", "exception": false, "start_time": "2021-07-22T06:29:43.893040", "status": "completed"} tags=[] df2.loc[df2['Survived']==1, 'Fare'].describe() # + [markdown] papermill={"duration": 0.047262, "end_time": "2021-07-22T06:29:44.063336", "exception": false, "start_time": "2021-07-22T06:29:44.016074", "status": "completed"} tags=[] # 109 Males survived # + papermill={"duration": 0.066741, "end_time": "2021-07-22T06:29:44.177622", "exception": false, "start_time": "2021-07-22T06:29:44.110881", "status": "completed"} tags=[] df3 = df.loc[(df['Sex']=='female'), ['Survived', 'Age', 'Fare']] df3 # + papermill={"duration": 0.061341, "end_time": "2021-07-22T06:29:44.286095", "exception": false, "start_time": "2021-07-22T06:29:44.224754", "status": "completed"} tags=[] df3.loc[df3['Survived']==1, 'Fare'].describe() # + [markdown] papermill={"duration": 0.04788, "end_time": "2021-07-22T06:29:44.383077", "exception": false, "start_time": "2021-07-22T06:29:44.335197", "status": "completed"} tags=[] # 233 Females survived. 
# So, total number of people survived = 342

# Age statistics of the survivors, by sex.
# NOTE(review): df2/df3 are built upstream and appear to be the female/male
# subsets of the training frame -- confirm against the earlier cells.
df2.loc[df2['Survived']==1, 'Age'].describe()

df3.loc[df3['Survived']==1, 'Age'].describe()

# The overall 'Age' mean and 'Age' mean for those who survived do not vary
# enough to consider differently, for both males and females. So, fill NaN
# 'Age' values with the per-sex overall mean.
# Compute the means instead of hard-coding rounded copies of them
# (the original used the literals 27.9157 and 30.7266).
female_age_mean = df.loc[df['Sex']=='female', 'Age'].mean()
male_age_mean = df.loc[df['Sex']=='male', 'Age'].mean()
df.loc[df['Sex']=='female', 'Age'] = df.loc[df['Sex']=='female', 'Age'].fillna(value=female_age_mean)
df.loc[df['Sex']=='male', 'Age'] = df.loc[df['Sex']=='male', 'Age'].fillna(value=male_age_mean)

df.isnull().sum()

# Drop the few remaining rows with missing values.
df.dropna(inplace=True)

# ## Handle categorical features
# One-hot encode with drop_first=True to avoid redundant (collinear) columns.

gender_dummy = pd.get_dummies(df['Sex'], drop_first=True)
gender_dummy.head()

embarked_dummy = pd.get_dummies(df['Embarked'], drop_first=True)
embarked_dummy.head()

df = pd.concat([df, gender_dummy, embarked_dummy], axis=1)
df.drop(columns=['Sex', 'Embarked'], axis=1, inplace=True)
df.head()

# ## Load test dataset

df_test = pd.read_csv("../input/titanic/test.csv")
df_test.head()

df_test.shape

# ### Check for missing values

sns.heatmap(df_test.isnull())

df_test.isnull().sum()

# 'Cabin' is mostly missing -- drop it, mirroring the training set.
df_test.drop(columns=['Cabin'], axis=1, inplace=True)

# BUGFIX: the original used df_test.at[boolean_mask, 'Fare'] -- `.at` is
# documented for single scalar labels only; use `.loc` for boolean-mask
# assignment.  Series.mean() skips NaN, so no need for np.mean here.
df_test.loc[df_test['Fare'].isnull(), 'Fare'] = df_test['Fare'].mean()

df_test.groupby('Sex').mean()

# Fill missing test-set ages with the overall test-set mean
# (computed rather than the hard-coded 30.27).
df_test['Age'] = df_test['Age'].fillna(df_test['Age'].mean())

df_test.isna().sum()

sex = pd.get_dummies(df_test['Sex'], drop_first=True)
embarked = pd.get_dummies(df_test['Embarked'], drop_first=True)

df_test = pd.concat([df_test, sex, embarked], axis=1)
df_test.head()

df_test.drop(columns=['Name', 'Ticket', 'Sex', 'Embarked'], axis=1, inplace=True)
df_test.head()

# ## Prepare final training and test dataset and build model

X_train = df.drop(columns=['Survived'], axis=1)
y_train = df['Survived']
X_test = df_test.drop(columns=['PassengerId'], axis=1).copy()

from sklearn.linear_model import LogisticRegression

# L1-regularized logistic regression; liblinear supports the l1 penalty.
lr_model = LogisticRegression(solver='liblinear', penalty='l1')

lr_model.fit(X_train, y_train)

titanic_preds = lr_model.predict(X_test)

lr_model_acc = round(lr_model.score(X_train, y_train)*100, 2)
print("Training accuracy of the model is ", lr_model_acc)

from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix

# 3-fold cross-validated predictions give an out-of-sample confusion matrix.
preds = cross_val_predict(lr_model, X_train, y_train, cv=3)
confusion_matrix(y_train, preds)

# Build the submission file from the provided sample as a template.
submit = pd.read_csv("../input/titanic/gender_submission.csv")
submit.head()

submit['Survived'] = titanic_preds
submit.to_csv("submit.csv", index=False)
titanicdisaster-survivalprediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Dependencies
# %matplotlib inline
import pandas as pd
from sqlalchemy import create_engine
import matplotlib.pyplot as plt

# Engine/Connection for the local EmployeeSQL Postgres database.
# NOTE(review): keep real credentials out of source control
# (use environment variables or a config file).
user = 'postgres'
pswd = '<PASSWORD>'
host = 'localhost'
port = '5432'
database = 'EmployeeSQL'
engine = create_engine(f'postgresql+psycopg2://{user}:{pswd}@{host}:{port}/{database}')
connection = engine.connect()

# Saving salaries as a pandas DataFrame, only relevant attributes
salaries = pd.read_sql("SELECT emp_no, salary FROM salaries", connection)
salaries.head()

# Histogram of all salaries in ten equal-width bins.
fig, ax = plt.subplots(figsize=(9,7))
ax.hist(salaries.salary, bins=10, facecolor='blue', alpha=0.5, ec='black', lw=1.2)
ax.set_title('Ten Salary Ranges')
ax.set_xlabel('Salary')
ax.set_ylabel('Frequency')
plt.tight_layout()
plt.savefig('Salary_Ranges.png')
plt.show()

# Saving titles as a pandas DataFrame, only relevant attributes
titles = pd.read_sql("SELECT emp_no, title FROM titles", connection)
titles.head()

# Merging salaries and titles on employee number.
salaries_titles = pd.merge(salaries, titles, how='inner', on='emp_no')
salaries_titles.head()

# Average salary per job title (emp_no dropped so it is not averaged).
average_salaries = salaries_titles.drop(columns='emp_no').groupby('title').mean().reset_index()
average_salaries

category = average_salaries.title
height = average_salaries.salary
fig, ax = plt.subplots(figsize=(9,7))
ax.bar(category, height, color=['k', 'c', 'r', 'g', 'b', 'y', 'm'], alpha=0.5, ec='black', lw=1)
plt.xticks(category, rotation="vertical")
ax.set_title('Average Salaries for each Employee Job Title')
ax.set_xlabel('Job Title')
ax.set_ylabel('Average Salary')
plt.tight_layout()
plt.savefig('Average_Salaries.png')
plt.show()

# BUGFIX(resource): release the database connection and pool when done;
# the original left both open.
connection.close()
engine.dispose()

# ## Trends
# * A very large portion of employees make the lowest salary range 40,000 ~ 48,000
# * Average salaries by titles do not make sense:
#   - **_Senior Staff_** and **_Staff_** make the highest, and about the same on average
#   - **_Senior Engineers_** make less than **_Engineers_** on average...and
#     **_Assistant Engineers_**, **_Engineers_**, **_Senior Engineers_** all make
#     about the same on average
EmployeeSQL/bonus.ipynb
# ---
# jupyter:
#   jupytext:
#     split_at_heading: true
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + hide_input=false
#hide
from fastai.gen_doc.nbdoc import *
# -

# # A neural net from the foundations

# ## A neural net layer from scratch

# ### Modeling a neuron

# ### Matrix multiplication from scratch

import torch
from torch import tensor


# Naive triple-loop matrix multiply: every scalar op runs in Python.
def matmul(a,b):
    ar,ac = a.shape # n_rows * n_cols
    br,bc = b.shape
    assert ac==br
    c = torch.zeros(ar, bc)
    for i in range(ar):
        for j in range(bc):
            for k in range(ac):
                c[i,j] += a[i,k] * b[k,j]
    return c


m1 = torch.randn(5,28*28)
m2 = torch.randn(784,10)

# %time t1=matmul(m1, m2)

# %timeit -n 20 t2=m1@m2

# Elementwise arithmetic on tensors.
a = tensor([10., 6, -4])
b = tensor([2., 8, 7])
a + b

a < b

(a < b).all(), (a==b).all()

(a + b).mean().item()

m = tensor([[1., 2, 3], [4,5,6], [7,8,9]])
m*m

# NOTE: (3,3) * (2,3) is a shape mismatch -- this cell raises a RuntimeError
# on purpose, to show elementwise ops need compatible shapes.
n = tensor([[1., 2, 3], [4,5,6]])
m*n


# Second version: the inner k-loop replaced by an elementwise product + sum.
def matmul(a,b):
    ar,ac = a.shape
    br,bc = b.shape
    assert ac==br
    c = torch.zeros(ar, bc)
    for i in range(ar):
        for j in range(bc):
            c[i,j] = (a[i] * b[:,j]).sum()
    return c


# %timeit -n 20 t3 = matmul(m1,m2)

# ### Broadcasting

# #### Broadcasting with a scalar

a = tensor([10., 6, -4])
a > 0

m = tensor([[1., 2, 3], [4,5,6], [7,8,9]])
(m - 5) / 2.73

# #### Broadcasting a vector to a matrix

c = tensor([10.,20,30])
m = tensor([[1., 2, 3], [4,5,6], [7,8,9]])
m.shape,c.shape

m + c

c.expand_as(m)

# expand_as is a zero-copy view: one row of storage, stride 0 on dim 0.
t = c.expand_as(m)
t.storage()

t.stride(), t.shape

c + m

c = tensor([10.,20,30])
m = tensor([[1., 2, 3], [4,5,6]])
c+m

# NOTE: a length-2 vector cannot broadcast against the 3 columns of a
# (2,3) matrix -- this cell raises a RuntimeError on purpose.
c = tensor([10.,20])
m = tensor([[1., 2, 3], [4,5,6]])
c+m

# unsqueeze(1) turns c into a (3,1) column, so it broadcasts across columns.
c = tensor([10.,20,30])
m = tensor([[1., 2, 3], [4,5,6], [7,8,9]])
c = c.unsqueeze(1)
m.shape,c.shape

c+m

t = c.expand_as(m)
t.storage()

t.stride(), t.shape

# Indexing with None adds a unit axis, just like unsqueeze.
c = tensor([10.,20,30])
c.shape, c.unsqueeze(0).shape,c.unsqueeze(1).shape

c.shape, c[None,:].shape,c[:,None].shape

c[None].shape,c[...,None].shape


# Third version: one broadcasted multiply per output row.
def matmul(a,b):
    ar,ac = a.shape
    br,bc = b.shape
    assert ac==br
    c = torch.zeros(ar, bc)
    for i in range(ar):
#       c[i,j] = (a[i,:] * b[:,j]).sum() # previous
        c[i]   = (a[i ].unsqueeze(-1) * b).sum(dim=0)
    return c


# %timeit -n 20 t4 = matmul(m1,m2)

# #### Broadcasting Rules

# ### Einstein summation

def matmul(a,b): return torch.einsum('ik,kj->ij', a, b)

# %timeit -n 20 t5 = matmul(m1,m2)

# ## The forward and backward passes

# ### Defining and initializing a layer

def lin(x, w, b): return x @ w + b

x = torch.randn(200, 100)
y = torch.randn(200)

w1 = torch.randn(100,50)
b1 = torch.zeros(50)
w2 = torch.randn(50,1)
b2 = torch.zeros(1)

l1 = lin(x, w1, b1)
l1.shape

l1.mean(), l1.std()

# Unscaled weights: repeated matmuls blow the activations up.
x = torch.randn(200, 100)
for i in range(50): x = x @ torch.randn(100,100)
x[0:5,0:5]

# Scaling by 0.01: activations vanish to zero instead.
x = torch.randn(200, 100)
for i in range(50): x = x @ (torch.randn(100,100) * 0.01)
x[0:5,0:5]

# 0.1 == 1/sqrt(100) keeps the activation std around one.
x = torch.randn(200, 100)
for i in range(50): x = x @ (torch.randn(100,100) * 0.1)
x[0:5,0:5]

x.std()

x = torch.randn(200, 100)
y = torch.randn(200)

from math import sqrt
w1 = torch.randn(100,50) / sqrt(100)
b1 = torch.zeros(50)
w2 = torch.randn(50,1) / sqrt(50)
b2 = torch.zeros(1)

l1 = lin(x, w1, b1)
l1.mean(),l1.std()

def relu(x): return x.clamp_min(0.)

# ReLU shifts the statistics, so 1/sqrt(n) scaling is no longer enough...
l2 = relu(l1)
l2.mean(),l2.std()

x = torch.randn(200, 100)
for i in range(50): x = relu(x @ (torch.randn(100,100) * 0.1))
x[0:5,0:5]

# ...Kaiming-style sqrt(2/n) scaling compensates for the ReLU.
x = torch.randn(200, 100)
for i in range(50): x = relu(x @ (torch.randn(100,100) * sqrt(2/100)))
x[0:5,0:5]

x = torch.randn(200, 100)
y = torch.randn(200)

w1 = torch.randn(100,50) * sqrt(2 / 100)
b1 = torch.zeros(50)
w2 = torch.randn(50,1) * sqrt(2 / 50)
b2 = torch.zeros(1)

l1 = lin(x, w1, b1)
l2 = relu(l1)
l2.mean(), l2.std()

# Two-layer net reading the globals w1/b1/w2/b2.
def model(x):
    l1 = lin(x, w1, b1)
    l2 = relu(l1)
    l3 = lin(l2, w2, b2)
    return l3

out = model(x)
out.shape

def mse(output, targ): return (output.squeeze(-1) - targ).pow(2).mean()

loss = mse(out, y)

# ### Gradients and backward pass

# Each *_grad function stores the computed gradient on the corresponding
# tensor's .g attribute.

def mse_grad(inp, targ):
    # grad of loss with respect to output of previous layer
    inp.g = 2. * (inp.squeeze() - targ).unsqueeze(-1) / inp.shape[0]

def relu_grad(inp, out):
    # grad of relu with respect to input activations
    inp.g = (inp>0).float() * out.g

def lin_grad(inp, out, w, b):
    # grad of matmul with respect to input
    inp.g = out.g @ w.t()
    w.g = inp.t() @ out.g
    b.g = out.g.sum(0)

# ### Sidebar: SymPy

from sympy import symbols,diff
sx,sy = symbols('sx sy')
diff(sx**2, sx)

# ### End sidebar

def forward_and_backward(inp, targ):
    # forward pass:
    l1 = inp @ w1 + b1
    l2 = relu(l1)
    out = l2 @ w2 + b2
    # we don't actually need the loss in backward!
    loss = mse(out, targ)

    # backward pass:
    mse_grad(out, targ)
    lin_grad(l2, out, w2, b2)
    relu_grad(l1, l2)
    lin_grad(inp, l1, w1, b1)

# ### Refactor the model

class Relu():
    def __call__(self, inp):
        self.inp = inp
        self.out = inp.clamp_min(0.)
        return self.out

    def backward(self): self.inp.g = (self.inp>0).float() * self.out.g

class Lin():
    def __init__(self, w, b): self.w,self.b = w,b

    def __call__(self, inp):
        self.inp = inp
        self.out = inp@self.w + self.b
        return self.out

    def backward(self):
        self.inp.g = self.out.g @ self.w.t()
        self.w.g = self.inp.t() @ self.out.g
        self.b.g = self.out.g.sum(0)

class Mse():
    def __call__(self, inp, targ):
        self.inp = inp
        self.targ = targ
        self.out = (inp.squeeze() - targ).pow(2).mean()
        return self.out

    def backward(self):
        x = (self.inp.squeeze()-self.targ).unsqueeze(-1)
        self.inp.g = 2.*x/self.targ.shape[0]

class Model():
    def __init__(self, w1, b1, w2, b2):
        self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)]
        self.loss = Mse()

    def __call__(self, x, targ):
        for l in self.layers: x = l(x)
        return self.loss(x, targ)

    def backward(self):
        self.loss.backward()
        for l in reversed(self.layers): l.backward()

model = Model(w1, b1, w2, b2)
loss = model(x, y)
model.backward()

# ### Going to PyTorch

class LayerFunction():
    def __call__(self, *args):
        self.args = args
        self.out = self.forward(*args)
        return self.out

    def forward(self):  raise Exception('not implemented')
    def bwd(self):      raise Exception('not implemented')
    def backward(self): self.bwd(self.out, *self.args)

class Relu(LayerFunction):
    def forward(self, inp): return inp.clamp_min(0.)
    def bwd(self, out, inp): inp.g = (inp>0).float() * out.g

class Lin(LayerFunction):
    def __init__(self, w, b): self.w,self.b = w,b

    def forward(self, inp): return inp@self.w + self.b

    def bwd(self, out, inp):
        inp.g = out.g @ self.w.t()
        # BUGFIX: LayerFunction stores self.args, never self.inp/self.out,
        # so the original `self.inp.t() @ self.out.g` raised AttributeError;
        # use the inp/out arguments that backward() passes to bwd instead.
        self.w.g = inp.t() @ out.g
        self.b.g = out.g.sum(0)

class Mse(LayerFunction):
    def forward (self, inp, targ): return (inp.squeeze() - targ).pow(2).mean()
    def bwd(self, out, inp, targ):
        inp.g = 2*(inp.squeeze()-targ).unsqueeze(-1) / targ.shape[0]

# +
from torch.autograd import Function

class MyRelu(Function):
    @staticmethod
    def forward(ctx, i):
        result = i.clamp_min(0.)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        i, = ctx.saved_tensors
        return grad_output * (i>0).float()

# +
import torch.nn as nn

class LinearLayer(nn.Module):
    def __init__(self, n_in, n_out):
        super().__init__()
        # Kaiming-style init, matching the sqrt(2/n) experiments above.
        self.weight = nn.Parameter(torch.randn(n_out, n_in) * sqrt(2/n_in))
        self.bias = nn.Parameter(torch.zeros(n_out))

    def forward(self, x): return x @ self.weight.t() + self.bias
# -

lin = LinearLayer(10,2)
p1,p2 = lin.parameters()
p1.shape,p2.shape

class Model(nn.Module):
    def __init__(self, n_in, nh, n_out):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(n_in,nh), nn.ReLU(), nn.Linear(nh,n_out))
        self.loss = mse

    def forward(self, x, targ): return self.loss(self.layers(x).squeeze(), targ)

# Same model without the explicit super().__init__().
# NOTE(review): `Module` is not defined in this file; it appears to come from
# fastai via the wildcard import at the top -- confirm before running.
class Model(Module):
    def __init__(self, n_in, nh, n_out):
        self.layers = nn.Sequential(
            nn.Linear(n_in,nh), nn.ReLU(), nn.Linear(nh,n_out))
        self.loss = mse

    def forward(self, x, targ): return self.loss(self.layers(x).squeeze(), targ)

# ## Things to remember

# ## Questionnaire

# 1. Write the Python code to implement a single neuron.
# 1. Write the Python code to implement ReLU.
# 1. Write the Python code for a dense layer in terms of matrix multiplication.
# 1. Write the Python code for a dense layer in plain Python (that is, with list comprehensions and functionality built into Python).
# 1. What is the hidden size of a layer?
# 1. What does the `t` method do in PyTorch?
# 1. Why is matrix multiplication written in plain Python very slow?
# 1. In matmul, why is `ac==br`?
# 1. In Jupyter notebook, how do you measure the time taken for a single cell to execute?
# 1. What is elementwise arithmetic?
# 1. Write the PyTorch code to test whether every element of `a` is greater than the corresponding element of `b`.
# 1. What is a rank-0 tensor? How do you convert it to a plain Python data type?
# 1. What does this return, and why?: `tensor([1,2]) + tensor([1])`
# 1. What does this return, and why?: `tensor([1,2]) + tensor([1,2,3])`
# 1. How does elementwise arithmetic help us speed up matmul?
# 1. What are the broadcasting rules?
# 1. What is `expand_as`? Show an example of how it can be used to match the results of broadcasting.
# 1. How does `unsqueeze` help us to solve certain broadcasting problems?
# 1. How can you use indexing to do the same operation as `unsqueeze`?
# 1. How do we show the actual contents of the memory used for a tensor?
# 1. When adding a vector of size 3 to a matrix of size 3 x 3, are the elements of the vector added to each row, or each column of the matrix? (Be sure to check your answer by running this code in a notebook.)
# 1. Do broadcasting and `expand_as` result in increased memory use? Why or why not?
# 1. Implement matmul using Einstein summation.
# 1. What does a repeated index letter represent on the left-hand side of einsum?
# 1. What are the three rules of Einstein summation notation? Why?
# 1. What is the forward pass, and the backward pass, of a neural network?
# 1. Why do we need to store some of the activations calculated for intermediate layers in the forward pass?
# 1. What is the downside of having activations with a standard deviation too far away from one?
# 1. How can weight initialisation help avoid this problem?
# 1. What is the formula to initialise weights such that we get a standard deviation of one, for a plain linear layer; for a linear layer followed by ReLU?
# 1. Why do we sometimes have to use the `squeeze` method in loss functions?
# 1. What does the argument to the squeeze method do? Why might it be important to include this argument, even though PyTorch does not require it?
# 1. What is the chain rule? Show the equation in either of the two forms shown in this chapter.
# 1. Show how to calculate the gradients of `mse(lin(l2, w2, b2), y)` using the chain rule.
# 1. What is the gradient of relu? Show in math or code. (You shouldn't need to commit this to memory—try to figure it out using your knowledge of the shape of the function.)
# 1. In what order do we need to call the `*_grad` functions in the backward pass? Why?
# 1. What is `__call__`?
# 1. What methods do we need to implement when writing a `torch.autograd.Function`?
# 1. Write `nn.Linear` from scratch, and test it works.
# 1. What is the difference between `nn.Module` and fastai's `Module`?

# ### Further research

# 1. Implement relu as a `torch.autograd.Function` and train a model with it.
# 1. If you are mathematically inclined, find out what the gradients of a linear layer are in maths notation. Map that to the implementation we saw in this chapter.
# 1. Learn about the `unfold` method in PyTorch, and use it along with matrix multiplication to implement your own 2d convolution function, and train a CNN that uses it.
# 1. Implement all that is in this chapter using numpy instead of PyTorch.
fastai/course-v4/nbs/17_foundations.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.0 # language: sage # name: sagemath # --- # # 排列組合與離散機率(Combinatorics and Discrete Probability) # ![Creative Commons License](https://i.creativecommons.org/l/by/4.0/88x31.png) # # This work by <NAME> is licensed under a [Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/). # _Tested on SageMath version 8.7_ # ## 排列組合 # ### 排列 # # 從 $n$ 個東西中拿出 $k$ 個出來**排列**時要計較順序 # 可以用 `Permutations` 來找出所有排列 # (注意最後有個 `s`) elements = [1,2,3,4,5] for per in Permutations(elements,2): print(per) # 從 $n$ 個東西中拿出 $k$ 個出來排列的 # 方法數有 $P^n_k=\frac{n!}{(n-k)!}$ 種 # # 也有人把 $P^n_k$ 記作 $[n]_k$ factorial(5)/factorial(5-2) # ### 組合 # # 從 $n$ 個東西中拿出 $k$ 個出來**組合**時要**不計較**順序 # 可以用 `Combinations` 來找出所有組合 # (注意最後一樣有個 `s`) elements = [1,2,3,4,5] for com in Combinations(elements,2): print(com) # 從 $n$ 個東西中拿出 $k$ 個出來組合的 # 方法數有 $C^n_k=\frac{n!}{k!(n-k)!}$ 種 # # 也有人把 $C^n_k$ 記作 $\binom{n}{k}$ factorial(5)/factorial(5-2)/factorial(2) # ### 計數組合學(Enumerative Combinatorics) # # 到底有幾個?何不全部列出來試試看? 
# 自己設定一個參數(比如說 `counter`)來計算次數 # 在 1 到 100 中 # 除以 13 餘 1 # 或是 # 除以 17 餘 1 # 的數字有幾個 # + counter = 0 for i in range(1,101): if i%13 == 1 or i%17 == 1: counter = counter + 1 print(i) counter # - # 數字 1,2,3,4,5 排成一排 # 1 在開頭 # 而且 # 5 不在結尾 # 的排列有幾種 # + elements = [1,2,3,4,5] counter = 0 for per in Permutations(elements,5): if per[0] == 1 and per[4] != 5: counter = counter + 1 counter # - # 數字 1,1,2,2,3 排成一排 # 的排列有幾種 # + elements = [1,1,2,2,3] counter = 0 for per in Permutations(elements,5): counter = counter + 1 counter # - # 從數字 1,2,3,4,5 中選出三個 # 選到 1 # 而且 # 沒選到 5 # 的組合有幾種 # + elements = [1,2,3,4,5] counter = 0 for com in Combinations(elements,3): if 1 in com and 5 not in com: counter = counter + 1 counter # - # 從數字 1,1,2,2,3 中選出三個 # 的組合有幾種 # + elements = [1,1,2,2,3] counter = 0 for com in Combinations(elements,3): counter = counter + 1 counter # - # $x+y+z = 5$ # 的非負整數解有幾組 # + counter = 0 for x in range(6): for y in range(6): for z in range(6): if x + y + z == 5: counter = counter + 1 counter # - # $x+y+z=5$ # 的正整數解有幾組 # + counter = 0 for x in range(1,6): for y in range(1,6): for z in range(1,6): if x + y + z == 5: counter = counter + 1 counter # - # 100 的因數有幾個 # + n = 100 counter = 0 for p in range(1,101): if n % p == 0: counter = counter + 1 counter # - # 100 的因數總和是多少 # + n = 100 total = 0 for p in range(1,101): if n % p == 0: total = total + p total # - # ### 二項式定理 # # 將 $(1+x)^n$ 展開後 # 其 $x^k$ 的係數為 $C^n_k$ # # 所以 $C^n_k$ 又稱為**二項式係數**(binomial coefficient) # + x = var('x') n = 10 k = 4 p = expand((1+x)^n) print(p) print(p.coefficient(x^k)) # - # 驗證一下對不對 # + n = 10 k = 4 factorial(n)/factorial(n-k)/factorial(k) # - # 也可以用 `binomial(n,k)` 來計算 # + n = 10 k = 4 binomial(n,k) # - # ## 離散機率 # # 機率就存在我們生活之中 # 用 `random` 套件來模擬看看吧! 
# Most of the programs below use the `random` module;
# remember to run this import first.
import random

# `random.choice` picks one element of the given list uniformly at random.
# Run it a few times and watch the result change.
numbers = [1,2,3,4,5,6]
random.choice(numbers)

# Wrap it in a function and we have a virtual six-sided die.
def roll_a_dice():
    return random.choice([1,2,3,4,5,6])

roll_a_dice()

# Is the die fair?  Roll it many times and tally each face.

# +
counter = [0,0,0,0,0,0]
n = 10000
for i in range(n):
    k = roll_a_dice()
    counter[k-1] = counter[k-1] + 1

# die faces are 1,2,3,4,5,6 while counter is indexed 0,1,2,3,4,5
print('出現次數 %s'%counter)
print('機率 %s'%[N(count/n, digits=4) for count in counter])
# -

# `random.randint(a,b)` draws an integer between a and b, inclusive
# (note both a and b can be drawn -- unlike `range(a,b)`).
random.randint(1,6)

# A deck has 52 cards.  Drawing five, what is the probability of
# four of a kind (4+1)?

def pick_one(l):
    """Remove and return a uniformly random card from the list l."""
    card = random.choice(l)
    l.remove(card)
    return card

# Deal one 5-card hand.
# +
suits = ['spade', 'heart', 'diamond', 'club']
ranks = [1,2,3,4,5,6,7,8,9,10,11,12,13]
cards = [(suit,rank) for suit in suits for rank in ranks]
hand = []
for _ in range(5):
    hand.append(pick_one(cards))
hand
# -

# Estimate the four-of-a-kind probability over K random deals.
# +
K = 100000
counter = 0
for _ in range(K):
    suits = ['spade', 'heart', 'diamond', 'club']
    ranks = [1,2,3,4,5,6,7,8,9,10,11,12,13]
    cards = [(suit,rank) for suit in suits for rank in ranks]
    hand = []
    for _ in range(5):
        hand.append(pick_one(cards))
    for i in range(1,14):
        occur = [card for card in hand if card[1] == i]
        if len(occur) == 4:
            counter += 1
            break;
print(N(counter / K))
# -

# How many primes are there in 1..100?
# What is the chance of picking one at random?

# +
counter = 0
for i in range(1,101):
    if is_prime(i):
        counter = counter + 1
print(counter)
print(N(counter/100))
# -

# Wrapped as a function: pick a number from 1 to `K`
# and report whether it is prime.
def prime_in_K(K=100):
    p = random.randint(1,K)
    return is_prime(p)

# Now estimate the probability empirically -- in other words,
# the density of primes in 1..K.  Try different values of K.

# +
K = 100
n = 10000
counter = 0
for i in range(n):
    # BUGFIX: pass K explicitly -- the original called prime_in_K() with no
    # argument, so changing K above had no effect (the default 100 was
    # always used).
    if prime_in_K(K):
        counter = counter + 1
N(counter / n)
# -

# #### The Monty Hall problem
#
# A US TV show called Let's Make a Deal (hosted by Monty Hall)
# has three doors: one hides a car and the other two hide goats.
# A player who picks the car's door wins the car.  After the player
# chooses a door, the host -- who knows where the car is -- opens one
# of the unchosen doors, shows it is not the car, and asks whether
# the player wants to switch.
#
# If you were the player, would you switch?
# 我們來**模擬一場遊戲**: # # 三扇門 # 隨機選一扇放車子 # 玩家隨機選一個門 # 現在沒被選到又不是車子的門 # 可能有一扇(你沒選到車子) # 也有可能是兩扇(你選到車子了!) # 主持人打開其中一扇 # 你決定換到最後那扇 # 沒選到 也不是主持人打開的 # 那扇門 # # 然後開獎! # 選中是 `True` # 沒選中是 `False` # + doors = [1,2,3] car = random.choice(doors) player_choose = random.choice(doors) no_car_no_choose = [door for door in doors if door != car and door != player_choose] host_open = random.choice(no_car_no_choose) no_choose_no_open = [door for door in doors if door != player_choose and door != host_open] player_change = no_choose_no_open[0] player_change == car # - # 把整場遊戲包成一個函數 # 這樣我們才能一次又一次的模擬 # 並計算成功的機率 def Monty_Hall_game(): doors = [1,2,3] car = random.choice(doors) player_choose = random.choice(doors) no_car_no_choose = [door for door in doors if door != car and door != player_choose] host_open = random.choice(no_car_no_choose) no_choose_no_open = [door for door in doors if door != player_choose and door != host_open] player_change = no_choose_no_open[0] return player_change == car # 玩很多場試試看 # 究竟決定換一扇門 # 選到車子的機率是多少 # + n = 10000 counter = 0 for i in range(n): if Monty_Hall_game(): counter = counter + 1 N(counter / n) # - # ## 動手試試看 # ##### 練習 # 用 1,2,3,4,5 排成一排, # 其中 1 不在第一位、 # 2 不在第二位、 # 3 不在第三位、 # 4 不在第四位、而且 # 5 不在第五位 # 的排法有幾種。 # + ### your answer here # - # ##### 練習 1 # 袋中有三顆紅球、兩顆白球, # 用 `'r'` 代表紅球, # 用 `'w'` 代表白球。 # 列出所有 # 從袋中取出三顆球的組合方法。 # + ### your answer here # - # ##### 練習 2 # 求整數解的個數: # $x+y+z=10$ # $0\leq x\leq 4$, # $1\leq y\leq 5$, # $2\leq z\leq 6$。 # + ### your answer here # - # ##### 練習 3 # 求偶數解的個數: # $x+y+z=10$ # $0\leq x\leq 4$, # $1\leq y\leq 5$, # $2\leq z\leq 6$。 # + ### your answer here # - # ##### 練習 4 # 從 5 個 1 和 5 個 0 # 中拿出 5 個數字排列。 # 恰好使用奇數個 1 的排列方法有幾種。 # + ### your answer here # - # _做以下練習前記得先_ `import random` import random # ##### 練習 5 # 下面定義一顆虛擬的硬幣(1 是正面、0 是反面)。 # 試著丟這顆硬幣許多次來猜測 # 丟到正面的機率是多少。 def unknown_dice(): a = list(bin(1365))[2:] return int(random.choice(a)) # + ### your answer here # - # ##### 練習 6 # 定義一個函數 `six_get_three` 其功能為: # 不輸入任何參數, # 
# randomly generates a list `a` of length six,
# each entry being 1 or 0 with probability 0.5 each,
# and returns `True` when `a` contains exactly three 1s,
# otherwise `False`.
#
# What is the probability of returning `True`?

# +
### your answer here
# -

# ##### Exercise 7
# A deck has 52 cards.
# Drawing five, what is the probability of a full house (3+2)?

# +
### your answer here
# -

# ##### Exercise 8
# Besides `for` loops, there are also `while` loops:
# ```Python
# while condition:
#     do something
# ```
# The body `do something` repeats as long as `condition` holds.
#
# Guess what this function computes.
# (The inputs `a` and `b` must be positive integers.)
# ```Python
# def the_number(a,b):
#     k = 1
#     again = True
#     while again:
#         if k % a == 0 and k % b == 0:
#             again = False
#             return k
#         else:
#             k = k + 1
# ```

# +
### your answer here
# -

# ##### Exercise 9 (the Monty Fall problem)
# Again on Let's Make a Deal: the player picks a door, but this time the
# host slips on the way over and accidentally opens one of the doors
# (without knowing which door hides the car).  Luckily, the door opened
# by accident is not the car.  The host asks whether the player wants
# to switch.
#
# If you were the player, would you switch?
#
# Write a function `Monty_Fall_game` to simulate this:
# the function takes no arguments and, on every call:
# 1. sets up three doors
# 2. puts the car behind a random door
# 3. lets the player pick a random door
# 4. has the host open one of the two unchosen doors, chosen at random
# 5. if the host opened the car's door, the round does not count -- restart from step 1
# 6. if the host did not open the car's door, continue to step 7
# 7. the player switches doors (to the one neither chosen at the start nor opened by the host)
# 8. returns `True` if the player now has the car, otherwise `False`
#
# See the hint below and the mathematical explanation if needed.

# +
### your answer here
# -

# Hint:
# steps 1, 2, 3 are the same as in the Monty Hall problem;
# step 4 is similar -- only a small part changes;
# steps 5, 6 can use an infinite `while` loop that only `break`s
# when the condition of step 6 holds;
# finally, steps 7, 8 are again the same.
#
# Complete the `...` parts of this code:
# ```Python
# def Monty_Fall_game():
#     while True:
#         doors = ...            # step 1
#         car = ...              # step 2
#         player_choose = ...    # step 3
#
#         not_player_choose = ...  # step 4
#         host_open = ...
#
#         if host_open != car:   # steps 5, 6
#             break
#
#     no_choose_no_open = ...    # step 7
#     player_change = ...
#
#     return player_change == car  # step 8
# ```
#
# Once Monty_Fall_game is done, estimate the success probability with:
#
# ```Python
# n = 10000
#
# counter = 0
# for i in range(n):
#     if Monty_Fall_game():
#         counter = counter + 1
#
# N(counter / n)
# ```

# ##### Deriving the result mathematically
# Let $X$ be a random variable for the outcome of the Monty Fall game:
# $X = 0$ means the car was not won, $X = 1$ means it was,
# so the expectation $E[X]$ is exactly the probability of winning.
#
# Now compute $E[X]$.
# In the first step the player picks the car with probability $\frac{1}{3}$
# and misses it with probability $\frac{2}{3}$.
#
# If the player picked the car, the host is certain to open an ordinary
# door (and the game never restarts);
# switching then wins the car with probability $0$ ($0\%$).
#
# If the player missed, the host opens an ordinary door with probability
# $\frac{1}{2}$ and the car's door with probability $\frac{1}{2}$.
# The game only continues when an ordinary door was opened
# (if the car's door was opened by accident, the round restarts);
# switching then wins the car with probability $1$ ($100\%$).
#
# There are three cases in total:
# Case 1. player has the car, host opens an ordinary door: $p_1 = \frac{1}{3}\cdot 1 = \frac{1}{3}$
# Case 2. player missed, host opens an ordinary door: $p_2 = \frac{2}{3}\cdot\frac{1}{2} = \frac{1}{3}$
# Case 3. player missed, host opens the car's door: $p_3 = \frac{2}{3}\cdot\frac{1}{2} = \frac{1}{3}$
# Only the first two cases let the game continue.
#
# Therefore (the original formula repeated $p_1$ twice; corrected here):
# $E[X] = \big(p_1 \cdot 0 + p_2 \cdot 1\big)/(p_1+p_2) = \frac{1}{2}$
Sage4HS/03-Combinatorics-and-Discrete-Probability.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from time import time

USE_CUDA = torch.cuda.is_available()
USE_CUDA

def cuda_wrapper(torch_obj):
    """Move a tensor/module to the GPU when one is available, else no-op."""
    if USE_CUDA:
        return torch_obj.cuda()
    else:
        return torch_obj

class ModelFullyConnected(nn.Module):
    """MLP over flattened 28x28 images: an input layer plus `num_hidden`
    hidden layers, each followed by ReLU and dropout, then a 10-way
    log-softmax output."""

    def __init__(self, num_hidden=1, hidden_size=32, dropout_rate=0.5):
        super(ModelFullyConnected, self).__init__()
        assert num_hidden > 0
        self.input_size = (28, 28)
        self.output_size = 10
        self.num_hidden = num_hidden
        self.dropout_rate = dropout_rate
        self.input_layer = nn.Linear(self.input_size[0] * self.input_size[1], hidden_size)
        self.hidden_layers = nn.ModuleList()
        for _ in range(num_hidden):
            self.hidden_layers.append(nn.Linear(hidden_size, hidden_size))
        self.output_layer = nn.Linear(hidden_size, self.output_size)

    def forward(self, inputs):
        x = inputs.view(-1, self.input_size[0] * self.input_size[1])
        # BUGFIX: F.dropout defaults to training=True, so dropout was applied
        # even in eval mode; tie it to the module's train/eval flag.
        x = F.dropout(F.relu(self.input_layer(x)), p=self.dropout_rate,
                      training=self.training)
        for layer in self.hidden_layers:
            x = F.dropout(F.relu(layer(x)), p=self.dropout_rate,
                          training=self.training)
        x = self.output_layer(x)
        return F.log_softmax(x, dim=1)

class ModelConvolutional(nn.Module):
    """CNN over 28x28 images: a stack of unpadded ("valid") convolutions
    with ReLU, then one dropout-regularized fully connected layer and a
    10-way log-softmax output."""

    def __init__(self, num_hidden=1, fc_hidden_size=32, dropout_rate=0.5,
                 num_filters=16, filter_size=3):
        super(ModelConvolutional, self).__init__()
        assert num_hidden > 0
        self.input_size = (28, 28)
        self.output_size = 10
        self.num_hidden = num_hidden
        self.num_filters = num_filters
        self.dropout_rate = dropout_rate
        self.input_layer = nn.Conv2d(1, num_filters, filter_size)
        self.hidden_layers = nn.ModuleList()
        for _ in range(num_hidden):
            self.hidden_layers.append(nn.Conv2d(num_filters, num_filters, filter_size))
        # Each valid conv shrinks the feature map by (filter_size - 1);
        # num_hidden + 1 convolutions are applied in total.
        self.size_after_convs = (
            self.input_size[0] - (filter_size - 1) * (num_hidden + 1),
            self.input_size[1] - (filter_size - 1) * (num_hidden + 1)
        )
        self.fc_layer = nn.Linear(self.size_after_convs[0] * self.size_after_convs[1] * num_filters, fc_hidden_size)
        self.output_layer = nn.Linear(fc_hidden_size, self.output_size)

    def forward(self, inputs):
        x = inputs.view(-1, 1, self.input_size[0], self.input_size[1])
        x = F.relu(self.input_layer(x))
        for layer in self.hidden_layers:
            x = F.relu(layer(x))
        x = x.view(-1, self.size_after_convs[0] * self.size_after_convs[1] * self.num_filters)
        # BUGFIX: as above, respect the module's train/eval mode.
        x = F.dropout(F.relu(self.fc_layer(x)), p=self.dropout_rate,
                      training=self.training)
        x = self.output_layer(x)
        return F.log_softmax(x, dim=1)

class AdaSGD():
    """Plain SGD whose learning rate(s) are themselves optimized by Adam.

    `step()` applies vanilla SGD updates with the current base learning
    rate(s) and records (or accumulates) the gradients; `meta_step()` then
    feeds Adam the meta-gradient -<recorded g, new g>, clamped to [-1, 1],
    for each learnable learning rate.  With separate_lrs=True each
    parameter tensor gets its own learnable rate; otherwise one rate is
    shared across all parameters.
    """

    def __init__(self, params, init_base_lr=0, meta_lr=1e-3,
                 meta_betas=(0.9, 0.999), meta_eps=1e-8,
                 separate_lrs=False, accumulate_grads=True):
        self.params = list(params)
        self.separate_lrs = separate_lrs
        self.accumulate_grads = accumulate_grads
        if separate_lrs:
            # One learnable learning rate per parameter tensor.
            self.base_lrs = []
            self.grads = []
            for _ in self.params:
                self.base_lrs.append(nn.Parameter(torch.Tensor([init_base_lr])))
                self.base_lrs[-1].grad = torch.zeros_like(self.base_lrs[-1])
                self.grads.append(0)
            self.meta_opt = optim.Adam(self.base_lrs, lr=meta_lr, betas=meta_betas, eps=meta_eps)
        else:
            # A single learnable learning rate shared by all parameters.
            self.base_lr = nn.Parameter(torch.Tensor([init_base_lr]))
            self.base_lr.grad = torch.zeros_like(self.base_lr)
            self.meta_opt = optim.Adam([self.base_lr], lr=meta_lr, betas=meta_betas, eps=meta_eps)
            self.flat_grads = 0

    def step(self):
        """SGD update on the wrapped params; record gradients for meta_step."""
        if self.separate_lrs:
            for i, param in enumerate(self.params):
                if self.accumulate_grads:
                    self.grads[i] += param.grad.data.view(-1)
                else:
                    self.grads[i] = param.grad.data.view(-1)
                param.data -= self.base_lrs[i].data[0] * param.grad.data
        else:
            grads = []
            for param in self.params:
                grads.append(param.grad.data.view(-1))
            if self.accumulate_grads:
                self.flat_grads += torch.cat(grads)
            else:
                self.flat_grads = torch.cat(grads)
            for param in self.params:
                param.data -= self.base_lr.data[0] * param.grad.data

    def meta_step(self):
        """Adam update of the learning rate(s) from the fresh gradients.

        NOTE(review): the meta-gradients are built with torch.Tensor([...]),
        i.e. on the CPU -- confirm behaviour when the model lives on CUDA.
        """
        if self.separate_lrs:
            self.meta_opt.zero_grad()
            for i, param in enumerate(self.params):
                # d(loss)/d(lr) ~ -<recorded g, new g>; clamp for stability.
                self.base_lrs[i].grad.data = torch.clamp(
                    torch.Tensor([-torch.sum(self.grads[i] * param.grad.data.view(-1))]),
                    -1, 1
                )
            self.meta_opt.step()
            self.grads = [0] * len(self.params)
        else:
            grads = []
            for param in self.params:
                grads.append(param.grad.data.view(-1))
            flat_grads_new = torch.cat(grads)
            self.meta_opt.zero_grad()
            self.base_lr.grad.data = torch.clamp(
                torch.Tensor([-torch.sum(self.flat_grads * flat_grads_new)]),
                -1, 1
            )
            self.meta_opt.step()
            self.flat_grads = 0

batch_size = 32
test_batch_size = 32
max_epoch = 20
do_validation = False

kwargs = {'num_workers': 1, 'pin_memory': True} if USE_CUDA else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=test_batch_size, shuffle=True, **kwargs)

# Result stores, keyed first by model name and then by optimizer name.
losses = {}
test_losses = {}
times = {}
learning_rates = {}

# Fully connected configuration (overridden by the conv cell below;
# the experiments switch between them by re-running one of these cells).
Model = ModelFullyConnected
model_kwargs = {
    'num_hidden': 2,
    'hidden_size': 256,
    'dropout_rate': 0.8
}
model_name = '_'.join(['fc'] + [key + '=' + str(val) for key, val in model_kwargs.items()])
print(model_name)

# Convolutional configuration.
Model = ModelConvolutional
model_kwargs = {
    'num_hidden': 2,
    'fc_hidden_size': 256,
    'dropout_rate': 0.5,
    'num_filters': 16,
    'filter_size': 3
}
model_name = '_'.join(['conv'] + [key + '=' + str(val) for key, val in model_kwargs.items()])
print(model_name)

losses[model_name] = {}
test_losses[model_name] = {}
times[model_name] = {}
learning_rates[model_name] = {}

# +
torch.manual_seed(123)

# BUGFIX: the training loop below appends to meta_opt_names; with this
# initialization commented out (as it originally was) that append raises
# NameError when the file is run from top to bottom.
meta_opt_names = []

run_count = 3
meta_lr = 1e-3
# BUGFIX: guarantee meta_opt_names exists even if the previous cell was not
# re-run with the fixed initialization (it was originally commented out).
meta_opt_names = []
sep_lrs = True
# Train AdaSGD with different truncated-BPTT horizons for the meta update.
for truncated_bptt_step in [10, 100]:
    opt_name = 'AdaSGD' + '_bptt_steps=' + str(truncated_bptt_step) + '_meta_lr=' + str(meta_lr) + \
               '_sep_lrs=' + str(sep_lrs)
    print(opt_name)
    meta_opt_names.append(opt_name)
    losses[model_name][opt_name] = []
    test_losses[model_name][opt_name] = []
    times[model_name][opt_name] = []
    learning_rates[model_name][opt_name] = []
    for run in range(run_count):
        losses[model_name][opt_name].append([])
        test_losses[model_name][opt_name].append([])
        times[model_name][opt_name].append([])
        learning_rates[model_name][opt_name].append([])

        model = cuda_wrapper(Model(**model_kwargs))
        opt = AdaSGD(model.parameters(), meta_lr=meta_lr, separate_lrs=sep_lrs)

        av_loss = 0
        av_test_loss = 0
        alpha = 0.999  # EMA smoothing factor for the reported losses
        global_step = 0
        start_time = time()
        for epoch in range(max_epoch):
            epoch_start_time = time()
            for X, y in train_loader:
                X, y = Variable(cuda_wrapper(X)), Variable(cuda_wrapper(y))
                prob = model(X)
                loss = F.nll_loss(prob, y)
                model.zero_grad()
                # Every truncated_bptt_step steps we also take a meta step,
                # which needs a second backward pass through the same graph.
                do_meta_step = ((global_step + 1) % truncated_bptt_step == 0)
                if do_meta_step:
                    loss.backward(retain_graph=True)
                else:
                    loss.backward()
                opt.step()
                av_loss = alpha * av_loss + (1 - alpha) * loss.data[0]
                losses[model_name][opt_name][run].append(loss.data[0])
                if do_meta_step:
                    # Fresh forward/backward after the parameter update gives the
                    # "new" gradients used by the meta-objective.
                    prob = model(X)
                    loss = F.nll_loss(prob, y)
                    model.zero_grad()
                    loss.backward()
                    opt.meta_step()
                #learning_rates[model_name][opt_name][run].append(opt.base_lr.data[0])
                times[model_name][opt_name][run].append(time() - start_time)
                global_step += 1
            if do_validation:
                for X, y in test_loader:
                    X, y = Variable(cuda_wrapper(X)), Variable(cuda_wrapper(y))
                    prob = model(X)
                    loss = F.nll_loss(prob, y)
                    av_test_loss = alpha * av_test_loss + (1 - alpha) * loss.data[0]
                    # BUGFIX: originally appended 'test_loss', an undefined name.
                    test_losses[model_name][opt_name][run].append(loss.data[0])
            print('epoch {} took {:.1f}s; av_loss = {:.3f}; av_test_loss = {:.3f}'.format(
                epoch+1, time() - epoch_start_time, av_loss, av_test_loss
            ))
            if sep_lrs:
                print('learning rates:', end=' ')
                for lr in opt.base_lrs:
                    print('{:.3f}'.format(lr.data[0]), end=' ')
                print()
            else:
                print('learning rate: {:.3f}'.format(opt.base_lr.data[0]))
        print()
    losses[model_name][opt_name] = np.array(losses[model_name][opt_name])
    test_losses[model_name][opt_name] = np.array(test_losses[model_name][opt_name])
    times[model_name][opt_name] = np.array(times[model_name][opt_name])
    #learning_rates[model_name][opt_name] = np.array(learning_rates[model_name][opt_name])
# -

# +
torch.manual_seed(123)

# Plain-SGD baselines over a grid of fixed learning rates.
opt_names = []
run_count = 3
for lr in [0.01, 0.02, 0.05, 0.1]:
    opt_name = 'SGD' + '_lr=' + str(lr)
    print(opt_name)
    opt_names.append(opt_name)
    losses[model_name][opt_name] = []
    test_losses[model_name][opt_name] = []
    times[model_name][opt_name] = []
    learning_rates[model_name][opt_name] = []
    for run in range(run_count):
        losses[model_name][opt_name].append([])
        test_losses[model_name][opt_name].append([])
        times[model_name][opt_name].append([])
        learning_rates[model_name][opt_name].append([])

        model = cuda_wrapper(Model(**model_kwargs))
        opt = optim.SGD(model.parameters(), lr=lr)

        av_loss = 0
        av_test_loss = 0
        alpha = 0.999
        global_step = 0
        start_time = time()
        for epoch in range(max_epoch):
            epoch_start_time = time()
            for X, y in train_loader:
                X, y = Variable(cuda_wrapper(X)), Variable(cuda_wrapper(y))
                prob = model(X)
                loss = F.nll_loss(prob, y)
                model.zero_grad()
                loss.backward()
                opt.step()
                av_loss = alpha * av_loss + (1 - alpha) * loss.data[0]
                losses[model_name][opt_name][run].append(loss.data[0])
                learning_rates[model_name][opt_name][run].append(lr)
                times[model_name][opt_name][run].append(time() - start_time)
                global_step += 1
            if do_validation:
                for X, y in test_loader:
                    X, y = Variable(cuda_wrapper(X)), Variable(cuda_wrapper(y))
                    prob = model(X)
                    loss = F.nll_loss(prob, y)
                    av_test_loss = alpha * av_test_loss + (1 - alpha) * loss.data[0]
                    # BUGFIX: originally appended 'test_loss', an undefined name.
                    test_losses[model_name][opt_name][run].append(loss.data[0])
            print('epoch {} took {:.1f}s; av_loss = {:.3f}; av_test_loss = {:.3f}; lr = {:.4f}'.format(
                epoch+1, time() - epoch_start_time, av_loss, av_test_loss, lr
            ))
        print()
    losses[model_name][opt_name] = np.array(losses[model_name][opt_name])
    test_losses[model_name][opt_name] = np.array(test_losses[model_name][opt_name])
    times[model_name][opt_name] = np.array(times[model_name][opt_name])
    learning_rates[model_name][opt_name] = np.array(learning_rates[model_name][opt_name])
# -

def running_average(a, alpha=0.999):
    """Exponential moving average of `a` along axis 0 (same shape as `a`)."""
    av = np.zeros_like(a)
    av[0] = a[0]
    for i in range(1, len(a)):
        av[i] = alpha * av[i-1] + (1 - alpha) * a[i]
    return av

# Smoothed training loss vs. step, mean +/- std over runs, with the best SGD
# baseline overlaid as a dashed black line.
plt.figure(figsize=(15,8))
for opt_name in meta_opt_names:
    runs = running_average(losses[model_name][opt_name].T)
    plt.plot(runs.mean(axis=1))
    plt.fill_between(
        np.arange(runs.shape[0]),
        runs.mean(axis=1) - runs.std(axis=1),
        runs.mean(axis=1) + runs.std(axis=1),
        alpha=0.5
    )
for opt_name in ['SGD_lr=0.05']:
    runs = running_average(losses[model_name][opt_name].T)
    plt.plot(runs.mean(axis=1), 'k--')
plt.grid(True)
plt.ylim(0.0, 0.1)
plt.legend(meta_opt_names + ['SGD_best'])

# Same curves plotted against wall-clock time instead of step count.
plt.figure(figsize=(15,8))
for opt_name in meta_opt_names:
    runs = running_average(losses[model_name][opt_name].T)
    time_runs = times[model_name][opt_name].T
    plt.plot(time_runs.mean(axis=1), runs.mean(axis=1))
    plt.fill_between(
        time_runs.mean(axis=1),
        runs.mean(axis=1) - runs.std(axis=1),
        runs.mean(axis=1) + runs.std(axis=1),
        alpha=0.5
    )
#for opt_name in ['SGD_lr=0.05']:
#    runs = running_average(losses[model_name][opt_name].T)
#    time_runs = times[model_name][opt_name].T
#    plt.plot(time_runs.mean(axis=1), runs.mean(axis=1), 'k--')
plt.grid(True)
plt.ylim(0.0, 0.1)
plt.legend(meta_opt_names + ['SGD_best'])

# SGD baselines: smoothed loss vs. wall-clock time.
plt.figure(figsize=(15,8))
for opt_name in opt_names:
    runs = running_average(losses[model_name][opt_name].T)
    time_runs = times[model_name][opt_name].T
    plt.plot(time_runs.mean(axis=1), runs.mean(axis=1))
    plt.fill_between(
        time_runs.mean(axis=1),
        runs.mean(axis=1) - runs.std(axis=1),
        runs.mean(axis=1) + runs.std(axis=1),
        alpha=0.5
    )
plt.grid(True)
plt.ylim(0.0, 0.1)
plt.legend(opt_names)
# Learned learning rates over training: lightly smoothed EMA,
# mean +/- one standard deviation across runs for each AdaSGD variant.
plt.figure(figsize=(15, 8))
for name in meta_opt_names:
    smoothed = running_average(learning_rates[model_name][name].T, alpha=0.99)
    mean_curve = smoothed.mean(axis=1)
    band = smoothed.std(axis=1)
    steps = np.arange(smoothed.shape[0])
    plt.plot(mean_curve)
    plt.fill_between(steps, mean_curve - band, mean_curve + band, alpha=0.5)
plt.grid(True)
#plt.ylim(0.0, 0.1)
plt.legend(meta_opt_names)
AdaSGD_sep_lr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt

# Sample the interval [0, 2*pi] at 1000 evenly spaced points.
theta = np.linspace(0, 2 * np.pi, 1000)

# The three curves to draw, in plotting order (order determines the colors).
curves = [
    5.5 * np.cos(2 * theta) + 5.5,                  # shifted cosine
    0.02 * np.exp(theta),                           # slowly growing exponential
    0.25 * theta ** 2 + 0.1 * np.sin(10 * theta),   # parabola with a ripple
]

# Fix the axis ranges before drawing.
plt.xlim(0, 2 * np.pi)
plt.ylim(-1, 10)

for curve in curves:
    plt.plot(theta, curve)

plt.xlabel('Time in ASTR 119')
plt.ylabel('Measures of Awesomeness')
plt.show()
hw-3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Blend Boosting study on Coursera dataset of the Predict Future Sales:
#
# Here I share with you a systematic blend boosting study on the Coursera dataset of Predict Future Sales (https://www.kaggle.com/c/competitive-data-science-predict-future-sales/). I collected some submission files on Kaggle.
#
# Basically, I start with an analysis of correlations, then sort the scores according to their sums of pairwise correlation values. This lets me divide ~10 scores into 4 subgroups. Then I make an internal linear calibration in each subgroup by considering their scores on Kaggle. Finally I recalibrate between subgroups to achieve higher scores on Kaggle by resubmission. Due to the daily limit on the number of submissions, I was able to get this best result in my first four attempts (the 5th one is for notebook submission). Of course, if you spend much more time, you can always achieve better scores, but it is already the highest score on Kaggle ;-). In the future I can submit much better results, and I also plan to make a full model analysis on this dataset.

# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Dummy submission file: supplies the ID column for the final output.
sub_file = pd.read_csv('../input/best-blend/submission_blend_1.csv')

# Candidate predictions: keep the first 10 score columns (some are identical).
df_sub = pd.read_csv(r'../input/best-blend/best_blend_1.csv')
df_sub = df_sub.iloc[:, :10]

# Rough correlation-based visualization of the candidate scores.
plt.figure(figsize=(10,10))
sns.heatmap(df_sub.corr(), cmap='Spectral')
plt.ylabel('file index numbers')
plt.xlabel('file index numbers')
plt.show()

# +
# Basic analysis and visualization of the subgroups, one color per group.
plt.figure(figsize=(12, 5))
df_mean_corr = pd.DataFrame({'mean_corr': df_sub.corr().mean()})
df_mean_corr = df_mean_corr.sort_values('mean_corr', ascending=False)
df_mean_corr = df_mean_corr.reset_index()

# Plot each subgroup of the sorted mean correlations separately so that
# matplotlib assigns each group its own color.
subgroup_slices = [slice(0, 4), slice(4, 6), slice(6, 8), slice(8, None)]
for part in subgroup_slices:
    plt.plot(df_mean_corr.index[part], df_mean_corr['mean_corr'].values[part],
             'o', ms=10)

plt.xticks([*range(len(df_mean_corr))], df_mean_corr['index'].tolist())
plt.title('determination of sub_groups')
plt.ylabel('a corelation ralated index')
plt.xlabel('file index numbers')
plt.show()

# +
# Linear combination of the four subgroups to achieve a much better score.
# Each subgroup is internally weighted, then the groups are recombined.
group_a = 10 * (
    5 * df_sub['2'] +
    5 * df_sub['3'] +
    2 * df_sub['6'] +
    1 * df_sub['9']
) / 13
group_b = 25 * (
    1 * df_sub['4'] +
    1 * df_sub['5']
) / 2
group_c = 2 * (
    2 * df_sub['7'] +
    1 * df_sub['8']
) / 3
group_d = 100 * (
    5 * df_sub['0'] +
    1 * df_sub['1']
) / 6
df_sub['weighted_avg'] = abs((group_a + group_b + group_c + group_d) / 137)

# Create the final submission file.
submission = pd.DataFrame({'ID': sub_file.ID,
                           'item_cnt_month': df_sub['weighted_avg'].tolist()})
submission.to_csv(r'submission_blend_1.csv', index=False)
# -

# ## It gets a 0.83386 as public score, and looks the best score on Kaggle so far ;-)
Kaggle_Pred_sale_price/sample_code/blend-boosting-best-score-on-predict-future-sales.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Tutorial-IllinoisGRMHD: IllinoisGRMHD_headers.h # # ## Authors: <NAME> & <NAME> # # <font color='red'>**This module is currently under development**</font> # # ## In this tutorial module we explain the main `IllinoisGRMHD` header file. This module will likely be absorbed by another one by the time we finish documenting the code # # ### Required and recommended citations: # # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)). # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)). # * **(Recommended)** <NAME>., <NAME>., <NAME>. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)). # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This module is organized as follows # # 0. [Step 0](#src_dir): **Source directory creation** # 1. [Step 1](#introduction): **Introduction** # 1. [Step 2](#igm_headers__h): **`IllinoisGRMHD_headers.h`** # 1. [Step n-1](#code_validation): **Code validation** # 1. 
[Step n](#latex_pdf_output): **Output this notebook to $\LaTeX$-formatted PDF file** # <a id='src_dir'></a> # # # Step 0: Source directory creation \[Back to [top](#toc)\] # $$\label{src_dir}$$ # # We will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet. # + # Step 0: Creation of the IllinoisGRMHD source directory # Step 0a: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step 0b: Load up cmdline_helper and create the directory import cmdline_helper as cmd IGM_src_dir_path = os.path.join("..","src") cmd.mkdir(IGM_src_dir_path) # Step 0c: Create the output file path outfile_path__IllinoisGRMHD_headers__h = os.path.join(IGM_src_dir_path,"IllinoisGRMHD_headers.h") # - # <a id='introduction'></a> # # # Step 1: Introduction \[Back to [top](#toc)\] # $$\label{introduction}$$ # <a id='igm_headers__h'></a> # # # Step 2: `IllinoisGRMHD_headers.h` \[Back to [top](#toc)\] # $$\label{igm_headers__h}$$ # + # %%writefile $outfile_path__IllinoisGRMHD_headers__h // To safeguard against double-including this header file: #ifndef ILLINOISGRMHD_HEADERS_H_ #define ILLINOISGRMHD_HEADERS_H_ #define MIN(a,b) ( ((a) < (b)) ? (a) : (b) ) #define MAX(a,b) ( ((a) > (b)) ? (a) : (b) ) #define SQR(x) ((x) * (x)) #define ONE_OVER_SQRT_4PI 0.282094791773878143474039725780 #define VERR_DEF_PARAMS __LINE__, __FILE__, CCTK_THORNSTRING // The order here MATTERS, as we assume that GUPXX+1=GUPYY, etc. static const int PHI=0,PSI=1,GXX=2,GXY=3,GXZ=4,GYY=5,GYZ=6,GZZ=7, LAPM1=8,SHIFTX=9,SHIFTY=10,SHIFTZ=11,GUPXX=12,GUPYY=13,GUPZZ=14, NUMVARS_FOR_METRIC_FACEVALS=15; //<-- Be _sure_ to set this correctly, or you'll have memory access bugs! 
// These are not used for facevals in the reconstruction step, but boy are they useful anyway. static const int GUPXY=15,GUPXZ=16,GUPYZ=17, NUMVARS_FOR_METRIC=18; //<-- Be _sure_ to set this correctly, or you'll have memory access bugs! // The order here MATTERS, and must be consistent with the order in the in_prims[] array in driver_evaluate_MHD_rhs.C. static const int RHOB=0,PRESSURE=1,VX=2,VY=3,VZ=4, BX_CENTER=5,BY_CENTER=6,BZ_CENTER=7,BX_STAGGER=8,BY_STAGGER=9,BZ_STAGGER=10, VXR=11,VYR=12,VZR=13,VXL=14,VYL=15,VZL=16,MAXNUMVARS=17; //<-- Be _sure_ to define MAXNUMVARS appropriately! static const int UT=0,UX=1,UY=2,UZ=3; // The "I" suffix denotes interpolation. In other words, these // definitions are used for interpolation ONLY. The order here // matters as well! static const int SHIFTXI=0,SHIFTYI=1,SHIFTZI=2,GUPXXI=3,GUPXYI=4,GUPXZI=5,GUPYYI=6,GUPYZI=7,GUPZZI=8, PSII=9,LAPM1I=10,A_XI=11,A_YI=12,A_ZI=13,LAPSE_PSI2I=14,LAPSE_OVER_PSI6I=15,MAXNUMINTERP=16; // Again, the order here MATTERS, since we assume in the code that, e.g., smallb[0]=b^t, smallb[3]=b^z, etc. static const int SMALLBT=0,SMALLBX=1,SMALLBY=2,SMALLBZ=3,SMALLB2=4,NUMVARS_SMALLB=5; // Again, the order here MATTERS, since we assume in the code that, CONSERV[STILDEX+1] = \tilde{S}_y static const int RHOSTAR=0,STILDEX=1,STILDEY=2,STILDEZ=3,TAUENERGY=4,NUM_CONSERVS=5; static const int LAPSE=0,PSI2=1,PSI4=2,PSI6=3,PSIM4=4,LAPSEINV=5,NUMVARS_METRIC_AUX=6; #define SET_LAPSE_PSI4(array_name,METRIC) { \ array_name[LAPSE] = METRIC[LAPM1]+1.0; \ array_name[PSI2] = exp(2.0*METRIC[PHI]); \ array_name[PSI4] = SQR(array_name[PSI2]); \ array_name[PSI6] = array_name[PSI4]*array_name[PSI2]; \ array_name[PSIM4] = 1.0/array_name[PSI4]; \ array_name[LAPSEINV] = 1.0/array_name[LAPSE]; \ } // Keeping track of ghostzones between routines is a nightmare, so // we instead attach ghostzone info to each gridfunction and set // the ghostzone information correctly within each routine. 
struct gf_and_gz_struct { CCTK_REAL *gf; int gz_lo[4],gz_hi[4]; }; #define MAX_EOS_PARAMS 10 struct eos_struct { int neos; CCTK_REAL rho_ppoly_tab[MAX_EOS_PARAMS-1]; CCTK_REAL eps_integ_const[MAX_EOS_PARAMS],K_ppoly_tab[MAX_EOS_PARAMS],Gamma_ppoly_tab[MAX_EOS_PARAMS]; }; struct output_stats { int font_fixed,vel_limited,failure_checker; long n_iter; }; // FIXME: For cosmetic purposes, we might want to make everything either zero-offset or one-offset, instead of a mixture. const int kronecker_delta[4][3] = { { 0,0,0 }, { 1,0,0 }, { 0,1,0 }, { 0,0,1 } }; /* PUBLIC FUNCTIONS, USED OUTSIDE IllinoisGRMHD AS WELL */ void IllinoisGRMHD_enforce_limits_on_primitives_and_recompute_conservs(const int already_computed_physical_metric_and_inverse,CCTK_REAL *U,struct output_stats &stats,eos_struct &eos, CCTK_REAL *METRIC,CCTK_REAL g4dn[4][4],CCTK_REAL g4up[4][4], CCTK_REAL *TUPMUNU,CCTK_REAL *TDNMUNU,CCTK_REAL *CONSERVS); void IllinoisGRMHD_convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij (const cGH *cctkGH,const int *cctk_lsh, CCTK_REAL *gxx,CCTK_REAL *gxy,CCTK_REAL *gxz,CCTK_REAL *gyy,CCTK_REAL *gyz,CCTK_REAL *gzz,CCTK_REAL *alp, CCTK_REAL *gtxx,CCTK_REAL *gtxy,CCTK_REAL *gtxz,CCTK_REAL *gtyy,CCTK_REAL *gtyz,CCTK_REAL *gtzz, CCTK_REAL *gtupxx,CCTK_REAL *gtupxy,CCTK_REAL *gtupxz,CCTK_REAL *gtupyy,CCTK_REAL *gtupyz,CCTK_REAL *gtupzz, CCTK_REAL *phi,CCTK_REAL *psi,CCTK_REAL *lapm1); void IllinoisGRMHD_set_symmetry_gzs_staggered(const cGH *cctkGH, const int *cctk_lsh,CCTK_REAL *X,CCTK_REAL *Y,CCTK_REAL *Z, CCTK_REAL *gridfunc, CCTK_REAL *gridfunc_syms,int stagger_x,int stagger_y,int stagger_z); #include "IllinoisGRMHD_EoS_lowlevel_functs.C" #endif // ILLINOISGRMHD_HEADERS_H # - # <a id='code_validation'></a> # # # Step n-1: Code validation \[Back to [top](#toc)\] # $$\label{code_validation}$$ # # First we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook. 
# +
# Verify that the code generated by this tutorial module
# matches the original IllinoisGRMHD source code

# First set up the URL and local paths of the original IllinoisGRMHD source code
import urllib
from os import path

original_IGM_file_url  = "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/IllinoisGRMHD_headers.h"
original_IGM_file_name = "IllinoisGRMHD_headers-original.h"
original_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name)

# Then download the original IllinoisGRMHD source code.
# We try it here in a couple of ways in an attempt to keep
# the code more portable (Python 3, then Python 2, then wget).
try:
    # BUGFIX: 'import urllib' alone does not guarantee the 'urllib.request'
    # submodule is loaded on Python 3; import it explicitly.
    import urllib.request
    original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode("utf-8")
    # Write down the file the original IllinoisGRMHD source code
    with open(original_IGM_file_path,"w") as file:
        file.write(original_IGM_file_code)
except Exception:  # narrowed from a bare 'except:' so e.g. Ctrl-C still interrupts
    try:
        # Python 2 fallback
        original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode("utf-8")
        # Write down the file the original IllinoisGRMHD source code
        with open(original_IGM_file_path,"w") as file:
            file.write(original_IGM_file_code)
    except Exception:
        # If all else fails, hope wget does the job
        # !wget -O $original_IGM_file_path $original_IGM_file_url
        pass  # the shell magic above does the work when run as a notebook

# Perform validation
# Validation__IllinoisGRMHD_headers__h = !diff $original_IGM_file_path $outfile_path__IllinoisGRMHD_headers__h

if Validation__IllinoisGRMHD_headers__h == []:
    # If the validation passes, we do not need to store the original IGM source code file
    # !rm $original_IGM_file_path
    print("Validation test for IllinoisGRMHD_headers.h: PASSED!")
else:
    # If the validation fails, we keep the original IGM source code file
    print("Validation test for IllinoisGRMHD_headers.h: FAILED!")
    # We also print out the difference between the code generated
    # in this tutorial module and the original IGM source code
    print("Diff:")
    for diff_line in Validation__IllinoisGRMHD_headers__h:
        print(diff_line)
# -

# <a id='latex_pdf_output'></a>
#
# # Step n: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.pdf](Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means).

latex_nrpy_style_path = os.path.join(nrpy_dir_path,"latex_nrpy_style.tplx")
# #!jupyter nbconvert --to latex --template $latex_nrpy_style_path --log-level='WARN' Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.ipynb
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.tex
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.tex
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__IllinoisGRMHD_headers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br>

# # Python for Finance
#
# **Analyze Big Financial Data**
#
# O'Reilly (2014)
#
# <NAME>

# <img style="border:0px solid grey;" src="http://hilpisch.com/python_for_finance.png" alt="Python for Finance" width="30%" align="left" border="0">

# **Buy the book ** |
# <a href='http://shop.oreilly.com/product/0636920032441.do' target='_blank'>O'Reilly</a> |
# <a href='http://www.amazon.com/Yves-Hilpisch/e/B00JCYHHJM' target='_blank'>Amazon</a>
#
# **All book codes & IPYNBs** |
# <a href="http://oreilly.quant-platform.com">http://oreilly.quant-platform.com</a>
#
# **The Python Quants GmbH** | <a href='http://tpq.io' target='_blank'>http://tpq.io</a>
#
# **Contact us** | <a href='mailto:<EMAIL>'><EMAIL></a>

# # Introductory Examples

from pylab import plt
plt.style.use('seaborn')
import matplotlib as mpl
mpl.rcParams['font.family'] = 'serif'
import warnings; warnings.simplefilter('ignore')

# ## Implied Volatilities

# + uuid="8e3ac03d-e5c1-4184-8494-5c02f5c1a897"
V0 = 17.6639  # initial VSTOXX level

# + uuid="3d727b16-4ff1-49fb-a54a-6e96568f54b0"
r = 0.01  # constant risk-free short rate

# + uuid="009042d8-f384-482c-8b4b-eeac6312315e"
import pandas as pd
h5 = pd.HDFStore('./source/vstoxx_data_31032014.h5', 'r')
futures_data = h5['futures_data']  # VSTOXX futures data
options_data = h5['options_data']  # VSTOXX call option data
h5.close()
# -

# Stored timestamps are in nanoseconds; convert them to datetime objects.
import datetime as dt
futures_data['DATE'] = futures_data['DATE'].apply(
    lambda x: dt.datetime.fromtimestamp(x / 1e9))
futures_data['MATURITY'] = futures_data['MATURITY'].apply(
    lambda x: dt.datetime.fromtimestamp(x / 1e9))

# + uuid="8d5c855d-87d0-4108-93c7-57094610bd68"
futures_data
# -

options_data['DATE'] = options_data['DATE'].apply(
    lambda x: dt.datetime.fromtimestamp(x / 1e9))
options_data['MATURITY'] = options_data['MATURITY'].apply(
    lambda x: dt.datetime.fromtimestamp(x / 1e9))

# + uuid="48b29f70-8142-4960-8d4d-3a241685bc1d"
options_data.info()

# + uuid="dead6085-55a7-46c1-9396-3ec0e247b8f4"
options_data[['DATE', 'MATURITY', 'TTM', 'STRIKE', 'PRICE']].head()

# + uuid="808a0269-42d5-43ed-b753-28abb46820de"
options_data['IMP_VOL'] = 0.0
  # new column for implied volatilities

# + uuid="233c9f45-d3c4-48f4-8422-7e77c490d232"
from bsm_functions import *

# + uuid="8da4e671-cbbc-44fd-8050-c8fe300a4501"
tol = 0.5  # tolerance level for moneyness
for option in options_data.index:
    # iterating over all option quotes
    forward = futures_data[futures_data['MATURITY'] == \
                options_data.loc[option]['MATURITY']]['PRICE'].values[0]
      # picking the right futures value
    if (forward * (1 - tol) < options_data.loc[option]['STRIKE']
                            < forward * (1 + tol)):
        # only for options with moneyness within tolerance
        imp_vol = bsm_call_imp_vol(
                V0,  # VSTOXX value
                options_data.loc[option]['STRIKE'],
                options_data.loc[option]['TTM'],
                r,   # short rate
                options_data.loc[option]['PRICE'],
                sigma_est=2.,  # estimate for implied volatility
                it=100)
        # BUGFIX: the .ix indexer was removed in pandas 1.0; .loc is the
        # label-based replacement with identical semantics here.
        options_data.loc[option, 'IMP_VOL'] = imp_vol

# + uuid="94491814-c0c6-4565-b1f7-cedb1af12c48"
futures_data['MATURITY']
  # select the column with name MATURITY

# + uuid="cbb44f1e-2ab5-4d8f-976a-2d36515eef12"
options_data.loc[46170]
  # select data row for index 46170

# + uuid="e267720a-901b-42fd-86eb-f31c29ebc166"
options_data.loc[46170]['STRIKE']
  # select only the value in column STRIKE
  # for index 46170

# + uuid="03092953-5496-4500-9cf5-e3cbcf46d396"
plot_data = options_data[options_data['IMP_VOL'] > 0]

# + uuid="738b9618-b3a9-434f-a685-441a0c837b76"
maturities = sorted(set(options_data['MATURITY']))
maturities

# + uuid="8886807a-ca71-48d0-b5ef-ab7cb9470548"
import matplotlib.pyplot as plt
# %matplotlib inline
plt.figure(figsize=(8, 6))
for maturity in maturities:
    data = plot_data[options_data.MATURITY == maturity]
      # select data for this maturity
    plt.plot(data['STRIKE'], data['IMP_VOL'],
             label=maturity.date(), lw=1.5)
    plt.plot(data['STRIKE'], data['IMP_VOL'], 'r.', label='')
plt.grid(True)
plt.xlabel('strike')
plt.ylabel('implied volatility of volatility')
plt.legend()
plt.show()
# tag: vs_imp_vol
# title: Implied volatilities (of volatility) for European call options on the VSTOXX on 31. March 2014

# + uuid="34582055-b4b2-49ea-8678-e44288f7de88"
keep = ['PRICE', 'IMP_VOL']
group_data = plot_data.groupby(['MATURITY', 'STRIKE'])[keep]
group_data

# + uuid="1b5253de-0b31-4af0-8155-b2c405363a0d"
group_data = group_data.sum()
group_data.head()

# + uuid="390e4908-23ed-4419-a62d-b25ed3cf2d15"
group_data.index.levels
# -

# ## Monte Carlo Simulation

# + uuid="13a3c945-2ae9-4583-b21f-cb6d4e4729b8"
from bsm_functions import bsm_call_value
S0 = 100.
K = 105.
T = 1.0
r = 0.05
sigma = 0.2
bsm_call_value(S0, K, T, r, sigma)
# -

# ### Pure Python

# + uuid="aebfedcc-472c-4225-82eb-6405ecf7eaa9"
# %run mcs_pure_python.py

# + uuid="d038ec31-373b-4979-aa49-a47ff96fc234"
sum_val = 0.0
for path in S:
    # C-like iteration for comparison
    sum_val += max(path[-1] - K, 0)
C0 = exp(-r * T) * sum_val / I
round(C0, 3)
# -

# ### Vectorization with NumPy

# + uuid="2f695395-b7b7-4d0c-bab3-659fcd4da5d0"
v = list(range(1, 6))
print(v)

# + uuid="548c6e07-9435-421c-9408-d63252608f74"
2 * v

# + uuid="0a831df5-d2c3-46af-8399-ad2b4b411001"
import numpy as np
v = np.arange(1, 6)
v

# + uuid="4c5153ff-983f-4ee1-95d6-0dae809da4bf"
2 * v

# + uuid="0fca4476-2003-4733-a6e1-f405997e1b1a"
# %run mcs_vector_numpy.py

# + uuid="6d10a22b-fed3-4627-a2bc-6f958824ca2f"
round(tpy / tnp1, 2)
# -

# ### Full Vectorization with Log Euler Scheme

# + uuid="02a121e5-75ff-4ad1-9812-ec0e69d3ebaa"
# %run mcs_full_vector_numpy.py
# -

# ### Graphical Analysis

# + uuid="f880bd6e-72fa-4906-b2a7-24f3c7419338"
import matplotlib.pyplot as plt
plt.plot(S[:, :10])
plt.grid(True)
plt.xlabel('time step')
plt.ylabel('index level')
# tag: index_paths
# title: The first 10 simulated index level paths

# + uuid="997e2c48-f4e4-49c2-be9b-ce2ed0a2166f"
plt.hist(S[-1], bins=50)
plt.grid(True)
plt.xlabel('index level')
plt.ylabel('frequency')
# tag: index_histo
# title: Histogram of all simulated end of period index level values

# + uuid="bc4f9174-c92c-4abf-9d31-da3e7d0baec5"
plt.hist(np.maximum(S[-1] - K, 0), bins=50)
plt.grid(True)
plt.xlabel('option inner value')
plt.ylabel('frequency')
plt.ylim(0, 50000)
# tag: option_iv_hist
# title: Histogram of all simulated end of period option inner values

# + uuid="d5aed456-001a-423c-9366-df294f1c3f0b"
sum(S[-1] < K)
# -

# ## Technical Analysis

# + uuid="dcba1a61-94e3-47dd-ab54-2c688e740741"
import numpy as np
import pandas as pd

# + uuid="3b6ce702-6a8c-400c-a6fc-d382f316dd9c"
raw = pd.read_csv('source/tr_eikon_eod_data.csv',
                  index_col=0, parse_dates=True)
AAPL = pd.DataFrame(raw['AAPL.O'])
AAPL.columns = ['Close']
AAPL.info()

# + uuid="444a17d9-8fd1-436d-825a-3001cb080b7c"
AAPL['Close'].plot(grid=True, figsize=(8, 5))
# tag: AAPL
# title: Historical levels of the S&P 500 index

# + uuid="d134fa6d-4db4-40f3-b1ce-952ef2346e18"
AAPL['42d'] = np.round(AAPL['Close'].rolling(window=42).mean(), 2)
AAPL['252d'] = np.round(AAPL['Close'].rolling(window=252).mean(), 2)

# + uuid="8af03824-0abb-4747-9f8f-d517d48bd673"
AAPL[['Close', '42d', '252d']].tail()

# + uuid="4dc6ac7e-6fac-4994-811b-35789c551de7"
AAPL[['Close', '42d', '252d']].plot(grid=True, figsize=(8, 5))
# tag: AAPL_trend
# title: The Apple, Inc. stock price with 42d and 252d trend lines

# + uuid="ccc2b0e9-5ab7-49c1-8054-3a1e1b23ab92"
AAPL['42-252'] = AAPL['42d'] - AAPL['252d']
AAPL['42-252'].tail()

# + uuid="e127f7c9-8e7f-4038-b490-502e6bf436f7"
AAPL['42-252'].head()
# -

AAPL.dropna(inplace=True)

# + uuid="d1125e1b-3cd0-4274-aee7-9b1136db8c02"
SD = 0.5  # signal threshold on the trend difference
AAPL['Position'] = np.where(AAPL['42-252'] > SD, 1, 0)
AAPL['Position'] = np.where(AAPL['42-252'] < -SD, -1, AAPL['Position'])
AAPL['Position'].value_counts()

# + uuid="ffec4dc1-ebb4-43e1-962d-1fbc465afee1"
AAPL['Position'].plot(lw=1.5, grid=True)
plt.ylim([-1.1, 1.1]);
# tag: AAPL_signal
# title: Positions over time

# + uuid="0e51676f-7b84-441d-8191-08799247774c"
AAPL['Market'] = np.log(AAPL['Close'] / AAPL['Close'].shift(1))

# + uuid="25f3c42a-ec95-46e3-a194-4e29f0c830af"
AAPL['Strategy'] = AAPL['Position'].shift(1) * AAPL['Market']

# + uuid="ce759e67-918d-432c-a411-b22c6777dac5"
AAPL[['Market', 'Strategy']].cumsum().apply(np.exp).plot(grid=True,
                                                         figsize=(8, 5));
# tag: AAPL_wealth
# title: The Apple stock performance vs. investor's wealth
# -

# ## Conclusions

# ## Further Reading

# <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br>
#
# <a href="http://tpq.io" target="_blank">http://tpq.io</a> | <a href="http://twitter.com/dyjh" target="_blank">@dyjh</a> | <a href="mailto:<EMAIL>"><EMAIL></a>
#
# **Quant Platform** |
# <a href="http://quant-platform.com">http://quant-platform.com</a>
#
# **Python for Finance** |
# <a href="http://python-for-finance.com" target="_blank">Python for Finance @ O'Reilly</a>
#
# **Derivatives Analytics with Python** |
# <a href="http://derivatives-analytics-with-python.com" target="_blank">Derivatives Analytics @ Wiley Finance</a>
#
# **Listed Volatility and Variance Derivatives** |
# <a href="http://lvvd.tpq.io" target="_blank">Listed VV Derivatives @ Wiley Finance</a>
#
# **Python Training** |
# <a href="http://training.tpq.io" target="_blank">Python for Finance University Certificate</a>
Oreilly_Python for Finance/03_Introductory_Examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Now You Code In Class : Rock Paper Scissors Experiment # # In this now you code we will learn to re-factor a program into a function. This is the most common way to write a function when you are a beginner. *Re-factoring* is the act of re-writing code without changing its functionality. We commonly do re-factoring to improve performance or readability of our code. Through the process we will also demonstrate the DRY (don't repeat yourself principle of coding). # # The way you do this is rather simple. First you write a program to solve the problem, then you re-write that program as a function and finally test the function to make sure it works as expected. # # This helps train you to think abstractly about problems, but leverages what you understand currently about programming. # # ## Introducing the Write - Refactor - Test - Rewrite approach # # The best way to get good at writing functions, a skill you will need to master to become a respectable programmer, is to use the **Write - Refactor - Test - Rewrite** approach. The basic idea is as follows: # # 1. Write the program # 2. Identify which parts of the program can be placed in a function # 3. Refactor the code into a function. Extract the bits into a function into a new, separate cell independent of the original code. # 4. Test the function so you are confident it works, using the expect... actual approach from the lab. # 5. Re-Write the original program to call the function instead. # # ## The Problem # # Let's build a game that plays rock paper scissors. # # ### The Approach # # 1. Write the program once (done for you in the cell below) # 2. refactor step 1 into its own function `play_game(playera, playerb)` which returns the winning player. # 3. 
# test the function to make sure it works. Write tests for all cases
# 4. re-write the main program to now call the function.
# 5. use the function in the final program.
#

# +
# ORIGINAL CODE
import random

choices = ['rock', 'paper', 'scissors']

# Session score counters (this cell does not update them).
wins = 0
losses = 0
ties = 0

computer = random.choice(choices)
you = 'rock'  # Always rock strategy

# Ordered pairs (winner, loser): the first choice beats the second.
# Anything that is neither an explicit win nor an explicit loss is a tie.
winning_pairs = {('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')}
if (you, computer) in winning_pairs:
    outcome = "win"
elif (computer, you) in winning_pairs:
    outcome = "lose"
else:
    outcome = "tie"

print(f"You:'{you}' Computer:'{computer}' Game: {outcome} ")
# -

# ## Problem Analysis
#
# For a function `rock_paper_scissors()` which plays the game, what are the inputs and outputs?
#
# Inputs:
#
# PROMPT 1 :
# my choice
# computer choice
#
# Outputs:
#
# PROMPT 2 :
#
# win, lose, tie (you)
#
# Function def in python: (just `def` part)
#
# PROMPT 3: Write the steps from input to output
#
# PROMPT 4: Write function
def playRPO(you, computer):
    """Play one round of rock-paper-scissors.

    Returns "win", "lose" or "tie" from `you`'s perspective; any pairing
    that is not an explicit win or loss (including equal picks) is a tie.
    """
    beats = {('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')}
    if (you, computer) in beats:
        return "win"
    if (computer, you) in beats:
        return "lose"
    return "tie"

# ## Test Cases
#
# Writing a function is not helpful unless we have some assurances that it is correct.
# We solve this problem with test cases:
#
#     YOU         COMPUTER    OUTCOME
#     Rock        Rock        Tie
#     Rock        Scissors    Win
#     Rock        Paper       Lose
#
#     Scissors    Rock        Lose
#     Scissors    Scissors    Tie
#     Scissors    Paper       Win
#
#     Paper       Rock        Win
#     Paper       Scissors    Lose
#     Paper       Paper       Tie
#
# PROMPTS 5 - 13
#
# Write a `print()` or `assert()` statement for each test case:
#
# `When YOU=?, COMPUTER=?, EXPECT=?, ACTUAL=(call the function)`
#
#

# PROMPTS 5-13 test Cases
# One assert per row of the table above; each calls playRPO() and fails
# loudly if the returned outcome is wrong.
assert playRPO("rock","paper") == "lose"
assert playRPO("rock","scissors") == "win"
assert playRPO("rock","rock") == "tie"
assert playRPO("paper","paper") == "tie"
assert playRPO("paper","scissors") == "lose"
assert playRPO("paper","rock") == "win"
assert playRPO("scissors","paper") == "win"
assert playRPO("scissors","scissors") == "tie"
assert playRPO("scissors","rock") == "lose"

# ## Re-Write
#
# With the function code tested, and assurances it is correct, we can now
# re-write the original program, calling the function instead.

# +
import random

choices = ['rock', 'paper', 'scissors']
cpu = random.choice(choices)  # computer picks at random
me = 'rock'                   # the "always rock" strategy from above
# -

# ## Back to the game
#
# Now that we have a function to play the game, we can write code for the
# actual game interaction.
#
#
# INPUTS:
#
# - user selects one of rock, paper, scissors from a drop-down menu
#
# OUTPUTS:
#
# - game says whether you won or lost, and keep a record of your total wins and losses
#
#
# ALGORITHM:
#
# PROMPT 15: Game steps !
#
#
# ## Final Code
#
# Let's use ipywidgets to create the user interface. Some notes:
#
# - the list of items `["rock","paper","scissors","random"]` makes a drop down.

# +
from IPython.display import display, HTML
from ipywidgets import interact_manual
import random

# PROMPT Initialize wins and losses here.
choices = ["rock","paper","scissors"]

# interact_manual builds a drop-down from `choices` plus a "Run Interact"
# button; each click plays one round.
@interact_manual(your_choice=choices)
def clickbutton(your_choice):
    """Play one round against a random computer pick and display the result.

    `your_choice` is supplied by the ipywidgets drop-down built from `choices`;
    the outcome comes from playRPO(), defined and tested above.
    """
    computer = random.choice(choices)
    result = playRPO(your_choice,computer)
    display(HTML(f"<p>YOU: {your_choice}"))
    display(HTML(f"<p>CPU: {computer}"))
    display(HTML(f"<p>Result: {result}"))
# -

# run this code to turn in your work!
from coursetools.submission import Submission
Submission().submit_now()
lessons/05-Functions/SmallGroup-Functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.3.0 # language: julia # name: julia-1.3 # --- # # Binary Search # # Implement a binary search algorithm. # # Searching a sorted collection is a common task. A dictionary is a sorted # list of word definitions. Given a word, one can find its definition. A # telephone book is a sorted list of people's names, addresses, and # telephone numbers. Knowing someone's name allows one to quickly find # their telephone number and address. # # If the list to be searched contains more than a few items (a dozen, say) # a binary search will require far fewer comparisons than a linear search, # but it imposes the requirement that the list be sorted. # # In computer science, a binary search or half-interval search algorithm # finds the position of a specified input value (the search "key") within # an array sorted by key value. # # In each step, the algorithm compares the search key value with the key # value of the middle element of the array. # # If the keys match, then a matching element has been found and the range of indices that equal the search key value are returned. # # Otherwise, if the search key is less than the middle element's key, then # the algorithm repeats its action on the sub-array to the left of the # middle element or, if the search key is greater, on the sub-array to the # right. # # If the remaining array to be searched is empty, then the key cannot be # found in the array and a special "not found" indication is returned. Search methods in Julia typically return an empty range located at the insertion point in this case. # # A binary search halves the number of items to check with each iteration, # so locating an item (or determining its absence) takes logarithmic time. # A binary search is a dichotomic divide and conquer search algorithm. 
# # **For simplification, you can assume that all elements of the list to be searched are unique.** Feel free to implement a solution that works on lists with non-unique elements as a bonus task. # # ## Bonus task # Implement keyword arguments `by`, `lt` and `rev` so that `by` specifies a transformation applied to all elements of the list, `lt` specifies a comparison and `rev` specifies if the list is ordered in reverse. # # ## Source # # Wikipedia [http://en.wikipedia.org/wiki/Binary_search_algorithm](http://en.wikipedia.org/wiki/Binary_search_algorithm) # # Some phrases above and the bonus tasks are taken from the [Julia base documentation (MIT license)](https://docs.julialang.org/en/v1/base/sort/#Base.Sort.searchsorted) of `searchsorted`. # # ## Version compatibility # This exercise has been tested on Julia versions >=1.0. # # ## Submitting Incomplete Solutions # It's possible to submit an incomplete solution so you can see how others have completed the exercise. # ## Your solution # + # submit # - # ## Test suite # + # canonical data version: 1.2.0 using Test # include("binary-search.jl") @testset "default binary search" begin @testset "value in array" begin @test binarysearch([6], 6) == 1:1 @test binarysearch([1, 3, 4, 6, 8, 9, 11], 6) == 4:4 @test binarysearch([1, 3, 4, 6, 8, 9, 11], 1) == 1:1 @test binarysearch([1, 3, 4, 6, 8, 9, 11], 11) == 7:7 @test binarysearch([1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 634], 144) == 10:10 @test binarysearch([1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377], 21) == 6:6 end @testset "value not in array" begin @test binarysearch([1, 3, 4, 6, 8, 9, 11], 7) == 5:4 @test binarysearch([1, 3, 4, 6, 8, 9, 11], 0) == 1:0 @test binarysearch([1, 3, 4, 6, 8, 9, 11], 13) == 8:7 @test binarysearch([], 1) == 1:0 end end @testset "bonus tasks" begin @testset "reverse search" begin @testset "value in array" begin @test_skip binarysearch([6], 6, rev = true) == 1:1 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 6, rev = true) == 4:4 
@test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 1, rev = true) == 7:7 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 11, rev = true) == 1:1 @test_skip binarysearch([634, 377, 233, 144, 89, 55, 34, 21, 13, 8, 5, 3, 1], 144, rev = true) == 4:4 @test_skip binarysearch([377, 233, 144, 89, 55, 34, 21, 13, 8, 5, 3, 1], 21, rev = true) == 7:7 end @testset "value not in array" begin @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 7, rev = true) == 4:3 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 0, rev = true) == 8:7 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 13, rev = true) == 1:0 @test_skip binarysearch([], 1, rev = true) == 1:0 end end @testset "apply transformation" begin @testset "value in array" begin @test_skip binarysearch([5.5], 6, by = round) == 1:1 @test_skip binarysearch([1.1, 2.9, 4.4, 5.5, 8.1, 9.0, 10.8], 6, by = round) == 4:4 @test_skip binarysearch([1.1, 2.9, 4.4, 5.5, 8.1, 9.0, 10.8], 1, by = round) == 1:1 @test_skip binarysearch([1.1, 2.9, 4.4, 5.5, 8.1, 9.0, 10.8], 11, by = round) == 7:7 @test_skip binarysearch([1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 634], 144.4, by = round) == 10:10 @test_skip binarysearch([1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377], 20.6, by = round) == 6:6 end @testset "value not in array" begin @test_skip binarysearch([1.1, 2.9, 4.4, 5.5, 8.1, 9.0, 10.8], 7, by = round) == 5:4 @test_skip binarysearch([1.1, 2.9, 4.4, 5.5, 8.1, 9.0, 10.8], 0, by = round) == 1:0 @test_skip binarysearch([1.1, 2.9, 4.4, 5.5, 8.1, 9.0, 10.8], 13, by = round) == 8:7 @test_skip binarysearch([], 1, by = round) == 1:0 end end @testset "compare with > instead of <" begin # this is equivalent to searching in reverse order @testset "value in array" begin @test_skip binarysearch([6], 6, lt = >) == 1:1 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 6, lt = >) == 4:4 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 1, lt = >) == 7:7 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 11, lt = >) == 1:1 @test_skip binarysearch([634, 377, 233, 144, 
89, 55, 34, 21, 13, 8, 5, 3, 1], 144, lt = >) == 4:4 @test_skip binarysearch([377, 233, 144, 89, 55, 34, 21, 13, 8, 5, 3, 1], 21, lt = >) == 7:7 end @testset "value not in array" begin @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 7, lt = >) == 4:3 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 0, lt = >) == 8:7 @test_skip binarysearch([11, 9, 8, 6, 4, 3, 1], 13, lt = >) == 1:0 @test_skip binarysearch([], 1, lt = >) == 1:0 end end end # - # ## Prepare submission # To submit your exercise, you need to save your solution in a file called `binary-search.jl` before using the CLI. # You can either create it manually or use the following functions, which will automatically write every notebook cell that starts with `# submit` to the file `binary-search.jl`. # # + # using Pkg; Pkg.add("Exercism") # using Exercism # Exercism.create_submission("binary-search")
exercises/binary-search/binary-search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Already ran and worked and no changines in this cell import numpy as np import cv2 import os from matplotlib import pyplot as plt class ExtractExudates: jpegImg = 0 grayImg = 0 curImg = 0 def setImage(self, img): self.jpegImg = img self.curImg = np.array(img) ##Convert jpegFile to numpy array (Required for CV2) def getImage(self): return self.curImg def greenComp(self): ###Extracting Green Component gcImg = self.curImg[:,:,1] self.curImg = gcImg return self.curImg def applyCLAHE(self): #Applying Contrast Limited Adaptive Histogram Equalization (CLAHE) clahe = cv2.createCLAHE() clImg = clahe.apply(self.curImg) self.curImg = clImg return self.curImg # create a CLAHE object (Arguments are optional). #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) #claheImg = clahe.apply(clImg) #cv2.imwrite('clahe_2.jpg',claheImg) def applyDilation(self): #Creating Structurig Element strEl = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(6,6)) #Dilation dilateImg = cv2.dilate(self.curImg, strEl) self.curImg = dilateImg return self.curImg def applyThreshold(self): #Thresholding with Complement/Inverse retValue, threshImg = cv2.threshold(self.curImg, 220, 220, cv2.THRESH_BINARY) self.curImg = threshImg return self.curImg def applyMedianFilter(self): #Median Filtering medianImg = cv2.medianBlur(self.curImg,5) self.curImg = medianImg #cv2.imwrite('medianfilter.jpg',self.curImg) return self.curImg ExEd = ExtractExudates() if __name__ == "__main__": pathFolder = 'train_images' filesArray = [x for x in os.listdir(pathFolder) if os.path.isfile(os.path.join(pathFolder,x))] destinationFolder = "Exudates result/" if not os.path.exists(destinationFolder): os.mkdir(destinationFolder) for file_name in filesArray: file_name_no_extension = os.path.splitext(file_name)[0] 
fundus = cv2.imread(pathFolder+'/'+file_name) ExEd.setImage(fundus) green = ExEd.greenComp() clache = ExEd.applyCLAHE() dilation = ExEd.applyDilation() thresh = ExEd.applyThreshold() median = ExEd.applyMedianFilter() cv2.imwrite(destinationFolder+file_name_no_extension+"_1original.png",fundus) cv2.imwrite(destinationFolder+file_name_no_extension+"_2green.png",green) cv2.imwrite(destinationFolder+file_name_no_extension+"_3clache.png",clache) cv2.imwrite(destinationFolder+file_name_no_extension+"_4dilation.png",dilation) cv2.imwrite(destinationFolder+file_name_no_extension+"_5thresh.png",thresh) cv2.imwrite(destinationFolder+file_name_no_extension+"_6median.png",median) # + #Try 2 => working for exudates and blood vessels import cv2 import numpy as np import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt import argparse parser = argparse.ArgumentParser() parser.add_argument('--img', type=str, required=False, default="df3adfd6ba36.png", help='/path/to/images/') #df3adfd6ba36 0a38b552372d FLAGS = parser.parse_known_args()[0] #parser.parse_args() def extract_bv(image): b,green_fundus,r = cv2.split(image) clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) contrast_enhanced_green_fundus = clahe.apply(green_fundus) # applying alternate sequential filtering (3 times closing opening) r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1) R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1) r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1) R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1) r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1) R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, 
cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1) f4 = cv2.subtract(R3,contrast_enhanced_green_fundus) f5 = clahe.apply(f4) # removing very small contours through area parameter noise removal ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY) mask = np.ones(f5.shape[:2], dtype="uint8") * 255 im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) for cnt in contours: if cv2.contourArea(cnt) <= 200: cv2.drawContours(mask, [cnt], -1, 0, -1) im = cv2.bitwise_and(f5, f5, mask=mask) ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV) newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1) # removing blobs of unwanted bigger chunks taking in consideration they are not straight lines like blood #vessels and also in an interval of area fundus_eroded = cv2.bitwise_not(newfin) xmask = np.ones(image.shape[:2], dtype="uint8") * 255 x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) for cnt in xcontours: shape = "unidentified" peri = cv2.arcLength(cnt, True) approx = cv2.approxPolyDP(cnt, 0.04 * peri, False) if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100: shape = "circle" else: shape = "veins" if(shape=="circle"): cv2.drawContours(xmask, [cnt], -1, 0, -1) finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask) blood_vessels = cv2.bitwise_not(finimage) return blood_vessels def exudate(img): jpegImg = 0 grayImg = 0 curImg = 0 #img = cv2.imread("./diaretdb0_v_1_1/resources/images/diaretdb0_fundus_images/image019.png") jpegImg = img curImg = np.array(img) ##Convert jpegFile to numpy array (Required for CV2) print(curImg.shape) gcImg = curImg[:,:,1] curImg = gcImg clahe = cv2.createCLAHE() clImg = clahe.apply(curImg) # clImg = clahe.apply(clImg) curImg = clImg # create a CLAHE object (Arguments are optional). 
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) #claheImg = clahe.apply(clImg) #cv2.imwrite('clahe_2.jpg',claheImg) #Creating Structurig Element strEl = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(6,6)) #Dilation dilateImg = cv2.dilate(curImg, strEl) curImg = dilateImg #Thresholding with Complement/15 retValue, threshImg = cv2.threshold(curImg, 235, 247, cv2.THRESH_BINARY_INV) curImg = threshImg #Median Filtering medianImg = cv2.medianBlur(curImg,3) curImg = medianImg return curImg #plt.imshow(cv2.bitwise_and(img, img, mask = curImg)) if __name__ == '__main__': norm_dir_path = "train_images/" img=cv2.imread(norm_dir_path+FLAGS.img) bv=extract_bv(img) ex=exudate(img) plt.figure(figsize=(25,25)) plt.subplot(232) plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) plt.title("Original - {}".format(FLAGS.img)) plt.subplot(234) plt.imshow(bv, cmap='gray') plt.title("Blood Vessels - {}".format(FLAGS.img)) plt.subplot(236) plt.imshow(ex, cmap='gray') plt.title("Exudations - {}".format(FLAGS.img)) plt.show() # -
Preprocessing/Exudates.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="C1NgIOEPAwRg" # **TASK-4 Exploratory Data Analysis - Terrorism** # + [markdown] id="hxQmGBsBA088" # **IMPORTING THE LIBRARIES** # + id="OJ0Jysp7rZFX" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # + id="EvNC2SujA6aA" import folium from folium.plugins import MarkerCluster # + [markdown] id="kkI0zfmNA8jp" # **LOADING THE DATASET** # + colab={"base_uri": "https://localhost:8080/", "height": 410} id="_hnIsPBpA9SG" outputId="484dfe4a-ba8f-4610-ffed-51cb34b21b07" df= pd.read_csv("/content/sample_data/globalterrorismdb_0718dist.csv",encoding='latin1') df.head() # + colab={"base_uri": "https://localhost:8080/"} id="PdHLDSxDEHrs" outputId="1912c543-6224-4980-c152-ec20f265fef7" df.info() # + colab={"base_uri": "https://localhost:8080/", "height": 348} id="9yd9IqgoEITP" outputId="0c17653f-3f00-4ccb-a833-50609316c02e" df.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 442} id="w1_3MpnjELFA" outputId="5bb5efa8-1a51-4807-a2fe-0eff87d5a3c3" df.corr() # + colab={"base_uri": "https://localhost:8080/"} id="TIlp5oRNENHQ" outputId="e51164ea-6aa2-4345-9482-b6db88e6c3ce" df.columns # + [markdown] id="nBDHlYWiETHc" # **US Terror Attacks and Death Injuries** # + colab={"base_uri": "https://localhost:8080/", "height": 820} id="F6RUuereEO8h" outputId="d4dc271e-b6c2-4882-fbb6-4d07b1d7010d" df.nkillus.plot(kind = 'line', color = 'red', label = 'The Number of Total Confirmed Fatalities for US', linewidth = 2, alpha = 0.8, grid = True, linestyle = ':', figsize = (20,20), fontsize=15) df.nwoundus.plot(color = "green", label = 'The Number of Confirmed Non-Fatal Injuries for US', linewidth = 2, alpha = 0.8, grid = True, linestyle = '-.', figsize = (20,20), fontsize=15) plt.legend(loc='upper right') # legend = 
puts label into plot plt.xlabel('Database Index', fontsize=10) # label = name of label plt.ylabel('Number of Dead or Injuries', fontsize=15) plt.title('Confirmed Fatalities & Non-Fatal Injuries for US') #plot title plt.show() # + [markdown] id="L7hOLL1NEY93" # **Death and Injuries at all time** # + colab={"base_uri": "https://localhost:8080/", "height": 817} id="yUWnxLwyEZ4M" outputId="df91015a-90aa-47dd-ad22-82f5443f6dc8" df.plot(kind = 'scatter', x = 'nkill', y = 'nwound', alpha = 0.5, color = 'red', figsize = (20,20), fontsize=15) plt.xlabel('Kill', fontsize=15) plt.ylabel('Wound', fontsize=15) plt.title('Kill - Wound Scatter Plot') plt.show() # + [markdown] id="21EVhDhKEcZJ" # In the majority of acts of terrorism, the mortality rate and injuries were low, but a small number of actions led to too many deaths and injuries. # + [markdown] id="0kCOHGJuEfH1" # **Frequency of Terrorist Actions in Customized Region** # # Let's analyze in Middle East and North Africa # + colab={"base_uri": "https://localhost:8080/", "height": 817} id="9NhpJ-g3Ehy_" outputId="548b0790-1de1-40d9-e107-f75eb9ca57f0" middleEastData = df[df['region'] == 10] middleEastData.iyear.plot(kind = 'hist', bins = 30, figsize = (20,20), color = 'red', fontsize=15) plt.xlabel('Year', fontsize=15) plt.ylabel('Frequency', fontsize=15) plt.title('Frequency of Middle East & North Africa Terrorism Actions by Years') plt.show() # + [markdown] id="TNf_jlVxEmWv" # Terrorist attacks have increased in recent years. 
# + colab={"base_uri": "https://localhost:8080/", "height": 824} id="T2QAAFbFEnbJ" outputId="cd1ed488-e3b3-4afc-f173-ca77fb098b05" df.nkillus.plot(kind = 'line', color = 'red', label = 'People Dead of Terrorism in the World', linewidth = 1.5, alpha = 0.8, grid = True, linestyle = ':', figsize = (20,20), fontsize=15) middleEastData.nkillus.plot(color = "green", label = 'People Dead of Terrorism in the Middle East & North Africa', linewidth = 1.5, alpha = 0.8, grid = True, linestyle = '-.', figsize = (20,20), fontsize=15) plt.legend(loc='upper right') # legend = puts label into plot plt.xlabel('Database Index', fontsize=15) # label = name of label plt.ylabel('Number of Dead', fontsize=15) plt.title('Comparing those who died in terror attacks in the World and Middle East & North Africa') #plot title plt.show() # + [markdown] id="SkH0O1-KErCR" # **Terrorist Attacks of a Particular year and their Locations** # # Let's look at the terrorist acts in the world over a certain year # # + id="ddqCw3ixErpq" filterYear = df['iyear'] == 1970 # + id="ofbJU5hQEtSV" filterData = df[filterYear] # filter data # filterData.info() reqFilterData = filterData.loc[:,'city':'longitude'] #We are getting the required fields reqFilterData = reqFilterData.dropna() # drop NaN values in latitude and longitude reqFilterDataList = reqFilterData.values.tolist() # reqFilterDataList # + colab={"base_uri": "https://localhost:8080/"} id="7G87fIP_EvuJ" outputId="d3912631-82a6-41a8-cb14-e63d0ae2b894" killData = df.loc[:,'nkill'] print('Number of people killed by terror attack:', int(sum(killData.dropna())))# drop the NaN values # + id="nuvF8VTUEwRO" attackData = df.loc[:,'attacktype1':'attacktype1_txt'] # attackData typeKillData = pd.concat([attackData, killData], axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 153} id="7S_XUG4TEyG3" outputId="4ff39079-b691-4e1b-d1e4-a0ebb95e8c09" typeKillFormatData = typeKillData.pivot_table(columns='attacktype1_txt', values='nkill', aggfunc='sum') 
typeKillFormatData # + colab={"base_uri": "https://localhost:8080/"} id="yuiCFyqTE17x" outputId="1cec68a1-7d65-4b22-9a16-4f0d25645f3a" typeKillFormatData.info() # + colab={"base_uri": "https://localhost:8080/", "height": 715} id="ZmVGwHHuE4gF" outputId="4de3e8ff-ab05-45d7-e285-5170071b9a85" labels = typeKillFormatData.columns.tolist() # convert line to list transpoze = typeKillFormatData.T # transpoze values = transpoze.values.tolist() fig, ax = plt.subplots(figsize=(20, 20), subplot_kw=dict(aspect="equal")) plt.pie(values, startangle=90, autopct='%.2f%%') plt.title('Types of terrorist attacks that cause deaths') plt.legend(labels, loc='upper right', bbox_to_anchor = (1.3, 0.9), fontsize=15) # location legend plt.show() # + [markdown] id="z-Ai45CqE7QY" # **Number of Killed in Terrorist Attacks by Countries** # + colab={"base_uri": "https://localhost:8080/", "height": 199} id="6rjpyii-E76V" outputId="a65cdd8f-ebda-4c39-c31c-72465a923969" countryData = df.loc[:,'country':'country_txt'] # countyData countryKillData = pd.concat([countryData, killData], axis=1) countryKillFormatData = countryKillData.pivot_table(columns='country_txt', values='nkill', aggfunc='sum') countryKillFormatData # + colab={"base_uri": "https://localhost:8080/"} id="80Ur46cWE_pd" outputId="71e14135-4606-4d54-9556-e8d6b64c844d" countryKillFormatData.info() # + id="drjyB3rCFB8d" fig_size = plt.rcParams["figure.figsize"] fig_size[0]=25 fig_size[1]=25 plt.rcParams["figure.figsize"] = fig_size # + id="dxmtoAj-FD1L" labels = countryKillFormatData.columns.tolist() labels = labels[:50] #50 bar provides nice view index = np.arange(len(labels)) transpoze = countryKillFormatData.T values = transpoze.values.tolist() values = values[:50] values = [int(i[0]) for i in values] # convert float to int colors = ['red', 'green', 'blue', 'purple', 'yellow', 'brown', 'black', 'gray', 'magenta', 'orange'] # color list for bar chart bar color # + colab={"base_uri": "https://localhost:8080/", "height": 881} 
id="FxTHbHv2FGEV" outputId="55560f15-0e87-4a8f-d4e0-917124a27f51" fig, ax = plt.subplots(1, 1) ax.yaxis.grid(True) fig_size = plt.rcParams["figure.figsize"] fig_size[0]=25 fig_size[1]=25 plt.rcParams["figure.figsize"] = fig_size plt.bar(index, values, color = colors, width = 0.9) plt.ylabel('Killed People', fontsize=15) plt.xticks(index, labels, fontsize=12, rotation=90) plt.title('Number of people killed by countries') # print(fig_size) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="WmEJTdCuFJMc" outputId="a91ce636-c8e6-4220-b1d2-7ecc1fd5217f" labels = countryKillFormatData.columns.tolist() labels = labels[50:101] index = np.arange(len(labels)) transpoze = countryKillFormatData.T values = transpoze.values.tolist() values = values[50:101] values = [int(i[0]) for i in values] colors = ['red', 'green', 'blue', 'purple', 'yellow', 'brown', 'black', 'gray', 'magenta', 'orange'] fig, ax = plt.subplots(1, 1) ax.yaxis.grid(True) fig_size = plt.rcParams["figure.figsize"] fig_size[0]=25 fig_size[1]=25 plt.rcParams["figure.figsize"] = fig_size plt.bar(index, values, color = colors, width = 0.9) plt.ylabel('Killed People', fontsize=15) plt.xticks(index, labels, fontsize=12, rotation=90) plt.title('Number of people killed by countries') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="JqVZCBWMFMN3" outputId="783b961c-4548-4b5a-d5f1-bc14b01d9c98" labels = countryKillFormatData.columns.tolist() labels = labels[152:206] index = np.arange(len(labels)) transpoze = countryKillFormatData.T values = transpoze.values.tolist() values = values[152:206] values = [int(i[0]) for i in values] colors = ['red', 'green', 'blue', 'purple', 'yellow', 'brown', 'black', 'gray', 'magenta', 'orange'] fig, ax = plt.subplots(1, 1) ax.yaxis.grid(True) fig_size = plt.rcParams["figure.figsize"] fig_size[0]=25 fig_size[1]=25 plt.rcParams["figure.figsize"] = fig_size plt.bar(index, values, color = colors, width = 0.9) plt.ylabel('Killed 
People', fontsize=15) plt.xticks(index, labels, fontsize=12, rotation=90) plt.title('Number of people killed by countries') plt.show() # + [markdown] id="2hgP-xGqFPVf" # Terrorist acts in the Middle East and northern Africa have been seen to have fatal consequences. The Middle East and North Africa are seen to be the places of serious terrorist attacks. In addition, even though there is a perception that Muslims are supporters of terrorism, Muslims are the people who are most damaged by terrorist attacks. If you look at the graphics, it appears that Iraq, Afghanistan and Pakistan are the most damaged countries. All of these countries are Muslim countries.
Task_4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "26abadea2cdf9d863717519cf3bb333d", "grade": false, "grade_id": "cell-dc7cbe8a9b702595", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # Perceptron Exercise
# -

import sklearn.datasets
import matplotlib.pyplot as plt
import numpy as np

# We will use the famous iris data set
data_dic = sklearn.datasets.load_iris()
features = data_dic['data']
targets = data_dic['target']
# One feature matrix per iris species (target labels 0, 1, 2).
c1 = features[targets==0]
c2 = features[targets==1]
c3 = features[targets==2]

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "ad6e5a6f9f5fd80cd173069104cfb632", "grade": false, "grade_id": "cell-319446f6c6f20c57", "locked": true, "schema_version": 3, "solution": false, "task": false}
# This will show the data:
# -

# Scatter plot of the first two features for the first two classes.
ind1, ind2 = 0,1
plt.scatter(c1[:,ind1],c1[:,ind2], color='red', marker='s', alpha=0.5, label="Setosa")
plt.scatter(c2[:,ind1],c2[:,ind2], color='blue', marker='x', alpha=0.5, label="Versicolour")
plt.legend()
plt.xlabel("sepal length [cm]")
plt.ylabel("sepal width [cm]");

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "c348ea4d6a805c06616a89a580b3feee", "grade": false, "grade_id": "cell-021859f7bb1befce", "locked": true, "schema_version": 3, "solution": false, "task": false}
# This is used to get a smaller sample to play with

# +
def subSample(nData):
    """Build a small two-feature, two-class training set.

    Takes the first `nData` Setosa samples (labelled +1) and the first
    `nData` Versicolour samples (labelled -1), keeping only the first
    two features of each.  Returns (X, Y) with X of shape (2*nData, 2)
    and Y of shape (2*nData,).
    """
    X = np.empty((2*nData,2))
    X[:nData] = c1[:nData,:2]
    X[nData:] = c2[:nData,:2]
    Y = np.empty(2*nData)
    Y[:nData] = np.ones(nData)
    Y[nData:] = -np.ones(nData)
    return X,Y

X, Y = subSample(5)

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "61536b16aa0c05a7f2a3d457bf13b02b", "grade": false, "grade_id": "cell-d0a9d8a0233385a5", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Exercise

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "25bffbdeb8783db4eb3b011802db700c", "grade": false, "grade_id": "cell-96536402f756dcff", "locked": true, "schema_version": 3, "solution": false, "task": false}
# This is the decision function $\phi$:

# +
def phi(x):
    """Threshold (sign-like) activation: -1.0 if x <= 0, else 1.0."""
    if x<=0:
        return -1.0
    else:
        return 1.0

# Vectorize so phi also works element-wise on arrays.
phi = np.vectorize(phi)

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b32f14807bbf94d1375f99f63351aa74", "grade": false, "grade_id": "cell-2cca13ed1424c392", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Implement the function to make a prediction for an input vector `x` and a weight vector `w`.

# + deletable=false nbgrader={"cell_type": "code", "checksum": "fb9f352c779d95cef40df4ab6639016d", "grade": false, "grade_id": "cell-596cc24717e45eda", "locked": false, "schema_version": 3, "solution": true, "task": false}
def predictOne(x, w):
    """Perceptron prediction for a single sample.

    `w[0]` is the bias and `w[1:]` are the feature weights, so `w` has
    one more entry than `x`.  Returns phi(w[0] + sum_i w[i+1]*x[i]),
    i.e. either -1.0 or 1.0.
    """
    tmp = 0
    tmp += w[0]
    for i in range(len(x)):
        tmp += w[i + 1] * x[i]
    return phi(tmp)
# your function should return either -1. or 1.
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "6e06a2acc30c63b5b03e15c53a0faf5d", "grade": true, "grade_id": "cell-018e78431b0f84d3", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false} assert predictOne(np.array([0.0,0.0]) , np.array([0.1,3.2,7.4])) == 1.0 assert predictOne(np.array([0.0,0.0]), np.array([-0.1,3.2,7.4])) == -1.0 assert predictOne(np.array([0.3,-0.7]), np.array([0.1,3.2,7.4])) == -1.0 assert predictOne(np.array([0.3,0.7]), np.array([0.1,3.2,7.4])) == 1.0 # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "e7fce14df25c5153934719555aba8bd7", "grade": false, "grade_id": "cell-1e4d406f0b5167dc", "locked": true, "schema_version": 3, "solution": false, "task": false} # Implement the same function but with an array of values. `X` here will be a `n_d` x `n_f` array, where `n_d` is the number of data points and `n_f` is the number of input per data point. # + deletable=false nbgrader={"cell_type": "code", "checksum": "a3f4fb13b5f4ee3e1bb00df73237290b", "grade": false, "grade_id": "cell-59123c9e070046dd", "locked": false, "schema_version": 3, "solution": true, "task": false} def predictMany(X, w): result = [] for x in X: result.append(predictOne(x, w)) return np.array(result) # your function should return a list/array of n_d values -1 or 1 # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "94b1f3ba7d74c545294c5d13cd96da6d", "grade": true, "grade_id": "cell-6fc335ecd51ef0d7", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false} testX = np.array([ [0.0, 0.0], [0.3,-0.7], [0.3,0.7] ]) assert all(abs(elem) == 1 for elem in predictMany(testX, [0.1,3.2,7.4])), 'Your array should only contain elements with absoulte value 1.' 
assert (predictMany(testX, [0.1,3.2,7.4]) == np.array([1,-1,1])).all() # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b521c363648cd7d109995ae791cc981e", "grade": false, "grade_id": "cell-c23fdd938000409c", "locked": true, "schema_version": 3, "solution": false, "task": false} # Implement the function that returns the updated weight vector according to the perceptron algorithm after running over the entire input data `X`, with labels `Y`, current weight vector `w` and learning rate `eta`. # + deletable=false nbgrader={"cell_type": "code", "checksum": "6196d2854f9754628d6f1aaea1a5e05d", "grade": false, "grade_id": "cell-24fb62e759d5838b", "locked": false, "schema_version": 3, "solution": true, "task": false} def update(X,Y,w,eta): neww = np.array(w) # YOUR CODE HERE for i in range(len(Y)): if predictOne(X[i], neww) != Y[i]: neww[0] += eta * Y[i] for j in range(len(w) - 1): neww[j + 1] += eta * Y[i] * X[i][j] return np.array(neww) # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "23a58edf649a521d0746be2c6f170dcd", "grade": true, "grade_id": "cell-dcf30aa973195628", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false} Xtest = np.array([(-1,),(-2,),(5,),(7,)]) Ytest = np.array([ -1, -1, 1, 1]) wtest = np.array([ 1.5 , 1.0]) assert (update(Xtest,Ytest,wtest,0.1) == np.array([1.4, 1.1])).all() # - # Define a function `fit` that updates the perceptron parameters until a solution is found and returns the number of steps needed to converge and the value of the weight vector. 
def fit(X,Y,w0,eta):
    """Train the perceptron by whole-data passes (first attempt).

    Repeatedly applies `update` until every sample is classified
    correctly, capped at 100 passes so non-separable data cannot loop
    forever.  Returns (n_steps, w): the number of passes that changed
    the weights, and the final weight vector as an array.

    Bug fix: the original version referenced an undefined name `neww`,
    called `predictMany` on a single sample, never refreshed its cached
    predictions, and only updated w[0] and w[1].
    """
    # YOUR CODE HERE
    # your function should return n_steps (as a number) and w (as an array)
    w = np.array(w0, dtype=float)
    n_steps = 0
    for _ in range(100):
        if (predictMany(X, w) == np.asarray(Y)).all():
            break
        w = update(X, Y, w, eta)
        n_steps += 1
    return (n_steps, w)

# + deletable=false nbgrader={"cell_type": "code", "checksum": "4977dbf56eaad08e47c0102c7f3456a1", "grade": false, "grade_id": "cell-e79469877607d0c2", "locked": false, "schema_version": 3, "solution": true, "task": false}
def fit(X,Y,w0,eta):
    """Train the perceptron until the weights reach a fixed point.

    Runs `update` passes over (X, Y); as soon as a pass leaves the
    weights unchanged (no sample misclassified) it returns (i, w),
    where i is the number of passes that modified the weights.
    """
    # YOUR CODE HERE
    # your function should return n_steps (as a number) and w (as an array)
    for i in range(100):
        record_w0 = w0.copy()          # snapshot to detect a fixed point
        w0 = update(X,Y,record_w0,eta)
        if (np.array(w0) == np.array(record_w0)).all():
            return i, np.array(w0)
    # Bug fix: previously this fell off the end (returning None) when
    # 100 passes were not enough to converge; return the cap and the
    # last weights so callers that unpack two values still work.
    return 100, np.array(w0)

# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "214ba1914c0e5eabe89351d7cedcd453", "grade": true, "grade_id": "cell-9031049accce2a39", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
Xtest = np.array([[5.1, 3.5], [4.9, 3. ], [4.7, 3.2], [4.6, 3.1], [5. , 3.6], [7. , 3.2], [6.4, 3.2], [6.9, 3.1], [5.5, 2.3], [6.5, 2.8]])
Ytest = np.array([ 1., 1., 1., 1., 1., -1., -1., -1., -1., -1.])

# these values already fit the data, so we should no do any step and return the original weight vector
test_n, test_w = fit(Xtest,Ytest,[0.2, -0.59, 0.92],0.1)

assert len(fit(Xtest,Ytest,[0.2, -0.59, 0.92],0.1)) == 2, 'Your function needs two return values'
assert isinstance(test_n, int), 'First return value should be an integer'
assert isinstance(test_w, (list, tuple, np.ndarray)), 'Second return value should be an array/list/tuple'
assert test_n == 0
assert (test_w == [ 0.2 , -0.59, 0.92]).all()

# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "76a54e22ade9d99dcee7937c9648865f", "grade": true, "grade_id": "cell-be2061497a2d5b6c", "locked": true, "points": 3, "schema_version": 3, "solution": false, "task": false}
# this test requires some iterations
test_n, test_w = fit(Xtest,Ytest,[0.0, 0.0, 0.0],0.1)
assert test_n == 10
assert np.isclose(test_w , [ 0.2 , -0.59, 0.92]).all()

# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "a84fffb49ec5d31a47006b4d16cc9a10", "grade": false, "grade_id": "cell-41ee6d2d40a90a26", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # Using scikit-learn
#
# This is an example on how to use the perceptron implementation in scikit-learn. Use it to investigate things like
# - does it help to randomize the input
# - what is the effect of changing eta?
# -

from sklearn.linear_model import Perceptron

# let's use a larger sample
X, Y = subSample(50)

# This uses the `Perceptron` class. Using the `max_iter=1` and `warm_start=True` options we ensure that each time the `fit` function is called only one step is performed. This will result in a warning saying we have not converged, but that's ok, this is what we wanted.

clf = Perceptron( max_iter=1 , warm_start=True, shuffle=False, eta0=0.1)
for i in range(20):
    clf.fit(X, Y)
    print (clf.intercept_,clf.coef_)
Core 1/machine learning/ipython notebook/Week 1/Perceptron_Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Newton's method for computing roots
#
# The bisection method is quite general (it works for any continuous function!),
# and converges "geometrically fast": the error at step $n+1$ will be, roughly,
# half of the error of the previous step.
#
# For functions whose derivative is known, however,
# _Newton's method_ is a very powerful alternative,
# since it converges faster.
# Moreover, it does not require knowing two points where the sign of the function differs.
# Let us see how it works.

# ## Geometric idea
#
# Given a point $(x,f(x))$ on the graph of $f$, if we draw the tangent,
# it will be a good approximation of the function "near" $x$.
# So we follow this tangent line until it meets the $x$-axis at the point $(z,0)$,
# hoping that this intersection is close to the true root,
# which is the intersection of the _curve_ described by $f$ and the $x$-axis.
#
# In formulas, we have:
# $$ (z,0) \in T = \big\{ (x, f(x)) + t (1, f'(x)) \mid t \in R \big\} $$
# for the point $(z,0)$ that lies on the tangent line $T$ and also on the $x$-axis
# (since its coordinate $y = 0$).
# Solving the system, we find
# $$ z = x - \frac{f(x)}{f'(x)}. $$
# The presence of $f'(x)$ in the denominator shows that this method works **badly**
# when it is close to a root of $f'$.
# Moreover, Newton's method does not provide a "confidence interval" as bisection does.
#
# Thus, it is very important to have a convergence criterion here in order to stop the iterations.
# In general, it can be given by three different parameters:
#
# - The number of iterations performed: if we have been computing "for a long time", maybe the method is "lost"
# - The distance from $f(x)$ to zero: perhaps we have already computed something sufficiently close to a root,
#   if $\lvert f(x)\rvert \ll 1$
# - The distance from $x$ to a zero: if the difference between two successive points ($x$ and $z$ in our example)
#   is small, then it is _likely_ that we are near a root.

# ## Implementation
#
# To program Newton's method as a recursive function,
# we can draw a parallel with the bisection method.
# In the bisection case, at each step we tested whether the approximation was already close enough
# (for example, whether an estimate of the absolute error was small),
# and otherwise we divided the interval by 2 to keep searching for the root.
#
# Here, we will also estimate the error (only this time there is no longer a _guarantee_
# that the error will be smaller than the estimate),
# and, if it is still "large", we will produce a new point (using the tangent) to keep searching for a root.

def newton(f,df,x, prec=1e-8, maxiter=100):
    """Find a root of f by Newton's method, starting from x.

    f, df   : the function and its derivative
    prec    : stop when the Newton step |f(x)/df(x)| is below this
    maxiter : give up (returning None) after this many recursive calls
    """
    if maxiter == 0: return None
    dx = f(x)/df(x)
    newx = x - dx
    if abs(dx) < prec:
        return newx
    else:
        return newton(f,df,newx, prec,maxiter-1)

from math import sin, cos, pi

def d_cos(x):
    """Derivative of cos."""
    return -sin(x)

newton(cos, d_cos, 2)

# It works REALLY well!  (`_` is IPython's "last output" variable.)
_ - pi/2

# ## Using `print` and `format`
#
# Let us write a function to help us test Newton's method, and compare it with bisection.
# Since we want to compare the values returned by both methods,
# it is important that they be easy to read on screen.
#
# IPython's `Out[]` mechanism (where it would be enough to return some values) is quite useful,
# but it forces us to remember the whole context.
# With `print`, we can include extra textual information,
# besides formatting the values uniformly (using `.format()` or `%`),
# which helps the comparison.
#
# - Examples: of [more common][mkaz] uses, and the [official Python ones][py-ex]
# - Reference: [the Python documentation][doc]
#
# [py-ex]: https://docs.python.org/3/library/string.html#format-examples
# [doc]: https://docs.python.org/3/library/string.html#format-string-syntax
# [mkaz]: https://mkaz.github.io/2012/10/10/python-string-format/

from rootfinding import bissection

def testar(f,df,a,b):
    """Print, side by side, the root found by bisection on [a, b] and
    the one found by Newton's method started at a (tolerance 1e-10)."""
    x = bissection(f,a,b, tol=1e-10)
    y = newton(f,df,a, prec=1e-10)
    print('''\
Bisseção: z ~= {: 18.10e} (f(z) = {: .8f})
Newton : z ~= {: 18.10e} (f(z) = {: .8f})'''.format(x,f(x),y,f(y)))

testar(sin,cos,1,4)

def f(x):
    return x**3 - 2

def df(x):
    return 3*x**2

# NOTE(review): the interval is passed as (2.5, 0), i.e. reversed —
# confirm that `bissection` accepts b < a.
testar(f,df,2.5,0)

# ### Exercise
#
# Modify the bisection and Newton methods so that they also return the total number of times
# the function "entered" the recursion, and use that information in a new `testar` function.

def newton(f,df,x, prec=1e-8, tol=1e-8, maxiter=100):
    ### Answer here
    # Fix: a comment alone is not a valid function body (SyntaxError);
    # raise until the exercise is solved so the file stays loadable.
    raise NotImplementedError

def bissecao(f,a,b,prec=1e-8):
    ### Answer here
    raise NotImplementedError

def testar(f,df,a,b):
    ### Answer here
    raise NotImplementedError

testar(sin,cos,2,4)

testar(f,df,2.5,0)

# ### Exercise
#
# Modify Newton's method so that it also stops when the value of $f(x)$ is smaller than `y_tol`.

def newton(f,df,x, prec=1e-8, tol=1e-8, maxiter=100):
    ### Answer here
    raise NotImplementedError

testar(sin,cos,2,4)

# How many "base cases" does the recursion of this function have?
comp-cientifica-I-2018-2/semana-5/raw_files/Semana3-Parte2-Newton.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Vizzuality/copernicus-climate-data/blob/master/upload_and_define_datasets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="RTLRdjsHkjtl" # # Prepare data for the copernicus-climate project # # https://github.com/Vizzuality/copernicus-climate-data # # `<NAME> (vizzuality.)` # # ## Description # This notebook exports tables of time-series per location, and defines datasets and layers using the API. # # ### TODO # # + add breaks as attributes to zarr data sources # # ``` # MIT License # # Copyright (c) 2020 Vizzuality # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ``` # + [markdown] id="uKARM1_f3Ccg" # # Setup # # Instructions for setting up the computing environment. # + id="5f327xg8JujU" language="bash" # # Remove sample_data # rm -r sample_data # + [markdown] id="FjdjX-Vc3LVK" # ## Linux dependencies # # Instructions for adding linux (including node, ect.) system packages. # + id="-5S4mxFswFUs" outputId="ed5f9e25-bc4d-4a35-f7cd-013b1fb2b087" colab={"base_uri": "https://localhost:8080/", "height": 955} # Packages for projections and geospatial processing # !apt install -q -y libspatialindex-dev libproj-dev proj-data proj-bin libgeos-dev # + [markdown] id="LGWeJje13Tcw" # ## Python packages # + id="JBTcBnwjcQ2x" # connect to Google cloud storage # !pip install -q gcsfs # + id="QcxYM8yVbBgS" outputId="f792c12b-97f1-4f35-d3b1-f8d5d640f708" colab={"base_uri": "https://localhost:8080/", "height": 225} # xarray, Zarr and geometry tools # !pip install -q cftime netcdf4 nc-time-axis zarr xarray bottleneck rtree geopandas shapely --upgrade # + id="vtO6w1MJdgUg" # #!pip uninstall -y earthengine-api # #!pip install 'earthengine-api==0.1.215' # + id="j-r4PF7veZ2r" # Need to restart kernal #import importlib #importlib.reload(earthengine-api) # + id="U53feFohKqnU" outputId="2d8eba2b-4cb6-4732-b602-6c7a36053bad" colab={"base_uri": "https://localhost:8080/", "height": 139} # !pip install -q Skydipper jenkspy palettable ipythonblocks #carto # + id="QhNWobYld0pr" import Skydipper # + id="8s7-DYbxjesM" outputId="27382ca4-dcd4-446d-d50e-8bf26f192439" colab={"base_uri": "https://localhost:8080/", "height": 1000} # Show python package versions # !pip list # + [markdown] id="IK4kYzml3bDD" # ## Authorisation # # Setting up connections and authorisation to cloud services. 
# + id="eaahqMTiD2VE" outputId="444c27cf-19d2-4bfc-c78b-fe130754148f" colab={"base_uri": "https://localhost:8080/", "height": 124} from google.colab import drive drive.mount('/content/drive') # + id="4jTEqa9UEs1G" import os import json import shutil env_fn = 'env-variables.json' # Get json file defining env variable key-value pairs shutil.copyfile(f"/content/drive/My Drive/{env_fn}", f"/root/.{env_fn}") with open(f"/root/.{env_fn}") as f: for k,v in json.load(f).items(): os.environ[k] = v # + [markdown] id="T7jp5JdFrxGm" # ### Google Cloud # # This can be done in the URL or via adding service account credentials. # # If you do not share the notebook, you can mount your Drive and and transfer credentials to disk. Note if the notebook is shared you always need to authenticate via URL. # + id="pIeVojTV21Nr" # Set Google Cloud information gc_project = "skydipper-196010" gc_creds = "<PASSWORD>" gc_user = "<EMAIL>" gcs_prefix = "gs://copernicus-climate" gcs_http_url = "https://storage.googleapis.com/copernicus-climate" # + id="oY_ymKY_oIY8" # For auth WITHOUT service account # https://cloud.google.com/resource-manager/docs/creating-managing-projects #from google.colab import auth #auth.authenticate_user() # #!gcloud config set project {project_id} # + id="7akT-7lZ9x3R" # If the notebook is shared #from google.colab import drive #drive.mount('/content/drive') # + id="dCFS8FOnzScr" # If Drive is mounted, copy GC credentials to home (place in your GDrive, and connect Drive) # !cp "/content/drive/My Drive/{gc_creds}" "/root/.{gc_creds}" # + id="03Tbqeq9zSc0" outputId="ab8c91ad-7fa0-4180-a04a-dd5574ada98e" colab={"base_uri": "https://localhost:8080/", "height": 34} # Auth WITH service account # !gcloud auth activate-service-account {gc_user} --key-file=/root/.{gc_creds} --project={gc_project} # + id="PA4tPZ4-zSc9" outputId="f87cccf0-701d-4d00-c98d-38a40650a35b" colab={"base_uri": "https://localhost:8080/", "height": 347} # Test GC auth # !gsutil ls {gcs_prefix} # + [markdown] 
id="e6M0C53YD1Sv" # ### Skydipper API # # You need to register with the API at https://api.skydipper.com/auth , we then login with our email and password to get an authorisation token. Be aware users need specific authorisation scopes linked to projects. # + id="sjJxtfL6FFkL" # Set API information (note credentials should be defined in ENV) sky_api_app = "copernicusClimate" sky_creds = "skydipper-creds.txt" # + id="SVPJQSi6D56r" # Set up first time # Get auth token from API #import requests #import os #payload = { # "email":os.environ['SKY_API_EMAIL'], # "password":<PASSWORD>['<PASSWORD>'] #} #url = 'https://api.skydipper.com/auth/login' #headers = {'Content-Type': 'application/json'} #r = requests.post(url, json=payload, headers=headers) #r.json() #token= r.json().get('data').get('token') #headers = {'Authorization': f"Bearer {token}"} # + id="ghjaAumrg2IR" # Copy previously generated creds # !mkdir /root/.Skydipper # !cp "/content/drive/My Drive/{sky_creds}" /root/.Skydipper/creds with open("/root/.Skydipper/creds") as f: sky_api_token = f.read() headers = {'Authorization': f"Bearer {sky_api_token}"} # + id="-gDKNDbPgHJQ" outputId="91e54269-6243-4aba-db38-98834f5e43a4" colab={"base_uri": "https://localhost:8080/", "height": 107} # Check it works import Skydipper Skydipper.Dataset('3a46bbff-73bc-4abc-bad6-11be6e99e2cb') # + [markdown] id="sVTq7fzWZxfI" # ### Carto # + id="neu2c80uaSe7" # Set API information (note credentials should be defined in ENV) carto_user = "skydipper" carto_base_url = f"http://172.16.58.3/user/{carto_user}" # + id="z0GltC8-Z0LK" #from carto.auth import APIKeyAuthClient #import os #auth_client = APIKeyAuthClient(api_key=os.environ['CARTO_API_KEY'], base_url=carto_base_url) # + [markdown] id="Vb3bFXTp0JFz" # # Utils # # Generic helper functions used in the subsequent processing. For easy navigation each function seperated into a section with the function name. 
# + [markdown] id="-l-NVtcE8KdV" # ## copy_gcs # + id="h1KygWllicHk" import os import subprocess def copy_gcs(source_list, dest_list, opts=""): """ Use gsutil to copy each corresponding item in source_list to dest_list. Example: copy_gcs(["gs://my-bucket/data-file.csv"], ["."]) """ for s, d in zip(source_list, dest_list): cmd = f"gsutil -m cp -r {opts} {s} {d}" print(f"Processing: {cmd}") r = subprocess.call(cmd, shell=True) if r == 0: print("Task created") else: print("Task failed") print("Finished copy") # + [markdown] id="gTOD9R2KyFJ_" # ## get_cached_remote_zarr # + id="rwZhd30U2hso" import gcsfs import zarr import xarray as xr def get_cached_remote_zarr( group, root, project_id = gc_project, token=f"/root/.{gc_creds}", force_consolidate=False): # Connect to GS gc = gcsfs.GCSFileSystem(project=project_id, token=token) store = gc.get_mapper(root, check=False, create=True) if force_consolidate: # consolidate metadata at root zarr.consolidate_metadata(store) # Check zarr is consolidated consolidated = gc.exists(f'{root}/.zmetadata') # Cache the zarr store #store = zarr.ZipStore(store, mode='r') cache = zarr.LRUStoreCache(store, max_size=4737418240) # Return cached zarr group return xr.open_zarr(cache, group=group, consolidated=consolidated) # + [markdown] id="z1y_R6gT8btq" # ## set_acl_to_public # + id="aWaLaClDVSaK" import subprocess # Set to asset permissions to public for https read def set_acl_to_public(gs_path): """ Set all Google Storage assets to puplic read access. 
Requires GS authentication Parameters ---------- gs_path str The google storage path, note the "-r" option is used, setting the acl of all assets below this path """ cmd = f"gsutil -m acl -r ch -u AllUsers:R {gs_path}" print(cmd) r = subprocess.call(cmd, shell=True) if r is 0: print("Set acl(s) sucsessful") else: print("Set acl(s) failed") #set_acl_to_public("gs://skydipper-water-quality/cloud-masks") # + [markdown] id="G1FpqqZh-lhz" # ## to_geopandas # + id="ddWMvYVQ-qm1" import geopandas as gpd import shapely def to_geopandas(ds, rounding_precision=False): df = ds.reset_coords().to_dataframe().dropna().reset_index() # Return as geopandas object, converting geometry to shapley objects geoms = [shapely.wkb.loads(g, hex=True) for g in df.geometry.values] # Adjust precision if rounding_precision: geoms = [shapely.wkt.loads(shapely.wkt.dumps(g, rounding_precision=rounding_precision)) for g in geoms] return gpd.GeoDataFrame(df, geometry = geoms) # + [markdown] id="zVzHuI3jOwlE" # ## create_admin_dict # + id="7ZK6KwUYSnIw" # Create gid lookup tables import geopandas as gpd import rtree def create_admin_dict(gdfs, debug=False): """ Generates dictionary of admin to lower admin gid codes. 
Input should be a list of geopandas dfs, level 0 to 4.""" # Buffer geometry gdfbs =[gdfs[i][['gid', 'geometry']] for i in range(0, len(gdfs) -1)] for gdfb in gdfbs: gdfb.loc[:,'geometry'] = gdfb.buffer(0.1).values # create dict of conversions return { "admin0to1": gpd.sjoin(gdfbs[0], gdfs[1][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin0to2": gpd.sjoin(gdfbs[0], gdfs[2][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin0to3": gpd.sjoin(gdfbs[0], gdfs[3][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin0to4": gpd.sjoin(gdfbs[0], gdfs[4][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin1to2": gpd.sjoin(gdfbs[1], gdfs[2][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin1to3": gpd.sjoin(gdfbs[1], gdfs[3][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin1to4": gpd.sjoin(gdfbs[1], gdfs[4][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin2to3": gpd.sjoin(gdfbs[2], gdfs[3][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin2to4": gpd.sjoin(gdfbs[2], gdfs[4][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), "admin3to4": gpd.sjoin(gdfbs[3], gdfs[4][['gid', 'geometry', 'geoname', 'admin_level']], op='contains').drop('geometry', axis=1), } # + [markdown] id="acmA04SLSiBX" # ## show_color_blocks # + id="XeEfD25lC-7C" def show_colors_as_blocks(colors, block_size=90): """ Show colors in the IPython Notebook using ipythonblocks. Parameters ---------- block_size : int, optional Size of displayed blocks. 
""" from ipythonblocks import BlockGrid from PIL import ImageColor grid = BlockGrid(len(colors), 1, block_size=block_size) for block, color in zip(grid, colors): block.rgb = ImageColor.getcolor(color, "RGB") grid.show() print(f"\n {colors}:") # + [markdown] id="HyHBss8WuT77" # ## create_breaks # + id="k3ZpSmv7uXho" import numpy as np import jenkspy def create_breaks(da, n, decimals, method='quantiles', null_value = -9999): if method == 'quantiles': q = np.linspace(0, 1, n) #print(q) out = da.quantile(q, skipna=True).values.round(decimals).tolist() if method == 'jenks': out = np.round(jenkspy.jenks_breaks(da.values[np.logical_not(np.isnan(da.values))], nb_class=n), decimals).tolist() if null_value is not None: #print('Adding null value') out = [null_value] + out return out # + [markdown] id="meETwfBJtgED" # ## create_carto_css_cloropleth_ramp # + id="hjmWVbQJtk1B" def create_carto_css_cloropleth_ramp(data_var, color_ramp, breaks, null_color=None, line_width= 0.5, line_color= '#FFFFFF', line_opacity= 0.5): if type(null_color) is str: colors = [null_color] + color_ramp colors = ",".join(colors) breaks = [str(b) for b in breaks] breaks = ",".join(breaks) #print(breaks) out = "#layer {"\ f"polygon-fill: ramp([{data_var}], "\ f"({colors}), "\ f"({breaks}), "\ "'>=')"\ " } #layer::outline { "\ f"line-width: {line_width}; line-color: {line_color}; line-opacity: {line_opacity};"\ "}" return out #data_var = 'max_tasmax' #color_ramp = ['#FEE0D2', '#FCBBA1', '#FC9272', '#FB694A', '#EF3B2C', '#CB181D', '#67000D'] #breaks = create_breaks(da, 7, 1, 'jenks', add_null = True) #create_carto_css_cloropleth_ramp(data_var, color_ramp, breaks, null_color='#F5F5F5', line_width= 0.5, line_color= '#FFFFFF', line_opacity= 0.5) # + [markdown] id="yyIyw_V28ey6" # ## create_mapbox_cloropleth_paint # + id="Dkxf3u9E8oZa" def create_mapbox_cloropleth_paint(nbreaks): ramp = ['interpolate', ['linear'],['get', "{column_name}"]] for n in range(0, nbreaks): ramp = ramp + ["{" + f"break{n}" + "}"] + 
["{" + f"color{n}" + "}"] return {'fill-color': ramp, 'fill-opacity': "{fill_opacity}"} # Example # -------- #create_mapbox_cloropleth_paint(7) # + [markdown] id="qaVV9kM-zEK4" # ## create_layer_config # + id="xVKEPS_jzJHt" outputId="f260296b-91fe-4833-d41b-5bf2a19b9e0e" colab={"base_uri": "https://localhost:8080/", "height": 1000} import json def create_layer_config( layer_id, layer_type, layer_params, render_layers, provider_type, provider_account, provider_layer_sql = "SELECT * FROM {table_name}", sql_params = None): # check for sql_parans and add to sql if sql_params: for k in sql_params.keys(): provider_layer_sql = provider_layer_sql + " {" + f"{k}" + "}" if layer_type == "vector": out = { "id": layer_id, "params": layer_params, "source": { "type": "vector", "provider": { "type": provider_type, "account": provider_account, "layers": [{ "options": { "sql": provider_layer_sql, "type": "cartodb" } }] } }, "render": { "layers": render_layers, "type": "vector", "version": "3.0" } } if sql_params: # check for sql_parans and add to sql for k in sql_params.keys(): provider_layer_sql = provider_layer_sql + " {" + f"{k}" + "}" # add sql_params out.update({"sqlParams": sql_params}) return out # Example # ------- # Create a layer where the data source is carto-skydipper # and the layer is rendered as a mapboxGL chloropleth # Set layer type layer_type = "vector" provider_type = "carto-skydipper" provider_account = "skydipper" # Set the layers parameters, each <key> in the config will be replaced by value layer_params = { "table_name": 'historical_total_zs_nuts_level_234', "column_name" : 'max_tasmax', } # Set the colors, breaks, fill opacity, line color and width for the cloropleth colors = ['#FEE0D2', '#FCBBA1', '#FC9272', '#FB694A', '#EF3B2C', '#CB181D', '#67000D'] breaks = [0,2,4,5,7,10,15] fill_opacity = 0.75 # Add to layers parameters layer_params.update(zip([f"break{n}" for n in range(0, len(breaks))], breaks)) layer_params.update(zip([f"color{n}" for n in range(0, 
len(colors))], colors)) layer_params.update({"fill_opacity": fill_opacity}) # Generate the layer id layer_id = f"map-box-chloropleth-{len(breaks)}" # Set data SOURCE # set the provider sql provider_layer_sql = "SELECT * FROM {table_name}" # add extra sql value sql_params = None#{"where": {"admin_level": 2}} #,"and": { "experiment" : "{experiment}"}} # Set RENDER for MapboxGL Chloropleth map # generate mapbox paint object paint_object = create_mapbox_cloropleth_paint(nbreaks = len(breaks)) # make the render_layers list render_layers = [{"paint": paint_object, "source-layer": "layer0", "type": "fill"}] # make layerConig dict print(json.dumps(create_layer_config(layer_id, layer_type, layer_params, render_layers, provider_type, provider_account, provider_layer_sql, sql_params), indent=4)) # + [markdown] id="J2Jpg5Xx2hmq" # ## create_legend_config # + id="Bb8-EHMD2mYG" outputId="76781b2a-2a88-4a20-b1c8-f2d1ba1b76aa" colab={"base_uri": "https://localhost:8080/", "height": 156} def create_legend_config(names, colors): out = {"type": "basic", "items": [{ "color": f"{str(color)}", "name": f"{str(name)}"} for color, name in zip(colors, names)]} return out create_legend_config(['NA',0.0,0.0,1.6,2.0,2.5,3.2,4.2,5.2], ['#FEE0D2', '#FCBBA1', '#FC9272', '#FB694A', '#EF3B2C', '#CB181D', '#67000D']) # + [markdown] id="gttxdYICLhIy" # ## create_interaction_config # + id="lPA2iez9Llqa" def create_interaction_config(dvars, dtypes, formats, names): out = {"output": [{"format": f, "type": dtype, "property": name, "column": dvar} for dvar, dtype, f, name in zip(dvars, dtypes, formats, names)]} return out #import json #dvars = ["max_tasmax", "min_tasmin", "total_heatwave_alerts", "total_coldsnap_warnings", "total_tasmin_std"] #dtypes = [dss[dvar].dtype.name for dvar in dvars] #formats = [None for i in dvars] #names = [dvar.replace("_", " ").capitalize() for dvar in dvars] #json.dumps(create_interaction_config(dvars, dtypes, formats, names)) # + [markdown] id="GBPS3DALtie0" # # Processing 
# # Data processing organised into sections. # + [markdown] id="2iUjtVp0LNaG" # ## Geometries and GID look-up table # + [markdown] id="7KVRAqCxxWsX" # ### Write admin lookup to CSV and geometries to GeoJSON # + id="RsH6Ded3MMfG" outputId="db01d112-9855-4b46-ec3e-42fb8a35d6ee" colab={"base_uri": "https://localhost:8080/", "height": 51} import pprint import gcsfs from dask.diagnostics import ProgressBar, Profiler, ResourceProfiler, CacheProfiler, visualize import json import encodings import numpy as np p= "zonal_stats" # Make name id JSON gda = get_cached_remote_zarr(group = 'nuts-2016-lau-2018', root = "copernicus-climate/european-nuts-lau-geometries.zarr") gdas = gda.where((gda.admin_level.isin([0, 2, 3, 4]))&(gda.iso3=='ESP'), drop=True) gdf = to_geopandas(gdas, 6) centroids = gdf.centroid fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with Profiler() as prof, ResourceProfiler(dt=1) as rprof, CacheProfiler() as cprof: with ProgressBar(): with fs.open(f"{gcs_prefix}/{p}/geoname_gid_lookup_esp_nuts_lau_levels_0234.json", 'w', encoding='utf-8') as f: out = {"locations":[ {"geoname":geoname, "gid":gid, "admin_level":admin_level, "longitude": np.round(x,6), "latitude": np.round(y,6)}\ for geoname, gid, admin_level, x, y \ in zip(gdas.coords['geoname'].values, gdas.coords['gid'].values, gdas.coords['admin_level'].values.tolist(), centroids.x, centroids.y)]} json.dump(out, f) #pprint.pprint(json.dumps(out, ensure_ascii=False),indent=4) # + [markdown] id="ExFTueERShw2" # ### Set ACLs to public # + id="N5LC-quWShw4" outputId="84834805-b39d-406d-d62c-56ad0d4361a1" colab={"base_uri": "https://localhost:8080/", "height": 51} # Set ACLs to public p="zonal_stats" set_acl_to_public(f"{gcs_prefix}/{p}/") # + id="K-zjxgsfHy2a" outputId="23288db7-b120-427f-8c67-2bd31290249f" colab={"base_uri": "https://localhost:8080/", "height": 1000} # %%time # Write CSV to GCS import gcsfs import pandas as pd from dask.diagnostics import ProgressBar, Profiler, 
ResourceProfiler, CacheProfiler, visualize import json # Geometries gda = get_cached_remote_zarr(group = 'nuts-2016-lau-2018', root = "copernicus-climate/european-nuts-lau-geometries.zarr") print(gda) p = "zonal_stats" fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with Profiler() as prof, ResourceProfiler(dt=1) as rprof, CacheProfiler() as cprof: with ProgressBar(): with fs.open(f"{gcs_prefix}/{p}/admin_lookup_esp_nuts_lau_levels_0to4.csv", 'w') as f: print("\nwriting Admin. lookup\n") # Create admin lookup dictionary and GeoJSON files levels = [0,1,2,3,4] gdfs = [to_geopandas(gda.where((gda.admin_level==l)&(gda.iso3=='ESP'), drop=True), rounding_precision=6) for l in levels] admin_dict = create_admin_dict([to_geopandas(gda.where((gda.admin_level==l)&(gda.iso3=='ESP'), drop=True), rounding_precision=6) for l in levels]) pd.concat(admin_dict.values())[['admin_level', 'gid_left','gid_right', 'geoname']].to_csv(f, index=False) print("\nwriting GeoJSON\n") # Write to GeoJSON # FIXME: Geopandas does not play well with stream as path! pd.concat(gdfs).to_file("geometries_esp_nuts_lau_levels_0to4.geojson", driver="GeoJSON") copy_gcs(["geometries_esp_nuts_lau_levels_0to4.geojson"], [f"{gcs_prefix}/{p}/geometries_esp_nuts_lau_levels_0to4.geojson"]) # + [markdown] id="UrBZ8lgD5IK-" # ### Set ACLs to public # + id="YkSeny5f5ILA" outputId="a4ff1368-2158-4f44-d7d8-9c959e82e424" colab={"base_uri": "https://localhost:8080/", "height": 51} # Set ACLs to public set_acl_to_public(f"{gcs_prefix}/{p}/") # + [markdown] id="9ki4IzmVxUYE" # ### Upload to Carto # + id="ZUn1SWcvxUYH" outputId="ba12b1ce-1475-407d-d8b1-bc44342a4743" colab={"base_uri": "https://localhost:8080/", "height": 71} # Upload to Carto # FIXME how to automatically make public?? 
import requests import os # Set some parameters p = 'zonal_stats' tis = ['admin_lookup', 'geometries'] ends = ['csv', 'geojson'] upload_tasks = list() for ti, e in zip(tis, ends): payload = { "api_key":os.environ['CARTO_API_KEY'], "url":f"{gcs_http_url}/{p}/{ti}_esp_nuts_lau_levels_0to4.{e}", "privacy":"public", "interval":86400*7 } url = f"{carto_base_url}/api/v1/synchronizations" headers = {'Content-Type': 'application/json'} r = requests.post(url=url, json=payload, headers=headers) upload_tasks.append(r.json()) for task in upload_tasks: print(task) # + [markdown] id="4wIOXd-5yQ3-" # ### Create Sky API datasets # + id="_4K-ihDLyQ3_" import Skydipper as sky # + id="CBWZKUBvyQ4D" outputId="0c1f2387-fc26-4361-e797-d9c2169307d8" colab={"base_uri": "https://localhost:8080/", "height": 51} # Remember Carto changes all '-' to '_' ! tis = ['admin_lookup', 'geometries'] datasets = list() for ti in tis: atts = { 'name': f"{ti}_esp_nuts_lau_levels_0to4", 'application': ['copernicusClimate'], 'connectorType': 'rest', 'provider': 'cartodb', 'connectorUrl': f"http://172.16.58.3/user/skydipper/dataset/{ti}_esp_nuts_lau_levels_0to4", 'tableName': f"{ti}_esp_nuts_lau_levels_0to4", 'env': 'production' } #print(atts) ds = sky.Dataset(attributes=atts) datasets.append(ds) print(ds) # + id="AX_gdZ3YzGf8" outputId="528bf663-b062-46b2-d28f-f785a7e15508" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[0] # + id="-ZoV97uwzRgf" outputId="0d1632fb-a5f7-4d4f-c6a7-6eeefe1d27e0" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[1] # + [markdown] id="9_TVZgmWGBQD" # ## Create datasets for WIDGETS monthly climatic variables per location # + [markdown] id="_UXCGfmRHKZj" # ### Write CSV tables to storage # + id="eJXS-r4SHIaM" outputId="01294a08-abff-469e-8f25-106f48723890" colab={"base_uri": "https://localhost:8080/", "height": 1000} # %%time # Write CSV to GCS import gcsfs import pandas as pd from dask.diagnostics import ProgressBar, Profiler, 
ResourceProfiler, CacheProfiler, visualize # Set some parameters p = 'zonal_stats' tis = ['historical', 'future-seasonal', 'future-longterm'] fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with Profiler() as prof, ResourceProfiler(dt=1) as rprof, CacheProfiler() as cprof: with ProgressBar(): for ti in tis: print(f"writing {ti}") with fs.open(f"{gcs_prefix}/{p}/{ti}_monthly_zs_nuts-level-234.csv", 'w') as f: xr.merge([get_cached_remote_zarr(f"{ti}-monthly-zs-nuts-level-{l}", 'copernicus-climate/spain-zonal-stats.zarr') for l in [2,3,4]])\ .to_dataframe().reset_index(drop=False).to_csv(f, index=False) # + [markdown] id="au8JNBKa4_0H" # ### Set ACLs to public # + id="RhwLHAP5iO75" outputId="0620dedd-ac12-4564-8c7e-1771652adf29" colab={"base_uri": "https://localhost:8080/", "height": 51} # Set ACLs to public set_acl_to_public(f"{gcs_prefix}/{p}/") # + [markdown] id="v-WxIOp_iGC_" # ### Upload to Carto # + id="w1fVXV_qrEgG" outputId="8513d988-25d6-4e9d-82b0-db3294122173" colab={"base_uri": "https://localhost:8080/", "height": 141} # Upload to Carto # FIXME how to automatically make public?? 
import requests import os # Set some parameters p = 'zonal_stats' tis = ['historical', 'future-seasonal', 'future-longterm'] upload_tasks = list() for ti in tis: #print(f"{gcs_http_url}/{p}/{ti}_monthly_zs_nuts-level-234.csv") payload = { "api_key":os.environ['CARTO_API_KEY'], "url":f"{gcs_http_url}/{p}/{ti}_monthly_zs_nuts-level-234.csv", "privacy":"public", "interval":86400*7 } url = f"{carto_base_url}/api/v1/synchronizations" headers = {'Content-Type': 'application/json'} r = requests.post(url=url, json=payload, headers=headers) upload_tasks.append(r.json()) for task in upload_tasks: print(task) # + [markdown] id="FAGilWksiQiu" # ### Create Sky API datasets # + id="cebytWtzE3s0" import Skydipper as sky # + id="VMDO4zWoFA_D" outputId="8740bd9f-112d-4f5d-e74f-216dcb25cf17" colab={"base_uri": "https://localhost:8080/", "height": 68} # Remember Carto changes all '-' to '_' ! tis = ['historical', 'future_seasonal', 'future_longterm'] datasets = list() for ti in tis: #print(f"{ti}_monthly_zs_nuts-level-234") atts = { 'name': f"{ti}_monthly_zs_nuts-level-234", 'application': ['copernicusClimate'], 'connectorType': 'rest', 'provider': 'cartodb', 'connectorUrl': f"http://35.233.41.65/user/skydipper/dataset/{ti}_monthly_zs_nuts_level_234", 'tableName': f"{ti}_monthly_zs_nuts_level_234", 'env': 'production' } #print(atts) ds = sky.Dataset(attributes=atts) datasets.append(ds) print(ds) # + id="bLpB3woAzY9m" outputId="2d8d0b8b-3144-4ddb-b660-624f2f622a13" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[0] # + id="uVG6KglUzbKU" outputId="de0f08ee-133b-4a24-ce9e-8cc22ac20449" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[1] # + id="k2hjtadBzc87" outputId="8ae956bf-0ff9-41e0-be81-7a1e103ffac8" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[2] # + [markdown] id="02a8IsQzuxG-" # ### Create queries # + id="XJxHuAOEjBHk" # Access Carto queries response def get_timeseries(theme, time_interval, gid = "ES11", 
start_date = "1980-01-01", end_date = "2100-01-01"): # Define SQL conditions # experiment is only future_longterm se = "" we = "" dvs = "" # choose dataset # for future use mean {data_var}_mean # and standard deviation {data_var}_std if time_interval == "future_longterm": se = "experiment, " we = "AND experiment = 'rcp85'" dataset_id = 'bef42c82-2714-4ba0-8694-75e49916013a' table_name = 'future_longterm_monthly_zs_nuts_level_234' if theme == 'heatwaves': data_vars = ["tasmax", "heatwave_alarms", "heatwave_alerts", "heatwave_warnings"] if theme == 'coldsnaps': data_vars = ["tasmin", "coldsnap_alarms", "coldsnap_alerts", "coldsnap_warnings"] dvs = [f"{data_var}_mean, {data_var}_std " for data_var in data_vars] if time_interval == "future_seasonal": dataset_id = 'e1cc3f3e-133a-4a14-b2c2-f3192ee213c3' table_name = "future_seasonal_monthly_zs_nuts_level_234" if theme == 'heatwaves': data_vars = ["tasmax", "heatwave_alarms", "heatwave_alerts", "heatwave_warnings"] if theme == 'coldsnaps': data_vars = ["tasmin", "coldsnap_alarms", "coldsnap_alerts", "coldsnap_warnings"] dvs = [f"{data_var}_mean, {data_var}_std " for data_var in data_vars] if time_interval == "historical": dataset_id = '3a46bbff-73bc-4abc-bad6-11be6e99e2cb' table_name = 'historical_monthly_zs_nuts_level_234' if theme == 'heatwaves': data_vars = ["tasmax", "heatwave_alarms", "heatwave_alerts", "heatwave_warnings", "heatstress_extreme", "heatstress_strong", "heatstress_moderate"] if theme == 'coldsnaps': data_vars = ["tasmin", "coldsnap_alarms", "coldsnap_alerts", "coldsnap_warnings", "coldstress_extreme", "coldstress_strong", "coldstress_moderate"] dvs = [f"{data_var}_mean " for data_var in data_vars] # Convert variables to string dvstring = ", ".join(dvs) #print(dvstring) sql = \ f"SELECT gid, {se}time, {dvstring}"\ f"FROM {table_name} "\ f"WHERE gid = '{gid}' AND time between '{start_date}' AND '{end_date}' {we} "\ "ORDER BY time" #print(sql) url = f"http://api.skydipper.com/v1/query/{dataset_id}/" params 
= {"sql": sql} headers = {'Authorization': f"Bearer {sky_api_token}"} r = requests.post(url=url, params=params, headers=headers) return r # + id="JHkfpwm-s99i" outputId="a16f31ff-270f-42ff-cd63-aa817ccc4feb" colab={"base_uri": "https://localhost:8080/", "height": 666} import urllib themes = ['heatwaves', 'coldsnaps'] time_intervals = ['historical', 'future_seasonal', 'future_longterm'] headers = {'Authorization': f"Bearer {sky_api_token}"} print("\nHeader:\n") print(headers) print("\nAPI queries:\n") for theme in themes: print(f"\n{theme}:\n") for time_interval in time_intervals: r = get_timeseries(theme, time_interval) print(f"\n{time_interval}:\n") print(urllib.parse.unquote_plus(r.url)) # + [markdown] id="kD6GRuUDrxiK" # ## Create datasets for WIDGETS daily PET climatology per month per location # + id="3SMvv89MtdCd" outputId="fcce9e56-c83f-4835-818c-21aac850ef58" colab={"base_uri": "https://localhost:8080/", "height": 214} tst = get_cached_remote_zarr(f"historical-hourly-petmax-quantiles-zs-nuts-level-3", 'copernicus-climate/spain-zonal-stats.zarr').chunk({'gid':-1}) tst.pet_mean.where(tst.pet_mean.notnull(), drop=True) # + [markdown] id="3eF9ZWIrrxiL" # ### Write CSV tables to storage # + id="iQg-3VICrxiL" outputId="954f4b41-e378-4c2a-b33e-bdda1b1b9383" colab={"base_uri": "https://localhost:8080/", "height": 102} # %%time # Write CSV to GCS import gcsfs import pandas as pd from dask.diagnostics import ProgressBar, Profiler, ResourceProfiler, CacheProfiler, visualize # Set some parameters p = 'zonal_stats' fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with Profiler() as prof, ResourceProfiler(dt=1) as rprof, CacheProfiler() as cprof: with ProgressBar(): for l in [2,3,4]: with fs.open(f"{gcs_prefix}/{p}/historical-hourly-petmax-quantiles-zs-nuts-level-{l}.csv", 'w') as f: get_cached_remote_zarr(f"historical-hourly-petmax-quantiles-zs-nuts-level-{l}", 'copernicus-climate/spain-zonal-stats.zarr').chunk({'gid':-1})\ 
.to_dataframe().reset_index(drop=False).to_csv(f, index=False) # + id="NG0l6ptw14hS" outputId="4f59b1d8-2ded-4aa4-d46b-25669c90f818" colab={"base_uri": "https://localhost:8080/", "height": 419} import pandas as pd tst = pd.read_csv(f"{gcs_http_url}/{p}/historical-hourly-petmax-quantiles-zs-nuts-level-3.csv") tst.dropna() # + [markdown] id="8_QxxVdTrxiP" # ### Set ACLs to public # + id="ORPgZ3eMrxiQ" outputId="9e574f35-c40c-4f93-c43e-1b5e6885d842" colab={"base_uri": "https://localhost:8080/", "height": 51} # Set ACLs to public p ="zonal_stats" set_acl_to_public(f"{gcs_prefix}/{p}/") # + [markdown] id="URURPpS-rxiU" # ### Upload to Carto # + id="ziWGG03lrxiU" outputId="20fa4d19-c7e3-476d-d211-5f3e24f980f0" colab={"base_uri": "https://localhost:8080/", "height": 89} # Upload to Carto # FIXME how to automatically make public?? import requests import os # Set some parameters p = 'zonal_stats' upload_tasks = list() for l in [2,3,4]: #print(f"{gcs_http_url}/{p}/historical-hourly-petmax-quantiles-zs-nuts-level-234.csv") payload = { "api_key":os.environ['CARTO_API_KEY'], "url":f"{gcs_http_url}/{p}/historical-hourly-petmax-quantiles-zs-nuts-level-{l}.csv", "privacy":"public", "interval":86400*7 } url = f"{carto_base_url}/api/v1/synchronizations" headers = {'Content-Type': 'application/json'} r = requests.post(url=url, json=payload, headers=headers) upload_tasks.append(r.json()) for task in upload_tasks: print(task) # + [markdown] id="sfU7cWKUrxiY" # ### Create Sky API datasets # + id="THmWbBDbrxia" import Skydipper as sky # + id="SFxb7Z0frxid" # Remember Carto changes all '-' to '_' ! 
datasets = list() for l in [2,3,4]: #print(f"{ti}_monthly_zs_nuts-level-234") atts = { 'name': f"historical_hourly_petmax_quantiles_zs_nuts_level_{l}", 'application': ['copernicusClimate'], 'connectorType': 'rest', 'provider': 'cartodb', 'connectorUrl': f"http://172.16.58.3/user/skydipper/dataset/historical_hourly_petmax_quantiles_zs_nuts_level_{l}", 'tableName': f"historical_hourly_petmax_quantiles_zs_nuts_level_{l}", 'env': 'production' } #print(atts) ds = sky.Dataset(attributes=atts) datasets.append(ds) # + id="kGl6xyWsrxif" outputId="1b8b3aa8-4ae8-4a8c-d3b0-92a8cb570f7e" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[0] # + id="pv17Evo8Es3g" outputId="13933b51-3f69-41b1-cedc-fb96313c61a3" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[1] # + id="eX2edigfEwXQ" outputId="f687bd4e-5189-4018-a161-f9f371f1664e" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[2] # + [markdown] id="U8LgG9lova11" # ### Create queries # + id="bw_SPYdaviqm" # Access Carto queries response import requests def get_pet_climatology(month = 8, gid = "ES11", admin_level= 2): # Set data_variables data_vars = ["pet_mean"] # Convert variables to string dvstring = ", ".join(data_vars) # Set table name table_name = f"historical_hourly_petmax_quantiles_zs_nuts_level_{admin_level}" # Set dataset ID and table name if admin_level == 2: dataset_id = "3b07a5ef-05fe-4b0d-b6af-8dee4784714e" # Set dataset ID and table name if admin_level == 3: dataset_id = "0fcceb53-ac29-45c3-b0e3-7dab2970f448" # Create SQL query sql = \ f"SELECT gid, month, hour, quantile, {dvstring} "\ f"FROM {table_name} "\ f"WHERE gid = '{gid}' AND month = {month} "\ "ORDER BY hour" print(sql) # Create request url = f"http://api.skydipper.com/v1/query/{dataset_id}/" print(url) params = {"sql": sql} headers = {'Authorization': f"Bearer {sky_api_token}"} r = requests.post(url=url, params=params, headers=headers) return r # + id="Z1aslNHYwu5y" 
outputId="72f99f2c-1a4c-4c6b-f6b4-1b8d6e33de63" colab={"base_uri": "https://localhost:8080/", "height": 88} r = get_pet_climatology(month = 8, gid = "ES11", admin_level=3) print(r.url) # + [markdown] id="FhPi-HYwDPhM" # ## Create datasets for MAPS total climatic variables per location # + [markdown] id="_ytQZtd2XfQx" # ### Write CSV tables to storage # + id="wvPateFVVM-n" outputId="fe907be6-dd47-4045-ab98-5d598544b119" colab={"base_uri": "https://localhost:8080/", "height": 1000} # %%time # Write CSV to GCS import gcsfs import pandas as pd from dask.diagnostics import ProgressBar, Profiler, ResourceProfiler, CacheProfiler, visualize # Set some parameters p = 'zonal_stats' tis = ['historical', 'future-seasonal', 'future-longterm'] fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with Profiler() as prof, ResourceProfiler(dt=1) as rprof, CacheProfiler() as cprof: with ProgressBar(): for ti in tis: print(f"writing {ti}") with fs.open(f"{gcs_prefix}/{p}/{ti}_total_zs_nuts-level-234.csv", 'w') as f: get_cached_remote_zarr(f"{ti}-total-zs-nuts-level-234", 'copernicus-climate/spain-zonal-stats.zarr')\ .to_dataframe().reset_index(drop=False).to_csv(f, index=False) # + [markdown] id="Oh67Il0zyFUh" # ### Set ACLs to public # + id="KXC8x3HryFUp" outputId="3e12cbe6-ce83-4dc8-87cf-2114c4cd518b" colab={"base_uri": "https://localhost:8080/", "height": 51} # Set ACLs to public set_acl_to_public(f"{gcs_prefix}/{p}/") # + [markdown] id="dOB3Jqb5V6I4" # ### Upload to Carto # + id="XwqD0taxV6I8" outputId="dad8da84-f1b3-4a58-da2f-45ef64439d90" colab={"base_uri": "https://localhost:8080/", "height": 88} # Upload to Carto # FIXME how to automatically make public?? 
import requests import os # Set some parameters p = 'zonal_stats' tis = ['historical', 'future-seasonal', 'future-longterm'] upload_tasks = list() for ti in tis: #print(f"{gcs_http_url}/{p}/{ti}_monthly_zs_nuts-level-234.csv") payload = { "api_key":os.environ['CARTO_API_KEY'], "url":f"{gcs_http_url}/{p}/{ti}_total_zs_nuts-level-234.csv", "privacy":"public", "interval":86400*7 } url = f"{carto_base_url}/api/v1/synchronizations" headers = {'Content-Type': 'application/json'} r = requests.post(url=url, json=payload, headers=headers) upload_tasks.append(r.json()) # print overview for task in upload_tasks: print(task) # + [markdown] id="S8s_MSbJW8Yz" # ### Create Sky API datasets # + id="egwjiTE0W8Y1" import Skydipper as sky # + id="3s-KZiF5W8Y7" outputId="0f07d004-7cf1-4268-b380-bbf49842572e" colab={"base_uri": "https://localhost:8080/", "height": 69} # Remember Carto changes all '-' to '_' ! tis = ['historical', 'future_seasonal', 'future_longterm'] datasets = list() for ti in tis: atts = { 'name': f"{ti}_total_zs_nuts-level-234", 'application': ['copernicusClimate'], 'connectorType': 'rest', 'provider': 'cartodb', 'connectorUrl': f"http://172.16.58.3/user/skydipper/dataset/{ti}_total_zs_nuts_level_234", 'tableName': f"{ti}_total_zs_nuts_level_234", 'env': 'production' } #print(atts) ds = sky.Dataset(attributes=atts) datasets.append(ds) print(ds) # + id="JtRjn98Yzllh" outputId="bb342fbf-16ac-4a7f-c4b5-2eff7f3a7d92" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[0] # + id="3rTA9-lizllo" outputId="4129100f-3c0d-4ee2-85fe-a77ef30f5912" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[1] # + id="NKcXwSpnzllq" outputId="445a272c-84dd-4649-9ad0-e6e2da54155c" colab={"base_uri": "https://localhost:8080/", "height": 107} datasets[2] # + [markdown] id="HXAPn6PXXlCd" # ### Add Metadata # + id="D7x8Z25AXoBi" "metadata": [{ "id": "59a4226f7b6c000012baa6f5", "type": "metadata", "attributes": { "dataset": 
"134caa0a-21f7-451d-a7fe-30db31a424aa", "application": "gfw", "resource": { "id": "134caa0a-21f7-451d-a7fe-30db31a424aa", "type": "dataset" }, "language": "es", "name": "", "description": "", "source": "", "citation": "", "license": "", "info": { "dataDownload": "", "organization": "", "source-long": "", "short-description": "", "caution": "", "updateFrequence": "", "dateContent": "", "spatialResolution": "", "geographicCoverage": "", "function": "", "subtitle": "" }, "createdAt": "2017-08-28T14:02:23.744Z", "updatedAt": "2017-08-28T14:02:23.744Z", "status": "published" } }] # + id="M_yWzjuyMuYt" outputId="0161ed4c-a05d-4127-d578-7144bb61f1b8" colab={"base_uri": "https://localhost:8080/", "height": 54} # Get list of subadmins def get_gids(gid='ES', admin_level=2): url = f"http://api.skydipper.com/v1/query/29039f99-5300-4aa9-905b-632e963ee3f4/" sql = \ f"SELECT gid_left, gid_right "\ f"FROM admin_lookup_esp_nuts_lau_levels_0to4 "\ f"WHERE gid_left = '{gid}' AND admin_level = {admin_level}" #print(sql) params = {"sql": sql} headers = {'Authorization': f"Bearer {sky_api_token}"} r = requests.post(url=url, params=params, headers=headers) admin_lookup = r.json().get('data') admin_lookup = [d.get('gid_right') for d in admin_lookup] return admin_lookup, r a, r = get_gids() print(urllib.parse.unquote_plus(r.url)) # + id="6TA6BwYkKHPm" # Access Carto queries response def get_map(theme, time_interval, admin_level=2, gid='ES'): # Get list of subadmins gids = get_gids(gid, admin_level) gids = [f"'{gid}'" for gid in gids] gidstring = ", ".join(gids) #print(gidstring) # Define SQL conditions # experiment is only future_longterm se = "" we = "" dvs = "" # choose dataset # for future use mean {data_var}_mean # and standard deviation {data_var}_std if time_interval == "future_longterm": table_name = 'future_longterm_total_zs_nuts_level_234' dataset_id = "817e02ec-802c-4594-a755-8dca6891175a" se = f"{table_name}.experiment, " we = "AND experiment = 'rcp85'" if theme == 'heatwaves': 
data_vars = ["max_tasmax"]#, "heatwave_alarms", "heatwave_alerts", "heatwave_warnings"] if theme == 'coldsnaps': data_vars = ["min_tasmin"]#, "coldsnap_alarms", "coldsnap_alerts", "coldsnap_warnings"] dvs = [f"{table_name}.{data_var} " for data_var in data_vars] if time_interval == "future_seasonal": dataset_id = "075eb3e5-77bb-4fd7-a6b9-3108ae6ba166" table_name = "future_seasonal_total_zs_nuts_level_234" if theme == 'heatwaves': data_vars = ["max_tasmax"]#, "heatwave_alarms", "heatwave_alerts", "heatwave_warnings"] if theme == 'coldsnaps': data_vars = ["min_tasmin"]#, "coldsnap_alarms", "coldsnap_alerts", "coldsnap_warnings"] dvs = [f"{table_name}.{data_var} " for data_var in data_vars] if time_interval == "historical": dataset_id = "5d0bc927-6780-4f64-ba3d-dc241be6c26d" table_name = 'historical_total_zs_nuts_level_234' if theme == 'heatwaves': data_vars = ["max_tasmax"]#, "heatwave_alarms", "heatwave_alerts", "heatwave_warnings", "heatstress_extreme", "heatstress_strong", "heatstress_moderate"] if theme == 'coldsnaps': data_vars = ["min_tasmin"]#, "coldsnap_alarms", "coldsnap_alerts", "coldsnap_warnings", "coldstress_extreme", "coldstress_strong", "coldstress_moderate"] dvs = [f"{table_name}.{data_var} " for data_var in data_vars] # Convert variables to string dvstring = ", ".join(dvs) #print(dvstring) #SELECT geometries_esp_nuts_lau_levels_0to4.the_geom, geometries_esp_nuts_lau_levels_0to4.geoname, historical_total_zs_nuts_level_234.gid, historical_total_zs_nuts_level_234.max_tasmax FROM historical_total_zs_nuts_level_234 INNER JOIN geometries_esp_nuts_lau_levels_0to4 ON historical_total_zs_nuts_level_234.gid=geometries_esp_nuts_lau_levels_0to4.gid WHERE historical_total_zs_nuts_level_234.gid IN ('ES70', 'ES11', 'ES43', 'ES12', 'ES63', 'ES61', 'ES41', 'ES13', 'ES30', 'ES42', 'ES64', 'ES21', 'ES23', 'ES22', 'ES62', 'ES24', 'ES52', 'ES51', 'ES53') ORDER BY historical_total_zs_nuts_level_234.gid sql = \ f"SELECT geometries_esp_nuts_lau_levels_0to4.the_geom, 
geometries_esp_nuts_lau_levels_0to4.geoname, {table_name}.gid, {se} {dvstring}"\ f"FROM {table_name} "\ f"JOIN geometries_esp_nuts_lau_levels_0to4 ON {table_name}.gid=geometries_esp_nuts_lau_levels_0to4.gid "\ f"WHERE {table_name}.gid IN ({gidstring}) {we} "\ f"ORDER BY {table_name}.gid" print(sql) url = f"http://api.skydipper.com/v1/query/{dataset_id}/" params = {"sql": sql} headers = {'Authorization': f"Bearer {sky_api_token}"} r = requests.post(url=url, params=params, headers=headers) return r # + id="8VxceMw3KHPy" outputId="c40993c1-6554-4417-d8c9-e8e6b09856f8" colab={"base_uri": "https://localhost:8080/", "height": 768} import urllib themes = ['heatwaves', 'coldsnaps'] time_intervals = ['historical', 'future_seasonal', 'future_longterm'] headers = {'Authorization': f"Bearer {sky_api_token}"} print("\nHeader:\n") print(headers) print("\nAPI queries:\n") for theme in themes: print(f"\n{theme}:\n") for time_interval in time_intervals: r = get_map(theme, time_interval) print(f"\n{time_interval}:\n") print(urllib.parse.unquote_plus(r.url)) # + id="yhAD6rbrcWoy" outputId="17580865-645b-4314-c2e1-e664083355c9" colab={"base_uri": "https://localhost:8080/", "height": 336} ds = sky.Dataset(id_hash="5d0bc927-6780-4f64-ba3d-dc241be6c26d") sql = "SELECT * FROM historical_total_zs_nuts_level_234 JOIN geometries_esp_nuts_lau_levels_0to4 ON historical_total_zs_nuts_level_234.gid=geometries_esp_nuts_lau_levels_0to4.gid WHERE historical_total_zs_nuts_level_234.gid IN ('['ES70', 'ES11', 'ES43', 'ES12', 'ES63', 'ES61', 'ES41', 'ES13', 'ES30', 'ES42', 'ES64', 'ES21', 'ES23', 'ES22', 'ES62', 'ES24', 'ES52', 'ES51', 'ES53']', '<Response [200]>') ORDER BY historical_total_zs_nuts_level_234.gid" ds.query(sql=sql) # + [markdown] id="IggSHWvrSlmT" # ## Create Sky API Dataset Layers # # Create a single layer for each variable of each theme per admin level. For future-longterm, also per experiment. 
# # # + define basic structure # # + get breaks for each variable # # + create cartocss # # + create layers # + id="gLN5ti-NQAox" import Skydipper as sky # + [markdown] id="Gg5iZ84nSsMw" # ### Define color palletes # + id="_1TN_tLsV3s5" outputId="05f69bf0-f10b-41e0-a261-0c6bd61f4a63" colab={"base_uri": "https://localhost:8080/", "height": 1000} #import palettable as pal # Define function to choose color ramps print("\nDiverging blues to reds:\n") color_ramp_blue_red = ["#08306B", "#0A519C", "#2171B5", "#4292C5", "#6BAED6", "#C6DBEF", "#FEE0D2", "#FCBBA1", "#FC9272", "#FB694A", "#EF3B2C", "#CB181D", "#67000D"] show_colors_as_blocks(color_ramp_blue_red) # negative color_ramp_blue_white = ["#08306B", "#0A519C", "#2171B5", "#4292C5", "#6BAED6", "#C6DBEF", "#F1EEF6"] # positive color_ramp_white_red = ["#FEE0D2", "#FCBBA1", "#FC9272", "#FB694A", "#EF3B2C", "#CB181D", "#67000D"] # positive color_ramp_white_blue = ["#F1EEF6", "#D0D1E6", "#A6BDDB", "#74A9CF", "#3690C0", "#0570B0", "#034E7B"] def cr(dvar): out = ['Error no color ramp found for variable'] if "cold" in dvar: out = color_ramp_white_blue if "heat" in dvar: out = color_ramp_white_red if "min" in dvar: out = color_ramp_blue_white if "max" in dvar: out = color_ramp_white_red return out for dvar in ["max_tasmax", "min_tasmin", "total_heatwave_alerts", "total_coldsnap_warnings", "total_tasmin_std"]: print(f"\n{dvar}:\n") show_colors_as_blocks(cr(dvar)) # + [markdown] id="_CaH0NGbS0oK" # ### Create layer attributes # + id="YR7B8SL4WsKG" outputId="9ccd7640-a594-4c21-a68c-cd84d50a69f0" colab={"base_uri": "https://localhost:8080/", "height": 1000} # %%time # Create layer attributes # for a VCS-API carto table rendered using MapBOXGL as a chloropleth # admin_level is external selector in all layers # experiment is also selectable for future-longterm # Dataset specific params tis = ['historical', 'future-seasonal', 'future-longterm'] provider_type = "carto-skydipper" provider_account = "skydipper" # chloropleth graphical 
params nbreaks = 7 breaks_method = 'jenks' fill_opacity = 0.75 paint_object = create_mapbox_cloropleth_paint(nbreaks) # make the render_layers list render_layers = [{"paint": paint_object, "source-layer": "layer0", "type": "fill"}] # loop through time_intervals to create layer attributes per dataset ly_list = list() for ti in tis: print(f"\nProcessing {ti}") scenario_text = "" if ti == "future-longterm": table_name = 'future_longterm_total_zs_nuts_level_234' dataset_id = "5cc5ee88-13f1-464c-a30e-73d3d556a8cd" scenario_text = " for scenario CMIP5 RCP45 and RCP85" provider_layer_sql = "SELECT * FROM {table_name} WHERE admin_level = {admin_level} AND experiment = {experiment}" sd = '2020' ed = '2100' if ti == "future-seasonal": dataset_id = "df6f7198-3b05-4d14-919a-6726e34f1603" table_name = "future_seasonal_total_zs_nuts_level_234" provider_layer_sql = "SELECT * FROM {table_name} WHERE admin_level = {admin_level}" sd = '2020-02-01' ed = '2020-07-30' if ti == "historical": dataset_id = "4212100b-d1da-47b4-9fdd-e2564ca955bb" table_name = 'historical_total_zs_nuts_level_234' provider_layer_sql = "SELECT * FROM {table_name} WHERE admin_level = {admin_level}" sd = '1980' ed = '2019' print(f"Dataset {dataset_id} {table_name}") # get zarr dataset dss = get_cached_remote_zarr(f"{ti}-total-zs-nuts-level-234", 'copernicus-climate/spain-zonal-stats.zarr') print("\n...getting data variable names\n") dvars = list(dss.data_vars.keys()) dvars = [dvar for dvar in dvars if (dvar.startswith('min_')) or (dvar.startswith('max_')) or (dvar.startswith('total_'))] print(dvars) print("\n...creating breaks\n") breaks = [create_breaks(dss[dvar], 7, 1, method=breaks_method, null_value = None) for dvar in dvars] print(breaks) print("\n...creating legendConfig\n") legendConfigs = [create_legend_config(b, cr(dvar)) for b, dvar in zip(breaks, dvars)] print(legendConfigs) print("\n...creating interaction config\n") dvars_ic = list(dss.data_vars.keys()) dvars_ic = [dvar for dvar in dvars_ic if 
(dvar.startswith('min_')) or (dvar.startswith('max_')) or (dvar.startswith('total_')) or (dvar.startswith('date_'))] dtypes = [dss[dvar].dtype.name for dvar in dvars_ic] formats = [None for i in dvars_ic] names = [dvar.replace("_", " ").capitalize() for dvar in dvars_ic] interactionConfig = create_interaction_config(dvars_ic, dtypes, formats, names) print(interactionConfig) print("\n...creating layer config\n") # Create a layer where the data source is carto-skydipper # and the layer is rendered as a mapboxGL chloropleth def mk_lc_params(b, dvar): # Set the layers parameters, each <key> in the config will be replaced by value lp = { "admin_level": 2, "experiment": "'rcp85'", "table_name": table_name, "column_name" : dvar, "fill_opacity": fill_opacity } # Add to layers parameters colors = cr(dvar) lp.update(zip([f"break{n}" for n in range(0, len(b))], b)) lp.update(zip([f"color{n}" for n in range(0, len(colors))], colors)) return lp layerConfigs = [create_layer_config( f"map-box-chloropleth-{nbreaks}", "vector", mk_lc_params(b, dvar), render_layers, provider_type, provider_account, provider_layer_sql, sql_params = None) for b, dvar in zip(breaks, dvars)] # Create the layers print("\n...creating api layer attributes\n") atts_list = list() for dvar, layerConfig, legendConfig, in zip(dvars, layerConfigs, legendConfigs): # create name dvar_name = dvar.replace("_", " ") print(dvar_name) # time interval name time_name = ti.replace("-", " ").capitalize() new_atts = { "name": f"{time_name} {dvar_name} admin level 2, 3, and 4", "dataset": dataset_id, "description": f"{time_name} {dvar_name} averaged per admin. 
2, 3, and 4 geometry between {sd} to {ed}{scenario_text}", "application": ["copernicusClimate"], "iso": ['ESP'], "env": "production", "provider": "cartodb", "layerConfig": layerConfig, "legendConfig": legendConfig, "interactionConfig": interactionConfig, "applicationConfig": {}, "staticImageConfig": {} } atts_list.append(new_atts) # add to list ly_list.append(atts_list) print("\ndone!\n") # + id="HvnBV-37pOfw" outputId="51eb62c9-67f0-4a6d-810d-2ac0c918191e" colab={"base_uri": "https://localhost:8080/", "height": 1000} import pprint print(len(ly_list)) # View first result for att_list in ly_list: print(len(att_list)) pprint.pprint(att_list[0], indent=2) # + [markdown] id="8ACL51v_fxti" # ### Remove previous layers # + id="FZ5M7RUUVsj_" outputId="817f69e6-381f-40e4-d4a3-7494bcf4357b" colab={"base_uri": "https://localhost:8080/", "height": 104} # %%time import requests def rmv_layers_from_dataset(dataset_id): ds = sky.Dataset(id_hash=dataset_id) #print(ds.layers) lyids = [ly.id for ly in ds.layers] for layer_id in lyids: url = f"https://api.skydipper.com/v1/dataset/{dataset_id}/layer/{layer_id}" headers = {'Authorization': f"Bearer {sky_api_token}"} print(layer_id) r = requests.delete(url=url, headers=headers) print(r) # Remove any previous layers for dataset_id in ["5cc5ee88-13f1-464c-a30e-73d3d556a8cd", "df6f7198-3b05-4d14-919a-6726e34f1603" , "4212100b-d1da-47b4-9fdd-e2564ca955bb"]: print(dataset_id) rmv_layers_from_dataset(dataset_id) # + [markdown] id="kgLBdLgSgzNp" # ### Add layers to API # + id="6k41F9GzX21Y" import Skydipper as sky # + id="M8xua-JUbHtT" outputId="385ecdf1-c384-4e14-e721-85d7fdb81ff5" colab={"base_uri": "https://localhost:8080/", "height": 885} # %%time for att_list in ly_list: for att in att_list: #print(att) ly = sky.Layer(attributes=att) print(ly) # + [markdown] id="lTBr2nnBBuXO" # ### Export layer summary # + id="Su4nD230TbeU" import Skydipper as sky # + id="N82iyVz7uapp" outputId="f57c23ce-5868-4e5d-b6ea-d27f731dfeb4" colab={"base_uri": 
"https://localhost:8080/", "height": 1000} # %%time # Export layer summary import json import gcsfs tis = ['historical', 'future-seasonal', 'future-longterm'] out = {} for ti in tis: dataset_id = "" if ti == "future-longterm": dataset_id = "5cc5ee88-13f1-464c-a30e-73d3d556a8cd" if ti == "future-seasonal": dataset_id = "df6f7198-3b05-4d14-919a-6726e34f1603" if ti == "historical": dataset_id = "4212100b-d1da-47b4-9fdd-e2564ca955bb" ds = sky.Dataset(id_hash=dataset_id) layers = ds.layers out[ti] = {"name":ds.attributes["name"], "id":ds.id, "layers": [{"name":ly.attributes["name"], "id":ly.id, "endpoint": f"https://api.skydipper.com/v1/layer/{ly.id}"} for ly in layers] } print(f"\n{ti}\n") print(ds, ":") for ly in layers: print(" --- ", ly) #print(" ", ly.attributes.get("description")) # Export #pprint.pprint(out) fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with fs.open(f"{gcs_prefix}/zonal_stats/layer_definitions.json", 'w') as f: json.dump(obj=out, fp=f) # + [markdown] id="gDYCaelb66Zm" # ### Export demo layer summary # + id="de0H7cdxkKlK" outputId="c6123529-5033-468f-8bb3-8a43bf8d04a4" colab={"base_uri": "https://localhost:8080/", "height": 1000} # Export demo layer summary import json fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with fs.open(f"{gcs_prefix}/zonal_stats/layer_definitions.json", 'r') as f: ls = json.load(f) #print(ls) def get_layer(ti, name): return [l for l in ls.get(ti).get('layers') if l.get('name') == name] out = { "historical": { "heatwaves": { "layers": get_layer('historical', "Historical max tasmax admin level 2, 3, and 4") }, "coldsnaps": { "layers": get_layer('historical', "Historical min tasmin admin level 2, 3, and 4") }, "thermalcomfort": { "layers": get_layer('historical', "Historical max petmax admin level 2, 3, and 4") + get_layer('historical', "Historical min petmin admin level 2, 3, and 4") } }, "future-seasonal": { "heatwaves": { "layers": get_layer('future-seasonal', "Future 
seasonal max tasmax admin level 2, 3, and 4") }, "coldsnaps": { "layers": get_layer('future-seasonal', "Future seasonal min tasmin admin level 2, 3, and 4") } }, "future-longterm": { "heatwaves": { "layers": get_layer('future-longterm', "Future longterm max tasmax admin level 2, 3, and 4") }, "coldsnaps": { "layers": get_layer('future-longterm', "Future longterm min tasmin admin level 2, 3, and 4") } } } # Export print(json.dumps(out, indent=4)) fs = gcsfs.GCSFileSystem(project=gc_project, token=f"/root/.{gc_creds}") with fs.open(f"{gcs_prefix}/zonal_stats/demo_map_layer_definitions.json", 'w') as f: json.dump(obj=out, fp=f) # + [markdown] id="QZEkIf_I0DPN" # ### Set ACLs to public # + id="gTpj-gLs0DPX" outputId="a1e43c60-9b1a-40a8-b59a-fafefa577526" colab={"base_uri": "https://localhost:8080/", "height": 52} # Set ACLs to public p = "zonal_stats" set_acl_to_public(f"{gcs_prefix}/{p}/")
upload_and_define_datasets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook based on zflemings:
# https://nbviewer.jupyter.org/github/zflamig/dask-era5/blob/main/notebook/era5_fargate_dask.ipynb
#
# Explores the public ERA5 dataset on S3 (zarr and NetCDF forms), builds a
# multi-year lazy dataset of hourly precipitation, reduces it to daily totals
# summed over a California bounding box, and saves the result to a local zarr
# store. Requires network access to the public `era5-pds` S3 bucket.

import xarray as xr
import fsspec
import dask
import s3fs
import numpy as np

xr.set_options(display_style="html")  # display dataset nicely

# +
# Experiments opening a single month of ERA5 in zarr form.
#ds = xr.open_zarr('https://era5-pds.s3.us-east-1.amazonaws.com/zarr/1983/05/data/northward_wind_at_10_metres.zarr')#,consolidated=True)
#fname = ' http://era5-pds.s3.amazonaws.com/zarr/2008/01/data/air_temperature_at_2_metres.zarr'
fname = 'https://era5-pds.s3.us-east-1.amazonaws.com/zarr/1983/05/data/air_pressure_at_mean_sea_level.zarr'
#ds = xr.open_zarr(fname)#,consolidated=True)
#ds

# Anonymous (unauthenticated) access to the public era5-pds bucket.
fs = s3fs.S3FileSystem(anon=True)
datestring = 'era5-pds/zarr/1983/05/data/air_pressure_at_mean_sea_level.zarr/'
# Open the zarr store through an S3 key/value mapping interface.
mslp_zarr = xr.open_zarr(s3fs.S3Map(datestring, s3=fs))
#mslp_zarr
# -

# Open one month of hourly precipitation in NetCDF form to inspect its layout.
#url = 's3://era5-pds/2010/01/data/air_temperature_at_2_metres.nc'
url = 's3://era5-pds/2010/01/data/precipitation_amount_1hour_Accumulation.nc'
ncfile = fsspec.open(url)
ds = xr.open_dataset(ncfile.open())
ds

# +
# Build the lists of year/month strings covering 1979-2019, months "01"-"12".
years,months=[],[]
[years.append(str(y)) for y in np.arange(1979,2020)]
[months.append(str(m).zfill(2)) for m in np.arange(1,13)]
#years = ["2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019"]
#months = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]
file_pattern = 's3://era5-pds/{year}/{month}/data/precipitation_amount_1hour_Accumulation.nc'  #air_temperature_at_2_metres.nc'

# Each file open is deferred with dask.delayed so open_mfdataset(parallel=True)
# can fan the reads out across workers.
@dask.delayed
def s3open(path):
    """Return a lazily-opened handle for an S3 object (anonymous access)."""
    fs = s3fs.S3FileSystem(anon=True, default_fill_cache=False)
    return fs.open(path)

files_mapper = [s3open(file_pattern.format(year=year,month=month)) for year in years for month in months]
# +
# %%time
# Concatenate all monthly files along the hourly time axis.  NOTE(review):
# the precipitation files use a coordinate named 'time1' (other ERA5
# variables use 'time0' -- see the commented-out variant below).
ds = xr.open_mfdataset(files_mapper, engine='h5netcdf', chunks={'lon':200,'lat':200,'time1':720}, concat_dim='time1', combine='nested', coords='minimal', compat='override', parallel=True)
ds

# +
# Equivalent open for variables indexed by 'time0' (kept for reference).
# #%%time
#ds = xr.open_mfdataset(files_mapper, engine='h5netcdf', chunks={'lon':200,'lat':200,'time0':720}, concat_dim='time0', combine='nested', coords='minimal', compat='override', parallel=True)
#ds
# -

# Aggregate hourly accumulations to daily totals; skipna=False keeps NaN
# wherever any hour of a day is missing, rather than treating it as zero.
ds_daily = ds.resample(time1='1D').sum('time1',keep_attrs=True,skipna=False)
ds_daily

# Quick look at the most recent daily field.
ds_daily.precipitation_amount_1hour_Accumulation[-1,:,:].plot(vmin=0,vmax=0.001)

# California box: lat slice given high-to-low (42..32), consistent with a
# descending latitude coordinate; lon 236..246 degrees east (i.e. 124W-114W).
# Sum over the box, then rechunk the series into a single time chunk.
ds_cali = ds_daily.sel(lat=slice(42,32),lon=slice(236,246)).sum({'lat','lon'}).chunk({'time1':len(ds_daily.time1)})
ds_cali

# Persist the reduced series locally; writing triggers the full computation.
ds_cali.to_zarr('./../../data/cali_box_24hr_precip_zarr2')

ds_cali.precipitation_amount_1hour_Accumulation.plot()

import numpy as np

# Zoom in on January 2019 (the same plot is issued twice in the original
# notebook; both calls kept verbatim).
ds_cali.sel(time1=slice(np.datetime64('2019-01-01'),np.datetime64('2019-01-31'))).precipitation_amount_1hour_Accumulation.plot()
ds_cali.sel(time1=slice(np.datetime64('2019-01-01'),np.datetime64('2019-01-31'))).precipitation_amount_1hour_Accumulation.plot()

# Timestamps whose box-summed daily precipitation exceeds 20 -- units are
# whatever the source files store; TODO confirm (ERA5 precipitation is
# typically metres, which would make 20 implausibly large for a threshold).
storm_list = ds_cali.time1.where(ds_cali.precipitation_amount_1hour_Accumulation>20,drop=True)

# Spin up an adaptive Dask cluster (1-20 workers, 8 GB memory each) via
# dask-gateway and attach a client to it.
from dask_gateway import Gateway
from dask.distributed import Client
gateway = Gateway()
cluster = gateway.new_cluster(worker_memory=8)
cluster.adapt(minimum=1, maximum=20)
client = Client(cluster)
cluster
Cloud_testing/.ipynb_checkpoints/ERA5_storm_notebook-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''base'': conda)'
#     language: python
#     name: python3
# ---

"""
You will receive a keyboard input (integer). Using that input, you will need
to display a list of all items up to that number, if that number is even,
otherwise you will display a list of all the squares numbers up to that number.
example:
You will receive 6, you will display [1, 2, 3, 4, 5]
You will receive 5, you will display [1, 4, 9, 16]
"""


def build_sequence(val: int) -> list:
    """Return the list for *val* per the exercise rules.

    Even ``val`` -> the plain sequence ``[1, ..., val - 1]``;
    odd ``val``  -> the squares ``[1, 4, ..., (val - 1) ** 2]``.

    Extracting the logic from the I/O makes it unit-testable; for
    ``val <= 1`` both branches naturally yield an empty list.
    """
    if val % 2 != 0:
        # Odd input: squares of 1 .. val-1.
        return [x ** 2 for x in range(1, val)]
    # Even input: list(range(...)) is the idiomatic form of the
    # identity comprehension [x for x in range(...)].
    return list(range(1, val))


if __name__ == "__main__":
    # Keep the keyboard I/O at the edge so importing this module is side-effect free.
    print(build_sequence(int(input())))
Python Random old, new challenges/Section 1/ex7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The Devito domain specific language: an overview # # This notebook presents an overview of the Devito symbolic language, used to express and discretise operators, in particular partial differential equations (PDEs). # # For convenience, we import all Devito modules: from devito import * # ## From equations to code in a few lines of Python # # The main objective of this tutorial is to demonstrate how Devito and its [SymPy](http://www.sympy.org/en/index.html)-powered symbolic API can be used to solve partial differential equations using the finite difference method with highly optimized stencils in a few lines of Python. We demonstrate how computational stencils can be derived directly from the equation in an automated fashion and how Devito can be used to generate and execute, at runtime, the desired numerical scheme in the form of optimized C code. # # # ## Defining the physical domain # # Before we can begin creating finite-difference (FD) stencils we will need to give Devito a few details regarding the computational domain within which we wish to solve our problem. For this purpose we create a `Grid` object that stores the physical `extent` (the size) of our domain and knows how many points we want to use in each dimension to discretise our data. # # <img src="figures/grid.png" style="width: 220px;"/> grid = Grid(shape=(5, 6), extent=(1., 1.)) grid # ## Functions and data # # To express our equation in symbolic form and discretise it using finite differences, Devito provides a set of `Function` types. A `Function` object: # # 1. Behaves like a `sympy.Function` symbol # 2. 
Manages data associated with the symbol # # To get more information on how to create and use a `Function` object, or any type provided by Devito, we can take a look at the documentation. print(Function.__doc__) # Ok, let's create a function $f(x, y)$ and look at the data Devito has associated with it. Please note that it is important to use explicit keywords, such as `name` or `grid` when creating `Function` objects. f = Function(name='f', grid=grid) f f.data # By default, Devito `Function` objects use the spatial dimensions `(x, y)` for 2D grids and `(x, y, z)` for 3D grids. To solve a PDE over several timesteps a time dimension is also required by our symbolic function. For this Devito provides an additional function type, the `TimeFunction`, which incorporates the correct dimension along with some other intricacies needed to create a time stepping scheme. g = TimeFunction(name='g', grid=grid) g # Since the default time order of a `TimeFunction` is `1`, the shape of `f` is `(2, 5, 6)`, i.e. Devito has allocated two buffers to represent `g(t, x, y)` and `g(t + dt, x, y)`: g.shape # ## Derivatives of symbolic functions # # The functions we have created so far all act as `sympy.Function` objects, which means that we can form symbolic derivative expressions from them. Devito provides a set of shorthand expressions (implemented as Python properties) that allow us to generate finite differences in symbolic form. For example, the property `f.dx` denotes $\frac{\partial}{\partial x} f(x, y)$ - only that Devito has already discretised it with a finite difference expression. 
There are also a set of shorthand expressions for left (backward) and right (forward) derivatives: # # | Derivative | Shorthand | Discretised | Stencil | # | ---------- |:---------:|:-----------:|:-------:| # | $\frac{\partial}{\partial x}f(x, y)$ (right) | `f.dxr` | $\frac{f(x+h_x,y)}{h_x} - \frac{f(x,y)}{h_x}$ | <img src="figures/stencil_forward.png" style="width: 180px;"/> | # | $\frac{\partial}{\partial x}f(x, y)$ (left) | `f.dxl` | $\frac{f(x,y)}{h_x} - \frac{f(x-h_x,y)}{h_x}$ | <img src="figures/stencil_backward.png" style="width: 180px;"/> | # # A similar set of expressions exist for each spatial dimension defined on our grid, for example `f.dy` and `f.dyl`. Obviously, one can also take derivatives in time of `TimeFunction` objects. For example, to take the first derivative in time of `g` you can simply write: g.dt # We may also want to take a look at the stencil Devito will generate based on the chosen discretisation: g.dt.evaluate # There also exist convenient shortcuts to express the forward and backward stencil points, `g(t+dt, x, y)` and `g(t-dt, x, y)`. g.forward g.backward # And of course, there's nothing to stop us taking derivatives on these objects: g.forward.dt g.forward.dy # ## A linear convection operator # # **Note:** The following example is derived from [step 5](http://nbviewer.ipython.org/github/barbagroup/CFDPython/blob/master/lessons/07_Step_5.ipynb) in the excellent tutorial series [CFD Python: 12 steps to Navier-Stokes](http://lorenabarba.com/blog/cfd-python-12-steps-to-navier-stokes/). # # In this simple example we will show how to derive a very simple convection operator from a high-level description of the governing equation. We will go through the process of deriving a discretised finite difference formulation of the state update for the field variable $u$, before creating a callable `Operator` object. Luckily, the automation provided by SymPy makes the derivation very nice and easy. 
# # The governing equation we want to implement is the linear convection equation: # $$\frac{\partial u}{\partial t}+c\frac{\partial u}{\partial x} + c\frac{\partial u}{\partial y} = 0.$$ # # Before we begin, we must define some parameters including the grid, the number of timesteps and the timestep size. We will also initialize our velocity `u` with a smooth field: # + tags=["nbval-ignore-output"] from examples.cfd import init_smooth, plot_field nt = 100 # Number of timesteps dt = 0.2 * 2. / 80 # Timestep size (sigma=0.2) c = 1 # Value for c # Then we create a grid and our function grid = Grid(shape=(81, 81), extent=(2., 2.)) u = TimeFunction(name='u', grid=grid) # We can now set the initial condition and plot it init_smooth(field=u.data[0], dx=grid.spacing[0], dy=grid.spacing[1]) init_smooth(field=u.data[1], dx=grid.spacing[0], dy=grid.spacing[1]) plot_field(u.data[0]) # - # Next, we wish to discretise our governing equation so that a functional `Operator` can be created from it. We begin by simply writing out the equation as a symbolic expression, while using shorthand expressions for the derivatives provided by the `Function` object. This will create a symbolic object of the dicretised equation. # # Using the Devito shorthand notation, we can express the governing equations as: eq = Eq(u.dt + c * u.dxl + c * u.dyl) eq # We now need to rearrange our equation so that the term $u(t+dt, x, y)$ is on the left-hand side, since it represents the next point in time for our state variable $u$. Devito provides a utility called `solve`, built on top of SymPy's `solve`, to rearrange our equation so that it represents a valid state update for $u$. 
Here, we use `solve` to create a valid stencil for our update to `u(t+dt, x, y)`: stencil = solve(eq, u.forward) update = Eq(u.forward, stencil) update # The right-hand side of the 'update' equation should be a stencil of the shape # <img src="figures/stencil_convection.png" style="width: 160px;"/> # # Once we have created this 'update' expression, we can create a Devito `Operator`. This `Operator` will basically behave like a Python function that we can call to apply the created stencil over our associated data, as long as we provide all necessary unknowns. In this case we need to provide the number of timesteps to compute via the keyword `time` and the timestep size via `dt` (both have been defined above): # + tags=["nbval-ignore-output"] op = Operator(update) op(time=nt+1, dt=dt) plot_field(u.data[0]) # - # Note that the real power of Devito is hidden within `Operator`, it will automatically generate and compile the optimized C code. We can look at this code (noting that this is not a requirement of executing it) via: # + tags=["nbval-ignore-output"] print(op.ccode) # - # ## Second derivatives and high-order stencils # # In the above example only a combination of first derivatives was present in the governing equation. However, second (or higher) order derivatives are often present in scientific problems of interest, notably any PDE modeling diffusion. To generate second order derivatives we must give the `devito.Function` object another piece of information: the desired discretisation of the stencil(s). # # First, lets define a simple second derivative in `x`, for which we need to give $u$ a `space_order` of (at least) `2`. The shorthand for this second derivative is `u.dx2`. 
u = TimeFunction(name='u', grid=grid, space_order=2) u.dx2 u.dx2.evaluate # We can increase the discretisation arbitrarily if we wish to specify higher order FD stencils: u = TimeFunction(name='u', grid=grid, space_order=4) u.dx2 u.dx2.evaluate # To implement the diffusion or wave equations, we must take the Laplacian $\nabla^2 u$, which is the sum of the second derivatives in all spatial dimensions. For this, Devito also provides a shorthand expression, which means we do not have to hard-code the problem dimension (2D or 3D) in the code. To change the problem dimension we can create another `Grid` object and use this to re-define our `Function`'s: # + grid_3d = Grid(shape=(5, 6, 7), extent=(1., 1., 1.)) u = TimeFunction(name='u', grid=grid_3d, space_order=2) u # - # We can re-define our function `u` with a different `space_order` argument to change the discretisation order of the stencil expression created. For example, we can derive an expression of the 12th-order Laplacian $\nabla^2 u$: u = TimeFunction(name='u', grid=grid_3d, space_order=12) u.laplace # The same expression could also have been generated explicitly via: u.dx2 + u.dy2 + u.dz2 # ## Derivatives of composite expressions # # Derivatives of any arbitrary expression can easily be generated: u = TimeFunction(name='u', grid=grid, space_order=2) v = TimeFunction(name='v', grid=grid, space_order=2, time_order=2) v.dt2 + u.laplace (v.dt2 + u.laplace).dx2 # Which can, depending on the chosen discretisation, lead to fairly complex stencils: (v.dt2 + u.laplace).dx2.evaluate
examples/userapi/00_dsl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: dev
#     language: python
#     name: dev
# ---

import numpy as np


def fbeta_score(y_true, y_pred, beta, average):
    """Compute the F-beta score from integer label arrays.

    Parameters
    ----------
    y_true, y_pred : 1-D numpy integer arrays of equal length
        (boolean-mask indexing is used internally, so plain lists
        are not supported).
    beta : float
        Recall weight; ``beta < 1`` favours precision.
    average : {"binary", "micro", "weighted", None} or any other value
        ``"binary"`` scores only the positive class (label 1);
        ``"micro"`` pools counts over all classes first;
        ``"weighted"`` is the support-weighted mean of per-class scores;
        ``None`` returns the per-class score array;
        any other value (e.g. ``"macro"``) is the unweighted mean.

    Returns
    -------
    float, or ndarray of per-class scores when ``average is None``.
    """
    # Number of distinct labels appearing in either array.
    n_labels = len(set(y_true) | set(y_pred))

    # Per-class support, predicted counts, and true positives
    # (matches where prediction equals truth).
    true_sum = np.bincount(y_true, minlength=n_labels)
    pred_sum = np.bincount(y_pred, minlength=n_labels)
    tp = np.bincount(y_true[y_true == y_pred], minlength=n_labels)

    if average == "binary":
        # Only the positive class (label 1) matters.
        tp = np.array([tp[1]])
        true_sum = np.array([true_sum[1]])
        pred_sum = np.array([pred_sum[1]])
    elif average == "micro":
        # Pool counts over all classes before computing the score.
        tp = np.array([np.sum(tp)])
        true_sum = np.array([np.sum(true_sum)])
        pred_sum = np.array([np.sum(pred_sum)])

    # Precision / recall, defined as 0 where the denominator is empty.
    precision = np.zeros(len(pred_sum))
    mask = pred_sum != 0
    precision[mask] = tp[mask] / pred_sum[mask]

    recall = np.zeros(len(true_sum))
    mask = true_sum != 0
    recall[mask] = tp[mask] / true_sum[mask]

    # F-beta; a zero denominator is replaced by 1 to avoid division by
    # zero -- the numerator is 0 in that case, so the score is 0 anyway.
    denom = (beta ** 2) * precision + recall
    denom[denom == 0.] = 1
    fscore = (1 + beta ** 2) * precision * recall / denom

    if average == "weighted":
        # Support-weighted mean over classes.
        fscore = np.average(fscore, weights=true_sum)
    elif average is not None:
        # Unweighted ("macro") mean over classes.
        fscore = np.mean(fscore)
    return fscore


if __name__ == "__main__":
    # Cross-check against scikit-learn.  Imported lazily under the main
    # guard so that importing this module neither requires sklearn nor
    # runs the validation loops as an import side effect.
    from sklearn.metrics import fbeta_score as skfbeta_score

    # binary
    for i in range(10):
        rng = np.random.RandomState(i)
        y_true = rng.randint(2, size=10)
        y_pred = rng.randint(2, size=10)
        score1 = fbeta_score(y_true, y_pred, beta=0.5, average="binary")
        score2 = skfbeta_score(y_true, y_pred, beta=0.5, average="binary")
        assert np.isclose(score1, score2)

    # multiclass
    for i in range(10):
        for average in (None, "micro", "macro", "weighted"):
            rng = np.random.RandomState(i)
            y_true = rng.randint(3, size=10)
            y_pred = rng.randint(3, size=10)
            score1 = fbeta_score(y_true, y_pred, beta=0.5, average=average)
            score2 = skfbeta_score(y_true, y_pred, beta=0.5, average=average)
            if average is None:
                assert np.array_equal(score1, score2)
            else:
                assert np.isclose(score1, score2)
metrics/fbeta_score.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PaddlePaddle 1.6.0 (Python 3.5) # language: python # name: py35-paddle1.2.0 # --- # 启动训练前,复用前面章节的数据处理和神经网络模型代码,已阅读可直接跳过。 # # + import paddle import paddle.fluid as fluid import paddle.fluid.dygraph as dygraph from paddle.fluid.dygraph import FC, Conv2D, Embedding, Pool2D import numpy as np import random class MovieLen(object): def __init__(self, use_poster): self.use_poster = use_poster # 声明每个数据文件的路径 usr_info_path = "./work/ml-1m/users.dat" if not use_poster: rating_path = "./work/ml-1m/ratings.dat" else: rating_path = "./work/ml-1m/new_rating.txt" movie_info_path = "./work/ml-1m/movies.dat" self.poster_path = "./work/ml-1m/posters/" # 得到电影数据 self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path) # 记录电影的最大ID self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat]) self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title]) self.max_mov_id = np.max(list(map(int, self.movie_info.keys()))) # 记录用户数据的最大ID self.max_usr_id = 0 self.max_usr_age = 0 self.max_usr_job = 0 # 得到用户数据 self.usr_info = self.get_usr_info(usr_info_path) # 得到评分数据 self.rating_info = self.get_rating_info(rating_path) # 构建数据集 self.dataset = self.get_dataset(usr_info=self.usr_info, rating_info=self.rating_info, movie_info=self.movie_info) # 划分数据及,获得数据加载器 self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)] self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):] print("##Total dataset instances: ", len(self.dataset)) print("##MovieLens dataset information: \nusr num: {}\n" "movies num: {}".format(len(self.usr_info),len(self.movie_info))) # 得到电影数据 def get_movie_info(self, path): # 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中 with open(path, 'r', encoding="ISO-8859-1") as f: data = f.readlines() # 建立三个字典,分别用户存放电影所有信息,电影的名字信息、类别信息 movie_info, movie_titles, movie_cat = {}, 
{}, {} # 对电影名字、类别中不同的单词计数 t_count, c_count = 1, 1 count_tit = {} # 按行读取数据并处理 for item in data: item = item.strip().split("::") v_id = item[0] v_title = item[1][:-7] cats = item[2].split('|') v_year = item[1][-5:-1] titles = v_title.split() # 统计电影名字的单词,并给每个单词一个序号,放在movie_titles中 for t in titles: if t not in movie_titles: movie_titles[t] = t_count t_count += 1 # 统计电影类别单词,并给每个单词一个序号,放在movie_cat中 for cat in cats: if cat not in movie_cat: movie_cat[cat] = c_count c_count += 1 # 补0使电影名称对应的列表长度为15 v_tit = [movie_titles[k] for k in titles] while len(v_tit)<15: v_tit.append(0) # 补0使电影种类对应的列表长度为6 v_cat = [movie_cat[k] for k in cats] while len(v_cat)<6: v_cat.append(0) # 保存电影数据到movie_info中 movie_info[v_id] = {'mov_id': int(v_id), 'title': v_tit, 'category': v_cat, 'years': int(v_year)} return movie_info, movie_cat, movie_titles def get_usr_info(self, path): # 性别转换函数,M-0, F-1 def gender2num(gender): return 1 if gender == 'F' else 0 # 打开文件,读取所有行到data中 with open(path, 'r') as f: data = f.readlines() # 建立用户信息的字典 use_info = {} max_usr_id = 0 #按行索引数据 for item in data: # 去除每一行中和数据无关的部分 item = item.strip().split("::") usr_id = item[0] # 将字符数据转成数字并保存在字典中 use_info[usr_id] = {'usr_id': int(usr_id), 'gender': gender2num(item[1]), 'age': int(item[2]), 'job': int(item[3])} self.max_usr_id = max(self.max_usr_id, int(usr_id)) self.max_usr_age = max(self.max_usr_age, int(item[2])) self.max_usr_job = max(self.max_usr_job, int(item[3])) return use_info # 得到评分数据 def get_rating_info(self, path): # 读取文件里的数据 with open(path, 'r') as f: data = f.readlines() # 将数据保存在字典中并返回 rating_info = {} for item in data: item = item.strip().split("::") usr_id,movie_id,score = item[0],item[1],item[2] if usr_id not in rating_info.keys(): rating_info[usr_id] = {movie_id:float(score)} else: rating_info[usr_id][movie_id] = float(score) return rating_info # 构建数据集 def get_dataset(self, usr_info, rating_info, movie_info): trainset = [] for usr_id in rating_info.keys(): usr_ratings = rating_info[usr_id] for movie_id in 
usr_ratings: trainset.append({'usr_info': usr_info[usr_id], 'mov_info': movie_info[movie_id], 'scores': usr_ratings[movie_id]}) return trainset def load_data(self, dataset=None, mode='train'): use_poster = False # 定义数据迭代Batch大小 BATCHSIZE = 256 data_length = len(dataset) index_list = list(range(data_length)) # 定义数据迭代加载器 def data_generator(): # 训练模式下,打乱训练数据 if mode == 'train': random.shuffle(index_list) # 声明每个特征的列表 usr_id_list,usr_gender_list,usr_age_list,usr_job_list = [], [], [], [] mov_id_list,mov_tit_list,mov_cat_list,mov_poster_list = [], [], [], [] score_list = [] # 索引遍历输入数据集 for idx, i in enumerate(index_list): # 获得特征数据保存到对应特征列表中 usr_id_list.append(dataset[i]['usr_info']['usr_id']) usr_gender_list.append(dataset[i]['usr_info']['gender']) usr_age_list.append(dataset[i]['usr_info']['age']) usr_job_list.append(dataset[i]['usr_info']['job']) mov_id_list.append(dataset[i]['mov_info']['mov_id']) mov_tit_list.append(dataset[i]['mov_info']['title']) mov_cat_list.append(dataset[i]['mov_info']['category']) mov_id = dataset[i]['mov_info']['mov_id'] if use_poster: # 不使用图像特征时,不读取图像数据,加快数据读取速度 poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id[0]))) poster = poster.resize([64, 64]) if len(poster.size) <= 2: poster = poster.convert("RGB") mov_poster_list.append(np.array(poster)) score_list.append(int(dataset[i]['scores'])) # 如果读取的数据量达到当前的batch大小,就返回当前批次 if len(usr_id_list)==BATCHSIZE: # 转换列表数据为数组形式,reshape到固定形状 usr_id_arr = np.expand_dims(np.array(usr_id_list), axis=-1) usr_gender_arr = np.expand_dims(np.array(usr_gender_list), axis=-1) usr_age_arr = np.expand_dims(np.array(usr_age_list), axis=-1) usr_job_arr = np.expand_dims(np.array(usr_job_list), axis=-1) mov_id_arr = np.expand_dims(np.array(mov_id_list), axis=-1) mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 1, 6, 1]).astype(np.int64) mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15, 1]).astype(np.int64) if use_poster: mov_poster_arr = 
np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32) else: mov_poster_arr = np.array([0.]) scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32) # 放回当前批次数据 yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \ [mov_id_arr, mov_cat_arr, mov_tit_arr, mov_poster_arr], scores_arr # 清空数据 usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], [] mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], [] mov_poster_list = [] return data_generator class Model(dygraph.layers.Layer): def __init__(self, name_scope, use_poster, use_mov_title, use_mov_cat, use_age_job): super(Model, self).__init__(name_scope) name = self.full_name() # 将传入的name信息和bool型参数添加到模型类中 self.use_mov_poster = use_poster self.use_mov_title = use_mov_title self.use_usr_age_job = use_age_job self.use_mov_cat = use_mov_cat # 获取数据集的信息,并构建训练和验证集的数据迭代器 Dataset = MovieLen(self.use_mov_poster) self.Dataset = Dataset self.trainset = self.Dataset.train_dataset self.valset = self.Dataset.valid_dataset self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train') self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid') """ define network layer for embedding usr info """ USR_ID_NUM = Dataset.max_usr_id + 1 # 对用户ID做映射,并紧接着一个FC层 self.usr_emb = Embedding(name, [USR_ID_NUM, 32], is_sparse=False) self.usr_fc = FC(name, size=32) # 对用户性别信息做映射,并紧接着一个FC层 USR_GENDER_DICT_SIZE = 2 self.usr_gender_emb = Embedding(name, [USR_GENDER_DICT_SIZE, 16]) self.usr_gender_fc = FC(name, 16) # 对用户年龄信息做映射,并紧接着一个FC层 USR_AGE_DICT_SIZE = Dataset.max_usr_age + 1 self.usr_age_emb = Embedding(name, [USR_AGE_DICT_SIZE, 16]) self.usr_age_fc = FC(name, 16) # 对用户职业信息做映射,并紧接着一个FC层 USR_JOB_DICT_SIZE = Dataset.max_usr_job + 1 self.usr_job_emb = Embedding(name, [USR_JOB_DICT_SIZE, 16]) self.usr_job_fc = FC(name, 16) # 新建一个FC层,用于整合用户数据信息 self.usr_combined = FC(name, 200, act='tanh') """ define network layer for embedding usr 
info """ # 对电影ID信息做映射,并紧接着一个FC层 MOV_DICT_SIZE = Dataset.max_mov_id + 1 self.mov_emb = Embedding(name, [MOV_DICT_SIZE, 32]) self.mov_fc = FC(name, 32) # 对电影类别做映射 CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1 self.mov_cat_emb = Embedding(name, [CATEGORY_DICT_SIZE, 32], is_sparse=False) self.mov_cat_fc = FC(name, 32) # 对电影名称做映射 MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1 self.mov_title_emb = Embedding(name, [MOV_TITLE_DICT_SIZE, 32], is_sparse=False) self.mov_title_conv = Conv2D(name, 1, filter_size=(3, 1), stride=(2,1), padding=0, act='relu') self.mov_title_conv2 = Conv2D(name, 1, filter_size=(3, 1), stride=1, padding=0, act='relu') # 新建一个FC层,用于整合电影特征 self.mov_concat_embed = FC(name, size=200, act='tanh') # 定义计算用户特征的前向运算过程 def get_usr_feat(self, usr_var): """ get usr features""" # 获取到用户数据 usr_id, usr_gender, usr_age, usr_job = usr_var # 将用户的ID数据经过embedding和FC计算,得到的特征保存在feats_collect中 feats_collect = [] usr_id = self.usr_emb(usr_id) usr_id = self.usr_fc(usr_id) usr_id = fluid.layers.relu(usr_id) feats_collect.append(usr_id) # 计算用户的性别特征,并保存在feats_collect中 usr_gender = self.usr_gender_emb(usr_gender) usr_gender = self.usr_gender_fc(usr_gender) usr_gender = fluid.layers.relu(usr_gender) feats_collect.append(usr_gender) # 选择是否使用用户的年龄-职业特征 if self.use_usr_age_job: # 计算用户的年龄特征,并保存在feats_collect中 usr_age = self.usr_age_emb(usr_age) usr_age = self.usr_age_fc(usr_age) usr_age = fluid.layers.relu(usr_age) feats_collect.append(usr_age) # 计算用户的职业特征,并保存在feats_collect中 usr_job = self.usr_job_emb(usr_job) usr_job = self.usr_job_fc(usr_job) usr_job = fluid.layers.relu(usr_job) feats_collect.append(usr_job) # 将用户的特征级联,并通过FC层得到最终的用户特征 usr_feat = fluid.layers.concat(feats_collect, axis=1) usr_feat = self.usr_combined(usr_feat) return usr_feat # 定义电影特征的前向计算过程 def get_mov_feat(self, mov_var): """ get movie features""" # 获得电影数据 mov_id, mov_cat, mov_title, mov_poster = mov_var feats_collect = [] # 获得batchsize的大小 batch_size = mov_id.shape[0] # 计算电影ID的特征,并存在feats_collect中 mov_id = 
self.mov_emb(mov_id) mov_id = self.mov_fc(mov_id) mov_id = fluid.layers.relu(mov_id) feats_collect.append(mov_id) # 如果使用电影的种类数据,计算电影种类特征的映射 if self.use_mov_cat: # 计算电影种类的特征映射,对多个种类的特征求和得到最终特征 mov_cat = self.mov_cat_emb(mov_cat) mov_cat = fluid.layers.reduce_sum(mov_cat, dim=1, keep_dim=False) mov_cat = self.mov_cat_fc(mov_cat) feats_collect.append(mov_cat) if self.use_mov_title: # 计算电影名字的特征映射,对特征映射使用卷积计算最终的特征 mov_title = self.mov_title_emb(mov_title) mov_title = self.mov_title_conv2(self.mov_title_conv(mov_title)) mov_title = fluid.layers.reduce_sum(mov_title, dim=2, keep_dim=False) mov_title = fluid.layers.relu(mov_title) mov_title = fluid.layers.reshape(mov_title, [batch_size, -1]) feats_collect.append(mov_title) # 使用一个全连接层,整合所有电影特征,映射为一个200维的特征向量 mov_feat = fluid.layers.concat(feats_collect, axis=1) mov_feat = self.mov_concat_embed(mov_feat) return mov_feat # 定义个性化推荐算法的前向计算 def forward(self, usr_var, mov_var): # 计算用户特征和电影特征 usr_feat = self.get_usr_feat(usr_var) mov_feat = self.get_mov_feat(mov_var) # 根据计算的特征计算相似度 res = fluid.layers.cos_sim(usr_feat, mov_feat) # 将相似度扩大范围到和电影评分相同数据范围 res = fluid.layers.scale(res, scale=5) return usr_feat, mov_feat, res # - # # 模型训练 # # 首先需要定义好训练的配置,包括是否使用GPU、设置损失函数、选择优化器以及学习率等。 # 在本次实验中,由于数据较为简单,我们选择在CPU上训练,优化器使用Adam,学习率设置为0.01,一共训练5个epoch。 # # 然而,针对推荐算法的网络,如何设计损失函数呢?在CV和NLP章节中我们了解,分类可以用交叉熵损失函数,损失函数的大小可以衡量出算法当前分类的准确性。在推荐算法中,没有一个准确的度量既能衡量推荐的好坏,并具备可导性质,又能监督神经网络的训练。在电影推荐中,可以作为标签的只有评分数据,因此,我们可以用评分数据作为监督信息,神经网络的输出作为预测值,使用均方差(Mean Square Error)损失函数去训练网络模型。 # # 注:使用均方差损失函数即是使用回归的方法完成模型训练,观察到,电影的评分数据只有5个,是否可以使用分类损失函数完成训练?事实上,评分数据应该是一个连续数据,比如,评分3和评分4是接近的,如果使用分类的方法,评分3和评分4是两个类别,容易割裂评分间的连续性。 # # 整个训练过程和一般的模型训练大同小异,不再赘述。 def train(model): # 配置训练参数 use_gpu = False lr = 0.01 Epoches = 10 place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() with fluid.dygraph.guard(place): # 启动训练 model.train() # 获得数据读取器 data_loader = model.train_loader # 使用adam优化器,学习率使用0.01 opt = fluid.optimizer.Adam(learning_rate=lr) for epoch in range(0, Epoches): for 
idx, data in enumerate(data_loader()): # 获得数据,并转为动态图格式 usr, mov, score = data usr_v = [dygraph.to_variable(var) for var in usr] mov_v = [dygraph.to_variable(var) for var in mov] scores_label = dygraph.to_variable(score) # 计算出算法的前向计算结果 _, _, scores_predict = model(usr_v, mov_v) # 计算loss loss = fluid.layers.square_error_cost(scores_predict, scores_label) avg_loss = fluid.layers.mean(loss) if idx % 500 == 0: print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, idx, avg_loss.numpy())) # 损失函数下降,并清除梯度 avg_loss.backward() opt.minimize(avg_loss) model.clear_gradients() # 每个epoch 保存一次模型 fluid.save_dygraph(model.state_dict(), './checkpoint/epoch'+str(epoch)) # 启动训练 with dygraph.guard(): use_poster, use_mov_title, use_mov_cat, use_age_job = False, True, True, True model = Model('Recommend', use_poster, use_mov_title, use_mov_cat, use_age_job) train(model) # 从训练结果来看,loss保持在0.9左右就难以下降了,主要是因为使用的均方差loss,计算得到预测评分和真实评分的均方差,真实评分的数据是1-5之间的整数,评分数据较大导致计算出来的loss也偏大。 # # 不过不用担心,我们只是通过训练神经网络提取特征向量,loss只要收敛即可。 # 对训练的模型在验证集上做评估,除了训练所使用的Loss之外,还有两个选择: # 1. 评分预测精度ACC(Accuracy):将预测的float数字转成整数,计算和真实评分的匹配度。评分误差在0.5分以内的算正确,否则算错误。 # 2. 
评分预测误差(Mean Absolut Error)MAE:计算和真实评分之间的平均绝对误差。 # # 下面是使用训练集评估这两个指标的代码实现。 # + def evaluation(model, params_file_path): use_gpu = False place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() with fluid.dygraph.guard(place): model_state_dict, _ = fluid.load_dygraph(params_file_path) model.load_dict(model_state_dict) model.eval() acc_set = [] avg_loss_set = [] for idx, data in enumerate(model.valid_loader()): usr, mov, score_label = data usr_v = [dygraph.to_variable(var) for var in usr] mov_v = [dygraph.to_variable(var) for var in mov] _, _, scores_predict = model(usr_v, mov_v) pred_scores = scores_predict.numpy() avg_loss_set.append(np.mean(np.abs(pred_scores - score_label))) diff = np.abs(pred_scores - score_label) diff[diff>0.5] = 1 acc = 1 - np.mean(diff) acc_set.append(acc) return np.mean(acc_set), np.mean(avg_loss_set) # - param_path = "./checkpoint/epoch" for i in range(10): acc, mae = evaluation(model, param_path+str(i)) print("ACC:", acc, "MAE:", mae) # 上述结果中,我们采用了ACC和MAE指标测试在验证集上的评分预测的准确性,其中ACC值越大越好,MAE值越小越好。 # # ><font size=2>可以看到ACC和MAE的值不是很理想,但是这仅仅是对于评分预测不准确,不能直接衡量推荐结果的准确性。考虑到我们设计的神经网络是为了完成推荐任务而不是评分任务,所以总结一下: # <br>1. 只针对预测评分任务来说,我们设计的神经网络结构和损失函数是不合理的,导致评分预测不理想; # <br>2. 从损失函数的收敛可以知道网络的训练是有效的。评分预测的好坏不能反应推荐结果的好坏。</font> # # 到这里,我们已经完成了推荐算法的前三步,包括:1. 数据的准备,2. 神经网络的设计,3. 神经网络的训练。 # # 目前还需要完成剩余的两个步骤:1. 提取用户、电影数据的特征并保存到本地, 2. 
利用保存的特征计算相似度矩阵,利用相似度完成推荐。 # # 下面,我们利用训练的神经网络提取数据的特征,进而完成电影推荐,并观察推荐结果是否令人满意。 # # ## 保存特征 # # 训练完模型后,我们得到每个用户、电影对应的特征向量,接下来将这些特征向量保存到本地,这样在进行推荐时,不需要使用神经网络重新提取特征,节省时间成本。 # # 保存特征的流程是: # - 加载预训练好的模型参数。 # - 输入数据集的数据,提取整个数据集的用户特征和电影特征。注意数据输入到模型前,要先转成内置vairable类型并保证尺寸正确。 # - 分别得到用户特征向量和电影特征向量,以使用pickle库保存字典形式的特征向量。 # # 使用用户和电影ID为索引,以字典格式存储数据,可以通过用户或者电影的ID索引到用户特征和电影特征。 # # 下面代码中,我们使用了一个pickle库。pickle库为python提供了一个简单的持久化功能,可以很容易的将Python对象保存到本地,但是缺点是,保存的文件对人来说可读性很差。 # + from PIL import Image # 加载第三方库Pickle,用来保存Python数据到本地 import pickle # 定义特征保存函数 def get_usr_mov_features(model, params_file_path, poster_path): use_gpu = False place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() usr_pkl = {} mov_pkl = {} # 定义将list中每个元素转成variable的函数 def list2variable(inputs, shape): inputs = np.reshape(np.array(inputs).astype(np.int64), shape) return fluid.dygraph.to_variable(inputs) with fluid.dygraph.guard(place): # 加载模型参数到模型中,设置为验证模式eval() model_state_dict, _ = fluid.load_dygraph(params_file_path) model.load_dict(model_state_dict) model.eval() # 获得整个数据集的数据 dataset = model.Dataset.dataset for i in range(len(dataset)): # 获得用户数据,电影数据,评分数据 # 本案例只转换所有在样本中出现过的user和movie,实际中可以使用业务系统中的全量数据 usr_info, mov_info, score = dataset[i]['usr_info'], dataset[i]['mov_info'],dataset[i]['scores'] usrid = str(usr_info['usr_id']) movid = str(mov_info['mov_id']) # 获得用户数据,计算得到用户特征,保存在usr_pkl字典中 if usrid not in usr_pkl.keys(): usr_id_v = list2variable(usr_info['usr_id'], [1, 1]) usr_age_v = list2variable(usr_info['age'], [1, 1]) usr_gender_v = list2variable(usr_info['gender'], [1, 1]) usr_job_v = list2variable(usr_info['job'], [1, 1]) usr_in = [usr_id_v, usr_gender_v, usr_age_v, usr_job_v] usr_feat = model.get_usr_feat(usr_in) usr_pkl[usrid] = usr_feat.numpy() # 获得电影数据,计算得到电影特征,保存在mov_pkl字典中 if movid not in mov_pkl.keys(): mov_id_v = list2variable(mov_info['mov_id'], [1, 1]) mov_tit_v = list2variable(mov_info['title'], [1, 1, 15, 1]) mov_cat_v = list2variable(mov_info['category'], [1, 1, 6, 1]) mov_in = 
[mov_id_v, mov_cat_v, mov_tit_v, None] mov_feat = model.get_mov_feat(mov_in) mov_pkl[movid] = mov_feat.numpy() print(len(mov_pkl.keys())) # 保存特征到本地 pickle.dump(usr_pkl, open('./usr_feat.pkl', 'wb')) pickle.dump(mov_pkl, open('./mov_feat.pkl', 'wb')) print("usr / mov features saved!!!") param_path = "./checkpoint/epoch7" poster_path = "./work/ml-1m/posters/" get_usr_mov_features(model, param_path, poster_path) # - # 保存好有效代表用户和电影的特征向量后,在下一节我们讨论如何基于这两个向量构建推荐系统。 # # 作业 10-2 # # 1. 作业1:以上算法使用了用户与电影的所有特征(除Poster外),可以设计对比实验,验证哪些特征是重要的,把最终的特征挑选出来。 # 为了验证哪些特征起到关键作用, 读者可以启用或弃用其中某些特征,或者加入电影海报特征,观察是否对模型Loss或评价指标有提升。 # # 2. 作业2:加入电影海报数据,验证电影海报特征(Poster)对推荐结果的影响,实现并分析推荐结果(有没有效果?为什么?)。
chapter-5-Recommended_System/5-4-Recommended_System-Training_and_Extract_Feature.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.0 64-bit
#     name: python3
# ---

# # Kth Largest Element in an Array
# Given an integer array nums and an integer k, return the kth largest element in the array.
# Note that it is the kth largest element in the sorted order, not the kth distinct element.
#
# ### Example 1:
# - Input: nums = [3,2,1,5,6,4], k = 2
# - Output: 5
#
# ### Example 2:
# - Input: nums = [3,2,3,1,2,4,5,5,6], k = 4
# - Output: 4
#
# ### Constraints:
# 1 <= k <= nums.length <= 104
# -104 <= nums[i] <= 104
#
# ## Solution
#
# ### Intuition
# We could sort the array and take the kth element from the end, which costs
# O(n*log(n)). With a bounded heap (heapq.nlargest keeps a heap of only k
# elements) we get O(n*log(k)) instead, without modifying the input.

# +
import heapq


def kth_largest_element(numbers: list, k: int) -> int:
    """Return the k-th largest element of ``numbers`` (k is 1-based).

    Duplicates count separately, matching the problem statement.
    ``heapq.nlargest`` maintains a heap of size k internally, so the input
    list is left untouched and the cost is O(n*log(k)).
    """
    # nlargest returns the k largest values in descending order; the k-th
    # largest is therefore the last of them.
    return heapq.nlargest(k, numbers)[k - 1]


assert(kth_largest_element([3,2,1,5,6,4], 2) == 5)
assert(kth_largest_element([3,2,3,1,2,4,5,5,6], 4) == 4)
# -

# # Analysis
# - Time Complexity: O(n*log(k)) - heapq.nlargest scans n elements maintaining a k-sized heap
# - Space Complexity: O(k) for the heap
python-data-structures/leetocde/kth-largest-element.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/StephenTGibson/MScDataSci_21-22/blob/main/COMP527-DataMiningAndVisualisation/k_medoids.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="palestinian-embassy" # # Clustering: k-Medoids # COMP527-DataMiningAndVisualisation/k_medoids.ipynb # + [markdown] tags=[] id="adjacent-irrigation" # In this lab tasks we implement k-Medoids algorithm. # + id="complimentary-sleeve" import numpy as np import matplotlib.pyplot as plt # + [markdown] id="hungarian-protocol" # We start with generating synthetic dataset consisting of four clusters. # + tags=[] id="narrative-mustang" N = 100 #Generate dataset np.random.seed(844) clust1 = np.random.normal(5, 2, (N,2)) clust2 = np.random.normal(15, 2, (N,2)) clust3 = np.random.multivariate_normal([17,3], [[1,0],[0,1]], N) clust4 = np.random.multivariate_normal([2,16], [[1,0],[0,1]], N) dataset = np.concatenate((clust1, clust2, clust3, clust4)) #Create an array that stores cluster IDs of every object in our dataset #We will use the index of a medoid as a cluster ID clusters = np.zeros((len(dataset))) # + colab={"base_uri": "https://localhost:8080/"} id="GxISW0kzXZU3" outputId="aee36fae-11a6-44b9-c7bc-d236e2f32cdf" dataset.shape # + id="included-penguin" def cluster_plots(dataset, medoidInd=[], colours = 'gray', title = 'Dataset'): fig,ax = plt.subplots(figsize=(4,4)) #fig.set_size_inches(12, 12) ax.set_title(title,fontsize=14) ax.set_xlim(min(dataset[:,0]), max(dataset[:,0])) ax.set_ylim(min(dataset[:,1]), max(dataset[:,1])) ax.scatter(dataset[:, 0], dataset[:, 1],s=8,lw=1,c= colours) #Plot medoids if they are given if len(medoidInd) > 
0: ax.scatter(dataset[medoidInd, 0], dataset[medoidInd, 1],s=8,lw=6,c='red') fig.tight_layout() plt.show() # + [markdown] id="tracked-calculation" # Let's plot our dataset # + id="6kYzwR2RZeMD" medoidTest = np.array([1,2,3]) # + id="regulation-hollow" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="c6187282-856e-48f0-c358-73b0d1c75746" cluster_plots(dataset, medoidInd=medoidTest, colours=clusters, title='Unclustered data') # + [markdown] id="trained-westminster" # ## Exercise 1 # + [markdown] id="legitimate-forum" # Implement k-medoids algorithm. Use the Euclidean distance as the dissimilarity measure. # + id="ScVxL49Xcu-D" # Assignment - all objects to closest medoid # + id="8zWBr2coc-8Q" def dissimilarityComputer(obj1, obj2): # Euclidean return np.sqrt((obj1[0]-obj2[0])**2 + (obj1[1]-obj2[1])**2) # + id="FVlxS7sCdLjX" def clusterIdentifier(objectFeatures, medoids): distances = [] for medoid in medoids: distances.append(dissimilarityComputer(objectFeatures, dataset[medoid])) return medoids[distances.index(min(distances))] # + id="61Yosa8Rdxis" def objectClusterAssigner(dataset, clusters, medoids): for objectIdx in range(dataset.shape[0]): #if objectIdx not in medoids: clusters[objectIdx] = clusterIdentifier(dataset[objectIdx], medoids) return clusters.astype(int) # + id="hnu4JTeCiKmz" # Initialisation: choose k random objects k = 6 medoids = np.random.choice(dataset.shape[0], k).astype(int) # Assignment clusters = objectClusterAssigner(dataset, clusters, medoids) # + id="_kbuKOEHvNXR" outputId="1921a442-d4a6-4204-9717-959f57298e91" colab={"base_uri": "https://localhost:8080/"} medoids # + colab={"base_uri": "https://localhost:8080/"} id="t5ci5XYSpSk_" outputId="bd0b5fba-29c2-4567-c27f-8719a0dde8d1" clusters[:5] # + id="J3imxcY7wJjJ" outputId="ef69c999-044d-472f-f7ab-bc83b1635f18" colab={"base_uri": "https://localhost:8080/", "height": 297} cluster_plots(dataset, medoidInd=medoids, colours=clusters, title='Unclustered data') # + 
id="RmMY9ZOgnyCH" def objectiveScorer(dataset, clusters): objectiveScore = 0 for objectIdx in range(dataset.shape[0]): objectiveScore += dissimilarityComputer(dataset[objectIdx], dataset[clusters[objectIdx]]) return objectiveScore # + colab={"base_uri": "https://localhost:8080/"} id="giHRcxPxo4hY" outputId="ac583f09-eb73-4c29-bda6-8b219d5ed093" objectiveScorer(dataset, clusters) # + id="YE9jECxWmJNq" # loop over medoids for medoidIdx in range(medoids.shape[0]): scores = [] # loop over objects for objectIdx in range(dataset.shape[0]): medoidsTest = medoids.copy() # replace update medoid to be current object medoidsTest[medoidIdx] = objectIdx # assign objects to clusters using new test medoid clustersTest = objectClusterAssigner(dataset, clusters, medoidsTest) # compute objective func score = objectiveScorer(dataset, clustersTest) # store scores.append(score) # get best objective func best = min(scores) # if best obj func better than current if best < objectiveScorer(dataset, clusters): # replace medoid with object giving best obj func medoids[medoidIdx] = scores.index(best) # Assign new clusters clusters = objectClusterAssigner(dataset, clusters, medoids) # + id="3o8Ktd9tvGGL" outputId="04c107bb-09a8-41f5-eefd-3634b152e617" colab={"base_uri": "https://localhost:8080/", "height": 297} cluster_plots(dataset, medoidInd=medoids, colours=clusters, title='Unclustered data') # + id="M7UD6DKKvmhe" outputId="24f02dba-d6ed-4d73-a4ed-756373448454" colab={"base_uri": "https://localhost:8080/", "height": 297} cluster_plots(dataset, medoidInd=medoids, colours=clusters, title='Unclustered data') # + id="dZGwbLztv3Qu" outputId="4e950313-4ea1-4b3b-8b62-f3f42df82cff" colab={"base_uri": "https://localhost:8080/", "height": 297} cluster_plots(dataset, medoidInd=medoids, colours=clusters, title='Unclustered data') # + [markdown] tags=[] id="welcome-vietnam" # ## Exercise 2 # + [markdown] tags=[] id="american-quilt" # 1. 
Apply the implemented k-medoids algorithm to the generated dataset with the parameter k=4. # 2. For every iteration of the algorithm # - output the current value of the objective function # - output the indices of the current medoid objects # - plot the clustering together with medoids # + id="academic-provision"
COMP527-DataMiningAndVisualisation/k_medoids.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 64-bit
#     name: python3
# ---

# hide
from cute_deltas.algos import *

# # Cute Deltas
#
# > A cute little python module for finding the deltas between different things.

# [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/cute-deltas)](https://pypi.org/project/cute-deltas/)
# [![PyPI Status](https://badge.fury.io/py/cute-deltas.svg)](https://badge.fury.io/py/cute-deltas)
# [![Downloads](https://pepy.tech/badge/cute-deltas)](https://pepy.tech/project/cute-deltas)
# [![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/ncoop57/cute-deltas/blob/main/LICENSE)

# ## Install

# `pip install cute_deltas`

# ## How to use

# Fill me in please! Don't forget code examples:

1+1
nbs/index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .sh
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Bash
#     language: bash
#     name: bash
# ---

# # Aligning the control sample to the genome

# In the ChIP-Seq folder you will find another, gzip-compressed FASTQ file called **`Control.fastq.gz`**.
#
# **If you are not in there already, change into the data directory.**

cd data

# **Use the zless and head commands to look at the first few lines of this compressed file:**

zless Control.fastq.gz | head

# **Use the information on the FASTQ Wikipedia page ([http://en.wikipedia.org/wiki/FASTQ_format](http://en.wikipedia.org/wiki/FASTQ_format)) to determine the quality encoding this FASTQ file is using. Then, adapting your commands to the quality encoding where needed, follow the steps you used to align the PAX5 sample to the genome and manipulate the SAM file in order to align the control reads to the human genome.**

# ***
# ## What's next?
#
# You can head back to **[visualising alignments in IGV](alignment-visualisation.ipynb)** or continue on to **[identifying enriched areas using MACS](identifying-enriched-areas.ipynb)**.
practical/Notebooks/control-alignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (geog)
#     language: python
#     name: geog
# ---

# +
import sys, os, importlib
import rasterio
import pandas as pd
import geopandas as gpd

from rasterio import MemoryFile
from contextlib import contextmanager

sys.path.append("../src")
import GOSTRocks.rasterMisc as rMisc
from GOSTRocks.misc import tPrint

# +
# Admin boundaries to aggregate over, and the folder of harmonized
# nighttime-lights (NTL) rasters to process.
in_admin_file = "/home/public/Data/COUNTRY/ETH/ADMIN/gadm36_2_hoa.shp"
inD = gpd.read_file(in_admin_file)

ntl_folder = "/home/public/Data/GLOBAL/NighttimeLights/LI_HARMONIZED"
ntl_files = [os.path.join(ntl_folder, x) for x in os.listdir(ntl_folder)]
# -

@contextmanager
def create_rasterio_inmemory(src, curData):
    '''Yield an in-memory rasterio dataset built from an array and a raster profile.

       :param: src - data dictionary describing the rasterio template i.e. - rasterio.open().profile
       :param: curData - numpy array from which to create rasterio object
    '''
    with MemoryFile() as memFile:
        with memFile.open(**src) as dataset:
            dataset.write(curData)
            # Drop the local reference once written to the in-memory file.
            del curData
        with memFile.open() as dataset:
            yield(dataset)

# For every NTL raster, zero out pixels at or below each brightness threshold
# and record the zonal SUM of the remaining lights per admin unit.
for ntl_file in ntl_files:
    # Label columns with the last two "_"-separated tokens of the file name.
    file_name = "_".join(os.path.basename(ntl_file.replace(".tif", "")).split("_")[-2:])
    inR = rasterio.open(ntl_file)
    rData = inR.read()
    for thresh in [7, 20, 30]:
        tPrint(f"{file_name}: {thresh}")
        # Keep only values strictly above the threshold; others become 0.
        curData = (rData > thresh) * rData
        with create_rasterio_inmemory(inR.profile, curData) as curR:
            res = rMisc.zonalStats(inD, curR)
            xx = pd.DataFrame(res, columns=["SUM", "MIN", "MAX", "MEAN"])
            inD[f"{file_name}_{thresh}"] = xx['SUM']

inD = inD.reindex(sorted(inD.columns), axis=1)

# Cast every castable column to float before writing out; non-numeric columns
# (names, geometry) are deliberately left unchanged.
finalD = inD.copy()
for col in finalD.columns:
    try:
        finalD[col] = finalD[col].astype(float)
    except (ValueError, TypeError):
        pass
finalD.to_file("/home/wb411133/Code/gostrocks/data/RAW/gadm2_HoA_SoL.geojson", driver="GeoJSON")
pd.DataFrame(finalD.drop(['geometry'], axis=1)).to_csv("/home/wb411133/Code/gostrocks/data/RAW/gadm2_HoA_SoL.csv")
notebooks/ZON_DECAT_B_ZonalStatsNTLHarmonized.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from more import viz_helper as vh df = pd.read_csv("../data/titanic/train.csv") df.info() # ### Basic Plotting vh.plot_crosstab_barplot(data=df, y='Survived', by='Embarked') # ### Other Available Arguments # Change Figure Size and Center the legend vh.plot_crosstab_barplot(data=df, y='Survived', by='Embarked' ,figsize=(6,4), anchor = (1,0.5))
examples/visualization_helper/.ipynb_checkpoints/plot_crosstab_barplot-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # --- # # _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._ # # --- # ## Assignment 4 - Understanding and Predicting Property Maintenance Fines # # This assignment is based on a data challenge from the Michigan Data Science Team ([MDST](http://midas.umich.edu/mdst/)). # # The Michigan Data Science Team ([MDST](http://midas.umich.edu/mdst/)) and the Michigan Student Symposium for Interdisciplinary Statistical Sciences ([MSSISS](https://sites.lsa.umich.edu/mssiss/)) have partnered with the City of Detroit to help solve one of the most pressing problems facing Detroit - blight. [Blight violations](http://www.detroitmi.gov/How-Do-I/Report/Blight-Complaint-FAQs) are issued by the city to individuals who allow their properties to remain in a deteriorated condition. Every year, the city of Detroit issues millions of dollars in fines to residents and every year, many of these fines remain unpaid. Enforcing unpaid blight fines is a costly and tedious process, so the city wants to know: how can we increase blight ticket compliance? # # The first step in answering this question is understanding when and why a resident might fail to comply with a blight ticket. This is where predictive modeling comes in. For this assignment, your task is to predict whether a given blight ticket will be paid on time. # # All data for this assignment has been provided to us through the [Detroit Open Data Portal](https://data.detroitmi.gov/). 
**Only the data already included in your Coursera directory can be used for training the model for this assignment.** Nonetheless, we encourage you to look into data from other Detroit datasets to help inform feature creation and model selection. We recommend taking a look at the following related datasets: # # * [Building Permits](https://data.detroitmi.gov/Property-Parcels/Building-Permits/xw2a-a7tf) # * [Trades Permits](https://data.detroitmi.gov/Property-Parcels/Trades-Permits/635b-dsgv) # * [Improve Detroit: Submitted Issues](https://data.detroitmi.gov/Government/Improve-Detroit-Submitted-Issues/fwz3-w3yn) # * [DPD: Citizen Complaints](https://data.detroitmi.gov/Public-Safety/DPD-Citizen-Complaints-2016/kahe-efs3) # * [Parcel Map](https://data.detroitmi.gov/Property-Parcels/Parcel-Map/fxkw-udwf) # # ___ # # We provide you with two data files for use in training and validating your models: train.csv and test.csv. Each row in these two files corresponds to a single blight ticket, and includes information about when, why, and to whom each ticket was issued. The target variable is compliance, which is True if the ticket was paid early, on time, or within one month of the hearing data, False if the ticket was paid after the hearing date or not at all, and Null if the violator was found not responsible. Compliance, as well as a handful of other variables that will not be available at test-time, are only included in train.csv. # # Note: All tickets where the violators were found not responsible are not considered during evaluation. They are included in the training set as an additional source of data for visualization, and to enable unsupervised and semi-supervised approaches. However, they are not included in the test set. # # <br> # # **File descriptions** (Use only this data for training your model!) 
# # train.csv - the training set (all tickets issued 2004-2011) # test.csv - the test set (all tickets issued 2012-2016) # addresses.csv & latlons.csv - mapping from ticket id to addresses, and from addresses to lat/lon coordinates. # Note: misspelled addresses may be incorrectly geolocated. # # <br> # # **Data fields** # # train.csv & test.csv # # ticket_id - unique identifier for tickets # agency_name - Agency that issued the ticket # inspector_name - Name of inspector that issued the ticket # violator_name - Name of the person/organization that the ticket was issued to # violation_street_number, violation_street_name, violation_zip_code - Address where the violation occurred # mailing_address_str_number, mailing_address_str_name, city, state, zip_code, non_us_str_code, country - Mailing address of the violator # ticket_issued_date - Date and time the ticket was issued # hearing_date - Date and time the violator's hearing was scheduled # violation_code, violation_description - Type of violation # disposition - Judgment and judgement type # fine_amount - Violation fine amount, excluding fees # admin_fee - $20 fee assigned to responsible judgments # state_fee - $10 fee assigned to responsible judgments # late_fee - 10% fee assigned to responsible judgments # discount_amount - discount applied, if any # clean_up_cost - DPW clean-up or graffiti removal cost # judgment_amount - Sum of all fines and fees # grafitti_status - Flag for graffiti violations # # train.csv only # # payment_amount - Amount paid, if any # payment_date - Date payment was made, if it was received # payment_status - Current payment status as of Feb 1 2017 # balance_due - Fines and fees still owed # collection_status - Flag for payments in collections # compliance [target variable for prediction] # Null = Not responsible # 0 = Responsible, non-compliant # 1 = Responsible, compliant # compliance_detail - More information on why each ticket was marked compliant or non-compliant # # # ___ # # ## 
Evaluation # # Your predictions will be given as the probability that the corresponding blight ticket will be paid on time. # # The evaluation metric for this assignment is the Area Under the ROC Curve (AUC). # # Your grade will be based on the AUC score computed for your classifier. A model which with an AUROC of 0.7 passes this assignment, over 0.75 will recieve full points. # ___ # # For this assignment, create a function that trains a model to predict blight ticket compliance in Detroit using `train.csv`. Using this model, return a series of length 61001 with the data being the probability that each corresponding ticket from `test.csv` will be paid, and the index being the ticket_id. # # Example: # # ticket_id # 284932 0.531842 # 285362 0.401958 # 285361 0.105928 # 285338 0.018572 # ... # 376499 0.208567 # 376500 0.818759 # 369851 0.018528 # Name: compliance, dtype: float32 # + import pandas as pd import numpy as np def blight_model(): # Your code here return # Your answer here
3_machine_learning/course3_downloads/Assignment 4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# Like common file, but with another table structure

# +
import yaml
import os
from yaml import Loader as Loader
from pathlib import Path
import pandas as pd
import numpy as np
import json

from sklearn.metrics import roc_auc_score

from analyze_results import (
    extract_result,
    aggregate_runs,
    from_model_outputs_calc_rcc_auc,
)
from analyze_results import (
    format_results2,
    improvement_over_baseline,
    from_model_outputs_calc_pr_auc,
    from_model_outputs_calc_rpp,
    from_model_outputs_calc_roc_auc,
    from_model_outputs_calc_arc_auc
)

from utils.utils_wandb import init_wandb, wandb
from ue4nlp.ue_scores import *


# +
# Default metric columns for the aggregation helpers below (kept as a module
# constant instead of mutable list defaults in the signatures).
_ALL_METRIC_TYPES = ["rejection-curve-auc", "roc-auc", "rcc-auc", "pr-auc", "rpp"]


def choose_metric(metric_type):
    """Map a metric-type name to the function computing it from model outputs.

    Raises ValueError for an unknown name.
    """
    if metric_type == "rejection-curve-auc":
        return from_model_outputs_calc_arc_auc
    elif metric_type == "roc-auc":
        return from_model_outputs_calc_roc_auc
    elif metric_type == "rcc-auc":
        return from_model_outputs_calc_rcc_auc
    elif metric_type == "pr-auc":
        return from_model_outputs_calc_pr_auc
    elif metric_type == "rpp":
        return from_model_outputs_calc_rpp
    else:
        raise ValueError("Wrong metric type!")


def get_one_table(runs_dir, metric_types=None, baseline=None, methods=None):
    """Aggregate the runs in *runs_dir* into one table, one column per metric type.

    :param runs_dir: directory of run outputs passed to aggregate_runs
    :param metric_types: metric names to compute (defaults to _ALL_METRIC_TYPES)
    :param baseline: optional baseline forwarded to improvement_over_baseline
    :param methods: dict name -> UE-score function; defaults to the standard set
    """
    if metric_types is None:
        metric_types = list(_ALL_METRIC_TYPES)
    default_methods = {
        "bald": bald,
        "sampled_max_prob": sampled_max_prob,
        "variance": probability_variance,
        "var.ratio": var_ratio,
        # "sampled_entropy": mean_entropy,
    }
    if methods is None:
        methods = default_methods

    table = []
    for metric_type in metric_types:
        metric = choose_metric(metric_type=metric_type)
        agg_res = aggregate_runs(runs_dir, methods=methods, metric=metric)
        if agg_res.empty:
            print("Broken\n")
            continue
        # rcc-auc and rpp are reported as raw scores; the rest as improvement
        # over the max_prob baseline, in percent.
        if metric_type == "rcc-auc":
            final_score = format_results2(agg_res, percents=False)
        elif metric_type == "rpp":
            final_score = format_results2(agg_res, percents=True)
        else:
            final_score = improvement_over_baseline(
                agg_res,
                baseline_col="max_prob",
                baseline=baseline,
                metric=metric_type,
                percents=True,
                subtract=True,
            )
        table.append(final_score)
    res_table = pd.concat(table, axis=1)
    res_table.columns = metric_types

    # fix for rcc-auc and rpp: these have no improvement-over-baseline row,
    # so copy the raw max_prob score into the baseline row instead.
    if 'baseline (max_prob)' not in res_table.index:
        res_table.loc['baseline (max_prob)'] = 0
    for metric in ['rcc-auc', 'rpp']:
        try:
            res_table[metric].loc['baseline (max_prob)'] = res_table[metric].loc['max_prob']
        except KeyError:
            # Metric column or max_prob row absent - nothing to copy.
            pass
    try:
        res_table = res_table.drop(['max_prob', 'count'])
    except KeyError:
        # Some tables have no 'count' row.
        res_table = res_table.drop(['max_prob'])
    return res_table


def collect_tables(run_dirs, names, metric_types=None, baseline=None, methods=None):
    """Stack per-method tables (one per run_dir/name pair) into one table
    indexed by a (Method, UE Score) MultiIndex.
    """
    all_tables = []
    for run_dir, name in zip(run_dirs, names):
        buf_table = get_one_table(run_dir, metric_types, baseline, methods)
        # Prefix every row with the method name.
        indices = [(name, ind) for ind in list(buf_table.index)]
        # The baseline row keeps only the tail of the method name.
        baseline_name = 'baseline|' + '|'.join(name.split('|')[-1:])
        buf_table.loc[baseline_name] = buf_table.loc['baseline (max_prob)']
        indices = indices + [(baseline_name, 'max_prob')]
        index = pd.MultiIndex.from_tuples(indices, names=['Method', 'UE Score'])
        buf_table.index = index
        buf_table.drop((name, 'baseline (max_prob)'), inplace=True)
        all_tables.append(buf_table)
    return pd.concat(all_tables)


def collect_datasets(runs_dirs, names, dataset_names, metric_types=None, baselines=None, methods=None):
    """Concatenate per-dataset tables side by side under a
    (dataset, metric) column MultiIndex.

    Datasets whose run directories are missing or malformed are skipped
    with a message (best-effort aggregation across many experiment dirs).
    """
    if baselines is None:
        baselines = {}
    all_tables = []
    for run_dir, dataset_name in zip(runs_dirs, dataset_names):
        try:
            dataset_table = collect_tables(run_dir, names, metric_types, baselines.get(dataset_name, None), methods)
            columns = pd.MultiIndex.from_tuples([(dataset_name, ind) for ind in list(dataset_table.columns)])
            dataset_table.columns = columns
            all_tables.append(dataset_table)
        except Exception:
            print(f'empty dir {run_dir}')
    return pd.concat(all_tables, axis=1)
# +
jupyter={"outputs_hidden": true, "source_hidden": true} tags=[] import os default_methods = { "bald": bald, "sampled_max_prob": sampled_max_prob, "variance": probability_variance, } metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mahalanobis'] regs = ['raw'] dataset_names = ['MRPC', 'CoLA', 'SST2 (10%)'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] raw_baselines = {} for ds_fname, ds_name in zip(dataset_fnames, dataset_names): model_series_dir = f'../workdir/run_glue_for_model_series/electra-raw/{ds_fname}/0.0/ddpp_dpp_0.3_20/' table = [] for metric_type in metric_types: metric = choose_metric(metric_type=metric_type) agg_res = aggregate_runs( model_series_dir, methods=default_methods, metric=metric ) mean_res = agg_res.mean(axis=0) final_results = mean_res.T table.append(final_results.loc[['max_prob']]) res_table = pd.concat(table, axis=1) res_table.columns = metric_types raw_baselines[ds_name] = res_table# #raw_baselines[ds_name]={k:v for k,v in zip(res_table.columns.values.tolist(), res_table.values[0].tolist())} # - # # Determenistic methods # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mahalanobis', 'mc_mahalanobis']#'nuq', regs = ['raw','reg'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: for sn in ['True']:#['sn', 'no_sn']: run_dirs = [] name_sn = '' names = [f'{method}|last|{reg}_{sn}'] for name in dataset_fnames: #model_series_dir = f'../workdir/final_res/run_glue_for_model_series/{name}_electra_{reg}_{sn}/{method}/'\ model_series_dir = f'/mnt/users/avazhentsev/uncertainty-estimation/workdir/run_glue_for_model_series_sn/electra-{reg}-True/{name}/0.0/{method}' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) 
tables.append(res_df.iloc[:-1]) table_det_sn = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - table_det_sn = table_det_sn.iloc[[0,1,3,5,6]].reset_index() table_det_sn.Method = ['MD SN (ours)']*2+['SMD SN (ours)']*2+['SR SN'] table_det_sn['Reg. Type'] = ['-', 'CER', '-', 'CER', '-'] table_det_sn = table_det_sn[list(table_det_sn.columns[[0,-1]]) + list(table_det_sn.columns[list(range(2,11))])] # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mahalanobis']#'nuq', regs = ['reg', 'raw'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: for sn in ['sn', 'no_sn']: run_dirs = [] name_sn = '' names = [f'{method}|last|{reg}_{sn}'] for name in dataset_fnames: model_series_dir = f'../workdir/final_res_det/run_glue_for_model_series/{name}_electra_{reg}_{sn}/{method}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) table_det = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - table_det # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['sngp'] regs = ['raw'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: run_dirs = [] names = [f'{method}|last|{reg}'] for name in dataset_fnames: model_series_dir = f'../workdir/run_glue_for_model_series/electra-{reg}-sngp/{name}/0.0/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) table_sngp = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - table_sngp 
# + import os def choose_metric(metric_type): if metric_type == "rejection-curve-auc": return "rejection-curve-auc"#from_model_outputs_calc_arc_auc elif metric_type == "rcc-auc": return from_model_outputs_calc_rcc_auc elif metric_type == "rpp": return from_model_outputs_calc_rpp else: raise ValueError("Wrong metric type!") nuq_aleatoric = lambda x: np.squeeze(x[0], axis=-1) nuq_epistemic = lambda x: np.squeeze(x[1], axis=-1) nuq_total = lambda x: np.squeeze(x[2], axis=-1) agg_methods = { "nuq_aleatoric": nuq_aleatoric, "nuq_epistemic": nuq_epistemic, "nuq_total": nuq_total, } metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['nuq']#'', regs = ['metric', 'reg', 'raw'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: for sn in ['sn', 'no_sn']: run_dirs = [] name_sn = '' names = [f'{method}|last|{reg}_{sn}'] for name in dataset_fnames: model_series_dir = f'/home/user/uncertainty-estimation/workdir/run_glue_for_model_series_sn/electra_{reg}_{sn}/{name}/0.0/{method}/' print(model_series_dir) run_dirs.append([model_series_dir]) try: res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines={}, methods=agg_methods) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) except: print('pass') pass table_nuq = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - table_nuq # # MC-Mahalanobis # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mc_mahalanobis']#'nuq', regs = ['reg', 'raw'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: for sn in ['sn', 'no_sn']: run_dirs = [] name_sn = '' names = [f'{method}|last|{reg}_{sn}'] for name in dataset_fnames: model_series_dir = 
f'../workdir/final_res_det/run_glue_for_model_series/{name}_electra_{reg}_{sn}/{method}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) table_mc_det = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - table_mc_det det_res = pd.concat([table_det.iloc[[3,1,2,0]], table_mc_det.iloc[[7,3,5,1]], table_sngp.iloc[:1], pd.concat(baselines).iloc[[2,0,1,3]]]) det_res = det_res.reset_index() det_res.Method = ['MD']*2+['MD SN (ours)']*2+['SMD (ours)']*2+['SMD SN (ours)']*2+['SNGP']+['SR SN']*2+['SR']*2 det_res['Reg. Type'] = ['-', 'CER']*4+['-']+['-','CER','CER','-'] det_res = det_res[list(det_res.columns[[0,-1]]) + list(det_res.columns[list(range(2,11))])] det_res#.iloc[:2] tab_res_det = pd.concat([det_res.iloc[[0,1]], table_det_sn.iloc[[0,1]], det_res.iloc[[4,5,6,7,8]], table_det_sn.iloc[[-1]], det_res.iloc[-3:]]) tab_res_det # + jupyter={"outputs_hidden": true} print(str(tab_res_det.to_latex(index=False)).replace('±', '$\pm$')) # - # # MC-Dropout # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mc_all']#, 'mc_last'] regs = ['raw', 'reg'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for reg in regs: run_dirs = [] #layer = method.split('_')[-1] names = [f'mc|{reg}'] print(names) for name in dataset_fnames: model_series_dir = f'../workdir/final_res/run_mc_all/{name}_electra_{reg}_no_sn/mc_all/' print(model_series_dir) run_dirs.append([model_series_dir]) try: res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) except: print('skip') pass table_mc = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) table_mc = table_mc.reset_index() # + def 
preproc_regs(x): regs = x.split('|') return '-' if (regs[-1]=='raw' or regs[-1] == '') else 'CER' table_mc['Reg. Type'] = table_mc.Method.apply(lambda x: preproc_regs(x)) table_mc['Dropout Layers'] = table_mc['Method'].apply(lambda x: x.split('|')[1] if 'baseline' not in x else '-') table_mc['Method'] = table_mc['Method'].apply(lambda x: x.split('|')[0].upper() if 'baseline' not in x else x.split('|')[0]) table_mc = table_mc[list(table_mc.columns[:1]) + list(table_mc.columns[-2:]) + list(table_mc.columns[1:-2])] # - table_mc['UE Score'] = ['BALD', 'SMP', 'PV', 'VR']*2+['MP']*2 table_mc['Reg. Type'] = list(table_mc['Reg. Type'].iloc[:-2].values) + ['-', 'CER'] table_mc[list(table_mc.columns[:2]) + list(table_mc.columns[3:])] # + jupyter={"outputs_hidden": true} print(str(table_mc[list(table_mc.columns[:2]) + list(table_mc.columns[3:])].iloc[:8].to_latex(index=False)).replace('±', '$\pm$')) # - # # MC-DPP all # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['ddpp_dpp', 'ddpp_ood'] regs = ['raw'] max_fracs = [0.3, 0.4, 0.5, 0.6] comsizes = [20, 50] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for max_frac in max_fracs: for cs in comsizes: for reg in regs: run_dirs = [] names = [f'ddpp_{method}|{max_frac}|{cs}'] for name in dataset_fnames: model_series_dir = f'../workdir/run_glue_for_model_series/electra-raw/{name}/0.0/{method}_{max_frac}_{cs}' #print(model_series_dir) run_dirs.append([model_series_dir]) try: res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) except: print(f'Not exists one of this dirs: {run_dirs}') # - table_dpp = pd.concat([pd.concat(tables), pd.concat(baselines[-1:])]) table_dpp table_dpp.sort_values(by=('SST-2', 'rcc-auc')).iloc[:50] {'MRPC': {'ddpp_ood' : 0.6, 'ddpp_dpp' : 0.5}, 'CoLA': {'ddpp_ood' : 0.5, 'ddpp_dpp' : 
0.6}, 'SST-2': {'ddpp_ood' : 0.5, 'ddpp_dpp' : 0.5}} # # MC-DPP calibration # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['False', 'True'] regs = ['raw'] max_fracs = [0.3, 0.4, 0.6] val_subsamples = [0.0, 0.1] max_fracs_dicts = {'mrpc': {'False': 0.6, 'True': 0.6}, 'cola': {'False': 0.6, 'True': 0.6}, 'sst2': {'False': 0.3, 'True': 0.3}} dataset_names = ['MRPC', 'CoLA', 'SST2 (10%)'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for val_subsample in val_subsamples: for reg in regs: run_dirs = [] dpp_type = 'with_ood' if method=='True' else 'on_masks' names = [f'DPP_{dpp_type}|{val_subsample}|{reg}'] for name in dataset_fnames: max_frac = max_fracs_dicts[name][method] model_series_dir = f'../workdir/run_glue_for_model_series/electra-{reg}/{name}/{val_subsample}/dpp/{method}/{max_frac}/' model_series_dir += np.sort(os.listdir(model_series_dir))[-1] model_series_dir += f'/{np.sort(os.listdir(model_series_dir))[-1]}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) table_dpp_cal = pd.concat([pd.concat(tables), pd.concat(baselines[-2:-1])]) # + def preproc_cal(x): if 'baseline' in x: return '-' ds = x.split('|')[1] if ds == '0.1': return 'val.' return 'train' table_dpp_cal = table_dpp_cal.reset_index() table_dpp_cal['Calibr. 
Dataset'] = table_dpp_cal.Method.apply(lambda x: preproc_cal(x)) table_dpp_cal['Method'] = table_dpp_cal['Method'].apply(lambda x: x.split('|')[0]) table_dpp_cal = table_dpp_cal[list(table_dpp_cal.columns[:1]) + list(table_dpp_cal.columns[-1:]) + list(table_dpp_cal.columns[1:-1])] # - table_dpp_cal # + jupyter={"outputs_hidden": true} print(str(table_dpp_cal.to_latex(index=False)).replace('±', '$\pm$')) # - # # MC-DPP regs # + jupyter={"outputs_hidden": true} table_dpp.sort_values(by=('SST2 (10%)', 'rcc-auc')).iloc[:50] # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['False', 'True'] regs = ['reg'] max_fracs = [0.3, 0.4, 0.6] val_subsamples = [0.0, 0.1] max_fracs_dicts = {'mrpc': {'False': 0.3, 'True': 0.6}, 'cola': {'False': 0.6, 'True': 0.3}, 'sst2': {'False': 0.4, 'True': 0.6}} val_subsamples_dicts = {'mrpc': {'False': 0.0, 'True': 0.0}, 'cola': {'False': 0.0, 'True': 0.0}, 'sst2': {'False': 0.0, 'True': 0.0}} dataset_names = ['MRPC', 'CoLA', 'SST2 (10%)'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: run_dirs = [] dpp_type = 'with_ood' if method=='True' else 'on_masks' for name in dataset_fnames: val_subsample = val_subsamples_dicts[name][method] max_frac = max_fracs_dicts[name][method] names = [f'DPP_{dpp_type}|{val_subsample}|{reg}'] model_series_dir = f'../workdir/run_glue_for_model_series/electra-{reg}/{name}/{val_subsample}/dpp/{method}/{max_frac}/' model_series_dir += os.listdir(model_series_dir)[-1] model_series_dir += f'/{os.listdir(model_series_dir)[-1]}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) # + def preproc_regs(x): regs = x.split('|') return regs[-1] table_dpp_reg = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) table_dpp_reg = 
table_dpp_reg.reset_index() table_dpp_reg['Reg. Type'] = table_dpp_reg.Method.apply(lambda x: preproc_regs(x)) table_dpp_reg['Method'] = table_dpp_reg['Method'].apply(lambda x: x.split('|')[0]) table_dpp_reg = table_dpp_reg[list(table_dpp_reg.columns[:1]) + list(table_dpp_reg.columns[-1:]) + list(table_dpp_reg.columns[1:-1])] # - table_dpp_reg # + jupyter={"outputs_hidden": true} print(str(table_dpp_reg.to_latex(index=False)).replace('±', '$\pm$')) # - # # Raw DPP # + jupyter={"outputs_hidden": true} import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] regs = ['reg', 'raw'] max_fracs = [0.3, 0.4, 0.6] val_subsamples = [0.0, 0.1] dataset_names = ['MRPC', 'CoLA', 'SST2 (10%)'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for max_frac in max_fracs: for val_subsample in val_subsamples: for reg in regs: run_dirs = [] names = [f'dpp|{max_frac}|{val_subsample}|{reg}'] for name in dataset_fnames: model_series_dir = f'../workdir/run_glue_for_model_series/electra-{reg}/{name}/{val_subsample}/raw_dpp/{max_frac}/' model_series_dir += np.sort(os.listdir(model_series_dir))[-1] model_series_dir += f'/{np.sort(os.listdir(model_series_dir))[-1]}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) table_dpp = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - table_dpp.sort_values(by=('SST2 (10%)', 'rcc-auc')).iloc[:50] # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] regs = ['raw'] max_fracs = [0.3, 0.4, 0.6] val_subsamples = [0.0, 0.1] max_fracs_dicts = {'mrpc': 0.3, 'cola': 0.4, 'sst2': 0.6} val_subsamples_dicts = {'mrpc': 0.0, 'cola': 0.1, 'sst2': 0.0} dataset_names = ['MRPC', 'CoLA', 'SST2 (10%)'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for reg in regs: run_dirs = [] for name in dataset_fnames: 
val_subsample = val_subsamples_dicts[name] max_frac = max_fracs_dicts[name] names = [f'DPP|{val_subsample}|{reg}'] model_series_dir = f'../workdir/run_glue_for_model_series/electra-{reg}/{name}/{val_subsample}/dpp/{method}/{max_frac}/' model_series_dir += os.listdir(model_series_dir)[-1] model_series_dir += f'/{os.listdir(model_series_dir)[-1]}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) # + def preproc_regs(x): regs = x.split('|') return regs[-1] table_raw_dpp = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) table_raw_dpp = table_raw_dpp.reset_index() table_raw_dpp['Method'] = table_raw_dpp['Method'].apply(lambda x: x.split('|')[0]) #table_raw_dpp = table_raw_dpp[list(table_raw_dpp.columns[:1]) + list(table_raw_dpp.columns[-1:]) + list(table_raw_dpp.columns[1:-1])] # - table_raw_dpp # + jupyter={"outputs_hidden": true} print(str(table_raw_dpp.to_latex(index=False)).replace('±', '$\pm$')) # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['False', 'True'] regs = ['raw'] max_fracs = [0.3, 0.4, 0.6] val_subsamples = [0.0, 0.1] max_fracs_dicts = {'mrpc': {'False': 0.6, 'True': 0.6}, 'cola': {'False': 0.6, 'True': 0.6}, 'sst2': {'False': 0.3, 'True': 0.3}} val_subsamples_dicts = {'mrpc': {'False': 0.0, 'True': 0.0}, 'cola': {'False': 0.0, 'True': 0.0}, 'sst2': {'False': 0.0, 'True': 0.1}} dataset_names = ['MRPC', 'CoLA', 'SST2 (10%)'] dataset_fnames = ['mrpc', 'cola', 'sst2'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: run_dirs = [] dpp_type = 'with_ood' if method=='True' else 'on_masks' for name in dataset_fnames: val_subsample = val_subsamples_dicts[name][method] max_frac = max_fracs_dicts[name][method] names = [f'DPP_{dpp_type}|{val_subsample}|{reg}'] model_series_dir = 
f'../workdir/run_glue_for_model_series/electra-{reg}/{name}/{val_subsample}/dpp/{method}/{max_frac}/' model_series_dir += np.sort(os.listdir(model_series_dir))[-1] model_series_dir += f'/{np.sort(os.listdir(model_series_dir))[-1]}/' print(model_series_dir) run_dirs.append([model_series_dir]) res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) # + def preproc_regs(x): regs = x.split('|') return regs[-1] table_dpp_2 = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) table_dpp_2 = table_dpp_2.reset_index() table_dpp_2['Method'] = table_dpp_2['Method'].apply(lambda x: x.split('|')[0]) # - table_dpp_2 # + jupyter={"outputs_hidden": true} print(str(table_dpp_2.to_latex(index=False)).replace('±', '$\pm$')) # - # # Ensemble # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] dataset_to_time = {'mrpc':'17-25-06', 'cola': '17-34-50', 'sst2': '17-45-49'} run_dirs = [] for name in dataset_fnames: names = [f'Deep Ensemble'] time = dataset_to_time[name] model_series_dir = f'/mnt/users/avazhentsev/uncertainty-estimation/workdir/run_glue_for_ensemble_series/2021-10-12/{time}/final_results/' print(model_series_dir) run_dirs.append([model_series_dir]) ens_tab = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) # + def preproc_regs(x): regs = x.split('|') return regs[-1] ens_tab = ens_tab.reset_index() ens_tab['Reg. 
Type'] = '-' ens_tab['Method'] = ens_tab['Method'].apply(lambda x: x.split('|')[0]) ens_tab['UE Score'] = ['BALD', 'SMP', 'PV', 'VR', 'MP'] ens_tab = ens_tab[list(ens_tab.columns[:1]) + list(ens_tab.columns[-1:]) + list(ens_tab.columns[1:-1])] # - ens_tab print(str(ens_tab.iloc[:-1].to_latex(index=False)).replace('±', '$\pm$')) # # DE + mahalanobis # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] dataset_names = ['MRPC', 'CoLA', 'SST-2'] dataset_fnames = ['mrpc', 'cola', 'sst2'] dataset_to_time = {'mrpc': '12-56-53', 'cola': '13-08-59', 'sst2': '13-44-40'} run_dirs = [] for name in dataset_fnames: names = [f'Deep Ensemble'] time = dataset_to_time[name] model_series_dir = f'/mnt/users/avazhentsev/uncertainty-estimation/workdir/run_glue_for_ensemble_series/2021-10-19/{time}/final_results/' print(model_series_dir) run_dirs.append([model_series_dir]) ens_tab = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) # + def preproc_regs(x): regs = x.split('|') return regs[-1] ens_tab = ens_tab.reset_index() ens_tab['Reg. 
Type'] = '-' ens_tab['UE Score'] = ['MD', 'SMD', 'MP'] ens_tab['Method'] = ens_tab['Method'].apply(lambda x: x.split('|')[0]) ens_tab = ens_tab[list(ens_tab.columns[:1]) + list(ens_tab.columns[-1:]) + list(ens_tab.columns[1:-1])] # - ens_tab['Method'] = 'DE+'+ens_tab['UE Score'] ens_tab = ens_tab.drop(columns=['UE Score']) tab_res_det = tab_res_det.reset_index(drop=True) pd.concat([tab_res_det.iloc[:8], ens_tab.iloc[1:2], det_res.iloc[8:]]).reset_index(drop=True) # # Combine all table_dpp_reg['Dropout Layers'] = 'last' table_det['Dropout Layers'] = '-' table_dpp_reg res = pd.concat([table_mc.iloc[[0,1,2]], table_dpp_reg.iloc[:-2], table_det.iloc[[9,10,11,15]], table_mc_det.iloc[[3]], ens_tab.iloc[:-1], table_dpp_reg.iloc[-2:]]) res = res[table_mc.columns].reset_index(drop=True) res print(str(res.to_latex(index=False)).replace('±', '$\pm$')) # # DPP All 20ng # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['ddpp_dpp', 'ddpp_ood'] regs = ['raw'] max_fracs = [0.3, 0.4, 0.5, 0.6] comsizes = [50] dataset_names = ['20newsgroups'] dataset_fnames = ['20newsgroups'] names = [] tables = [] baselines = [] for method in methods: for max_frac in max_fracs: for cs in comsizes: for reg in regs: run_dirs = [] names = [f'{method}|{max_frac}|{cs}'] for name in dataset_fnames: model_series_dir = f'../workdir/run_glue_for_model_series/electra_raw_no_sn/{name}/0.0/{method}_{max_frac}_50/results/' print(model_series_dir) run_dirs.append([model_series_dir]) try: res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) except: print(f'Not exists one of this dirs: {run_dirs}') # - table_dpp = pd.concat([pd.concat(tables), pd.concat(baselines[-1:])]) table_dpp.sort_values(by= ('20newsgroups', 'rcc-auc')) # # 20 ng # + import os default_methods = { "bald": bald, "sampled_max_prob": sampled_max_prob, "variance": probability_variance, } 
metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mahalanobis'] regs = ['raw'] dataset_names = ['20newsgroups'] dataset_fnames = ['20newsgroups'] names = [] tables = [] raw_baselines = {} for ds_fname, ds_name in zip(dataset_fnames, dataset_names): model_series_dir = f'../workdir/run_glue_for_model_series/electra_raw_no_sn/{ds_fname}/0.0/ddpp_dpp_best/' table = [] for metric_type in metric_types: metric = choose_metric(metric_type=metric_type) agg_res = aggregate_runs( model_series_dir, methods=default_methods, metric=metric ) mean_res = agg_res.mean(axis=0) final_results = mean_res.T table.append(final_results.loc[['max_prob']]) res_table = pd.concat(table, axis=1) res_table.columns = metric_types raw_baselines[ds_name] = res_table# #raw_baselines[ds_name]={k:v for k,v in zip(res_table.columns.values.tolist(), res_table.values[0].tolist())} # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['mahalanobis'] regs = ['metric', 'reg', 'raw'] dataset_names = ['20newsgroups'] dataset_fnames = ['20newsgroups'] names = [] tables = [] baselines = [] for method in methods: for reg in regs: for sn in ['sn', 'no_sn']: run_dirs = [] name_sn = '' names = [f'{method}|{reg}_{sn}'] for name in dataset_fnames: model_series_dir = f'../workdir/run_glue_for_model_series/electra_{reg}_{sn}/{name}/0.0/{method}' print(model_series_dir) run_dirs.append([model_series_dir]) try: res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) except: print('pass') pass table_det = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # + import os metric_types=["rejection-curve-auc", "rcc-auc", 'rpp'] methods = ['ddpp_dpp_best', 'ddpp_ood_best', 'mc_all'] regs = ['metric', 'reg', 'raw'] dataset_names = ['20newsgroups'] dataset_fnames = ['20newsgroups'] names = [] tables = [] #baselines = [] for method in methods: for reg in regs: 
for sn in ['no_sn']: run_dirs = [] name_sn = '' names = [f'{method}|{reg}_{sn}'] for name in dataset_fnames: model_series_dir = f'../workdir/run_glue_for_model_series/electra_{reg}_{sn}/{name}/0.0/{method}' print(model_series_dir) run_dirs.append([model_series_dir]) try: res_df = collect_datasets(run_dirs, names, dataset_names, metric_types=metric_types, baselines=raw_baselines) #baselines.append(res_df.iloc[-1:]) tables.append(res_df.iloc[:-1]) except: print('pass') pass table_mc = pd.concat([pd.concat(tables), pd.concat(baselines[-2:])]) # - res_table = pd.concat([table_mc.iloc[:-2], table_det.iloc[:-2], pd.concat(baselines[-6:])]).reset_index(drop=False) res_table = res_table[res_table['UE Score']!='var.ratio'] # + def preproc_regs(x): reg = x.split('|')[-1].split('_')[0] if reg == 'reg': return 'CER' elif reg == 'raw': return '-' else: return reg def preproc_method(x): method = x.split('|')[0] sn = x.split('|')[-1] if method == 'mahalanobis' and not 'no_sn' in sn: return 'MD SN (ours)' elif method == 'mahalanobis': return 'MD' elif 'ddpp_dpp' in method: return 'DDPP (+DPP) (ours)' elif 'ddpp_ood' in method: return 'DDPP (+OOD) (ours)' elif 'mc_all' in method: return 'MC dropout' elif 'baseline|raw_no_sn' in x: return 'SR (baseline)' elif 'baseline' in x and not 'no_sn' in x: return 'SR SN' return 'SR' def preproc_ue(x): if x == 'bald': return 'BALD' elif x == 'mahalanobis': return 'MD' elif 'sampled_max_prob' in x: return 'SMP' elif 'variance' in x: return 'PV' return 'MD' res_table['Reg. 
Type'] = res_table.Method.apply(lambda x: preproc_regs(x)) res_table['Method'] = res_table.Method.apply(lambda x: preproc_method(x)) res_table['UE Score'] = res_table['UE Score'].apply(lambda x: preproc_ue(x)) # - res_table = res_table[list(res_table.columns[:1]) + list(res_table.columns[-1:]) + list(res_table.columns[1:-1])].reset_index(drop=True) res_table res_table.iloc[list(range(12)) + list(range(27, 33))] res_table.iloc[list(range(12,33))] # # Legacy # + # Get results with new dpp models names = ['MC|last|reg', 'MC|all|reg'] dataset_names = ['SST2 (10%)', 'MRPC', 'CoLA'] dataset_fnames = ['SST2', 'MRPC', 'CoLA'] run_dirs = [[f'../workdir/run_glue_for_model_series/electra-reg/{name.lower()}/last/results', f'../workdir/run_glue_for_model_series/electra-reg/{name.lower()}/all/results'] for name in dataset_fnames] mc_table_reg = collect_datasets(run_dirs, names, dataset_names) names = ['DPP2|reg', 'DPP OOD|reg'] dataset_names = ['SST2 (10%)', 'MRPC', 'CoLA'] dataset_fnames = ['SST2', 'MRPC', 'CoLA'] run_dirs = [[f'../workdir/run_glue_for_model_series/electra-reg-calibrate/{name.lower()}/dpp/results', f'../workdir/run_glue_for_model_series/electra-reg-calibrate/{name.lower()}/dpp_with_ood/results'] for name in dataset_fnames] dpp_table_reg = collect_datasets(run_dirs, names, dataset_names) names = ['MC|last|no reg', 'MC|all|no reg'] dataset_names = ['SST2 (10%)', 'MRPC', 'CoLA'] dataset_fnames = ['SST2', 'MRPC', 'CoLA'] run_dirs = [[f'../workdir/run_glue_for_model_series/electra-raw/{name.lower()}/last/results', f'../workdir/run_glue_for_model_series/electra-raw/{name.lower()}/all/results'] for name in dataset_fnames] mc_table_no_reg = collect_datasets(run_dirs, names, dataset_names) names = ['DPP2|no reg', 'DPP OOD|no reg'] dataset_names = ['SST2 (10%)', 'MRPC', 'CoLA'] dataset_fnames = ['SST2', 'MRPC', 'CoLA'] run_dirs = [[f'../workdir/run_glue_for_model_series/electra-raw/{name.lower()}/dpp/results', 
f'../workdir/run_glue_for_model_series/electra-raw/{name.lower()}/dpp_with_ood/results'] for name in dataset_fnames] dpp_table_no_reg = collect_datasets(run_dirs, names, dataset_names) # - names = ['DPP2|no reg calibrated', 'DPP OOD|no reg calibrated'] dataset_names = ['SST2 (10%)', 'MRPC', 'CoLA'] dataset_fnames = ['SST2', 'MRPC', 'CoLA'] run_dirs = [[f'../workdir/run_glue_for_model_series/electra-raw-calibrate/{name.lower()}/dpp/results', f'../workdir/run_glue_for_model_series/electra-raw-calibrate/{name.lower()}/dpp_with_ood/results'] for name in dataset_fnames] dpp_table_no_reg_cal = collect_datasets(run_dirs, names, dataset_names) dpp_table_no_reg_cal names = ['DPP2|reg calibrated', 'DPP OOD|reg calibrated'] dataset_names = ['SST2 (10%)', 'MRPC', 'CoLA'] dataset_fnames = ['SST2', 'MRPC', 'CoLA'] run_dirs = [[f'../workdir/run_glue_for_model_series/electra-reg-calibrate/{name.lower()}/dpp/results', f'../workdir/run_glue_for_model_series/electra-reg-calibrate/{name.lower()}/dpp_with_ood/results'] for name in dataset_fnames] dpp_table_reg_cal = collect_datasets(run_dirs, names, dataset_names) overall_table_cal = pd.concat([mc_table_reg, dpp_table_reg_cal, mc_table_no_reg, dpp_table_no_reg, dpp_table_no_reg_cal]) overall_table_cal
src/exps_notebooks/paper_results_final_all_metrics_correct_hp_20ng.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Calculating excited states with the Variational Quantum Eigensolver(VQE) # # VQE optimizes a wave function to minimize the expectation value of Hamiltonian. Therefore, VQE can only calculate the ground state. In this time, we will explain how to calculate the excited state using VQE, and we will show the example of the calculation for the excited state of the hydrogen molecule. # # ## Theoretical Outline # This time we will use an [Overlap-based method](https://quantum-journal.org/papers/q-2019-07-01-156/) to calculate the $1^{st}$ excited state. This method uses a penalty term for the overlap between the wavefunctions from VQE and the ground state. The excited state can be obtained by adding the penalty term to Hamiltonian. An equation is as follows # $$ # H^\prime=H+\alpha \mid \psi_{gs}\rangle\langle\psi_{gs} \vert # $$ # The $H$ on the right side is the original Hamiltonian. The second term on the right side evaluates the value of the overlap with the ground-state wave function $ \mid \psi_{gs}\rangle$ (In this tutorial, $ \mid \psi_{gs}\rangle$ is obtained by a classical calculation, but it is also possible to use VQE). $\alpha$ in the second term is a parameter that represents a penalty, and we take a large enough value about it. By adding the penalty term to the original $H$ term, we define $H^\prime$ on the left side. # # We evaluate the expected value by using the wave function $ \mid \psi_{VQE}\rangle$ obtained from the VQE. 
# $$ # \langle\psi_{VQE} \mid H^\prime \mid \psi_{VQE}\rangle=\langle\psi_{VQE} \mid H \mid \psi_{VQE}\rangle+\alpha\langle\psi_{VQE} \mid \psi_{gs}\rangle\langle\psi_{gs} \mid \psi_{VQE}\rangle \\ # =\langle\psi_{VQE} \mid H \mid \psi_{VQE}\rangle+\alpha \mid \langle\psi_{gs} \mid \psi_{VQE}\rangle \mid ^2 # $$ # Since the excited state is orthogonal to the ground state, the second term in the above equation is zero if $ \mid \psi_{VQE}\rangle$ becomes the excited state. In this time, we will use this expectation value to implement the excited-state calculation of hydrogen molecule in VQE. # # Incidentally, If the wanted wavefunction is not the $1^{st}$ excited state but the $N^{th}$ excited state, penalty terms are created by using the wavefunction from the ground state to the ${N-1}^{th}$ excited state. # $$ # H^\prime=H+\sum_{i=gs}^{N-1}\alpha_i \mid \psi_{i}\rangle\langle\psi_{i} \mid # $$ # # ## Preparation # Install the necessary libraries. The Hamiltonian is obtained with OpenFermion. # !pip3 install blueqat openfermion # Import the necessary libraries. The optimization of VQE uses SciPy minimize. from blueqat import Circuit from openfermion.hamiltonians import MolecularData from openfermion.transforms import get_fermion_operator, jordan_wigner, get_sparse_operator import numpy as np from scipy.optimize import minimize # ## Ansatz definition and molecular information acquisition # Define the functions for calculating VQE and obtaining information about the molecule. The calculations are performed using Hardware Efficient Ansatz. The initial wavefunction is set to $\mid1010\rangle$. # The molecular information is obtained using OpenFermion. The basis set is STO-3G. 
# +
def HEA(params, n_qubits, n_depth):
    """Hardware Efficient Ansatz: build and run the variational circuit.

    params   -- flat array of 2*n_qubits*n_depth rotation angles
    n_qubits -- number of qubits in the circuit
    n_depth  -- number of Ry/Rz rotation + CZ entangler layers
    Returns the state vector produced by running the circuit.
    """
    # Wave function initialization |1010>
    circ = Circuit().x[1, 3]
    # Circuit creation: each chunk of 2*n_qubits parameters forms one layer
    params_divided = np.array_split(params, n_depth)
    for params_one_depth in params_divided:
        # First n_qubits angles drive Ry gates, the remaining ones drive Rz gates.
        for i, param in enumerate(params_one_depth):
            if i < n_qubits:
                circ.ry(param)[i]
            else:
                circ.rz(param)[i % n_qubits]
        # Entangle neighbouring qubits with a CZ ladder after each rotation layer.
        for qbit in range(n_qubits):
            if qbit < n_qubits - 1:
                circ.cz[qbit, qbit + 1]
    # Running the circuit
    wf = circ.run(backend="numpy")
    return wf


def get_molecule(length):
    """Load H2 molecular data (STO-3G basis) for a bond length in Angstrom.

    The packaged data files label geometries with either two- or one-decimal
    descriptions, so fall back to the one-decimal form if the first load fails.
    """
    geometry = [('H', (0., 0., 0.)), ('H', (0., 0., length))]
    try:
        description = f'{length:.2f}'
        molecule = MolecularData(geometry, "sto-3g", 1, description=description)
        molecule.load()
    # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        description = f'{length:.1f}'
        molecule = MolecularData(geometry, "sto-3g", 1, description=description)
        molecule.load()
    return molecule
# -

# ## Expected value and cost function
# The expected value is calculated using the wave function obtained by VQE. In the expected value calculation, the term concerning the overlap between the ground state and VQE wave functions is added to the original expected value of the Hamiltonian.

# +
def expect_exited_state(wf, hamiltonian, penalty, wf_gs):
    """Return <wf|H|wf> plus the ground-state overlap penalty.

    wf          -- trial wave function from VQE
    hamiltonian -- matrix (dense or sparse) representation of H
    penalty     -- weight alpha of the overlap term
    wf_gs       -- ground-state wave function to project out
    """
    # Calculating Hamiltonian + penalty: H' = H + alpha |gs><gs|
    hamiltonian_overlap = np.vdot(wf, hamiltonian.dot(wf)).real + penalty * np.abs(np.vdot(wf_gs, wf))**2
    return hamiltonian_overlap


def cost(params, hamiltonian, n_qubits, n_depth, penalty, wf_gs):
    """Cost function for the optimizer: run the ansatz, then score it."""
    wf = HEA(params, n_qubits, n_depth)
    return expect_exited_state(wf, hamiltonian, penalty, wf_gs)
# -

# ## Calculation execution and plotting
# Run VQE on each bond length (this will take a few minutes). This time, the wave function of the ground state is classically obtained, but it can be calculated by VQE. The penalty value is 10 Hartree.
# After the calculation, compare the calculated results of VQE, ground state, and first excited state for energy and bond length.
# +
# For recording bond length, VQE, ground state and 1st excited state results
bond_len_list = []; VQE_energy_list = []; gs_list = []; es_list = []

# Execute the calculation for each bond length
for bond_len in np.arange(0.2, 2.5, 0.1):
    molecule = get_molecule(bond_len)
    # Determination of the number of bits, depth, initial parameter, and penalty
    n_qubits = molecule.n_qubits
    n_depth = 4
    init_params = np.random.rand(2*n_qubits*n_depth)*0.1
    penalty = 10
    # Hamiltonian Definition (fermionic -> qubit via Jordan-Wigner, then sparse matrix)
    hamiltonian_gs = get_sparse_operator(jordan_wigner(get_fermion_operator(molecule.get_molecular_hamiltonian())))
    # Obtaining the eigenvalues and eigenvectors using classical calculation
    eigen_energy, eigen_vec = np.linalg.eigh((hamiltonian_gs).toarray())
    # Obtaining the wave function of the ground state (eigenvector of the lowest eigenvalue)
    wf_gs = eigen_vec.T[np.argmin(eigen_energy)]
    # Optimization run
    result = minimize(cost, x0=init_params, args=(hamiltonian_gs, n_qubits, n_depth, penalty, wf_gs))
    # Recording bond length, VQE, ground state and 1st excited state results
    bond_len_list.append(bond_len)
    VQE_energy_list.append(result.fun)
    gs_list.append(sorted(eigen_energy)[0])
    es_list.append(sorted(eigen_energy)[1])

# Plotting
import matplotlib.pyplot as plt
plt.plot(bond_len_list, gs_list, label="Ground state", color="black")
# Fixed legend label typo: "Exited state" -> "Excited state"
plt.plot(bond_len_list, es_list, label="Excited state", color="blue")
plt.plot(bond_len_list, VQE_energy_list, marker="o", label="VQE", color="red", linestyle='None')
plt.xlabel("Bond length (Angstrom)")
plt.ylabel("Energy (Hartree)")
plt.legend()
# -

# The results are in good agreement with the exact value of the excited states. While the method used here is the Overlap-based method, there are various methods to calculate the excited states. Hence, you can try a method that suits your purpose by referencing the papers, for example, [this review](https://arxiv.org/abs/1808.10402).
tutorial/402_excitedstate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.6 64-bit (''venv'': venv)'
#     language: python
#     name: python386jvsc74a57bd0d6d8159a65db64be1e44edd21871961b8aab1031de333ef6218477a226e97456
# ---

# +
import re
import os
import sys
import pickle  # added: pickle.dump is used in the sbatch cell below but was never imported

import pandas as pd

from pyfunctions.collocate_functions import collocate_analysis
from pyfunctions.dict_functions import dict_keyword_lookup
from pyfunctions.sentiment_functions import sentiment_score_df
# -

# ## Interactive Lab:

# +
# NOTE(review): `is_notebook` is not defined or imported in this file — presumably
# provided elsewhere; verify before running interactively.
notebook = is_notebook()

if notebook == True:
    # Load the keyword list; the first column holds the keywords.
    keywords_list = pd.read_csv('propertywords_cleaned_for_collocates.csv')
    kw_col_name = keywords_list.columns[0]
    keywords_list = keywords_list[kw_col_name].tolist()
    # can't use usecols here bc text is str andn year is int -- dumb
    data = collocate_analysis.import_data('/users/sbuongiorno/rerun_csv_chunk.csv', ',', text_col='text', year_col='year', sub='/users/sbuongiorno/preprocess_propertywords.csv')
    data = dict_keyword_lookup(data, keywords_list)
    data = collocate_analysis.extract_grammatical_collocates(data, keywords_list)
    # add a cli option for return type
    data = sentiment_score_df(data, 'grammatical_collocates')
    export_folder = 'collocates_sentiment'
    if not os.path.exists(export_folder):
        os.mkdir(export_folder)
    data.to_csv(export_folder + '/' + 'collocates_sentiment_scores.csv', index=False)
# -

# ## sbatch:

# Hard-coded property/land-tenure keyword vocabulary used for the batch run.
keywords_list = ['absentee', 'adscription', 'agist', 'allod', 'allotment', 'almoign',
                 'amortized', 'apanage', 'atar', 'attorn', 'blettro', 'bond-land',
                 'bookland', 'bordage', 'bordar', 'boscage', 'burgage', 'burgery',
                 'cablicum', 'cammandery', 'cartbote', 'charter-hold', 'charter-land',
                 'chattel', 'co-feof', 'co-tenancy', 'co-tenant', 'coedcae', 'commonage',
                 'commonties', 'commonty', 'croft', 'curiality', 'demesn', 'depopulation',
                 'dispossess', 'domanial', 'domesday', 'dreng', 'eject', 'emin',
                 'enclosure', 'escuage', 'esplees', 'estover', 'ethel', 'evict', 'feu',
                 'ffridd', 'fiar', 'fief', 'firebote', 'fogg', 'folkland', 'frank-ferm',
                 'frank-marriage', 'frankalmoign', 'franklin', 'frith', 'gavelkind',
                 'gaveller', 'grasanese', 'gwely', 'haybote', 'herbage', 'holdership',
                 'homager', 'householdership', 'hypothec', 'inclosure', 'intercommoning',
                 'joint-tenancy', 'joint-tenant', 'knight-service', 'laen', 'laetic',
                 'land', 'lease', 'lessee', 'lifehold', 'liferent', 'livier', 'lotment',
                 'mail-payer', 'majorat', 'manorialize', 'manurance', 'mese', 'mesn',
                 'metayage', 'metayer', 'mivvy', 'occupance', 'occupancy', 'outland',
                 'pannage', 'parage', 'patrony', 'pendicle', 'perpetual', 'piscary',
                 'ploughbote', 'poffle', 'pollam', 'pre-emptive', 'property', 'rack-rent',
                 'radknight', 'radman', 'rent', 'rere-fief', 'roture', 'roturier',
                 'rundale', 'runrig', 'ryoti', 'ryotwar', 'scattald', 'seisin',
                 'severalty', 'socage', 'sokeman', 'solidate', 'sorning', 'sple',
                 'squat', 'steelbow', 'sub-fief', 'subaltern', 'subfeu', 'sublessee',
                 'subsman', 'subtenancy', 'subtenant', 'subvassal', 'suit-hold',
                 'swinamote', 'tanistic', 'tanistry', 'tariot', 'tenancy', 'tenant',
                 'tenement', 'tenurial', 'termon', 'termor', 'terre-tenant', 'thanage',
                 'thaneland', 'three-life', 'thring', 'turbary', 'udal', 'under-tenancy',
                 'under-tenant', 'underlessee', 'undertenant', 'undervassal', 'unfeued',
                 'unleased', 'unlet', 'urbarial', 'vassal', 'venville', 'vidame',
                 'villan', 'villar', 'villein', 'woodmote', 'zemindar']

# +
# Batch-mode entry point: input CSV path is passed as the first CLI argument.
arg1 = sys.argv[1]

data = collocate_analysis.import_data(arg1, ',', text_col='text', year_col='year', sub='/users/sbuongiorno/preprocess_propertywords.csv')
# NOTE(review): `data_contains` and `sentiment_score` are not imported here; the
# interactive cell above uses `dict_keyword_lookup` and `sentiment_score_df` for
# the same steps — confirm which names are current before running under sbatch.
data = data_contains(data, keywords_list)
data = collocate_analysis.extract_grammatical_collocates(data, keywords_list)
# add a cli option for return type
data = sentiment_score(data, 'grammatical_collocates')

if not data.empty:
    # Context manager closes the pickle file handle; the original opened it
    # with open(...) and never closed it.
    with open(arg1 + '.pickle', 'wb') as handle:
        pickle.dump(data, handle)
else:
    # Plain exit() is intended for interactive sessions; sys.exit() is the
    # correct call in a script.
    sys.exit()
# -

# !jupyter nbconvert --to script collocates_sentiment.ipynb
collocates/collocates-sentiment/collocates_sentiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import print_function, division from keras.datasets import mnist from keras.layers.merge import _Merge from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Embedding from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.models import Sequential, Model from keras.optimizers import RMSprop from functools import partial from tqdm import tqdm_notebook import matplotlib.pyplot as plt from keras.preprocessing.image import load_img import pandas as pd import keras.backend as K import math import numpy as np # + class RandomWeightedAverage(_Merge): """Provides a (random) weighted average between real and generated image samples""" def _merge_function(self, inputs): global batch_size alpha = K.random_uniform((batch_size, 1, 1, 1)) return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) class CWGANGP(): def __init__(self, epochs=100, batch_size=32, sample_interval=50): self.img_rows = 96 self.img_cols = 96 self.channels = 3 self.nclasses = 12 self.img_shape = (self.img_rows, self.img_cols, self.channels) self.latent_dim = 100 self.losslog = [] self.epochs = epochs self.batch_size = batch_size self.sample_interval = sample_interval # Following parameter and optimizer set as recommended in paper self.n_critic = 5 optimizer = RMSprop(lr=0.00005) # Build the generator and critic self.generator = self.build_generator() self.critic = self.build_critic() #------------------------------- # Construct Computational Graph # for the Critic #------------------------------- # Freeze generator's layers while training critic self.generator.trainable = False # Image input (real sample) 
real_img = Input(shape=self.img_shape) # Noise input z_disc = Input(shape=(self.latent_dim,)) # Generate image based of noise (fake sample) and add label to the input label = Input(shape=(1,)) fake_img = self.generator([z_disc, label]) # Discriminator determines validity of the real and fake images fake = self.critic([fake_img, label]) valid = self.critic([real_img, label]) # Construct weighted average between real and fake images interpolated_img = RandomWeightedAverage()([real_img, fake_img]) # Determine validity of weighted sample validity_interpolated = self.critic([interpolated_img, label]) # Use Python partial to provide loss function with additional # 'averaged_samples' argument partial_gp_loss = partial(self.gradient_penalty_loss, averaged_samples=interpolated_img) partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names self.critic_model = Model(inputs=[real_img, label, z_disc], outputs=[valid, fake, validity_interpolated]) self.critic_model.compile(loss=[self.wasserstein_loss, self.wasserstein_loss, partial_gp_loss], optimizer=optimizer, loss_weights=[1, 1, 10]) #------------------------------- # Construct Computational Graph # for Generator #------------------------------- # For the generator we freeze the critic's layers self.critic.trainable = False self.generator.trainable = True # Sampled noise for input to generator z_gen = Input(shape=(100,)) # add label to the input label = Input(shape=(1,)) # Generate images based of noise img = self.generator([z_gen, label]) # Discriminator determines validity valid = self.critic([img, label]) # Defines generator model self.generator_model = Model([z_gen, label], valid) self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer) def gradient_penalty_loss(self, y_true, y_pred, averaged_samples): """ Computes gradient penalty based on prediction and weighted real / fake samples """ gradients = K.gradients(y_pred, averaged_samples)[0] # compute the euclidean norm by squaring 
... gradients_sqr = K.square(gradients) # ... summing over the rows ... gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape))) # ... and sqrt gradient_l2_norm = K.sqrt(gradients_sqr_sum) # compute lambda * (1 - ||grad||)^2 still for each single sample gradient_penalty = K.square(1 - gradient_l2_norm) # return the mean as loss over all the batch samples return K.mean(gradient_penalty) def wasserstein_loss(self, y_true, y_pred): return K.mean(y_true * y_pred) def build_generator(self): model = Sequential() model.add(Dense(128 * 6 * 6, activation="relu", input_dim=self.latent_dim)) model.add(Reshape((6, 6, 128))) model.add(UpSampling2D()) model.add(Conv2D(128, kernel_size=4, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(Activation("relu")) model.add(UpSampling2D()) model.add(Conv2D(128, kernel_size=4, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(Activation("relu")) model.add(UpSampling2D()) model.add(Conv2D(128, kernel_size=4, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(Activation("relu")) model.add(Conv2D(128, kernel_size=4, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(Activation("relu")) model.add(UpSampling2D()) model.add(Conv2D(64, kernel_size=4, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(Activation("relu")) model.add(Conv2D(self.channels, kernel_size=4, padding="same")) model.add(Activation("tanh")) model.summary() noise = Input(shape=(self.latent_dim,)) label = Input(shape=(1,), dtype='int32') label_embedding = Flatten()(Embedding(self.nclasses, self.latent_dim)(label)) model_input = multiply([noise, label_embedding]) img = model(model_input) return Model([noise, label], img) def build_critic(self): model = Sequential() model.add(Dense(7*7*64, input_dim=np.prod(self.img_shape))) model.add(LeakyReLU(alpha=0.2)) model.add(Reshape((7, 7, 64))) model.add(Conv2D(16, kernel_size=3, strides=2, 
padding="same")) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(0.25)) model.add(Conv2D(32, kernel_size=3, strides=2, padding="same")) model.add(ZeroPadding2D(padding=((0,1),(0,1)))) model.add(BatchNormalization(momentum=0.8)) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(0.25)) model.add(Conv2D(64, kernel_size=3, strides=2, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(0.25)) model.add(Conv2D(128, kernel_size=3, strides=1, padding="same")) model.add(BatchNormalization(momentum=0.8)) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(1)) model.summary() img = Input(shape=self.img_shape) label = Input(shape=(1,), dtype='int32') label_embedding = Flatten()(Embedding(self.nclasses, np.prod(self.img_shape))(label)) flat_img = Flatten()(img) model_input = multiply([flat_img, label_embedding]) validity = model(model_input) return Model([img, label], validity) def train(self): self.generator.load_weights('./generator') self.critic.load_weights('./discriminator') # Load the dataset tags = pd.read_csv('./tags_hair_num.csv') X_train = np.array([(np.array(load_img("./faces/{}.jpg".format(ids))) / 127.5 - 1) for ids in tqdm_notebook(tags.idx)]) y_train = tags.hair #(X_train, y_train), (_, _) = mnist.load_data() # Rescale -1 to 1 #X_train = (X_train.astype(np.float32) - 127.5) / 127.5 #X_train = np.expand_dims(X_train, axis=3) # Adversarial ground truths valid = -np.ones((self.batch_size, 1)) fake = np.ones((self.batch_size, 1)) dummy = np.zeros((self.batch_size, 1)) # Dummy gt for gradient penalty for epoch in range(self.epochs): for _ in range(self.n_critic): # --------------------- # Train Discriminator # --------------------- # Select a random batch of images idx = np.random.randint(0, X_train.shape[0], self.batch_size) imgs, labels = X_train[idx], y_train[idx] # Sample generator input noise = np.random.normal(0, 1, (self.batch_size, self.latent_dim)) # Train 
the critic d_loss = self.critic_model.train_on_batch([imgs, labels, noise], [valid, fake, dummy]) # --------------------- # Train Generator # --------------------- sampled_labels = np.random.randint(0, self.nclasses, self.batch_size).reshape(-1, 1) g_loss = self.generator_model.train_on_batch([noise, sampled_labels], valid) # Plot the progress print ("%d [D loss: %f] [G loss: %f]" % (epoch, d_loss[0], g_loss)) self.losslog.append([d_loss[0], g_loss]) # If at save interval => save generated image samples if epoch % self.sample_interval == 0: self.sample_images(epoch) self.generator.save_weights('generator', overwrite=True) self.critic.save_weights('discriminator', overwrite=True) with open('loss.log', 'w') as f: f.writelines('d_loss, g_loss\n') for each in self.losslog: f.writelines('%s, %s\n'%(each[0], each[1])) def sample_images(self, epoch): r, c = 2, 6 noise = np.random.normal(0, 1, (r * c, self.latent_dim)) sampled_labels = np.array(list(range(12))*1).reshape(-1, 1) gen_imgs = self.generator.predict([noise, sampled_labels]) print('gen_imgs',np.shape(gen_imgs)) #gen_imgs = self.combine_images(gen_imgs) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 1 fig, axs = plt.subplots(2*r, c) cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt,:,:,:]) axs[i,j].axis('off') cnt += 1 cnt = 0 for i in range(r): for j in range(c): axs[i+2,j].imshow(gen_imgs[cnt,:,:,0]) axs[i+2,j].axis('off') cnt += 1 fig.savefig("images/mnist_%d.png" % epoch) plt.close() """ plt.imshow(gen_imgs) plt.axis('off') plt.savefig("images/mnist_%d.png" % epoch) plt.close() """ def combine_images(self, generated_images): num = generated_images.shape[0] width = int(math.sqrt(num)) height = int(math.ceil(float(num)/width)) shape = generated_images.shape[1:3] image = np.zeros((height*shape[0], width*shape[1],3), dtype=generated_images.dtype) for index, img in enumerate(generated_images): i = int(index/width) j = index % width image[i*shape[0]:(i+1)*shape[0], 
j*shape[1]:(j+1)*shape[1],:] = img[:, :, :] return image def generate_images(self, label): self.generator.load_weights('./generator') noise = np.random.normal(0, 1, (1, self.latent_dim)) gen_imgs = self.generator.predict([noise, np.array(label).reshape(-1,1)]) # Rescale images 0 - 1 #gen_imgs = 0.5 * gen_imgs + 1 plt.imshow(gen_imgs[0,:,:,:]) plt.axis('off') plt.close() # - epochs = 20000 batch_size = 32 sample_interval = 50 wgan = CWGANGP(epochs, batch_size, sample_interval) wgan.train()
WGAN-gp/cwgan.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evaluation notebook
# Generates a large batch of passwords from a trained character-level model.

# +
# %matplotlib widget
import numpy as np
import math
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import random
import matplotlib.pyplot as plt
import re
from tqdm import tqdm
from tqdm import tnrange, tqdm_notebook
from joblib import Parallel, delayed
import multiprocessing
from datetime import datetime
from torch.utils.data import DataLoader, Dataset
from IPython.display import clear_output
# -

device = torch.device("cuda:0")
device

torch.manual_seed(1010101011)
random.seed(1010101011)


class CharacterLSTM(nn.Module):
    """Character-level model: embedding -> bidirectional GRU -> linear head.

    NOTE: despite the attribute names, the recurrent layer is a GRU.
    """

    def __init__(self, vocabsize, lstmlayers, hiddensize):
        super(CharacterLSTM, self).__init__()
        # LAYERS
        self.embd = nn.Embedding(vocabsize, vocabsize)
        self.LSTM1 = nn.GRU(vocabsize, hiddensize, lstmlayers,
                            batch_first=True, bidirectional=True)
        self.linear_ins = nn.Linear(2 * hiddensize, vocabsize)
        self.drop = nn.Dropout(p=0.1)
        # OUTS
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x, hidden, NLL=True):
        """Return (scores, hidden).

        With NLL=True, returns log-probabilities for the LAST time step only
        (for NLL-loss training); with NLL=False, returns raw logits for all
        time steps.
        """
        # INPUT
        y0 = self.embd(x)
        # RECURRENT LAYER
        y, h1_ = self.LSTM1(y0, hidden)
        y = self.drop(y)
        # LINEAR OUT 1
        y = self.linear_ins(y)
        if NLL:
            y = self.softmax(y[:, -1])
        return y, h1_


# +
# Restore the training-time variables: char->index map, longest word length,
# number of recurrent layers, and hidden size.
zmienne = torch.load("../models/zmienne_modelu.pth")
chartoidx = zmienne[0]
longestword = zmienne[1]
lstms = zmienne[2]
hiddensize = zmienne[3]

chlstm = torch.load("../models/NEWDS_START_bezrelu_lstm_15_hidden_40_cosine1e-8_rmsprop1e-7_50epoch_loss_2.423110246658325.pt",
                    map_location=device).to(device)
# -

# ## Runs fairly slowly; not well optimized

# +
chardict = list(chartoidx.keys())

# Sampling temperature: higher values give more random output
# (less faithful to the training distribution).
temperature = 0.8

hiddens = torch.zeros(2 * lstms, 1, hiddensize).to(device)

# Hoisted out of the loop (was re-called on every iteration): eval mode
# does not change between samples.
chlstm.eval()

# Context manager guarantees the output file is flushed and closed even if
# an exception interrupts the (long) generation loop.
with open("../haslaAI.txt", "a", encoding="utf8") as plikhasel, torch.no_grad():
    for _ in tqdm_notebook(range(140000)):
        # Start from <START>, pad the rest of the sequence with <EMPTY>.
        znaki = [chartoidx["<START>"]]
        bazastart = len(znaki)
        for __ in range(longestword - bazastart + 1):
            znaki.append(chartoidx["<EMPTY>"])

        # Autoregressively fill each position by sampling from the model.
        for item in range(longestword - bazastart + 1):
            x = torch.Tensor(znaki).long().to(device).view(1, -1)
            out, _ = chlstm(x, hiddens, NLL=False)
            zwrot = out[:, -1].view(-1)
            # Multinomial sampling from temperature-scaled logits.
            exped = zwrot.data.div(temperature).exp()
            top_i = torch.multinomial(exped, 1)
            charid = top_i[0]
            znaki[item + bazastart] = charid

        # Decode, skipping ids 0 and 1
        # (assumed to be the <START>/<EMPTY> specials — TODO confirm against chartoidx).
        slowo = ""
        for item in znaki:
            if item != 0 and item != 1:
                slowo += chardict[item]
        plikhasel.write(slowo + "\n")
Notebooks/Ewaluacja.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import scipy.sparse import pandas as pd sparse_matrix = scipy.sparse.load_npz('coo_train.npz') sparse_matrix np.array(sparse_matrix) pd.DataFrame.sparse.from_spmatrix(sparse_matrix)
model/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from sklearn import datasets cancer = datasets.load_breast_cancer() data = cancer["data"] labels = cancer["target"] data.shape #求类间距离 X0 = data[labels==0].T X1 = data[labels==1].T mu0 = np.average(X0,axis=1)[:,np.newaxis] mu1 = np.average(X1,axis=1)[:,np.newaxis] sb = (mu0-mu1) @ (mu0-mu1).T #求类内距离 sigma0 = (X0-mu0) @ (X0-mu0).T sigma1 = (X1-mu1) @ (X1-mu1).T sw = sigma0 + sigma1 X1.shape mat = np.linalg.inv(sw)@sb mat.shape a, b = np.linalg.eig(mat) #去掉虚数部分 a = np.real(a) b = np.real(b) index = np.argsort(-a) #降序排序 p = b[index][:,:2] p.shape data @ p
LDA/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Question 1 : # ## Use the User dataset to perform Random Forest and compare with Decision Tree. import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics import accuracy_score, confusion_matrix from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier data = pd.read_csv("C:/Users/Nisha/Downloads/data.csv - Sheet1.csv") data.head() data.shape data.describe() data.info() data.isnull().sum() data.corr() updated_data = data.drop(["User ID", "Gender"], axis=1) updated_data.head() X = updated_data.iloc[:,:2] y = updated_data.iloc[:,2:] print(X.shape, y.shape) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, random_state=42) print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.fit_transform(X_test) forest_model = RandomForestClassifier(n_estimators=40, criterion="gini",random_state=100) forest_model.fit(X_train, y_train) ypred2 = forest_model.predict(X_test) cm1 = confusion_matrix(y_test, ypred2) cm1 sns.heatmap(cm1, annot=True) accuracy_score(y_test, ypred2) tree_model = DecisionTreeClassifier(criterion="entropy",max_depth=2) tree_model.fit(X_train, y_train) ypred1 = tree_model.predict(X_test) cm = confusion_matrix(y_test, ypred1) cm sns.heatmap(cm, annot=True) accuracy_score(y_test, ypred1)
Assignment_Day_35.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NLTK # # NLTK is a large collection of NLP tools.We won't have time to cover everything, so we'll focus on the most common tools: # # [Existing corpora](#existing)<br> # # [Tokenization](#tokenization)<br> # # [Sentence segmentation](#sent-seg)<br> # # [Collocations](#collocations)<br> # # [Sentiment analysis](#sentiment)<br> # # [Stemming](#stemming)<br> # # [What we didn't cover](#didnt)<br> # # ### Time # - Teaching: 30 minutes # - Exercises: 30 minutes # %matplotlib inline import os import nltk import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() # ## Existing corpora <a id='existing'></a> # # When you downloaded data from nltk using `nltk.download('all')`, you downloaded a whole bunch of great corpora (collections of text documents) and lexical resources (structured information about words). This gives us data to work with already! If you ever want to learn/practice an NLP method, know that just by importing nltk you have access to some data. Here are some corpora and resources that are particularly useful and that we'll use throughout this workshop: # # - ABC # - Brown # - CMU pronunciation dictionary # - Genesis # - Project Gutenberg selections # - Inaugural addresses # - Movie reviews # - Names # - State of the Union addresses # - Stopwords # - Twitter samples # - Universal Declaration of Human Rights # - WordNet # # Full list of data in NLTK [here](http://www.nltk.org/nltk_data/). from nltk.corpus import (abc, brown, cmudict, genesis, gutenberg, inaugural, movie_reviews, names, state_union, stopwords, swadesh, twitter_samples, udhr2, wordnet) # Corpora in NLTK are special objects in NLTK that give you the exact data you want only when you ask for it. 
For example, `brown` is not a string or a list of words. brown # #### Words, raw, sents, fileids # # But if I wanted the Brown corpus as a list of words, I could ask for it like this: brown.words() # Similarly, if I wanted the text of the ABC corpus as a string, I could get it like this: abc.raw()[:100] # If you wanted the sentences of a corpus, you can ask for them like this: movie_reviews.sents() # These corpora are often made up of multiple files. You can see the file names by using the `fileids` method. names.fileids() # To restrict the words, raw or sents to just the words/raw/sents in a particular file, you can list the file name as an optional argument to the `words`/`raw`/`sents` method. male_names = names.words('male.txt') male_names[:10] # #### Unique properties # # Some corpora have unique aspects to them. For example, the CMU pronunciation dictionary lists (some standard) pronunciation of English words. pronunciation = cmudict.dict() pronunciation['hello'] # ### Male vs. female names male_names = names.words('male.txt') female_names = names.words('female.txt') # + def last_letter(name): """Returns the last letter of `name`.""" return name.strip()[-1] def count_letters(names): """Returns the distribution of the last letters in `names`.""" return pd.Series([last_letter(n) for n in names]).value_counts(normalize=True) def letter_distribution(): male_value_counts = count_letters(male_names) female_value_counts = count_letters(female_names) return pd.DataFrame.from_dict({'male': male_value_counts, 'female': female_value_counts}) df = letter_distribution() df.plot(kind='bar', figsize=(16, 8)) plt.legend(prop={'size': 20}) plt.xticks(rotation=0, size=20); # - # ### Challenge # # - Count the lengths of the sentences (i.e. the number of words per sentence) in the `inaugural` corpus. Find the minimum, average and maximum sentence length. # - Visualize the distribution of lengths. 
# - Count the number of times the following words appear in the corpus: "america", "citizen", "united", "senate" and "freedom". # - If you are surprised by anything in the answer to the last question, think about capitalization issues. Make all words lowercase and then perform your counts. # + # your answer goes here # + # your answer goes here # - for word in ["america", "citizen", "united", "senate", "freedom"]: # your answer goes here # + # your answer goes here # - # ## Tokenization <a id='tokenization'></a> # # More often than not, you'll want to analyze some text that doesn't come from NLTK. Perhaps you've scraped a few websites and stored the text in a text file. One of the first steps in processing your text data is tokenization. **Tokenization refers to breaking a running string of text into individual words.** # # I've download the text contents of the Wikipedia page on [Python][1], and saved it in the `data` directory. We can read it in as follows: # # [1]: https://en.wikipedia.org/wiki/Python_(programming_language) DATA_DIR = 'data' python_wiki_fname = os.path.join(DATA_DIR, 'python_wikipedia.txt') with open(python_wiki_fname) as f: text = f.read() # Now, `text` is a string: text[:100] # We can tokenize this string by using nltk's `word_tokenize` function, which returns a list of strings. Each string is either a word or a punctuation symbol. tokens = nltk.word_tokenize(text) tokens[:10] # This uses NLTK's recommended tokenizer. There are plenty of [other tokenizers in NLTK](https://github.com/nltk/nltk/tree/develop/nltk/tokenize), but unless you have good reason to do otherwise it's best to stick to the recommended tokenizer. # # ### Challenge # # I've also downloaded the Wikipedia page for [Berkeley, California][2], and saved the contents as a file called 'berkeley_wikipedia.txt'. Borrowing from the code above, read this file in and tokenize the text. Then find the 10 most frequenct "words". 
After that, if you don't like counting punctuation symbols as "words", then remove all punctuation symbols then find the 10 most frequenct words. # # [2]: https://en.wikipedia.org/wiki/Berkeley,_California # + # your answer goes here # - from string import punctuation # your answer goes here # ### Sentence segmentation <a id='sent-seg'></a> # # Sentence segmentation refers to finding the beginnings and ends of sentences. It's also sometimes called sentence tokenization. Again, there are lots of ways in NLTK to do this, but they have conviently chosen a default method for us. The `nltk.sent_tokenize` function takes in a string and returns a list of strings, where each string is a sentence. sents = nltk.sent_tokenize(text) sents[:2] # ### Collocations <a id='collocations'></a> # # Collocations are words that frequently appear together. They can help us identify key phrases in a text. Collocations can be bigrams (two words), tri-grams (three) or 4-grams. In NLTK, we can use the `BigramCollocationFinder` to find all the bigram collocations in a text. First, we feed in the tokenized text. Here, we'll use the 'learned' portion of the Brown corpus. tokens = brown.words(categories='learned') collocations = nltk.BigramCollocationFinder.from_words(tokens) # Then we decide which words to filter out. I don't want words less than three characters or stopwords. ignored_words = stopwords.words('english') word_filter = lambda w: len(w) < 3 or w.lower() in ignored_words collocations.apply_freq_filter(3) collocations.apply_word_filter(word_filter) # Then we decide what method NLTK should use to decide what makes a collocation special. We'll use the likelihood ratio, which is a good standard choice. scorer = nltk.collocations.BigramAssocMeasures.likelihood_ratio collocations.nbest(scorer, 15) # This was kinda messy. We can wrap all this up into a nicer function that just takes in the tokens and spits out the collocations. 
def my_collocations(tokens): collocations = nltk.BigramCollocationFinder.from_words(tokens) ignored_words = stopwords.words('english') word_filter = lambda w: len(w) < 3 or w.lower() in ignored_words collocations.apply_freq_filter(3) collocations.apply_word_filter(word_filter) scorer = nltk.collocations.BigramAssocMeasures.likelihood_ratio return collocations.nbest(scorer, 15) # And now run `my_collocations` on some new text. my_collocations(state_union.words()) emma = gutenberg.words('austen-emma.txt') my_collocations(emma) my_collocations(genesis.words('english-kjv.txt')) # ### Sentiment analysis <a id='sentiment'></a> # # NLTK has support for sentiment analysis. [Sentiment analysis](https://en.wikipedia.org/wiki/Sentiment_analysis) is the task of extracting [affective states](https://en.wikipedia.org/wiki/Affect_(psychology)) from text. The VADER (Valence Aware Dictionary and sEntiment Reasoner) is a lexicon and rule-based sentiment analysis tool that is specifically attuned to sentiments expressed in social media. There was a [Python package](https://github.com/cjhutto/vaderSentiment) developed for it outside of NLTK, which was then incorporated into NLTK. Loading it through NLTK is often buggy, but we can install the original package if it fails through NLTK. It ends up working the same. from nltk.sentiment import SentimentIntensityAnalyzer try: sentiment = SentimentIntensityAnalyzer() except LookupError: print('Sentiment analysis in NLTK is not working at the moment :(') # If the `SentimentIntensityAnalyzer` isn't loading properly from `nltk`, then you'll have to install the original package using the line below: # !pip install -U vaderSentiment # And then import it like this: from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer # Whether you used NLTK's `SentimentIntensityAnalyzer` or gor it from `vaderSentiment`, the rest of the code is identical. 
sentiment = SentimentIntensityAnalyzer() # Analyzing a sentence for its sentiment returns a dictionary with four items. The `compound` key holds the overall score. sentence = "I hate this sentence so much. I just want it to end. It sucks!" sentiment.polarity_scores(sentence) sentences = ["VADER is smart, handsome, and funny.", # positive sentence example "VADER is not smart, handsome, nor funny.", # negation sentence example "VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted) "VADER is very smart, handsome, and funny.", # booster words handled correctly (sentiment intensity adjusted) "VADER is VERY SMART, handsome, and FUNNY.", # emphasis for ALLCAPS handled "VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity "VADER is VERY SMART, uber handsome, and FRIGGIN FUNNY!!!",# booster words & punctuation make this close to ceiling for score "The book was good.", # positive sentence "The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted) "The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence "At least it isn't a horrible book.", # negated negative sentence with contraction "Make sure you :) or :D today!", # emoticons handled "Today SUX!", # negative slang with capitalization emphasis "Today only kinda sux! But I'll get by, lol" # mixed sentiment example with slang and constrastive conjunction "but" ] scores = [] for sent in sentences: score = sentiment.polarity_scores(sent) scores.append(score) df = pd.DataFrame(scores) df['sentence'] = sentences df # > _The compound score is computed by summing the valence scores of each word in the lexicon, adjusted according to the rules, and then normalized to be between -1 (most extreme negative) and +1 (most extreme positive). 
This is the most useful metric if you want a single unidimensional measure of sentiment for a given sentence. Calling it a 'normalized, weighted composite score' is accurate._ # # > _It is also useful for researchers who would like to set standardized thresholds for classifying sentences as either positive, neutral, or negative._ df['positive_sentiment'] = df['compound'] >= 0.5 df # ### Challenge # # I've read in a bunch of tweets from Trump, and stored them as a list of strings in `tweet_text`. Use the code from above to find the positive sentiment tweets and save them to a list called `positive_tweets`. Do the same for negative tweets, storing them in a variable called `negative_tweets`. What's the proportion of positive to negative tweets? tweets_fname = os.path.join(DATA_DIR, 'trump-tweets.csv') tweets = pd.read_csv(tweets_fname) tweet_text = list(tweets['Tweet_Text'].values) tweet_text[:2] # + # your answer goes here # + # your answer goes here # - # ### Stemming <a id='stemming'></a> # # Stemming and lemmatization both refer to removing morphological affixes on words. For example, if we stem the word "grows", we get "grow". If we stem the word "running", we get "run". We do this because often we care more about the core content of the word (i.e. that it has something to do with growth or running, rather than the fact that it's a third person present tense verb, or progressive participle). # # NLTK provides many algorithms for stemming. For English, a great baseline is the [Porter algorithm](https://tartarus.org/martin/PorterStemmer/), which is in spirit isn't that far from a bunch of regular expressions. from nltk.stem import PorterStemmer stemmer = PorterStemmer() stemmer.stem('grows') stemmer.stem('running') stemmer.stem('leaves') # NLTK has a variety of other stemming algorithms, and lemmatizers. 
from nltk.stem import SnowballStemmer, WordNetLemmatizer snowball = SnowballStemmer('english') lemmatizer = WordNetLemmatizer() print(snowball.stem('running')) print(snowball.stem('eats')) print(snowball.stem('embarassed')) # But watch out for errors: # Thanks to <NAME> for these examples print(snowball.stem('cylinder')) print(snowball.stem('cylindrical')) # And collisions: # Thanks to <NAME> for these examples print(snowball.stem('vacation')) print(snowball.stem('vacate')) print(lemmatizer.lemmatize('vacation')) print(lemmatizer.lemmatize('vacate')) # But why would you want to stem words in the first place? Well, stemming improves performance! # Thanks again to <NAME> for inspiration of this example import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=FutureWarning) from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier # + # Don't worry about following along with this code, although it's great if you do! 
def read_data():
    """Load the airline tweets corpus for sentiment classification.

    Reads ``airline_tweets.csv`` from ``DATA_DIR``, masks hashtags,
    @-handles and URLs with placeholder tokens, and lower-cases the text.

    Returns:
        (text, sentiment): parallel lists of cleaned tweet strings and
        their airline-sentiment labels.
    """
    airline_fname = 'airline_tweets.csv'
    airline_fname = os.path.join(DATA_DIR, airline_fname)
    df = pd.read_csv(airline_fname)
    twitter_handle_pattern = r'@(\w+)'
    hashtag_pattern = r'(?:^|\s)[##]{1}(\w+)'
    url_pattern = r'https?:\/\/.*.com'
    # regex=True keeps these replacements behaving as regular expressions on
    # pandas >= 2.0, where Series.str.replace changed its default to literal
    # (regex=False) matching; without it the patterns would silently stop
    # matching anything.
    df['clean_text'] = (df['text']
                        .str.replace(hashtag_pattern, 'HASHTAG', regex=True)
                        .str.replace(twitter_handle_pattern, 'USER', regex=True)
                        .str.replace(url_pattern, 'URL', regex=True)
                       )
    text = list(df['clean_text'].str.lower())
    sentiment = list(df['airline_sentiment'])
    return text, sentiment

def prepare_stems(sents):
    """Tokenize each sentence, stem every token, and re-join with spaces."""
    snowball = SnowballStemmer('english')
    tokenized_sents = [nltk.word_tokenize(s) for s in sents]
    # (renamed the inner loop variable so it no longer shadows the sentence `s`)
    stemmed_sents = [[snowball.stem(tok) for tok in toks]
                     for toks in tokenized_sents]
    return [' '.join(sent) for sent in stemmed_sents]

def prepare_no_stems(sents):
    """Tokenize each sentence and re-join with spaces (no stemming)."""
    tokenized_sents = [nltk.word_tokenize(s) for s in sents]
    return [' '.join(sent) for sent in tokenized_sents]

def fit_model(X_train, y_train):
    """Fit a small random forest on the training split and return it."""
    model = RandomForestClassifier(n_estimators=10, criterion='gini')
    model.fit(X_train, y_train)
    return model

def test_model(model, X_test, y_test):
    """Print held-out accuracy for a fitted classifier."""
    print('Accuracy: ', model.score(X_test, y_test))

def classify(sents, target):
    """TF-IDF vectorize ``sents``, split train/test, fit and report accuracy."""
    vectorizer = TfidfVectorizer(max_features=5000, binary=True)
    X = vectorizer.fit_transform(sents)
    X_train, X_test, y_train, y_test = train_test_split(X, target, test_size=0.25, random_state=42)
    model = fit_model(X_train, y_train)
    test_model(model, X_test, y_test)

text, sentiment = read_data()
stemmed_text = prepare_stems(text)
unstemmed_text = prepare_no_stems(text)
# -

classify(stemmed_text, sentiment)

classify(unstemmed_text, sentiment)

# ## What we didn't cover <a id='didnt'></a>

# ### Distance
#
# NLTK has some functionality for calculating the distance between two strings. String distance is a measure of how different two strings are. For example:

nltk.edit_distance('hello', 'helo')

nltk.edit_distance('hello', 'hi')

# There are lots of different ways to measure edit distance. This method uses Levenshtein distance, which is the number of insertions, deletions and substitutions required to turn one string into another. Edit distance is useful if you're looking for spelling mistakes.
#
# The [fuzzywuzzy library](https://github.com/seatgeek/fuzzywuzzy) does a great job of edit distance too.

# !pip install -U fuzzywuzzy

'this is a test' == 'this is a test!'

from fuzzywuzzy import fuzz

fuzz.ratio('this is a test', 'this is a test!')

# ### Translation
#
# NLTK offers [some tools](https://github.com/nltk/nltk/tree/develop/nltk/translate) for machine translation. This is great for learning traditional translation models, but is out-dated. If you actually need to translate some text, currently I'd highly recommend using the [Google Translate API](https://cloud.google.com/translate/docs/).

# ### Text classification
#
# NLTK has [support for text classification](https://github.com/nltk/nltk/tree/develop/nltk/classify) using machine learning. However, I'd recommend using [scikit-learn](http://scikit-learn.org/stable/), [TensorFlow](https://www.tensorflow.org/) or [Keras](https://keras.io/) for this now.

# ### Chatbots
#
# These are mainly just for fun. But check out the [source code](https://github.com/nltk/nltk/tree/develop/nltk/chat) if you're ever interested in building a simple chatbot yourself.

# +
# doesn't work so well in a Jupyter notebook because it requires interaction,
# but try it in a terminal or IDE!
#nltk.chat.chatbots()
03-NLTK.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import numpy.ma as ma
import scipy.stats as stat
import random
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.stats as stat

# +
import sys
import os

# Python does not expand "~" in plain path strings, so the original literal
# '~/source/...' pointed at a directory that never exists; expand it explicitly
# before prepending it to the module search path.
sys.path.insert(0, os.path.expanduser('~/source/diff-classifier/diff_classifier'))

from histogram_utils import histogram_by_video

# +
def main():
    """Run histogram_by_video on every file named on the command line."""
    for filename in sys.argv[1:]:
        histogram_by_video(filename)

main()
# -
brain_diffusion/experiments/11_15_17_Gel_Diffusion_Study_3mM/0mM/notebooks/01_17_17_histogram_py_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import csv import pandas as pd import glob import math from pathlib import Path try: code_dir except NameError: code_dir = os.getcwd() hpc_codes_dir = code_dir.replace("codes_local", "codes_hpc") sra_dir = code_dir.replace("codes_local", "1_SRA_Run_Table_simplified") # + #--- Get list of sra files, retrive Run IDs sra_files = glob.glob("%s/*.csv"%sra_dir) srr_list = [] srr_layout = [] for file in sra_files: srr_list += pd.read_csv(file)['Run'].tolist() srr_layout += pd.read_csv(file)['LibraryLayout'].tolist() # - # ## 0_0_fastq-dump # + #--- Write Hpc script out_file = "0_0_fastq-dump" out_dir = hpc_codes_dir + "/" + out_file Path(out_dir).mkdir(parents=True, exist_ok=True) hpc_wkdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq" # Write one script for every 16 files for i in range(0, math.ceil(len(srr_list)/16)): i_outname = "%s/%s-%s.sh"%(out_dir, out_file,i) with open(i_outname, "w") as fout: wfout = csv.writer(fout, delimiter="\t",lineterminator='\n') wfout.writerow(["#!/bin/bash"]) wfout.writerow(["#SBATCH --nodes=1"]) wfout.writerow(["#SBATCH --ntasks=8"]) wfout.writerow(["#SBATCH --mem=16gb"]) wfout.writerow([]) wfout.writerow(["module load sra-tools"]) wfout.writerow(["cd %s"%hpc_wkdir]) wfout.writerow([]) for j in range(i*16,min((i+1) * 16, len(srr_list))): j_srr = srr_list[j] if ((j+1) % 8 != 0) and (j < min((i+1) * 16, len(srr_list))-1): wfout.writerow(["fastq-dump -I --split-files %s &"%j_srr]) elif j < min((i+1) * 16, len(srr_list))-1: wfout.writerow(["fastq-dump -I --split-files %s "%j_srr]) wfout.writerow(["wait"]) else: wfout.writerow(["fastq-dump -I --split-files %s "%j_srr]) # - # ## 1_0_trim_alignment_convert_filterBlacklist # + #--- Write Hpc script out_file = "1_0_trim_alignment_flb" out_dir = hpc_codes_dir + 
"/" + out_file Path(out_dir).mkdir(parents=True, exist_ok=True) #--- HPC setup hpc_wkdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2" hpc_inputdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq" bowtie2_index = "/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38" blacklisted_bed = "/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed" # Write one script for every 4 files for i in range(0, len(srr_list)): i_outname = "%s/%s-%s.sh"%(out_dir, out_file,i) srr_i = srr_list[i] srr_layout_i = srr_layout[i] with open(i_outname, "w") as fout: wfout = csv.writer(fout, delimiter="\t",lineterminator='\n') wfout.writerow(["#!/bin/bash"]) wfout.writerow(["#SBATCH --nodes=1"]) wfout.writerow(["#SBATCH --ntasks=16"]) wfout.writerow(["#SBATCH --mem=8gb"]) wfout.writerow([]) wfout.writerow(["module load fastqc"]) wfout.writerow(["module load trimgalore"]) wfout.writerow(["module load samtools"]) wfout.writerow(["module load bowtie2"]) wfout.writerow(["module load bedtools"]) ### Fastqc untrimmed wfout.writerow([]) wfout.writerow(["### Fastqc for untrimmed files"]) wfout.writerow(["cd %s"%hpc_inputdir]) if srr_layout_i == "PAIRED": wfout.writerow(["fastq_untrimmed_1=%s_1.fastq"%srr_i]) wfout.writerow(["fastq_untrimmed_2=%s_2.fastq"%srr_i]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) wfout.writerow(["fastqc $fastq_untrimmed_2"]) else: wfout.writerow(["fastq_untrimmed_1=%s_1.fastq"%srr_i]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) ### Trim galore wfout.writerow([]) wfout.writerow(["### Trim Galore"]) if srr_layout_i == "PAIRED": wfout.writerow(["trim_galore --paired --length 24 --stringency 3 $fastq_untrimmed_1 $fastq_untrimmed_2"]) wfout.writerow(["trim_fastq_end1=%s/%s_1_val_1.fq"%(hpc_inputdir, srr_i)]) wfout.writerow(["trim_fastq_end2=%s/%s_2_val_2.fq"%(hpc_inputdir, srr_i)]) else: wfout.writerow(["trim_galore --length 24 --stringency 3 $fastq_untrimmed_1"]) wfout.writerow(["trim_fastq_end1=%s/%s_1_trimmed.fq"%(hpc_inputdir, srr_i)]) ### 
Trimmed file fastqc wfout.writerow([]) wfout.writerow(["### Fastqc for trimmed files"]) if srr_layout_i == "PAIRED": wfout.writerow(["fastqc $trim_fastq_end1"]) wfout.writerow(["fastqc $trim_fastq_end2"]) else: wfout.writerow(["fastqc $trim_fastq_end1"]) ### Bowtie2 alignment wfout.writerow([]) wfout.writerow(["### Bowtie2 alignment"]) wfout.writerow(["cd %s"%hpc_wkdir]) wfout.writerow(["bowtie2_index=%s"%bowtie2_index]) wfout.writerow(["sam_name=%s.sam"%srr_i]) if srr_layout_i == "PAIRED": wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -X 1000 --fr -1 $trim_fastq_end1 -2 $trim_fastq_end2 -S $sam_name"]) else: wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -U $trim_fastq_end1 -S $sam_name"]) ### Convert & Sort & Filter wfout.writerow([]) wfout.writerow(["### Convert/sort/filter"]) wfout.writerow(["bam_name=%s.bam"%srr_i]) wfout.writerow(["bam_name_srt=%s_srt.sam"%srr_i]) wfout.writerow(["sam_name_srt_dupr=%s_srt_dupr.sam"%srr_i]) wfout.writerow(["bam_name_srt_dupr=%s_srt_dupr.bam"%srr_i]) wfout.writerow(["flb_bam_name=%s_srt_dupr_flb.bam"%srr_i]) wfout.writerow(["blacklist_bed=%s"%blacklisted_bed]) wfout.writerow([]) wfout.writerow(["samtools view -bS $sam_name > $bam_name"]) wfout.writerow(["samtools sort $bam_name -o $bam_name_srt"]) wfout.writerow(["samtools rmdup -S $bam_name_srt $sam_name_srt_dupr"]) wfout.writerow(["samtools view -bS $sam_name_srt_dupr > $bam_name_srt_dupr"]) wfout.writerow(["bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name"]) ### Delete intermediate files wfout.writerow([]) wfout.writerow(["### Remove intermediate files"]) wfout.writerow(["filesize=$(stat -c%s $flb_bam_name)"]) wfout.writerow(["if (( filesize > 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" rm $sam_name"]) wfout.writerow([" rm $bam_name"]) wfout.writerow([" rm $bam_name_srt"]) wfout.writerow([" rm $sam_name_srt_dupr"]) wfout.writerow([" rm $bam_name_srt_dupr"]) wfout.writerow([" rm $trim_fastq_end1"]) if srr_layout_i == "PAIRED": 
wfout.writerow([" rm $trim_fastq_end2"]) wfout.writerow(["fi"]) # - # ## 1_0_trim_alignment_convert_filterBlacklist # **Redo for failed, try without trimming** failed_alignment = pd.read_csv("/Volumes/Huitian/Projects/\ T_Cell_ChIP/codes_hpc/1_0_trim_alignment_flb_check/failed_alignments.csv") failed_alignment_run = failed_alignment['Run'].tolist() # + #--- Write Hpc script out_file = "1_0_trim_alignment_flb_redo" out_dir = hpc_codes_dir + "/" + out_file Path(out_dir).mkdir(parents=True, exist_ok=True) #--- HPC setup hpc_wkdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2" hpc_inputdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq" bowtie2_index = "/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38" blacklisted_bed = "/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed" srr_list = list(set(srr_list) & set(failed_alignment_run)) # Write one script for every 4 files for i in range(0, len(srr_list)): i_outname = "%s/%s-%s.sh"%(out_dir, out_file,i) srr_i = srr_list[i] srr_layout_i = srr_layout[i] with open(i_outname, "w") as fout: wfout = csv.writer(fout, delimiter="\t",lineterminator='\n') wfout.writerow(["#!/bin/bash"]) wfout.writerow(["#SBATCH --nodes=1"]) wfout.writerow(["#SBATCH --ntasks=16"]) wfout.writerow(["#SBATCH --mem=8gb"]) wfout.writerow([]) wfout.writerow(["module load fastqc"]) wfout.writerow(["module load trimgalore"]) wfout.writerow(["module load samtools"]) wfout.writerow(["module load bowtie2"]) wfout.writerow(["module load bedtools"]) ### Fastqc untrimmed wfout.writerow([]) wfout.writerow(["### Fastqc for untrimmed files"]) if srr_layout_i == "PAIRED": wfout.writerow(["fastq_untrimmed_1=%s/%s_1.fastq"%(hpc_inputdir,srr_i)]) wfout.writerow(["fastq_untrimmed_2=%s/%s_2.fastq"%(hpc_inputdir,srr_i)]) wfout.writerow(["#fastqc $fastq_untrimmed_1"]) wfout.writerow(["#fastqc $fastq_untrimmed_2"]) else: wfout.writerow(["fastq_untrimmed_1=%s/%s_1.fastq"%(hpc_inputdir,srr_i)]) wfout.writerow(["#fastqc $fastq_untrimmed_1"]) 
### Bowtie2 alignment wfout.writerow([]) wfout.writerow(["### Bowtie2 alignment"]) wfout.writerow(["cd %s"%hpc_wkdir]) wfout.writerow(["bowtie2_index=%s"%bowtie2_index]) wfout.writerow(["sam_name=%s.sam"%srr_i]) if srr_layout_i == "PAIRED": wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -X 1000 --fr -1 $fastq_untrimmed_1 -2 $fastq_untrimmed_2 -S $sam_name"]) else: wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -U $fastq_untrimmed_1 -S $sam_name"]) ### Convert & Sort & Filter wfout.writerow([]) wfout.writerow(["### Convert/sort/filter"]) wfout.writerow(["bam_name=%s.bam"%srr_i]) wfout.writerow(["bam_name_srt=%s_srt.sam"%srr_i]) wfout.writerow(["sam_name_srt_dupr=%s_srt_dupr.sam"%srr_i]) wfout.writerow(["bam_name_srt_dupr=%s_srt_dupr.bam"%srr_i]) wfout.writerow(["flb_bam_name=%s_srt_dupr_flb.bam"%srr_i]) wfout.writerow(["blacklist_bed=%s"%blacklisted_bed]) wfout.writerow([]) wfout.writerow(["samtools view -bS $sam_name > $bam_name"]) wfout.writerow(["samtools sort $bam_name -o $bam_name_srt"]) wfout.writerow(["samtools rmdup -S $bam_name_srt $sam_name_srt_dupr"]) wfout.writerow(["samtools view -bS $sam_name_srt_dupr > $bam_name_srt_dupr"]) wfout.writerow(["bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name"]) ### Delete intermediate files wfout.writerow([]) wfout.writerow(["### Remove intermediate files"]) wfout.writerow(["filesize_preFlb=$(stat -c%s $bam_name_srt_dupr)"]) wfout.writerow(["filesize=$(stat -c%s $flb_bam_name)"]) wfout.writerow(["echo $filesize_preFlb $filesize >> %s_bamSizes_pre_post_flb.txt"%srr_i]) wfout.writerow(["if (( filesize > 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" rm $sam_name"]) wfout.writerow([" rm $bam_name"]) wfout.writerow([" rm $bam_name_srt"]) wfout.writerow([" rm $sam_name_srt_dupr"]) wfout.writerow([" rm $bam_name_srt_dupr"]) wfout.writerow(["fi"]) # - # ## 1_0_trim_alignment_convert_filterBlacklist # **Redo for solid sequences, try with / without trimming** #--- Get list of sra 
files, retrive Run IDs sra_dir = "/Volumes/Huitian/Projects/T_Cell_ChIP/202012_ChIP/1_SRA_Run_Table_simplified" solid_file = sra_dir + "/2015_IMMUNITY_Martinez_simplified.csv" srr_list = pd.read_csv(solid_file)['Run'].tolist() srr_layout = pd.read_csv(solid_file)['LibraryLayout'].tolist() # + #--- Write Hpc script out_file = "1_0_trim_alignment_flb_solid" out_dir = hpc_codes_dir + "/" + out_file Path(out_dir).mkdir(parents=True, exist_ok=True) #--- HPC setup hpc_wkdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2" hpc_inputdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq" bowtie2_index = "/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38" blacklisted_bed = "/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed" # Write one script for every 4 files for i in range(0, len(srr_list)): i_outname = "%s/%s-%s.sh"%(out_dir, out_file,i) srr_i = srr_list[i] srr_layout_i = srr_layout[i] with open(i_outname, "w") as fout: wfout = csv.writer(fout, delimiter="\t",lineterminator='\n') wfout.writerow(["#!/bin/bash"]) wfout.writerow(["#SBATCH --nodes=1"]) wfout.writerow(["#SBATCH --ntasks=16"]) wfout.writerow(["#SBATCH --mem=8gb"]) wfout.writerow([]) wfout.writerow(["module load fastqc"]) wfout.writerow(["module load trimgalore"]) wfout.writerow(["module load samtools"]) wfout.writerow(["module load bowtie2"]) wfout.writerow(["module load bedtools"]) ### Fastqc untrimmed wfout.writerow([]) wfout.writerow(["### Fastqc for untrimmed files"]) wfout.writerow(["cd %s"%hpc_inputdir]) if srr_layout_i == "PAIRED": wfout.writerow(["fastq_untrimmed_1=%s/%s_1_solidCvt.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastq_untrimmed_2=%s/%s_2_solidCvt.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) wfout.writerow(["fastqc $fastq_untrimmed_2"]) else: wfout.writerow(["fastq_untrimmed_1=%s/%s_1_solidCvt.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) ### Trim galore wfout.writerow([]) 
wfout.writerow(["### Trim Galore"]) if srr_layout_i == "PAIRED": wfout.writerow(["trim_galore --paired --length 24 --stringency 3 $fastq_untrimmed_1 $fastq_untrimmed_2"]) wfout.writerow(["trim_fastq_end1=%s/%s_1_solidCvt_val_1.fq"%(hpc_inputdir, srr_i)]) wfout.writerow(["trim_fastq_end2=%s/%s_2_solidCvt_val_2.fq"%(hpc_inputdir, srr_i)]) else: wfout.writerow(["trim_galore --length 24 --stringency 3 $fastq_untrimmed_1"]) wfout.writerow(["trim_fastq_end1=%s/%s_1_solidCvt_trimmed.fq"%(hpc_inputdir, srr_i)]) ### Trimmed file fastqc wfout.writerow([]) wfout.writerow(["### Fastqc for trimmed files"]) if srr_layout_i == "PAIRED": wfout.writerow(["fastqc $trim_fastq_end1"]) wfout.writerow(["fastqc $trim_fastq_end2"]) else: wfout.writerow(["fastqc $trim_fastq_end1"]) ### Test if trimming is successful wfout.writerow([]) wfout.writerow(["### Test if trimming is successful"]) if srr_layout_i == "PAIRED": wfout.writerow(["filesize=$(stat -c%s $trim_fastq_end2)"]) else: wfout.writerow(["filesize=$(stat -c%s $trim_fastq_end1)"]) # If trimming did not succeed, use the original files wfout.writerow(["if (( filesize < 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" trim_fastq_end1=$fastq_untrimmed_1"]) if srr_layout_i == "PAIRED": wfout.writerow([" trim_fastq_end2=$fastq_untrimmed_2"]) wfout.writerow(["fi"]) ### Bowtie2 alignment wfout.writerow([]) wfout.writerow(["### Bowtie2 alignment"]) wfout.writerow(["cd %s"%hpc_wkdir]) wfout.writerow(["bowtie2_index=%s"%bowtie2_index]) wfout.writerow(["sam_name=%s.sam"%srr_i]) if srr_layout_i == "PAIRED": wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -X 1000 --fr -1 $trim_fastq_end1 -2 $trim_fastq_end2 -S $sam_name"]) else: wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -U $trim_fastq_end1 -S $sam_name"]) ### Convert & Sort & Filter wfout.writerow([]) wfout.writerow(["### Convert/sort/filter"]) wfout.writerow(["bam_name=%s.bam"%srr_i]) wfout.writerow(["bam_name_srt=%s_srt.sam"%srr_i]) 
wfout.writerow(["sam_name_srt_dupr=%s_srt_dupr.sam"%srr_i]) wfout.writerow(["bam_name_srt_dupr=%s_srt_dupr.bam"%srr_i]) wfout.writerow(["flb_bam_name=%s_srt_dupr_flb.bam"%srr_i]) wfout.writerow(["blacklist_bed=%s"%blacklisted_bed]) wfout.writerow([]) wfout.writerow(["samtools view -bS $sam_name > $bam_name"]) wfout.writerow(["samtools sort $bam_name -o $bam_name_srt"]) wfout.writerow(["samtools rmdup -S $bam_name_srt $sam_name_srt_dupr"]) wfout.writerow(["samtools view -bS $sam_name_srt_dupr > $bam_name_srt_dupr"]) wfout.writerow(["bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name"]) ### Delete intermediate files wfout.writerow([]) wfout.writerow(["### Remove intermediate files"]) wfout.writerow(["filesize_preFlb=$(stat -c%s $bam_name_srt_dupr)"]) wfout.writerow(["filesize=$(stat -c%s $flb_bam_name)"]) wfout.writerow(["echo $filesize_preFlb $filesize >> %s_bamSizes_pre_post_flb.txt"%srr_i]) wfout.writerow(["if (( filesize > 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" rm $sam_name"]) wfout.writerow([" rm $bam_name"]) wfout.writerow([" rm $bam_name_srt"]) wfout.writerow([" rm $sam_name_srt_dupr"]) wfout.writerow([" rm $bam_name_srt_dupr"]) wfout.writerow([" rm $trim_fastq_end1"]) if srr_layout_i == "PAIRED": wfout.writerow([" rm $trim_fastq_end2"]) wfout.writerow(["fi"]) # - # ## 1_0_trim_alignment_convert_filterBlacklist # **Redo for studies 2016_SCIENCE_Mackay & 2019_IMMUNITY_Veeken, try with / without trimming** # + #--- Get list of sra files, retrive Run IDs sra_dir = "/Volumes/Huitian/Projects/T_Cell_ChIP/202012_ChIP/1_SRA_Run_Table_simplified" sra_files = [sra_dir + "/2016_SCIENCE_Mackay_simplified.csv", sra_dir + "/2019_IMMUNITY_Veeken_simplified.csv",] srr_list = [] srr_layout = [] for file in sra_files: srr_list += pd.read_csv(file)['Run'].tolist() srr_layout += pd.read_csv(file)['LibraryLayout'].tolist() # + #--- Write Hpc script out_file = "1_0_trim_alignment_flb_redo_2" out_dir = hpc_codes_dir + "/" + out_file 
Path(out_dir).mkdir(parents=True, exist_ok=True) #--- HPC setup hpc_wkdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2" hpc_inputdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq" bowtie2_index = "/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38" blacklisted_bed = "/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed" # Write one script for every 4 files for i in range(0, len(srr_list)): i_outname = "%s/%s-%s.sh"%(out_dir, out_file,i) srr_i = srr_list[i] srr_layout_i = srr_layout[i] with open(i_outname, "w") as fout: wfout = csv.writer(fout, delimiter="\t",lineterminator='\n') wfout.writerow(["#!/bin/bash"]) wfout.writerow(["#SBATCH --nodes=1"]) wfout.writerow(["#SBATCH --ntasks=16"]) wfout.writerow(["#SBATCH --mem=8gb"]) wfout.writerow([]) wfout.writerow(["module load fastqc"]) wfout.writerow(["module load trimgalore"]) wfout.writerow(["module load samtools"]) wfout.writerow(["module load bowtie2"]) wfout.writerow(["module load bedtools"]) ### Fastqc untrimmed wfout.writerow([]) wfout.writerow(["### Fastqc for untrimmed files"]) wfout.writerow(["cd %s"%hpc_inputdir]) if srr_layout_i == "PAIRED": wfout.writerow(["fastq_untrimmed_1=%s/%s_1.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastq_untrimmed_2=%s/%s_2.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) wfout.writerow(["fastqc $fastq_untrimmed_2"]) else: wfout.writerow(["fastq_untrimmed_1=%s/%s_1.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) ### Trim galore wfout.writerow([]) wfout.writerow(["### Trim Galore"]) if srr_layout_i == "PAIRED": wfout.writerow(["trim_galore --paired --length 24 --stringency 3 $fastq_untrimmed_1 $fastq_untrimmed_2"]) wfout.writerow(["trim_fastq_end1=%s/%s_1_val_1.fq"%(hpc_inputdir, srr_i)]) wfout.writerow(["trim_fastq_end2=%s/%s_2_val_2.fq"%(hpc_inputdir, srr_i)]) else: wfout.writerow(["trim_galore --length 24 --stringency 3 $fastq_untrimmed_1"]) 
wfout.writerow(["trim_fastq_end1=%s/%s_1_trimmed.fq"%(hpc_inputdir, srr_i)]) ### Trimmed file fastqc wfout.writerow([]) wfout.writerow(["### Fastqc for trimmed files"]) if srr_layout_i == "PAIRED": wfout.writerow(["fastqc $trim_fastq_end1"]) wfout.writerow(["fastqc $trim_fastq_end2"]) else: wfout.writerow(["fastqc $trim_fastq_end1"]) ### Test if trimming is successful wfout.writerow([]) wfout.writerow(["### Test if trimming is successful"]) if srr_layout_i == "PAIRED": wfout.writerow(["filesize=$(stat -c%s $trim_fastq_end2)"]) else: wfout.writerow(["filesize=$(stat -c%s $trim_fastq_end1)"]) # If trimming did not succeed, use the original files wfout.writerow(["if (( filesize < 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" trim_fastq_end1=$fastq_untrimmed_1"]) if srr_layout_i == "PAIRED": wfout.writerow([" trim_fastq_end2=$fastq_untrimmed_2"]) wfout.writerow(["fi"]) ### Bowtie2 alignment wfout.writerow([]) wfout.writerow(["### Bowtie2 alignment"]) wfout.writerow(["cd %s"%hpc_wkdir]) wfout.writerow(["bowtie2_index=%s"%bowtie2_index]) wfout.writerow(["sam_name=%s.sam"%srr_i]) if srr_layout_i == "PAIRED": wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -X 1000 --fr -1 $trim_fastq_end1 -2 $trim_fastq_end2 -S $sam_name"]) else: wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -U $trim_fastq_end1 -S $sam_name"]) ### Convert & Sort & Filter wfout.writerow([]) wfout.writerow(["### Convert/sort/filter"]) wfout.writerow(["bam_name=%s.bam"%srr_i]) wfout.writerow(["bam_name_srt=%s_srt.sam"%srr_i]) wfout.writerow(["sam_name_srt_dupr=%s_srt_dupr.sam"%srr_i]) wfout.writerow(["bam_name_srt_dupr=%s_srt_dupr.bam"%srr_i]) wfout.writerow(["flb_bam_name=%s_srt_dupr_flb.bam"%srr_i]) wfout.writerow(["blacklist_bed=%s"%blacklisted_bed]) wfout.writerow([]) wfout.writerow(["samtools view -bS $sam_name > $bam_name"]) wfout.writerow(["samtools sort $bam_name -o $bam_name_srt"]) wfout.writerow(["samtools rmdup -S $bam_name_srt $sam_name_srt_dupr"]) wfout.writerow(["samtools view -bS 
$sam_name_srt_dupr > $bam_name_srt_dupr"]) wfout.writerow(["bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name"]) ### Delete intermediate files wfout.writerow([]) wfout.writerow(["### Remove intermediate files"]) wfout.writerow(["filesize_preFlb=$(stat -c%s $bam_name_srt_dupr)"]) wfout.writerow(["filesize=$(stat -c%s $flb_bam_name)"]) wfout.writerow(["echo $filesize_preFlb $filesize >> %s_bamSizes_pre_post_flb.txt"%srr_i]) wfout.writerow(["if (( filesize > 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" rm $sam_name"]) wfout.writerow([" rm $bam_name"]) wfout.writerow([" rm $bam_name_srt"]) wfout.writerow([" rm $sam_name_srt_dupr"]) wfout.writerow([" rm $bam_name_srt_dupr"]) wfout.writerow([" rm $trim_fastq_end1"]) if srr_layout_i == "PAIRED": wfout.writerow([" rm $trim_fastq_end2"]) wfout.writerow(["fi"]) # - # ## 1_0_trim_alignment_convert_filterBlacklist # **Redo for Goldrathlab Brd4 study (no _1 in fastq name)** # + #--- Get list of sra files, retrive Run IDs sra_files = ['/media/pipkin/Rocket2/T_Cell_ChIP/202012_ChIP/1_SRA_Run_Table_simplified/2021_GoldrathLab_Brd4_simplified.csv'] srr_list = [] srr_layout = [] for file in sra_files: srr_list += pd.read_csv(file)['Run'].tolist() srr_layout += pd.read_csv(file)['LibraryLayout'].tolist() # + #--- Write Hpc script out_file = "1_0_trim_alignment_flb_redo_brd4" out_dir = hpc_codes_dir + "/" + out_file Path(out_dir).mkdir(parents=True, exist_ok=True) #--- HPC setup hpc_wkdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2" hpc_inputdir = "/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq" bowtie2_index = "/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38" blacklisted_bed = "/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed" # Write one script for every 4 files for i in range(0, len(srr_list)): i_outname = "%s/%s-%s.sh"%(out_dir, out_file,i) srr_i = srr_list[i] srr_layout_i = srr_layout[i] with open(i_outname, "w") as fout: wfout = csv.writer(fout, 
delimiter="\t",lineterminator='\n') wfout.writerow(["#!/bin/bash"]) wfout.writerow(["#SBATCH --nodes=1"]) wfout.writerow(["#SBATCH --ntasks=16"]) wfout.writerow(["#SBATCH --mem=8gb"]) wfout.writerow([]) wfout.writerow(["module load fastqc"]) wfout.writerow(["module load trimgalore"]) wfout.writerow(["module load samtools"]) wfout.writerow(["module load bowtie2"]) wfout.writerow(["module load bedtools"]) ### Fastqc untrimmed wfout.writerow([]) wfout.writerow(["### Fastqc for untrimmed files"]) wfout.writerow(["cd %s"%hpc_inputdir]) if srr_layout_i == "PAIRED": wfout.writerow(["fastq_untrimmed_1=%s/%s_1.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastq_untrimmed_2=%s/%s_2.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) wfout.writerow(["fastqc $fastq_untrimmed_2"]) else: wfout.writerow(["fastq_untrimmed_1=%s/%s.fastq"%(hpc_inputdir, srr_i)]) wfout.writerow(["fastqc $fastq_untrimmed_1"]) ### Trim galore wfout.writerow([]) wfout.writerow(["### Trim Galore"]) if srr_layout_i == "PAIRED": wfout.writerow(["trim_galore --paired --length 24 --stringency 3 $fastq_untrimmed_1 $fastq_untrimmed_2"]) wfout.writerow(["trim_fastq_end1=%s/%s_1_val_1.fq"%(hpc_inputdir, srr_i)]) wfout.writerow(["trim_fastq_end2=%s/%s_2_val_2.fq"%(hpc_inputdir, srr_i)]) else: wfout.writerow(["trim_galore --length 24 --stringency 3 $fastq_untrimmed_1"]) wfout.writerow(["trim_fastq_end1=%s/%s_trimmed.fq"%(hpc_inputdir, srr_i)]) ### Trimmed file fastqc wfout.writerow([]) wfout.writerow(["### Fastqc for trimmed files"]) if srr_layout_i == "PAIRED": wfout.writerow(["fastqc $trim_fastq_end1"]) wfout.writerow(["fastqc $trim_fastq_end2"]) else: wfout.writerow(["fastqc $trim_fastq_end1"]) ### Test if trimming is successful wfout.writerow([]) wfout.writerow(["### Test if trimming is successful"]) if srr_layout_i == "PAIRED": wfout.writerow(["filesize=$(stat -c%s $trim_fastq_end2)"]) else: wfout.writerow(["filesize=$(stat -c%s $trim_fastq_end1)"]) # If trimming did not succeed, 
use the original files wfout.writerow(["if (( filesize < 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" trim_fastq_end1=$fastq_untrimmed_1"]) if srr_layout_i == "PAIRED": wfout.writerow([" trim_fastq_end2=$fastq_untrimmed_2"]) wfout.writerow(["fi"]) ### Bowtie2 alignment wfout.writerow([]) wfout.writerow(["### Bowtie2 alignment"]) wfout.writerow(["cd %s"%hpc_wkdir]) wfout.writerow(["bowtie2_index=%s"%bowtie2_index]) wfout.writerow(["sam_name=%s.sam"%srr_i]) if srr_layout_i == "PAIRED": wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -X 1000 --fr -1 $trim_fastq_end1 -2 $trim_fastq_end2 -S $sam_name"]) else: wfout.writerow(["bowtie2 -p 16 -x $bowtie2_index -U $trim_fastq_end1 -S $sam_name"]) ### Convert & Sort & Filter wfout.writerow([]) wfout.writerow(["### Convert/sort/filter"]) wfout.writerow(["bam_name=%s.bam"%srr_i]) wfout.writerow(["bam_name_srt=%s_srt.sam"%srr_i]) wfout.writerow(["sam_name_srt_dupr=%s_srt_dupr.sam"%srr_i]) wfout.writerow(["bam_name_srt_dupr=%s_srt_dupr.bam"%srr_i]) wfout.writerow(["flb_bam_name=%s_srt_dupr_flb.bam"%srr_i]) wfout.writerow(["blacklist_bed=%s"%blacklisted_bed]) wfout.writerow([]) wfout.writerow(["samtools view -bS $sam_name > $bam_name"]) wfout.writerow(["samtools sort $bam_name -o $bam_name_srt"]) wfout.writerow(["samtools rmdup -S $bam_name_srt $sam_name_srt_dupr"]) wfout.writerow(["samtools view -bS $sam_name_srt_dupr > $bam_name_srt_dupr"]) wfout.writerow(["bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name"]) ### Delete intermediate files wfout.writerow([]) wfout.writerow(["### Remove intermediate files"]) wfout.writerow(["filesize_preFlb=$(stat -c%s $bam_name_srt_dupr)"]) wfout.writerow(["filesize=$(stat -c%s $flb_bam_name)"]) wfout.writerow(["echo $filesize_preFlb $filesize >> %s_bamSizes_pre_post_flb.txt"%srr_i]) wfout.writerow(["if (( filesize > 10000 )) "]) wfout.writerow(["then"]) wfout.writerow([" rm $sam_name"]) wfout.writerow([" rm $bam_name"]) wfout.writerow([" rm 
$bam_name_srt"]) wfout.writerow([" rm $sam_name_srt_dupr"]) wfout.writerow([" rm $bam_name_srt_dupr"]) wfout.writerow([" rm $trim_fastq_end1"]) if srr_layout_i == "PAIRED": wfout.writerow([" rm $trim_fastq_end2"]) wfout.writerow(["fi"]) # -
202012_ChIP/codes_local/0_hpc_process_code_generation_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Tutorial-IllinoisGRMHD: apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C # # ## Authors: <NAME> & <NAME> # # <font color='red'>**This module is currently under development**</font> # # ## In this tutorial module we explain two major functions within IllinoisGRMHD that are used to ensure that the results obtained throughout the simulation are Physically sound. # # ### Required and recommended citations: # # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)). # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)). # * **(Recommended)** <NAME>., <NAME>., <NAME>. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)). # # If using the version of `IllinoisGRMHD` with piecewise polytropic *or* tabulated (coming soon!) EOS support, then the following citation is also required: # # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>., *IllinoisGRMHD github repository* (2019). 
Source Code URL: https://github.com/zachetienne/nrpytutorial/tree/master/IllinoisGRMHD/. # ### Dependencies # # The files generated in this tutorial notebook depend on the following file: # # * `IllinoisGRMHD_EoS_lowlevel_functs.C` \[[**tutorial**](Tutorial-IllinoisGRMHD__EoS_lowlevel_functs.ipynb)\] # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This module is organized as follows # # 0. [Step 0](#src_dir): **Source directory creation** # 1. [Step 1](#introduction): **Introduction** # 1. [Step 2](#apply_tau_floor): **The `apply_tau_floor` function** # 1. [Step 2.a](#positive_definiteness_of_the_metric): *Positive-definiteness of the metric* # 1. [Step 2.b](#barbi_barb_i_barb2_and_barb): *Computing $\bar{B}^{i}$, $\bar{B}_{i}$, $\bar{B}^{2}$, and $\bar{B}$* # 1. [Step 2.c](#barbdots_hatbarbdots_and_sdots): *Computing $\bar{B}\cdot\tilde{S}$, $\hat{\bar{B}}\cdot\tilde{S}$, and $\tilde{S}^{2}$* # 1. [Step 2.d](#modifying_tau): *Modifying $\tilde{\tau}$* # 1. [Step 2.d.i](#wm_sm_and_wmin): $W_{m}$, $\tilde{S}_{m}$, and $W_{\min}$ # 1. [Step 2.d.ii](#tau_min): $\tau_{\min}$ # 1. [Step 2.e](#modifying_tilde_s_i): *Modifying $\tilde{S}_{i}$* # 1. [Step 3](#enforce_pressure_floor_ceiling) **The `enforce_pressure_floor_ceiling` function** # 1. [Step 4](#enforce_limits_on_primitives_and_recompute_conservs): **The `IllinoisGRMHD_enforce_limits_on_primitives_and_recompute_conservs` function** # 1. [Step 5](#code_validation): **Code validation** # 1. [Step 6](#latex_pdf_output): **Output this notebook to $\LaTeX$-formatted PDF file** # <a id='src_dir'></a> # # # Step 0: Source directory creation \[Back to [top](#toc)\] # $$\label{src_dir}$$ # # We will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet. 
# + # Step 0: Creation of the IllinoisGRMHD source directory # Step 0a: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step 0b: Load up cmdline_helper and create the directory import cmdline_helper as cmd outdir = os.path.join("..","src") cmd.mkdir(outdir) # - # <a id='introduction'></a> # # # Step 1: Introduction \[Back to [top](#toc)\] # $$\label{introduction}$$ # # In this tutorial notebook we will discuss how we adjust our conservative variables given that our primitive variables are in the physical range. # # For a given set of primitive variables $\left\{\rho_{b},P,v^i,B^i\right\}$ in the physical range (i.e. $\rho_{b}\geq0$, $P\geq0$ and $\epsilon\geq0$), the corresponding conservative variables $\left\{\rho_{\star},\tilde{\tau},\tilde{S}_{i},\tilde{B}^{i}\right\}$ must satisfy certain inequalities (see appendix A of [Etienne *et al.* (2012)](https://arxiv.org/pdf/1112.0568.pdf) for the full discussion). Here we provide a practical recipe to impose these inequalities approximately to reduce inversion failures, which occur mainly in regions with very low density in the artificial “atmosphere” or inside the BH horizon where high accuracy is difficult to maintain but not crucial. # <a id='apply_tau_floor'></a> # # # Step 2: The `apply_tau_floor` function \[Back to [top](#toc)\] # $$\label{apply_tau_floor}$$ # # # Here we will start the `apply_tau_floor()` and declare a couple of function prototypes. 
# + # %%writefile $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C void eigenvalues_3by3_real_sym_matrix(CCTK_REAL & lam1, CCTK_REAL & lam2, CCTK_REAL & lam3, CCTK_REAL M11, CCTK_REAL M12, CCTK_REAL M13, CCTK_REAL M22, CCTK_REAL M23, CCTK_REAL M33); static inline void enforce_pressure_floor_ceiling(output_stats &stats,CCTK_REAL kpoly,CCTK_REAL P_cold,CCTK_REAL Psi6,const CCTK_REAL Psi6threshold,CCTK_REAL rho_b,const CCTK_REAL rhobatm, CCTK_REAL &P); static inline int apply_tau_floor(const CCTK_REAL tau_atm,const CCTK_REAL rho_b_atm,const CCTK_REAL Psi6threshold,CCTK_REAL *PRIMS,CCTK_REAL *ADM_3METRIC,output_stats &stats,eos_struct &eos, CCTK_REAL *CONSERVS) { # - # <a id='positive_definiteness_of_the_metric'></a> # # ## Step 2.a: Positive-definiteness of the metric \[Back to [top](#toc)\] # $$\label{positive_definiteness_of_the_metric}$$ # # We start by verifying if the metric $\gamma_{ij}$ is positive definite. Notice that although we expect this to always be true, the metric may lose its positive-definiteness due to numerical error during the evolution, especially in the region deep inside the BH, near the “puncture”. # # To verify whether or not the [metric is positive definite, we analyse its eigenvalues](https://en.wikipedia.org/wiki/Definiteness_of_a_matrix#Eigenvalues). If the metric is *not* positive definite, we reset $\gamma_{ij}\to\psi^{4}\tilde{\gamma}_{ij}$, where $\tilde{\gamma}_{ij}$ corresponds to the 3D flat metric tensor. # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C //First apply the rho_star floor: //rho_star = alpha u0 Psi6 rho_b, alpha u0 > 1, so if rho_star < Psi6 rho_b_atm, then we are GUARANTEED that we can reset to atmosphere. 
//if(CONSERVS[RHOSTAR] < 1e4*ADM_3METRIC[SQRTGAMMA]*rho_b_atm) { //if(CONSERVS[RHOSTAR] < 2*ADM_3METRIC[SQRTGAMMA]*rho_b_atm) { CCTK_REAL CONF_METRIC[6]; CCTK_REAL Psi2 = cbrt(ADM_3METRIC[SQRTGAMMA]); CCTK_REAL Psi4 = Psi2*Psi2; CCTK_REAL Psim4 = 1.0 / Psi4; CONF_METRIC[CM_GAMMATILDEXX] = ADM_3METRIC[GAMMAUPXX]*Psim4; CONF_METRIC[CM_GAMMATILDEXY] = ADM_3METRIC[GAMMAUPXY]*Psim4; CONF_METRIC[CM_GAMMATILDEXZ] = ADM_3METRIC[GAMMAUPXZ]*Psim4; CONF_METRIC[CM_GAMMATILDEYY] = ADM_3METRIC[GAMMAUPYY]*Psim4; CONF_METRIC[CM_GAMMATILDEYZ] = ADM_3METRIC[GAMMAUPYZ]*Psim4; CONF_METRIC[CM_GAMMATILDEZZ] = ADM_3METRIC[GAMMAUPZZ]*Psim4; CCTK_REAL lam1,lam2,lam3; eigenvalues_3by3_real_sym_matrix(lam1, lam2, lam3,CONF_METRIC[CM_GAMMATILDEXX], CONF_METRIC[CM_GAMMATILDEXY], CONF_METRIC[CM_GAMMATILDEXZ], CONF_METRIC[CM_GAMMATILDEYY], CONF_METRIC[CM_GAMMATILDEYZ], CONF_METRIC[CM_GAMMATILDEZZ]); if (lam1 < 0.0 || lam2 < 0.0 || lam3 < 0.0) { // Metric is not positive-definite, reset the metric to be conformally-flat. 
CCTK_REAL Psi4 = cbrt(ADM_3METRIC[SQRTGAMMA]*ADM_3METRIC[SQRTGAMMA]); CONF_METRIC[CM_GAMMATILDEXX] = 1.0; CONF_METRIC[CM_GAMMATILDEXY] = 0.0; CONF_METRIC[CM_GAMMATILDEXZ] = 0.0; CONF_METRIC[CM_GAMMATILDEYY] = 1.0; CONF_METRIC[CM_GAMMATILDEYZ] = 0.0; CONF_METRIC[CM_GAMMATILDEZZ] = 1.0; ADM_3METRIC[GAMMAUPXX] = Psi4; ADM_3METRIC[GAMMAUPXY] = 0.0; ADM_3METRIC[GAMMAUPXZ] = 0.0; ADM_3METRIC[GAMMAUPYY] = Psi4; ADM_3METRIC[GAMMAUPYZ] = 0.0; ADM_3METRIC[GAMMAUPZZ] = Psi4; } # <a id='barbi_barb_i_barb2_and_barb'></a> # # ## Step 2.b: Computing $\bar{B}^{i}$, $\bar{B}_{i}$, $\bar{B}^{2}$, and $\bar{B}$ \[Back to [top](#toc)\] # $$\label{barbi_barb_i_barb2_and_barb}$$ # # We then set # # $$ # \boxed{\bar{B}^{i} = \frac{B^{i}}{\sqrt{4\pi}}}\ , # $$ # # and # # $$ # \bar{B}_{i} = \gamma_{ij}\bar{B}^{j} \implies # \boxed{ # \left\{ # \begin{matrix} # \bar{B}_{x} = \gamma_{xx}\bar{B}^{x} + \gamma_{xy}\bar{B}^{y} + \gamma_{xz}\bar{B}^{z}\\ # \bar{B}_{y} = \gamma_{yx}\bar{B}^{x} + \gamma_{yy}\bar{B}^{y} + \gamma_{yz}\bar{B}^{z}\\ # \bar{B}_{z} = \gamma_{zx}\bar{B}^{x} + \gamma_{zy}\bar{B}^{y} + \gamma_{zz}\bar{B}^{z} # \end{matrix} # \right. # }\ , # $$ # # then # # $$ # \bar{B}^{2} \equiv B_{i}B^{i} \implies \boxed{\bar{B}^{2} = B_{x}B^{x} + B_{y}B^{y} + B_{z}B^{z}}\ , # $$ # # and finally # # $$ # \boxed{\bar{B} \equiv \sqrt{\bar{B}^{2}}}\ . 
# $$ # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C //Next, prepare for the tau and stilde fixes: CCTK_REAL Bxbar = PRIMS[BX_CENTER]*ONE_OVER_SQRT_4PI,Bybar = PRIMS[BY_CENTER]*ONE_OVER_SQRT_4PI,Bzbar = PRIMS[BZ_CENTER]*ONE_OVER_SQRT_4PI; CCTK_REAL Bbar_x = ADM_3METRIC[GAMMAXX]*Bxbar + ADM_3METRIC[GAMMAXY]*Bybar + ADM_3METRIC[GAMMAXZ]*Bzbar; CCTK_REAL Bbar_y = ADM_3METRIC[GAMMAXY]*Bxbar + ADM_3METRIC[GAMMAYY]*Bybar + ADM_3METRIC[GAMMAYZ]*Bzbar; CCTK_REAL Bbar_z = ADM_3METRIC[GAMMAXZ]*Bxbar + ADM_3METRIC[GAMMAYZ]*Bybar + ADM_3METRIC[GAMMAZZ]*Bzbar; CCTK_REAL Bbar2 = Bxbar*Bbar_x + Bybar*Bbar_y + Bzbar*Bbar_z; CCTK_REAL Bbar = sqrt(Bbar2); # The next part of the code is written to prevent [floating-point underflow](https://en.wikipedia.org/wiki/Arithmetic_underflow). We compute $\bar{B}$ in a different way. We start by evaluating # # $$ # \bar{B}_{\rm check} = \left|\bar{B}^{x}\right| + \left|\bar{B}^{y}\right| + \left|\bar{B}^{z}\right|\ , # $$ # # and verifying whether that is a very small, positive number. Then, we determine the largest component of $\bar{B}_{\rm check}$: # # $$ # \bar{B}_{\max} = \max\left(\left|\bar{B}^{x}\right|,\left|\bar{B}^{y}\right|,\left|\bar{B}^{z}\right|\right)\ . # $$ # # Then, we rescale $\bar{B}_{i}$ and $\bar{B}^{i}$ using # # $$ # \left(\bar{B}^{i}\right)_{\rm tmp} \equiv \frac{\bar{B}^{i}}{\bar{B}_{\max}}\ ,\quad # \left(\bar{B}_{i}\right)_{\rm tmp} \equiv \frac{\bar{B}_{i}}{\bar{B}_{\max}}\ , # $$ # # and finally recompute $\bar{B}$ # # $$ # \bar{B} = \left[\left(\bar{B}_{i}\right)_{\rm tmp}\left(\bar{B}^{i}\right)_{\rm tmp}\right]\bar{B}_{\max}\ . 
# $$ # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C CCTK_REAL check_B_small = fabs(Bxbar)+fabs(Bybar)+fabs(Bzbar); if (check_B_small>0 && check_B_small<1.e-150) { // need to compute Bbar specially to prevent floating-point underflow CCTK_REAL Bmax = fabs(Bxbar); if (Bmax < fabs(Bybar)) Bmax=fabs(Bybar); if (Bmax < fabs(Bzbar)) Bmax=fabs(Bzbar); CCTK_REAL Bxtmp=Bxbar/Bmax, Bytemp=Bybar/Bmax, Bztemp=Bzbar/Bmax; CCTK_REAL B_xtemp=Bbar_x/Bmax, B_ytemp=Bbar_y/Bmax, B_ztemp=Bbar_z/Bmax; Bbar = sqrt(Bxtmp*B_xtemp + Bytemp*B_ytemp + Bztemp*B_ztemp)*Bmax; } # <a id='barbdots_hatbarbdots_and_sdots'></a> # # ## Step 2.c: Computing $\bar{B}\cdot\tilde{S}$, $\hat{\bar{B}}\cdot\tilde{S}$, and $\tilde{S}^{2}$ \[Back to [top](#toc)\] # $$\label{barbdots_hatbarbdots_and_sdots}$$ # # Then we compute # # $$ # \bar{B} \cdot \tilde{S} = \bar{B}_{i}\tilde{S}^{i} = \bar{B}_{x}\tilde{S}^{x} + \bar{B}_{y}\tilde{S}^{y} + \bar{B}_{z}\tilde{S}^{z}\ . # $$ # # and # # $$ # \hat{\bar{B}}\cdot\tilde{S} \equiv \frac{\bar{B} \cdot \tilde{S}}{\bar{B}}\ . # $$ # # However, if $\bar{B} \ll 1$, we set $\hat{\bar{B}}\cdot\tilde{S}=0$. 
# %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C CCTK_REAL BbardotS = Bxbar*CONSERVS[STILDEX] + Bybar*CONSERVS[STILDEY] + Bzbar*CONSERVS[STILDEZ]; CCTK_REAL hatBbardotS = BbardotS/Bbar; if (Bbar<1.e-300) hatBbardotS = 0.0; // Limit hatBbardotS //CCTK_REAL max_gammav = 100.0; //CCTK_REAL rhob_max = CONSERVS[RHOSTAR]/ADM_3METRIC[SQRTGAMMA]; //CCTK_REAL hmax = 1.0 + 2.0*rhob_max; //CCTK_REAL abs_hatBbardotS_max = sqrt(SQR(max_gammav)-1.0)*CONSERVS[RHOSTAR]*hmax; //if (fabs(hatBbardotS) > abs_hatBbardotS_max) { // CCTK_REAL fac_reduce = abs_hatBbardotS_max/fabs(hatBbardotS); // CCTK_REAL hatBbardotS_max = hatBbardotS*fac_reduce; // CCTK_REAL Bbar_inv = 1.0/Bbar; // CCTK_REAL hat_Bbar_x = Bbar_x*Bbar_inv; // CCTK_REAL hat_Bbar_y = Bbar_y*Bbar_inv; // CCTK_REAL hat_Bbar_z = Bbar_z*Bbar_inv; // CCTK_REAL sub_fact = hatBbardotS_max - hatBbardotS; // CONSERVS[STILDEX] += sub_fact*hat_Bbar_x; // CONSERVS[STILDEY] += sub_fact*hat_Bbar_y; // CONSERVS[STILDEZ] += sub_fact*hat_Bbar_z; // hatBbardotS = hatBbardotS_max; // BbardotS *= fac_reduce; // CONSERVS[STILDEX] = CONSERVS[STILDEX]; CONSERVS[STILDEY] = CONSERVS[STILDEY]; CONSERVS[STILDEZ] = CONSERVS[STILDEZ]; //} # Next we compute # # $$ # \tilde{S}^{2} \equiv \tilde{S} \cdot \tilde{S} = \gamma^{ij}\tilde{S}_{i}\tilde{S}_{j}\ , # $$ # # i.e. # # $$ # \boxed{ # \begin{align} # \tilde{S}^{2} &= \gamma^{xx}\left(\tilde{S}_{x}\right)^{2} # + \gamma^{yy}\left(\tilde{S}_{y}\right)^{2} # + \gamma^{zz}\left(\tilde{S}_{z}\right)^{2}\\ # &+2\left( # \gamma^{xy}\tilde{S}_{x}\tilde{S}_{y} # +\gamma^{xz}\tilde{S}_{x}\tilde{S}_{z} # +\gamma^{yz}\tilde{S}_{y}\tilde{S}_{z} # \right) # \end{align} # }\ . 
# $$ # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C CCTK_REAL sdots= ADM_3METRIC[GAMMAUPXX]*SQR(CONSERVS[STILDEX])+ADM_3METRIC[GAMMAUPYY]*SQR(CONSERVS[STILDEY])+ADM_3METRIC[GAMMAUPZZ]*SQR(CONSERVS[STILDEZ])+2.0* (ADM_3METRIC[GAMMAUPXY]*CONSERVS[STILDEX]*CONSERVS[STILDEY]+ADM_3METRIC[GAMMAUPXZ]*CONSERVS[STILDEX]*CONSERVS[STILDEZ]+ADM_3METRIC[GAMMAUPYZ]*CONSERVS[STILDEY]*CONSERVS[STILDEZ]); # <a id='modifying_tau'></a> # # ## Step 2.d: Modifying $\tilde{\tau}$ \[Back to [top](#toc)\] # $$\label{modifying_tau}$$ # # <a id='wm_sm_and_wmin'></a> # # ### Step 2.d.i: $W_{m}$, $\tilde{S}_{m}$, and $W_{\min}$ \[Back to [top](#toc)\] # $$\label{wm_sm_and_wmin}$$ # # Then we compute other useful quantities, which are eqs. (A52), (A53), and (A54) in appendix A of [Etienne *et al.* (2012)](https://arxiv.org/pdf/1112.0568.pdf) # # $$ # \begin{align} # W_{m} &= \psi^{-6}\left[\left(\hat{\bar{B}}\cdot\tilde{S}\right)^{2}+\rho_{\star}^{2}\right]^{1/2}\ ,\\ # \tilde{S}_{m}^{2} &= \frac{W_{m}^{2}\tilde{S}^{2} + \left(\bar{B}\cdot\tilde{S}\right)^{2}\left(\bar{B}^{2}+2W_{m}\right)}{\left(W_{m}+\bar{B}^{2}\right)^{2}}\ ,\\ # W_{\min} &= \psi^{-6}\left(S_{m}^{2}+\rho_{\star}^{2}\right)^{1/2}\ ,\\ # \end{align} # $$ # # respectively (notice the slightly different notation between the equations above and the one used in the paper). 
# %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C CCTK_REAL Wm = sqrt(SQR(hatBbardotS)+ SQR(CONSERVS[RHOSTAR]))/ADM_3METRIC[SQRTGAMMA]; CCTK_REAL Sm2 = (SQR(Wm)*sdots + SQR(BbardotS)*(Bbar2+2.0*Wm))/SQR(Wm+Bbar2); CCTK_REAL Wmin = sqrt(Sm2 + SQR(CONSERVS[RHOSTAR]))/ADM_3METRIC[SQRTGAMMA]; CCTK_REAL sdots_fluid_max = sdots; # <a id='tau_min'></a> # # ### Step 2.d.ii: $\tilde{\tau}_{\min}$ \[Back to [top](#toc)\] # $$\label{tau_min}$$ # # Next we evaluate # # $$ # \tilde{\tau}_{\min} = \tilde{\tau} - \frac{\psi^{6}}{2}\bar{B}^{2} - \frac{\bar{B}^{2}\tilde{S}^{2} - \left(\bar{B}\cdot\tilde{S}\right)^{2}}{2\psi^{6}\left(W_{\min}+\bar{B}^{2}\right)^{2}} # $$ # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C //tau fix, applicable when B==0 and B!=0: if(CONSERVS[TAUENERGY] < 0.5*ADM_3METRIC[SQRTGAMMA]*Bbar2) { CONSERVS[TAUENERGY] = tau_atm+0.5*ADM_3METRIC[SQRTGAMMA]*Bbar2; stats.failure_checker+=1000000; } CCTK_REAL tau_fluid_min = CONSERVS[TAUENERGY] - 0.5*ADM_3METRIC[SQRTGAMMA]*Bbar2 - (Bbar2*sdots - SQR(BbardotS))*0.5/(ADM_3METRIC[SQRTGAMMA]*SQR(Wmin+Bbar2)); # Then we verify if $\tilde{\tau}_{\min} \geq \tilde{\tau}_{\rm atm}$. If $\tilde{\tau}_{\min} < \tilde{\tau}_{\rm atm}$, then reset $\tilde{\tau}$: # # $$ # \tilde{\tau} = \tilde{\tau}_{\min} + \frac{\psi^{6}}{2}\bar{B}^{2} + \frac{\bar{B}^{2}\tilde{S}^{2} - \left(\bar{B}\cdot\tilde{S}\right)^{2}}{2\psi^{6}\left(W_{\min}+\bar{B}^{2}\right)^{2}}\ . # $$ # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C //Apply Stilde fix when B==0. 
//if(PRIMS[BX_CENTER]==0 && PRIMS[BY_CENTER]==0 && PRIMS[BZ_CENTER]==0 && (ADM_3METRIC[SQRTGAMMA]>30.0 || CONSERVS[RHOSTAR]/ADM_3METRIC[SQRTGAMMA]<100*rho_b_atm)) { //if(check_B_small < 1.e-300) { /********************************** * Piecewise Polytropic EOS Patch * * Computing Patm * **********************************/ /* This modification of the code trades the variable * "gamma_equals2" by the already defined function * pow(). * * Also, assuming that Patm < rho_ppoly_tab[0], we skip * the declaration of new variables to store the * values of K_ppoly_tab[0] and Gamma_ppoly_tab[0]. Thus: * ----------------------------------------- * | P_{atm} = K_{0} * rho_{atm}^{Gamma_{0}} | * ----------------------------------------- */ int polytropic_index = find_polytropic_K_and_Gamma_index(eos,rho_b_atm); CCTK_REAL Patm = eos.K_ppoly_tab[polytropic_index]*pow(rho_b_atm,eos.Gamma_ppoly_tab[polytropic_index]); if(check_B_small*check_B_small < Patm*1e-32) { CCTK_REAL rhot=CONSERVS[TAUENERGY]*(CONSERVS[TAUENERGY]+2.0*CONSERVS[RHOSTAR]); CCTK_REAL safetyfactor = 0.999999; //if(ADM_3METRIC[SQRTGAMMA]>Psi6threshold) safetyfactor=0.99; if(sdots > safetyfactor*rhot) { CCTK_REAL rfactm1 = sqrt((safetyfactor*rhot)/sdots); CONSERVS[STILDEX]*=rfactm1; CONSERVS[STILDEY]*=rfactm1; CONSERVS[STILDEZ]*=rfactm1; stats.failure_checker+=10000000; } } else if(ADM_3METRIC[SQRTGAMMA]>Psi6threshold) { //Apply new Stilde fix. if (tau_fluid_min < tau_atm*1.001) { tau_fluid_min = tau_atm*1.001; CONSERVS[TAUENERGY] = tau_fluid_min + 0.5*ADM_3METRIC[SQRTGAMMA]*Bbar2 + (Bbar2*sdots - SQR(BbardotS))*0.5/(ADM_3METRIC[SQRTGAMMA]*SQR(Wmin+Bbar2)); } # <a id='modifying_tilde_s_i'></a> # # ## Step 2.e: Modifying $\tilde{S}_{i}$ \[Back to [top](#toc)\] # $$\label{modifying_tilde_s_i}$$ # # Then we check if $\tilde{S}^{2} \leq \tilde{\tau}_{\min}\left(\tilde{\tau}_{\min}+2\rho_{\star}\right)$. 
If not, we reset $\tilde{S}_{i}$ # # $$ # \tilde{S}_{i}\to \tilde{S}_{i}\sqrt{\frac{\tilde{\tau}_{\min}\left(\tilde{\tau}_{\min}+2\rho_{\star}\right)}{\tilde{S}^{2}}} # $$ # + # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C CCTK_REAL LHS = tau_fluid_min*(tau_fluid_min+2.0*CONSERVS[RHOSTAR]); CCTK_REAL RHS = sdots_fluid_max; CCTK_REAL safetyfactor = 0.999999; if(safetyfactor*LHS < RHS) { CCTK_REAL rfactm1 = sqrt((safetyfactor*LHS)/RHS); CONSERVS[STILDEX]*=rfactm1; CONSERVS[STILDEY]*=rfactm1; CONSERVS[STILDEZ]*=rfactm1; stats.failure_checker+=100000000; } } return 0; } /***********************************************************/ /***********************************************************/ /***********************************************************/ /***********************************************************/ # - # <a id='enforce_pressure_floor_ceiling'></a> # # # Step 3: The `enforce_pressure_floor_ceiling` function \[Back to [top](#toc)\] # $$\label{enforce_pressure_floor_ceiling}$$ # # After the Newton-Raphson solver has successfully found a set of primitives, the primitives are checked for physicality, and if they are not in the physical range, they are minimally modified until they return to the physical range. First, if the velocity is found to be superluminal, the speed is reduced to `IllinoisGRMHD`’s default Lorentz factor limit, a procedure which we already explained above when we discussed the `impose_speed_limit_output_u0` function. # # Next, `IllinoisGRMHD` does not include any cooling mechanism, which means that for evolutions adopting a $\Gamma$-law equation of state, the pressure should not physically drop below $P_{\rm cold}$. So a pressure floor of $0.9P_{\rm cold}$ is imposed. Increasing this floor to $P_{\rm cold}$ exactly results in large central density drifts in TOV star evolutions. # # **NOTE**: Please keep in mind that the floor and ceiling values presented here were found ***empirically***. 
# + # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C static inline void enforce_pressure_floor_ceiling(output_stats &stats,CCTK_REAL kpoly,CCTK_REAL P_cold,CCTK_REAL Psi6,const CCTK_REAL Psi6threshold,CCTK_REAL rho_b,const CCTK_REAL rhobatm, CCTK_REAL &P) { CCTK_REAL P_min=0.9*P_cold; if(P<P_min) { stats.failure_checker+=10; P=P_min; } //MAX(P,P_min); //if(P < P_min) P=1.0*P_cold; /* OLD: Discarded because lower limit is unphysical. if(P <= 0.5*kpoly*P_cold) { P=0.5*kpoly*P_cold; } */ # - # Simulations can crash in the other extreme, if $P/P_{\rm cold}$ becomes too large. This typically only happens in very low density regions or inside black holes. So at densities $\rho_{b}<100\rho_{\rm atm}$ or deep inside black hole horizons, a ceiling on $P$ of $100P_{\rm cold}$ is enforced (see Appendix A of [Etienne *et al.* (2012)](https://arxiv.org/abs/1112.0568) for more details). # # We also introduce a parameter, $\psi^{6}_{\rm threshold}$, which determines whether the region under consideration is deep inside the BH horizon or not. For regions deep inside the BH horizon, defined by $\sqrt{\gamma} = \psi^{6} > \psi^{6}_{\rm threshold}$, the primary goal is to keep the evolution stable and prevent inaccurate data from leaking out of the BH horizon. It was determined that in this situation, a better ceiling on $P$ is $10^{5}P_{\rm cold}$. # + # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C //CCTK_REAL P_max = 10.0*P_cold; CCTK_REAL P_max = 100.0*P_cold; if(Psi6 > Psi6threshold) P_max = 1e5*P_cold; // <-- better than 10. 
if((rho_b < 100.0*rhobatm || Psi6 > Psi6threshold) && P>P_max) { P=P_max; stats.failure_checker+=100; } /* CCTK_REAL rho_horiz_cap = 1000.0*rhobatm; //New density damping mechanism inside the horizon if(Psi6 > Psi6threshold && rho_b>rho_horiz_cap) { CCTK_REAL six_phi=log(Psi6); CCTK_REAL six_phithreshold=log(Psi6threshold); CCTK_REAL Psi6max_approx=350000; rho_b = rho_horiz_cap+(rho_b-rho_horiz_cap)*exp(-200.0*SQR((six_phi-six_phithreshold)/log(Psi6max_approx))); } */ } # - # <a id='enforce_limits_on_primitives_and_recompute_conservs'></a> # # # Step 4: The `IllinoisGRMHD_enforce_limits_on_primitives_and_recompute_conservs` function \[Back to [top](#toc)\] # $$\label{enforce_limits_on_primitives_and_recompute_conservs}$$ # # We start by imposing physical limits on the primitive variables $\left\{\rho_{b},P,v^{i}\right\}$, using: # # 1. $\rho_{b} \to \max\left(\rho_{b},\rho_{b,{\rm atm}}\right)$ # 1. `enforce_pressure_floor_ceiling()`: documented [above](#enforce_pressure_floor_ceiling) # # We then compute $g_{\mu\nu}$, $g^{\mu\nu}$, $T_{\mu\nu}$, $T^{\mu\nu}$, and, finally, recompute the conservative variables. # # In this step, we will use NRPy+ to generate expressions for: # # 1. The physical ADM 4-metric, $g_{\mu\nu}$: done using the [BSSN.ADMBSSN_tofrom_4metric NRPy+ module](/edit/NRPyIGM/BSSN/ADMBSSN_tofrom_4metric.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-ADMBSSN_tofrom_4metric.ipynb)\] # 2. The physical ADM inverse 4-metric $g^{\mu\nu}$: done using the [BSSN.ADMBSSN_tofrom_4metric NRPy+ module](/edit/NRPyIGM/BSSN/ADMBSSN_tofrom_4metric.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-ADMBSSN_tofrom_4metric.ipynb)\] # 3. The covariant GRMHD energy momentum tensor, $T_{\mu\nu}^{\rm GRMHD}$, which is done in three steps: # 1. Compute $T_{\mu\nu}^{\rm GRHD}$: done using the [GRHD.equations NRPy+ module](/edit/NRPyIGM/GRHD/equations.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-GRHD_Equations-Cartesian.ipynb)\] # 2. 
Compute $T_{\mu\nu}^{\rm GRFFE}$: done using the [GRFFE.equations NRPy+ module](/edit/NRPyIGM/GRFFE/equations.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-GRFFE_Equations-Cartesian.ipynb)\] # 3. Compute $T_{\mu\nu}^{\rm GRMHD} = T_{\mu\nu}^{\rm GRHD} + T_{\mu\nu}^{\rm GRFFE}$: done using the [GRMHD.equations NRPy+ module](/edit/NRPyIGM/GRMHD/equations.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-GRMHD_Equations-Cartesian.ipynb)\] # 4. The contravariant GRMHD energy momentum tensor, $T^{\mu\nu}_{\rm GRMHD}$, which is done is three steps: # 1. Compute $T^{\mu\nu}_{\rm GRHD}$: done using the [GRHD.equations NRPy+ module](/edit/NRPyIGM/GRHD/equations.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-GRHD_Equations-Cartesian.ipynb)\] # 2. Compute $T^{\mu\nu}_{\rm GRFFE}$: done using the [GRFFE.equations NRPy+ module](/edit/NRPyIGM/GRFFE/equations.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-GRFFE_Equations-Cartesian.ipynb)\] # 3. Compute $T^{\mu\nu}_{\rm GRMHD} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$: done using the [GRMHD.equations NRPy+ module](/edit/NRPyIGM/GRMHD/equations.py) \[[**tutorial**](/notebooks/NRPyIGM/Tutorial-GRMHD_Equations-Cartesian.ipynb)\] # + from outputC import * import indexedexp as ixp import sympy as sp import BSSN.ADMBSSN_tofrom_4metric as AB4m import GRHD.equations as GRHD import GRFFE.equations as GRFFE import GRMHD.equations as GRMHD NRPy_headers_dir_path = os.path.join(outdir,"NRPy_generated_headers") ADMgammaDD = ixp.zerorank2() for i in range(3): for j in range(i,3): ADMgammaDD[i][j] = ADMgammaDD[j][i] = sp.symbols("ADM_3METRIC[GAMMA"+chr(ord('X')+i)+chr(ord('X')+j)+"]",real=True) ADMbetaU = ixp.zerorank1() for i in range(3): ADMbetaU[i] = sp.symbols("ADM_3METRIC[BETA"+chr(ord('X')+i)+"]",real=True) ADMalpha = sp.symbols("ADM_3METRIC[ALPHA]",real=True) vU = ixp.zerorank1() for i in range(3): vU[i] = sp.symbols("PRIMS[V"+chr(ord('X')+i)+"]",real=True) 
GRHD.u4U_in_terms_of_vU__rescale_vU_by_applying_speed_limit(ADMalpha,ADMbetaU,ADMgammaDD, vU) for i in range(3): vU[i] = GRHD.rescaledvU[i] u4U = GRHD.u4U_ito_vU # First compute smallb4U & smallbsquared from BtildeU, which are needed # for GRMHD stress-energy tensor T4UU and T4UD: GRHD.compute_sqrtgammaDET(ADMgammaDD) B_notildeU = ixp.zerorank1() for i in range(3): B_notildeU[i] = sp.symbols("PRIMS[B"+chr(ord('X')+i)+"_CENTER]",real=True) sqrt4pi = sp.symbols('sqrt4pi', real=True) GRFFE.compute_smallb4U( ADMgammaDD,ADMbetaU,ADMalpha, u4U,B_notildeU, sqrt4pi) GRFFE.compute_smallbsquared(ADMgammaDD,ADMbetaU,ADMalpha, GRFFE.smallb4U) rho_b,P,epsilon = sp.symbols("PRIMS[RHOB] PRIMS[PRESSURE] eps", real=True) GRMHD.compute_GRMHD_T4UU(ADMgammaDD, ADMbetaU, ADMalpha, rho_b, P, epsilon, u4U, GRFFE.smallb4U, GRFFE.smallbsquared) GRMHD.compute_GRMHD_T4UD(ADMgammaDD, ADMbetaU, ADMalpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU) # Compute g_{\mu\nu} AB4m.g4DD_ito_BSSN_or_ADM("ADM",gammaDD=ADMgammaDD,betaU=ADMbetaU,alpha=ADMalpha) ADMgammaUU,dummy = ixp.symm_matrix_inverter3x3(ADMgammaDD) AB4m.g4UU_ito_BSSN_or_ADM("ADM",betaU=ADMbetaU,alpha=ADMalpha,gammaUU=ADMgammaUU) # Set up g4DD and T4UU in a way that uses the already computed # gridfunctions instead of the complicated SymPy expressions g4DD = ixp.zerorank2(DIM=4) T4UU = ixp.zerorank2(DIM=4) count = 0 for mu in range(4): for nu in range(mu,4): g4DD[mu][nu] = g4DD[nu][mu] = sp.Symbol("g4dn["+str(mu)+"]["+str(nu)+"]",real=True) T4UU[mu][nu] = T4UU[nu][mu] = sp.Symbol("TUPMUNU["+str(count)+"]",real=True) count += 1 # Compute T4DD using the simplified expressions T4DD = ixp.zerorank2(DIM=4) for mu in range(4): for nu in range(4): for rho in range(4): for sigma in range(4): T4DD[mu][nu] += g4DD[mu][rho] * g4DD[nu][sigma] * T4UU[rho][sigma] # Compute conservative variables in terms of primitive variables GRHD.compute_rho_star( ADMalpha, GRHD.sqrtgammaDET, rho_b,u4U) GRHD.compute_tau_tilde(ADMalpha, GRHD.sqrtgammaDET, 
GRMHD.T4UU,GRHD.rho_star) GRHD.compute_S_tildeD( ADMalpha, GRHD.sqrtgammaDET, GRMHD.T4UD) varlist = [] exprlist = [] for mu in range(4): for nu in range(4): varlist.append("g4dn["+str(mu)+"]["+str(nu)+"]") exprlist.append(AB4m.g4DD[mu][nu]) for mu in range(4): for nu in range(4): varlist.append("g4up["+str(mu)+"]["+str(nu)+"]") exprlist.append(AB4m.g4UU[mu][nu]) count = 0 for mu in range(4): for nu in range(mu,4): varlist.append("TUPMUNU["+str(count)+"]") exprlist.append(GRMHD.T4UU[mu][nu]) count += 1 import time start = time.time() outputC(exprlist,varlist,filename=os.path.join(NRPy_headers_dir_path,"compute_g4dn_g4up_T4UU.h"), params="outCverbose=False") print("Time elapsed: %4.2lf seconds"%(time.time()-start)) varlist = [] exprlist = [] count = 0 for mu in range(4): for nu in range(mu,4): varlist.append("TDNMUNU["+str(count)+"]") exprlist.append(T4DD[mu][nu]) count += 1 varlist.append("CONSERVS[RHOSTAR]") exprlist.append(GRHD.rho_star) for i in range(3): varlist.append("CONSERVS[STILDE"+chr(ord('X')+i)+"]") exprlist.append(GRHD.S_tildeD[i]) varlist.append("CONSERVS[TAUENERGY]") exprlist.append(GRHD.tau_tilde) # for mu in range(4): # varlist.append("smallb4U["+str(mu)+"]") # exprlist.append(GRFFE.smallb4U[mu]) # varlist.append("smallbsquared") # exprlist.append(GRFFE.smallbsquared) start = time.time() outputC(exprlist,varlist,filename=os.path.join(NRPy_headers_dir_path,"compute_T4DD_CONSERVS.h"), params="outCverbose=False") print("Time elapsed: %4.2lf seconds"%(time.time()-start)) # + # %%writefile -a $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C void IllinoisGRMHD_enforce_limits_on_primitives_and_recompute_conservs(const int already_computed_physical_metric_and_inverse,CCTK_REAL *PRIMS, struct output_stats &stats,eos_struct &eos, CCTK_REAL *ADM_3METRIC,CCTK_REAL g4dn[4][4],CCTK_REAL g4up[4][4], CCTK_REAL *TUPMUNU,CCTK_REAL *TDNMUNU,CCTK_REAL *CONSERVS) { #ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER DECLARE_CCTK_PARAMETERS; #endif // 
Useful debugging tool, can be used to track fixes: //CCTK_REAL rho_b_orig=PRIMS[RHOB],P_orig=PRIMS[PRESSURE],vx_orig=PRIMS[VX],vy_orig=PRIMS[VY],vz_orig=PRIMS[VZ]; /***********************************************************/ // Enforce limits on pressure, density, and v^i /***********************************************************/ // Density floor: // printf("HEY222a %e %e %e\n",PRIMS[RHOB],rho_b_atm,PRIMS[PRESSURE]); PRIMS[RHOB] = MAX(PRIMS[RHOB],rho_b_atm); // Density ceiling: PRIMS[RHOB] = MIN(PRIMS[RHOB],rho_b_max); // Next set h, the enthalpy: CCTK_REAL h_enthalpy, P_cold,eps_cold,dPcold_drho,eps_th,Gamma_cold; /* <- Note that in setting h, we need to define several * other variables. Though some will be unused later * in this function, they may be useful in other * functions */ compute_P_cold__eps_cold__dPcold_drho__eps_th__h__Gamma_cold(PRIMS,eos,Gamma_th,P_cold,eps_cold,dPcold_drho,eps_th,h_enthalpy,Gamma_cold); // Pressure floor & ceiling: int polytropic_index = find_polytropic_K_and_Gamma_index(eos,PRIMS[RHOB]); enforce_pressure_floor_ceiling(stats,eos.K_ppoly_tab[polytropic_index],P_cold,ADM_3METRIC[SQRTGAMMA],Psi6threshold,PRIMS[RHOB],rho_b_atm, PRIMS[PRESSURE]); // printf("HEY222b %e %e %e\n",PRIMS[RHOB],rho_b_atm,PRIMS[PRESSURE]); // Possibly adjusted pressure, so recompute eps & h: CCTK_REAL eps = eps_cold + (PRIMS[PRESSURE]-P_cold)/(Gamma_th-1.0)/PRIMS[RHOB]; const CCTK_REAL sqrt4pi = 1.0 * sqrt(4.0*M_PI); CCTK_REAL smallb4U[4]; CCTK_REAL smallbsquared; //FIXME: Use already_computed_physical_metric_and_inverse to determine whether g4dn & g4up really need to be calculated. #include "NRPy_generated_headers/compute_g4dn_g4up_T4UU.h" #include "NRPy_generated_headers/compute_T4DD_CONSERVS.h" //printf("HEY?? 
%e %e %e %e %e\n",smallb4U[0],smallb4U[1],smallb4U[2],smallb4U[3],smallbsquared); } # - # <a id='code_validation'></a> # # # Step 5: Code validation \[Back to [top](#toc)\] # $$\label{code_validation}$$ # # First we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook. # + # # Verify if the code generated by this tutorial module # # matches the original IllinoisGRMHD source code # # First download the original IllinoisGRMHD source code # import urllib # from os import path # original_IGM_file_url = "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C" # original_IGM_file_name = "apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs-original.C" # original_IGM_file_path = os.path.join(outdir,original_IGM_file_name) # # Then download the original IllinoisGRMHD source code # # We try it here in a couple of ways in an attempt to keep # # the code more portable # try: # original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode("utf-8") # # Write down the file the original IllinoisGRMHD source code # with open(original_IGM_file_path,"w") as file: # file.write(original_IGM_file_code) # except: # try: # original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode("utf-8") # # Write down the file the original IllinoisGRMHD source code # with open(original_IGM_file_path,"w") as file: # file.write(original_IGM_file_code) # except: # # If all else fails, hope wget does the job # # !wget -O $original_IGM_file_path $original_IGM_file_url # # Perform validation # # Validation__tau_and_prims_limits__C = !diff $original_IGM_file_path $outdir/apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C # if Validation__tau_and_prims_limits__C == []: # # If the validation passes, we do not need to store the original IGM 
source code file # # !rm $original_IGM_file_path # print("Validation test for apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C: PASSED!") # else: # # If the validation fails, we keep the original IGM source code file # print("Validation test for apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.C: FAILED!") # # We also print out the difference between the code generated # # in this tutorial module and the original IGM source code # print("Diff:") # for diff_line in Validation__tau_and_prims_limits__C: # print(diff_line) # - # <a id='latex_pdf_output'></a> # # # Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.pdf](Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means). latex_nrpy_style_path = os.path.join(nrpy_dir_path,"latex_nrpy_style.tplx") # #!jupyter nbconvert --to latex --template $latex_nrpy_style_path --log-level='WARN' Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.ipynb # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.tex # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.tex # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.tex # !rm -f Tut*.out Tut*.aux Tut*.log
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__apply_tau_floor__enforce_limits_on_primitives_and_recompute_conservs.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python3 # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # # Mais EDOs # # Agora, vamos resolver uma EDO que não tem solução analítica: # $$ \begin{cases} y'(t) = - y(t) + 2\sin(t^2) \\ y(0) = 1.2\end{cases} $$ # # Será que o mesmo algoritmo funciona? ### Começar com código (notebook!) copiado da aula passada, e mudar ### Resposta aqui T = 10 y0 = 1.2 t30, y30 = eulerexplicito(T, 30, y0) t100, y100 = eulerexplicito(T, 100, y0) plt.plot(t30, y30, label='30') plt.plot(t100, y100, label='100') plt.legend() # Comentar plt.show() # Mais precisão: passos menores t300, y300 = eulerexplicito(T, 300, y0) plt.plot(t100, y100, label='100') plt.plot(t300, y300, label='300') plt.legend() plt.show() t1000, y1000 = eulerexplicito(T, 1000, y0) plt.plot(t300, y300, label='300') plt.plot(t1000, y1000, label='1000') plt.legend() plt.show() # # Explorando from ipywidgets import IntSlider, FloatSlider, interact def do(npts, Tmax, y0): ts, ys = eulerexplicito(Tmax, npts, y0) plt.figure(figsize=(12,4)) plt.plot(ts, ys) plt.title('n = {}'.format(npts)) plt.show() npts = IntSlider(value=100, min=10, max=400, step=5) Tmax = IntSlider(value=5, min=1, max=20) y0 = FloatSlider(value=1.2, min=-2, max=2) interact(do, npts=npts, Tmax=Tmax, y0=y0); # # Generalizando # # Uma EDO é definida por dois objetos: # - a dinâmica, ou seja, como calcular $y'(t)$, # - a condição inicial, ou seja, $y(t_0)$. # # Em geral, a dinâmica é dada explicitamente, mas podemos descrever, de forma bastante geral, por uma função $F$: # $$ y'(t) = F(t, y(t)), $$ # que indica como calcular a derivada em função do instante de tempo e do valor da função. # # Por exemplo, quem seria $F$ nos exemplos já vistos? 
# ## Uma função geral # # Além da dinâmica e da condição inicial, # uma função geral para aproximar a solução usando o método de Euler explícito # necessita dos pontos intermediários $t_i$ onde calcular a função. # Assim, podemos propor o seguinte modelo de função: def eulerexplicito(F, t0, y0, ts): """Calcula uma solução aproximada da equação y' = F(t,y) pelo método de Euler, nos pontos [ts].""" ### Resposta aqui return ys def F(t,y): return 2*np.sin(t**2) - y ts2 = np.linspace(0, 10, num=100) ys2 = eulerexplicito(F, 0, 1.2, ts2[1:]) plt.plot(ts2, ys2) plt.show() # ## E funções auxiliares # # - Dado um intervalo, e o número de pontos # - Dado um intervalo, e o tamanho do passo def euler_npts(F, I, y0, npts, retpts=False): ### Resposta aqui ys = eulerexplicito(F, I[0], y0, ts[1:]) if retpts: return ts, ys else: return ys def euler_h(F, I, y0, h, retpts=False): ### Resposta aqui ys = eulerexplicito(F, I[0], y0, ts[1:]) if retpts: return ts, ys else: return ys # ## Observe-as funcionando # Comentar for n in [50,100,200]: ts, ys = euler_npts(F, [0,10], 1.2, n, retpts=True) plt.plot(ts, ys, label=str(n)) plt.legend(title='num steps') plt.show() for h in [1,0.5,0.1,0.05]: ts, ys = euler_h(F, [0,10], 1.2, h, retpts=True) plt.plot(ts, ys, label=str(h)) plt.legend(title='step length') plt.show()
comp-cientifica-I-2018-2/semana-2/raw_files/Euler Explicito 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Module 3 # # ## Video 12: Slicing Arrays # **Python for the Energy Industry** # # ## 1D Arrays # # Arrays such as those seen in the previous video can be sliced in a similar way to lists: # + import numpy as np my_array = np.arange(10) print('my_array -',my_array) # get values between index 2 (included) and 6 (excluded) print('my_array[2:6] -',my_array[2:6]) # get values between index 4 (included) the end of the array print('my_array[4:] -',my_array[4:]) # get values between the start of the array and index 7 (excluded) print('my_array[:7] -',my_array[:7]) # - # Slicing with the form a:b:c, accesses indices between a and b, and indices that are multiples of c. For example: print('my_array[2:9:2] -',my_array[2:9:2]) # ## 2D Arrays # # ### Generating Arrays # # Numpy arrays can have any number of dimensions. Working with 2D arrays is common, and most functions for generating 1D arrays also work for multiple dimensions: # 4 by 4 array of zeros print(np.zeros((4,4))) # 6 by 3 array of ones print(np.ones((6,3))) # 2 by 10 array of random numbers print(np.random.rand(2,10)) # Note that the when specifying the shape of the array, the first argument is the number of rows and the second argument is columns. # # You can get the shape of an array: # + x = np.ones((6,3)) print(x.shape) # - # ### Slicing Arrays # # Accessing elements in 2D arrays is done by separating the indices with a comma. The element in the second row, third column of an array would be accessed with the slice [1,2]. 
# + a = np.random.rand(3,3) print(a) print('second row, third column:',a[1,2]) # - # This applies when slicing as well, so getting all columns of the second row would be: [1,:] # + a = np.random.rand(3,3) print(a) print('second row, all columns:',a[1,:]) # - # ### Filtering Arrays # # You can also access the values in an array that meet certain conditions: print(a[a > 0.5]) print(a[np.logical_and(a > 0.5, a < 0.8)]) # ### Exercise # # Create a 5 by 5 array of random numbers between 0 and 10.
docs/examples/academy/12. Slicing Arrays.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [dev] PyMC4 Design overview. Generators, Coroutines and all the things # For a brief introduction of Coroutines you can first read [PEP0342](https://www.python.org/dev/peps/pep-0342/), but we will cover most of the basics here with a practical example. Here we will make a draft of a PPL on top of `tfp`. The challenge is to use dynamic graph building and designing a flexible framework. # ## A simple case import tensorflow_probability as tfp import tensorflow as tf from tensorflow_probability import distributions as tfd # Let's first look at "HOW-TO PPL". Essentially, we want to implement a probabilisitc program (think of a Bayesian model that you usually represent in a directed acyclic graph) that does 2 things: # # 1. forward sampling to get prior (predictive) samples; # 2. reverse evaluation on some inputs to compute the log-probability (if we conditioned on the observed, we are evaluating the unnormalized posterior distribution of the unknown random variables). # # Specifically for computing the log_prob, we need to keep track of the dependency. For example, if we have something simple like `x ~ Normal(0, 1), y ~ Normal(x, 1)`, in the reverse mode (goal 2 from above) we need to swap `x` with the input `x` (either from user or programmatically) to essentially do `log_prob = Normal(0, 1).log_prob(x_input) + Normal(x_input, 1).log_prob(y_input)`. # # There are a few approaches to this problem. For example, in PyMC3 with a static graph backend (theano), we are able to write things in a declarative way and use the static computational graph to keep track of the dependences. 
This means we are able to do `log_prob = x.log_prob(x) + y.log_prob(y)`, as the value representation of the random variables `x` and `y` are "swap-in" with some input at runtime. With a dynamic graph we run into problems as we write the model in a forward mode -- we essentially lose track of the dependence in the reverse calling mode. We need to either keep track of the graph representation ourselves, or write a function that could be reevaluated to make sure we have the right dependencies. # This ideally should look like this def model(x): scale = tfd.HalfCauchy(0, 1) coefs = tfd.Normal(tf.zeros(x.shape[1]), 1, ) predictions = tfd.Normal(tf.linalg.matvec(x, coefs), scale) return predictions # But this function will not work (you can try it yourself), because there is no random variable concept in `tfp`, meaning that you cannot do `RV_a.log_prob(RV_a)` (yes, just think of Random Variable in a PPL as a tensor/array-like object that you can do computation and a log_prob method that we can evaluate it on itself. model(tf.random.normal((100, 10))) # Generating a log_prob from this model also wont work, because the computation is different than the function we have written down above. # # What we want here is to track function evaluation at runtime, depending on the context (goal 1 or goal 2 from above). # # The very first way to cope with is was writing a wrapper over a distribution object. This wrapper was intended to catch a call to the distribution and use context to figure out what to do: for goal 1, we draw a sample from a random variable and plug the concrete value into the downstream dependencies; for goal 2 we got the concrete value and evaluate it with the respective random variable, and also plug the concrete value into the downstream dependencies. Here we use Coroutines from Python to have dynamic control flow that could achieve this kind of deferred execution. 
def model(x): scale = yield tfd.HalfCauchy(0, 1) coefs = yield tfd.Normal(tf.zeros(x.shape[1]), 1, ) predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale) return predictions # Now, we evaluate the model as expected but `yield` allows us to defer the program execution. But before evaluating this function, let's figure out what does yield do. def generator(message): print("I am a generator and I yield", message) response = yield message print("I am a generator and I got", response) return "goodbye" g = generator("(generators are cool)") mes = g.send(None) print(mes) g.send("(Indeed, generators are cool)") # What has happened here: # # * we had a simple generator and were able to communicate with it via `send` # * after `send` is called (first time requires it to have `None` argument) generator goes to the next `yield` expression and yields what it is asked to yield. # * as a return value from `send` we have this exact message from `yield message` # * we set the lhs of `response = yield message` with next `send` and no earlier # * after generator has no `yield` statements left and finally reaches `return`, it raises `StopIteration` with return value as a first argument # # Now we are ready to evaluate our model by hand state = dict(dists=dict(), samples=dict()) state["input"] = tf.random.normal((3, 10)) m = model(state["input"]) scale_dist = next(m) print(scale_dist) # which means, we are here # # ```python # def model(x): # scale = yield tfd.HalfCauchy(0, 1) # <--- HERE # coefs = yield tfd.Normal(tf.zeros(x.shape[1]), 1, ) # ... # ``` # What can we do with this distribution? We can choose forward sampling (in this case we sample from the state-less distribution `HalfCauchy(0, 1)`). But we need it to be used by user seamlessly later on regardless of the context (goal 1 or 2 above). On the model side, we need to store intermediate values and its associated distributions (hey! that's a random variable!). 
assert scale_dist.name not in state["dists"] state["samples"][scale_dist.name] = scale_dist.sample() state["dists"][scale_dist.name] = scale_dist coefs_dist = m.send(state["samples"][scale_dist.name]) print(coefs_dist) # ```python # def model(x): # scale = yield tfd.HalfCauchy(0, 1) # coefs = yield tfd.Normal(tf.zeros(x.shape[1]), 1, ) # <--- WE ARE NOW HERE # predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale) # return predictions # ``` # We do the same thing assert coefs_dist.name not in state["dists"] state["samples"][coefs_dist.name] = coefs_dist.sample() state["dists"][coefs_dist.name] = coefs_dist preds_dist = m.send(state["samples"][coefs_dist.name]) print(preds_dist) # ```python # def model(x): # scale = yield tfd.HalfCauchy(0, 1) # coefs = yield tfd.Normal(tf.zeros(x.shape[1]), 1, ) # predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale) # <--- NOW HERE # return predictions # ``` # We are now facing predictive distribution. Here we have several options: # * sample from it: we get prior predictive # * set custom values instead of sample, essentially conditioning on data. We might be interested in this to compute unnormalized posterior # * replace it with another distribution, arbitrary magic assert preds_dist.name not in state["dists"] state["samples"][preds_dist.name] = tf.zeros(preds_dist.batch_shape) state["dists"][preds_dist.name] = preds_dist # Gotcha, we found duplicated names in our toy graphical model. We can easily tell our user to rewrite the model to get rid of duplicate names m.throw(RuntimeError( "We found duplicate names in your cool model: {}, " "so far we have other variables in the model, {}".format( preds_dist.name, set(state["dists"].keys()), ) )) # The good thing is that we *communicate* with user, and can give meaningful exceptions with few pain. 
# # The correct model should look like this: # # ```python # def model(x): # scale = yield tfd.HalfCauchy(0, 1) # coefs = yield tfd.Normal(tf.zeros(x.shape[1]), 1, ) # predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale, name="Normal_1") # <--- HERE we asked out user to change the name # return predictions # ``` # # Let's set all the names according to the new model and interact with user again using the same model m.gi_running # Our generator is now at the end of its execution - we can't interact with it any more. Let's create a new one and reevaluate with same sampled values (A hint how to get the desired `logp` function) def model(x): scale = yield tfd.HalfCauchy(0, 1) coefs = yield tfd.Normal(tf.zeros(x.shape[1]), 1, ) predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale, name="Normal_1") # <--- HERE we asked out user to change the name return predictions m = model(state["input"]) print(m.send(None)) print(m.send(state["samples"]["HalfCauchy"])) print(m.send(state["samples"]["Normal"])) try: m.send(tf.zeros(state["input"].shape[0])) except StopIteration as e: stop_iteration = e else: raise RuntimeError("No exception met") print(stop_iteration) # Instead of returning some value in the last `send`, generator raises `StopIteration` because it is exhausted and reached the `return` statement (no more `yield` met). As explained (and checked here) in [PEP0342](https://www.python.org/dev/peps/pep-0342/), we have a return value inside # ## Automate process above # We all are lazy humans and cant stand doing repetitive things. In our model evaluation we followed pretty simple rules: # * asserting name is not used # * checking if we should sample or place a specific value instead # * recording distributions and samples # # Next step is to make a function that does all this instead of us. 
In this tutorial let's keep it simple: def interact(gen, state): control_flow = gen() return_value = None while True: try: dist = control_flow.send(return_value) if dist.name in state["dists"]: control_flow.throw(RuntimeError( "We found duplicate names in your cool model: {}, " "so far we have other variables in the model, {}".format( dist.name, set(state["dists"].keys()), ) )) if dist.name in state["samples"]: return_value = state["samples"][dist.name] else: return_value = dist.sample() state["samples"][dist.name] = return_value state["dists"][dist.name] = dist except StopIteration as e: if e.args: return_value = e.args[0] else: return_value = None break return return_value, state # This implementation assumes no arg generator, we make things just simple preds, state = interact(lambda: model(tf.random.normal((3, 10))), state=dict(dists=dict(), samples=dict())) state preds # We get all the things as expected. To calculate `logp` you just iterate over distributions and match them with the correspondig values. But let's dive deeper # ## One level deeper # Recall the motivating example from [PR#125](https://github.com/pymc-devs/pymc4/pull/125) # + def Horseshoe(mu=0, tau=1., s=1., name=None): with tf.name_scope(name): scale = yield tfd.HalfCauchy(0, s, name="scale") noise = yield tfd.Normal(0, tau, name="noise") return scale * noise + mu def linreg(x): scale = yield tfd.HalfCauchy(0, 1, name="scale") coefs = yield Horseshoe(tf.zeros(x.shape[1]), name="coefs") predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale, name="predictions") return predictions # - preds, state = interact(lambda: linreg(tf.random.normal((3, 10))), state=dict(dists=dict(), samples=dict())) # Oooups, we have a type error. What we want is a nested model, but nesting models is something different from a plain generator. As we have our model being a generator itself, the return value of `Horseshoe(tf.zeros(x.shape[1]), name="coefs")` is a generator. 
Of course this generator has no name attribute. Okay, we can ask user to use `yield from` construction to generate from the generator def linreg_ugly(x): scale = yield tfd.HalfCauchy(0, 1, name="scale") coefs = yield from Horseshoe(tf.zeros(x.shape[1]), name="coefs") predictions = yield tfd.Normal(tf.linalg.matvec(x, coefs), scale, name="predictions") return predictions preds, state = interact(lambda: linreg_ugly(tf.random.normal((3, 10))), state=dict(dists=dict(), samples=dict())) # Okay, we passed this thing state["dists"] # We got nesting models working, but it requires `yield from`. This is UGLY and potentially confusing for user. Fortunately, we can rewrite out `interact` function to accept nested models in a few lines, and let the Python do the task for us. import types def interact_nested(gen, state): # for now we should check input type if not isinstance(gen, types.GeneratorType): control_flow = gen() else: control_flow = gen return_value = None while True: try: dist = control_flow.send(return_value) # this makes nested models possible if isinstance(dist, types.GeneratorType): return_value, state = interact_nested(dist, state) # ^ in a few lines of code, go recursive else: if dist.name in state["dists"]: control_flow.throw(RuntimeError( "We found duplicate names in your cool model: {}, " "so far we have other variables in the model, {}".format( dist.name, set(state["dists"].keys()), ) )) if dist.name in state["samples"]: return_value = state["samples"][dist.name] else: return_value = dist.sample() state["samples"][dist.name] = return_value state["dists"][dist.name] = dist except StopIteration as e: if e.args: return_value = e.args[0] else: return_value = None break return return_value, state # remember we had problems here: # ```python # preds, state = interact(lambda: linreg(tf.random.normal((3, 10))), state=dict(dists=dict(), samples=dict())) # ``` # Additionally we can specify the observed variable preds, state = interact_nested(lambda: 
linreg(tf.random.normal((3, 10))), state=dict(dists=dict(), samples={"predictions/":tf.zeros(3)})) state["dists"] state["samples"] # Cool, we've finished the central idea behind PyMC4 core engine. There is some extra stuff to do to make `evaluate_nested` really powerful # # * resolve transforms # * resolve reparametrizations # * variational inference # * better error messages # * lazy returns in posterior predictive mode # # Some of this functionality may be found in the corresponding [PR#125](https://github.com/pymc-devs/pymc4/pull/125)
notebooks/pymc4_design_guide.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lorem Ipsum # ## "Neque porro quisquam est qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit..." # # Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris ut tellus in eros lobortis mollis. Sed vestibulum porttitor est, convallis varius mi efficitur non. Suspendisse tempus lectus tempus feugiat consequat. Proin quis aliquam orci. Donec sit amet turpis nec neque tempor vehicula sit amet ac arcu. Mauris congue sem ut nunc suscipit posuere. Pellentesque ante risus, auctor id consectetur ac, feugiat eu massa. Fusce tortor neque, mattis placerat enim ac, luctus commodo augue. Etiam iaculis turpis arcu. Nam ornare varius rhoncus. Curabitur sed molestie metus. Etiam eget nunc tempus est convallis aliquet vel sit amet nulla. Sed accumsan, orci vel porttitor finibus, lacus erat faucibus risus, ac ornare augue mi sed felis. Aenean vel porta orci. # # Donec tincidunt tempor augue. Donec suscipit, diam a tincidunt vehicula, erat ex lacinia leo, quis pharetra elit tellus et nibh. Duis eros nisi, ullamcorper sed leo in, blandit suscipit dolor. Nulla sit amet urna turpis. Sed in justo non tortor vulputate congue. Maecenas viverra consequat suscipit. Sed interdum ut magna non sodales. 
# # + class Lorem(object): def __init__(self): pass def show(self): print('''Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n Mauris ut tellus in eros lobortis mollis.\n Sed vestibulum porttitor est, convallis varius mi efficitur non.\n Suspendisse tempus lectus tempus feugiat consequat.\n Proin quis aliquam orci.\n Donec sit amet turpis nec neque tempor vehicula sit amet ac arcu.\n Mauris congue sem ut nunc suscipit posuere.\n Pellentesque ante risus, auctor id consectetur ac, feugiat eu massa.\n Fusce tortor neque, mattis placerat enim ac, luctus''') def main(): l = Lorem() l.show() main()
scripts/advanced_report/Lorem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import sys import re import string import argparse from numpy import random try: from nltk.corpus import stopwords from nltk import word_tokenize from nltk import pos_tag except: print("NLTK NOT FOUND") # - checkword = "hard" # + tmp_dominant_only = 1 # datafile = "~/Data/one-million-sense-tagged-instances-wn30" split = 0.4 window = 1 windowSize = 5 add_start_end = 1 dominant_only = 0 dominant_per = 0.2 remove_punctuation = 0 remove_stopwords = 0 POS_tags = 0 POS_RED = 0 senseval = True pos="noun" Full_Sent = open('Sentences.txt', 'r').read().split('\n') Full_Sens = open('Senses.txt', 'r').read().split('\n') print("Total No. of Senses: " + str(len(Full_Sens))) print("Total No. of Sentences: " + str(len(Full_Sent))) sentStr = "" sensStr = "" wordList = { 'hard': ['HARD1', 'HARD2', 'HARD3'], 'interest': ['interest1', 'interest2', 'interest3', 'interest4', 'interest5', 'interest6'], 'line': ['text', 'phone', 'product', 'formation', 'division', 'cord'], 'serve': ['SERVE2', 'SERVE6', 'SERVE10', 'SERVE12'] } checklist = wordList[checkword] for i in range(0, len(Full_Sens)): if Full_Sens[i] in checklist: sentStr += Full_Sent[i] + "\n" sensStr += Full_Sens[i] + "\n" sentFile = open('Preprocess_Files/' + checkword + '/sent', 'w') sensFile = open('Preprocess_Files/' + checkword + '/sense', 'w') sentFile.write(sentStr) sensFile.write(sensStr) sentFile.close() sensFile.close() Sent = open('Preprocess_Files/' + checkword + '/sent', "r").read().split('\n') del Sent[len(Sent) - 1] # last string is empty Sens = open('Preprocess_Files/' + checkword + '/sense', "r").read().split() assert(len(Sent) == len(Sens)) print("Using Senseval Dataset") print("For Word: " + checkword) print("Total number of examples: " + str(len(Sent))) SenSet = list(set(Sens)) # 
Delete minor senses if flag positive if dominant_only == 0: SenCnt = [] for i in SenSet: SenCnt.append(Sens.count(i)) maxCnt = max(SenCnt) SenRemove = [] for i in range(0, len(SenSet)): if SenCnt[i] < dominant_per * maxCnt: print("IGNORING", SenSet[i], SenCnt[i]) SenRemove.append(SenSet[i]) tmpSt = [] tmpSe = [] for i in range(0, len(Sent)): if Sens[i] not in SenRemove: tmpSt.append(Sent[i]) tmpSe.append(Sens[i]) Sent = tmpSt Sens = tmpSe # - Sent SenSet # + ##Use POS Tags if flag positive # if POS_tags == 1: # for i in range(0, len(Sent)): # try: # tmp = word_tokenize(Sent[i]) # tmp_tagged = pos_tag(tmp) # except UnicodeDecodeError as u: # Sent[i] = re.sub(r'[^\x00-\x7F]+',' ', Sent[i]) # tmp = word_tokenize(Sent[i]) # tmp_tagged = pos_tag(tmp) # tmp = "" # for j in range(0, len(tmp_tagged)): # tmptag = tmp_tagged[j][1].lower() # if POS_RED == 1: # if tmptag in ['$', '\'', '(', ')', ',', '.', '--', ':', ';']: # tmptag = 'PUNCTUATION' # elif tmptag in ['DT', 'EX']: # tmptag = 'DETERMINER' # elif tmptag in ['jj', 'jjr', 'jjs']: # tmptag = 'ADJ' # elif tmptag in ['nn', 'nnp', 'nnps', 'nns']: # tmptag = 'NOUN' # elif tmptag in ['prp', 'prp$']: # tmptag = 'PRONOUN' # elif tmptag in ['rb', 'rbr','rbs']: # tmptag = 'ADVERB' # elif tmptag in ['vb', 'vbd', 'vbg','vbn','vbp','vbz']: # tmptag = 'VERB' # elif tmptag in ['wdt', 'wp', 'wp$', 'wrb']: # tmptag = 'WH_WORD' # tmp += " " + tmp_tagged[j][0] + "_" + tmptag # Sent[i] = tmp ##Randomly shuffle sentences tmp = list(zip(Sent, Sens)) random.shuffle(tmp) Sent[:], Sens[:] = zip(*tmp) # - Sent[0] # + ##Get set of senses lenst = len(Sent) SenSet = list(set(Sens)) SenSet.sort() SenCnt = [] ##Get count of each sense for i in SenSet: SenCnt.append(Sens.count(i)) ##Most dominant sense maxCnt = max(SenCnt) Sent_div = [] for i in range(0,len(SenSet)): Sent_div.append([]) newline_regex = re.compile(r"\n[\n]*") exclude = set(string.punctuation) for i in range(0, lenst): Sent[i] = Sent[i].strip() ##remove stopwords if flag positive if 
remove_stopwords == 1: tmp = Sent[i].split() filtered_words = [word for word in tmp if word not in stopwords.words('english')] Sent[i] = ' '.join(filtered_words) ##remove punctuations if flag positive if remove_punctuation == 1: Sent[i] = ''.join(ch for ch in Sent[i] if ch not in exclude) ##extract window around the word if window positive if window == 1: left, _ ,right = Sent[i].lower().partition(checkword) n = windowSize left = left.split()[-n:] right = right.split()[:n] #print(right) #try: # del right[0] #except: # pass tmp = [] if len(left) < windowSize and add_start_end == 1: for j in range(0,windowSize-len(left)): tmp.append('START') tmp.extend(left) left = tmp if add_start_end == 1: for j in range(len(right), windowSize): right.append('END') Sent[i] = ' '.join(left + [checkword] + right) ##Formatting the sentences Sent[i] = Sent[i].strip() Sent[i] = Sent[i].replace(" ", "\n") Sent[i] = newline_regex.sub("\n", Sent[i]) if "\n\n" in Sent[i]: print(Sent[i]) ind = SenSet.index(Sens[i]) Sent_div[ind].append(Sent[i]) ##Split Training and test sentences SentTest = [] SensTest = [] Sent = [] Sens = [] for i in range(0, len(SenSet)): numTotal = len(Sent_div[i]) numTest = int( split * numTotal) numTrain = numTotal - numTest SentTest.extend(Sent_div[i][ 0: numTest ]) for j in range(0, numTest ): SensTest.append(SenSet[i]) Sent.extend(Sent_div[i][ numTest: numTotal ]) for j in range(0, numTrain ): Sens.append(SenSet[i]) ##Equalize the number of Senses lenst = len(Sent) for i in range(0, lenst): ind = SenSet.index(Sens[i]) num = int(1.0*maxCnt/SenCnt[ind]) for j in range(0, num): Sent.append(Sent[i]) Sens.append(Sens[i]) ##Write training set sentences tmp = list(zip(Sent, Sens)) random.shuffle(tmp) Sent[:], Sens[:] = zip(*tmp) print("Number of training examples: " + str(len(Sent))) # fileSent = open("BLSTM/text_words.csv", "w") # fileSens = open("BLSTM/summary_words.csv", "w") # tmp = '\n\n'.join(Sent) # fileSent.write(tmp) # tmp = '\n\n'.join(Sens) # fileSens.write(tmp) 
# try: # ##Write test set sentences # tmp = list(zip(SentTest, SensTest)) # random.shuffle(tmp) # SentTest[:], SensTest[:] = zip(*tmp) # print("Number of testing examples: " + str(len(SentTest))) # fileSentTest = open("BLSTM/test_text_words.csv", "w") # fileSensTest = open("BLSTM/test_summary_words.csv", "w") # tmp = '\n\n'.join(SentTest) # fileSentTest.write(tmp) # tmp = '\n\n'.join(SensTest) # fileSensTest.write(tmp) # except: # print("Number of testing examples: 0") # - Sent[0] Sens
Four Word Model/initial_processing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Practical Deep Learning for Coders, v3
# # Lesson7_resnet_mnist
#
# # MNIST CNN
#
# Walkthrough: build progressively better CNNs on MNIST with fastai v1 --
# first a plain conv/batchnorm stack, then a refactored version using
# fastai's `conv_layer`, and finally a small ResNet-style model.

# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

from fastai.vision import *

# ### Data

# Download and extract MNIST as individual image files.
path = untar_data(URLs.MNIST)
path.ls()

# convert_mode='L' loads the images as single-channel greyscale.
il = ImageList.from_folder(path, convert_mode='L')
il.items[0]

defaults.cmap='binary'

il

il[0].show()

# Train/valid split follows the on-disk folder layout.
sd = il.split_by_folder(train='training', valid='testing')
sd

(path/'training').ls()

# Labels come from the digit-named subfolders.
ll = sd.label_from_folder()
ll

x,y = ll.train[0]

x.show()
print(y,x.shape)

# Random zero-padded crop as the only train-time augmentation;
# no transforms on the validation set (second tuple element is empty).
tfms = ([*rand_pad(padding=3, size=28, mode='zeros')], [])

ll = ll.transform(tfms)

bs = 128

# not using imagenet_stats because not using pretrained model
data = ll.databunch(bs=bs).normalize()

x,y = data.train_ds[0]

x.show()
print(y)

# Show the same training image 9 times to visualise the random padding.
def _plot(i,j,ax): data.train_ds[0][0].show(ax, cmap='gray')
plot_multi(_plot, 3, 3, figsize=(8,8))

xb,yb = data.one_batch()
xb.shape,yb.shape

data.show_batch(rows=3, figsize=(5,5))

# ### Basic CNN with batchnorm

# 3x3 stride-2 convolution: each application halves the spatial size.
def conv(ni,nf): return nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)

# The trailing comments track the feature-map size after each conv
# (28 -> 14 -> 7 -> 4 -> 2 -> 1).
model = nn.Sequential(
    conv(1, 8), # 14
    nn.BatchNorm2d(8),
    nn.ReLU(),
    conv(8, 16), # 7
    nn.BatchNorm2d(16),
    nn.ReLU(),
    conv(16, 32), # 4
    nn.BatchNorm2d(32),
    nn.ReLU(),
    conv(32, 16), # 2
    nn.BatchNorm2d(16),
    nn.ReLU(),
    conv(16, 10), # 1
    nn.BatchNorm2d(10),
    Flatten() # remove (1,1) grid
)

learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)

print(learn.summary())

# Sanity-check one forward pass on the GPU.
xb = xb.cuda()

model(xb).shape

learn.lr_find(end_lr=100)

learn.recorder.plot()

learn.fit_one_cycle(3, max_lr=0.1)

# ### Refactor

# fastai's conv_layer bundles conv + batchnorm + ReLU into one call.
def conv2(ni,nf): return conv_layer(ni,nf,stride=2)

model = nn.Sequential(
    conv2(1, 8), # 14
    conv2(8, 16), # 7
    conv2(16, 32), # 4
    conv2(32, 16), # 2
    conv2(16, 10), # 1
    Flatten() # remove (1,1) grid
)

learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)

learn.fit_one_cycle(10, max_lr=0.1)

# ### Resnet-ish

# Minimal residual block: two conv layers plus the identity skip connection.
class ResBlock(nn.Module):
    def __init__(self, nf):
        super().__init__()
        self.conv1 = conv_layer(nf,nf)
        self.conv2 = conv_layer(nf,nf)

    def forward(self, x): return x + self.conv2(self.conv1(x))

# fastai ships an equivalent helper (`res_block`), used below instead of
# the hand-written ResBlock above.
help(res_block)

model = nn.Sequential(
    conv2(1, 8),
    res_block(8),
    conv2(8, 16),
    res_block(16),
    conv2(16, 32),
    res_block(32),
    conv2(32, 16),
    res_block(16),
    conv2(16, 10),
    Flatten()
)

# Downsampling conv followed by a residual block at the new width.
def conv_and_res(ni,nf): return nn.Sequential(conv2(ni, nf), res_block(nf))

model = nn.Sequential(
    conv_and_res(1, 8),
    conv_and_res(8, 16),
    conv_and_res(16, 32),
    conv_and_res(32, 16),
    conv2(16, 10),
    Flatten()
)

learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)

learn.lr_find(end_lr=100)

learn.recorder.plot()

learn.fit_one_cycle(12, max_lr=0.05)

print(learn.summary())
zh-nbs/Lesson7_resnet_mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Lemmatization
#
# Tokenise a sentence, remove stop words/punctuation, then compare WordNet
# lemmatization against Porter stemming (both output and speed).

import nltk
# 'wordnet' is needed by WordNetLemmatizer, 'punkt' by word_tokenize and
# 'stopwords' by the stop-word list; the original only fetched 'wordnet',
# which fails on a fresh NLTK install.
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('stopwords')

# + tags=[]
### import necessary libraries
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
# -

text = "Very orderly and methodical he looked, with a hand on each knee, and a loud watch ticking a sonorous sermon under his flapped newly bought waist-coat, as though it pitted its gravity and longevity against the levity and evanescence of the brisk fire."
print(text)

# Bug fix: `stop_words` (and the lemmatizer) were originally defined two
# cells *below* their first use, so a clean top-to-bottom run raised a
# NameError in the tokenising cell. Define them before they are needed.
wordnet_lemmatizer = WordNetLemmatizer()
stop_words = stopwords.words('english')

# + tags=[]
# tokenise text and drop stop words and punctuation
tokens = word_tokenize(text)
tokens = [token for token in tokens if (token not in stop_words and token not in string.punctuation)]
# -

# Lemmatize the filtered tokens. The filter condition is repeated here for
# parity with the stemming cell below; it is a no-op on already-filtered tokens.
lemmatized = [wordnet_lemmatizer.lemmatize(token) for token in tokens if (token not in stop_words and token not in string.punctuation)]

len(lemmatized)

# ### Let's compare stemming and lemmatization

from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()

stemmed = [stemmer.stem(token) for token in tokens if (token not in stop_words and token not in string.punctuation)]

len(stemmed)

import pandas as pd

# Side-by-side table; show only the rows where either technique changed the token.
df = pd.DataFrame(data={'token': tokens, 'stemmed': stemmed, 'lemmatized': lemmatized})
df = df[['token', 'stemmed', 'lemmatized']]
df[(df.token != df.stemmed) | (df.token != df.lemmatized)]

# Let's compare the speed of both techniques

# + tags=[]
import requests

# "Alice's Adventures in Wonderland" from Project Gutenberg as a larger corpus.
url = "https://www.gutenberg.org/files/11/11-0.txt"
alice = requests.get(url)
print(alice.text)
# -

wordnet_lemmatizer.lemmatize("having")

# %%time
_ = [wordnet_lemmatizer.lemmatize(token, pos='n') for token in word_tokenize(alice.text)]

# %%time
_ = [stemmer.stem(token) for token in word_tokenize(alice.text)]

# * Lemmatising is faster than stemming in this case because the nltk lemmatiser also takes another argument called the part-of-speech (POS) tag of the input word.
# * The default part-of-speech tag is 'noun'.
# * You will learn more about part-of-speech tagging later in this course.
# * Right now, the stemmer will have more accuracy than the lemmatiser because each word is lemmatised assuming it's a noun. To lemmatise efficiently, you need to pass its POS tag manually.
9. NLP/1. Lexical Processing/2. Basic Lexical Processing/5. lemmatization.ipynb
# + [markdown] colab_type="text" id="Fwkmm6XXccMu"
# ##### Copyright 2018 The TF-Agents Authors.

# + [markdown] colab_type="text" id="i1S3JIIpcfnK"
# ### Get Started
# <table class="tfo-notebook-buttons" align="left">
#   <td>
#     <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/tf_agents/colabs/nest_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/tf_agents/colabs/nest_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
#   </td>
# </table>
#

# + [markdown] colab_type="text" id="BSeKG-5bBXsv"
# # Nest Tutorial
# tf.contrib.framework.nest is a utility library that packs/flattens arbitrary nested structures. A nested structure is a Python sequence, tuple (including `namedtuple`), or dict that can contain
# further sequences, tuples, and dicts. The utilities here assume (and do not check) that the nested structures form a 'tree'.
#
# For example, if you have a robot with two types of sensor inputs, you might want to structure the sensor inputs in a named tuple like collections.namedtuple('SensorInputs', 'input_1', 'input_2'). Then feed it into some machine learning system as input. But certain machine learning libraries (TensorFlow) takes only array as inputs, and therefore requires the user to pack/flatten the data themselves. Nest abstracts away this kind of tedious work, so user can focus on the application logic.
#
# Below we provide three examples to illustrate the usages of nest:
#
# 1. Using nest pack/flatten nested structure
# 2. Using nest to augment py_func to handle structured input/output.
#

# + [markdown] colab_type="text" id="NhZ9DvaAGn6z"
# # nest takes array, tuple, dict as input

# + colab={} colab_type="code" id="oZWf6rAo-WpY"
import numpy as np
import tensorflow as tf
# NOTE(review): tf.contrib was removed in TensorFlow 2.x -- this notebook
# only runs on TensorFlow 1.x.
nest = tf.contrib.framework.nest
import unittest
import collections

# + colab={} colab_type="code" id="cMsUCCSUFZKD"
# Unit tests demonstrating nest.flatten on each supported container type
# and nest.pack_sequence_as as its inverse.
class NestInputTest(unittest.TestCase):

  def testNestFlattensArray(self):
    # Nested lists flatten depth-first into a single flat list.
    self.assertEquals(nest.flatten([[1, 2, 3], [4, 5]]), [1, 2, 3, 4, 5])

  def testNestFlattensDict(self):
    # Dict values come out in a deterministic key order (here 'key_1', 'key_2').
    self.assertEquals(nest.flatten({'key_1': 1, 'key_2': 2}), [1, 2])

  def testNestFlattensTuple(self):
    self.assertEquals(nest.flatten((1, 2)), [1, 2])

  def testNestFlattensNamedTuple(self):
    NamedTuple = collections.namedtuple('NamedTuple', 'str_attr int_attr')
    self.assertEquals(
        nest.flatten(NamedTuple(str_attr='1', int_attr=2)), ['1', 2])

  def testNestPacksSequences(self):
    # pack_sequence_as is the inverse of flatten: the first argument only
    # supplies the *structure* (values are ignored), the flat list supplies
    # the values.
    packed_sequence = nest.pack_sequence_as(([None, None], (None,)), [1, 2, 3])
    self.assertEquals(packed_sequence, ([1, 2], (3,)))

suite = unittest.TestLoader().loadTestsFromTestCase(NestInputTest)
unittest.TextTestRunner().run(suite)

# + [markdown] colab_type="text" id="IBVE6Z4VRIp7"
# Below we look at a more complex example where we use nest.map_structure to transform inputs.
#

# + colab={} colab_type="code" id="omglRglpOQGJ"
# map_structure applies a function element-wise across one or more
# structures that share the same shape.
class NestMapStructureTest(tf.test.TestCase):

  def testAddTensors(self):
    add_op = nest.map_structure(lambda x, y: x + y, tf.constant(1), tf.constant(1))
    with self.test_session() as sess:
      self.assertEquals(add_op.eval(), 2)

  def testGroupTuplesByElement(self):
    # With a variadic lambda, corresponding leaves of all structures are
    # grouped together into tuples.
    grouped = nest.map_structure(lambda *arr: arr,
                                 ('a', 'b', ('c', 'd')),
                                 (3, 4, (5, 6)),
                                 (False, True, (True, True)))
    np.testing.assert_equal(grouped,
                            (('a', 3, False), ('b', 4, True),
                             (('c', 5, True), ('d', 6, True))))

suite = unittest.TestLoader().loadTestsFromTestCase(NestMapStructureTest)
unittest.TextTestRunner().run(suite)

# + [markdown] colab_type="text" id="k8549QwGo5_A"
# # Using nest to work with structured input/output with py_func
# py_func only takes arrays as input and output, so nest is very useful when we want to process structured input and output with py_func.
# Below is an example that adds three numbers together and returns the sum and list of original operands.
#

# + colab={} colab_type="code" id="TvkttfPk7lsn"
# define input/output structures
input_structure = {'input1': (None, None), 'input2': {'num3': None}}
output_structure = {'num1': None, 'num2': None, 'num3': None, 'sum': None}

def adds(*arg):
  # input is a flat list; pack it back to structured data
  packed_input = nest.pack_sequence_as(input_structure, arg)
  num1, num2 = packed_input['input1']
  num3 = packed_input['input2']['num3']
  # return a flat list
  return [num1, num2, num3, num1 + num2 + num3]

with tf.Graph().as_default():
  inputs = {
      'input1': (tf.constant(1), tf.constant(2)),
      'input2': {
          'num3': tf.constant(3)
      }
  }
  # call py_func with structured inputs
  flat_result = tf.py_func(adds, nest.flatten(inputs),
                           [tf.int32, tf.int32, tf.int32, tf.int32])
  # pack the result into structured tensors
  # (key order of output_structure matches the flat list returned by adds)
  nest_result = nest.pack_sequence_as(output_structure, flat_result)
  tf_num1 = nest_result['num1']
  tf_num2 = nest_result['num2']
  tf_num3 = nest_result['num3']
  tf_sum = nest_result['sum']
  with tf.Session() as sess:
    # verify it all works
    np.testing.assert_equal(
        sess.run([tf_num1, tf_num2, tf_num3, tf_sum]), [1, 2, 3, 6])
tf_agents/colabs/nest_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Aufgabe 2.6 - Zweidimensionale, stationäre Potentialströmung
# (Exercise 2.6 -- two-dimensional, steady potential flow)
#
# Finite-difference solution of the Laplace equation for the stream function
# on an L x H rectangle. Flux boundary conditions are applied as one-sided
# (Neumann) difference rows; a single Dirichlet point pins the otherwise
# underdetermined pure-Neumann system.

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.lines as mlines
import numpy as np
from scipy.interpolate import griddata
from matplotlib import cm

np.set_printoptions(linewidth=150)

# Helper functions: 2D<->1D index mapping, and the distance between a point
# and a finite line segment (used to assign boundary conditions to nodes).

def dist_finite(x1, y1, x2, y2, x3, y3): # x3,y3 is the point
    """Squared distance from point (x3, y3) to segment (x1,y1)-(x2,y2).

    Note: despite the name, the returned value is the *squared* distance
    (no square root is taken); callers compare it against a small tolerance.
    """
    px = x2-x1
    py = y2-y1
    norm = px*px + py*py
    # projection parameter along the segment, clamped to [0, 1]
    u = ((x3 - x1) * px + (y3 - y1) * py) / float(norm)
    if u > 1:
        u = 1
    elif u < 0:
        u = 0
    x = x1 + u * px
    y = y1 + u * py
    dx = x - x3
    dy = y - y3
    dist = (dx*dx + dy*dy)
    return dist

def rectangular_mesh(L, H, n_x, n_y):
    """Return an n_x*n_y list of [x, y] node coordinates on [0,L] x [0,H].

    Nodes are ordered x-major (the y index varies fastest), matching
    i_j_to_index below.
    """
    mesh = []
    for i in np.linspace(0, L, n_x):
        for j in np.linspace(0, H, n_y):
            mesh += [[i, j]]
    return mesh

def index_to_i_j(index, n_x, n_y):
    """Convert a flat node index back to grid indices [i, j]."""
    n_x = int(index/n_y)
    n_y = index % n_y
    return [n_x, n_y]

def i_j_to_index(i, j, n_x, n_y):
    """Convert grid indices (i, j) to the flat node index i*n_y + j."""
    return i * n_y + j

# Boundary-condition classes adapted for 2D problems.
#
# `line_neumann` applies a boundary condition to all mesh points lying on
# a finite line segment.

class rb():
    """Base class for a boundary condition attached to one grid point."""
    # class-level defaults; overwritten per instance in __init__
    position_x = 10
    position_y = 10

    def __init__(self, position_x, position_y):
        self.position_x = position_x
        self.position_y = position_y

    def apply(self, mesh, systemmatrix, rhs, delta_x):
        # overridden by subclasses (note: the subclasses use a wider signature)
        pass

class neumann(rb):
    """Neumann BC: prescribes the flux (normal derivative) at one boundary node."""
    fluss = 5  # default flux; overwritten in __init__

    def __init__(self, position_x, position_y, fluss):
        super(neumann,self).__init__(position_x, position_y)
        self.fluss = fluss

    def apply(self, mesh, systemmatrix, rhs, delta_x, delta_y, n_x, n_y):
        """Write a one-sided difference row for this node into the system."""
        if (self.position_x < 0 or self.position_x >= n_x or self.position_y < 0 or self.position_y >= n_y):
            raise ValueError("Out of bounds")
        # top or bottom boundary?
        # NOTE(review): `vertikal` is True on the top/bottom edges and selects
        # delta_y as the step size -- confirm this matches the intended
        # derivative direction for those edges.
        vertikal = False
        if (self.position_y == 0 or self.position_y == n_y - 1):
            vertikal = True
        position = 0;
        # pick the interior neighbour used by the one-sided difference
        if (self.position_y == 0):
            position = i_j_to_index(self.position_x, 1, n_x, n_y)
        elif (self.position_y == n_y - 1):
            position = i_j_to_index(self.position_x, n_y - 2, n_x, n_y)
        elif (self.position_x == 0):
            position = i_j_to_index(1, self.position_y, n_x, n_y)
        elif (self.position_x == n_x - 1):
            position = i_j_to_index(n_x - 2, self.position_y, n_x, n_y)
        else:
            raise ValueError("Not on boundary")
        main_index = i_j_to_index(self.position_x, self.position_y, n_x, n_y)
        divisor = (delta_y if vertikal else delta_x)
        #print("Second index: {}".format(index_to_i_j(position, n_x, n_y)))
        #print("Setting entries ({}, {}) and ({}, {})".format(main_index, main_index, main_index, position))
        # (phi_boundary - phi_neighbour) / h = fluss
        systemmatrix[main_index, position] = -1 / divisor
        systemmatrix[main_index, main_index] = 1 / divisor
        rhs[main_index] = self.fluss

class dirichlet(rb):
    """Dirichlet BC: fixes the stream-function value at one node."""
    stromfunktion = 150  # default value; overwritten in __init__

    def __init__(self, position_x, position_y, stromfunktion):
        super(dirichlet,self).__init__(position_x, position_y)
        self.stromfunktion = stromfunktion

    def apply(self, mesh, systemmatrix, rhs, delta_x, delta_y, n_x, n_y):
        """Replace the node's matrix row by an identity row with fixed RHS."""
        if (self.position_x < 0 or self.position_x >= n_x or self.position_y < 0 or self.position_y >= n_y):
            raise ValueError("Out of bounds")
        pos = i_j_to_index(self.position_x, self.position_y, n_x, n_y)
        systemmatrix[pos, :] = np.zeros((n_x * n_y))
        systemmatrix[pos, pos] = 1
        rhs[pos] = self.stromfunktion

class line_neumann():
    """Apply a Neumann BC to every mesh node lying on a finite line segment."""
    # segment end points and flux; overwritten in __init__
    start_x = 0
    start_y = 0
    end_x = 10
    end_y = 10
    fluss = 5

    def __init__(self, position_x, position_y, fluss):
        # position_x and position_y are (start, end) coordinate pairs
        self.start_x = position_x[0]
        self.end_x = position_x[1]
        self.start_y = position_y[0]
        self.end_y = position_y[1]
        self.fluss = fluss

    def apply(self, mesh, systemmatrix, rhs, delta_x, delta_y, n_x, n_y):
        rben = []  # NOTE(review): never used -- apparent leftover from an earlier version
        for i in range(len(mesh)):
            point=mesh[i]
            # squared distance (see dist_finite) against a small tolerance
            dist = dist_finite(self.start_x, self.start_y, self.end_x, self.end_y, point[0], point[1])
            if dist < 0.001:
                pos = index_to_i_j(i, n_x, n_y)
                randb = neumann(pos[0], pos[1], self.fluss)
                #print("Located Point on BC: indices ({}, {})".format(randb.position_x, randb.position_y))
                randb.apply(mesh, systemmatrix, rhs, delta_x, delta_y, n_x, n_y)

# ### Aufgabe 2.6.1

# Problem setup: 51x51 nodes on a 10 x 5 rectangle
L = 10
H = 5
n_x = 51
n_y = 51
delta_x = L / (n_x - 1)
delta_y = H / (n_y - 1)
mesh = rectangular_mesh(L, H, n_x, n_y)
systemmatrix = np.zeros((n_x * n_y , n_x * n_y))

# ### Aufgabe 2.6.2
#
# Boundary conditions: flux -10 on the left-edge segment y in [H/4, H/2],
# flux +10 on the right-edge segment y in [H/2, 3H/4], zero flux on all
# remaining wall segments.
rhs = np.zeros((n_x * n_y))
rbs = [line_neumann((0, 0), (0, H/4), 0),
       line_neumann((0, 0), (H/4, H/2), -10),
       line_neumann((0, 0), (H/2, H), 0),
       line_neumann((L, L), (0, H/2), 0),
       line_neumann((L, L), (H/2, 3*H/4), 10),
       line_neumann((L, L), (3*H/4, H), 0),
       line_neumann((0, L), (H, H), 0),
       line_neumann((0, L), (0, 0), 0)
      ]
for rb in rbs:  # NOTE(review): loop variable shadows the class `rb` defined above
    rb.apply(mesh, systemmatrix, rhs, delta_x, delta_y, n_x, n_y)

# ### Aufgabe 2.6.3

# Grid point (1, 1) is given a Dirichlet BC; it is applied *after* the
# matrix assembly below so its identity row is not overwritten.
rw = dirichlet(1, 1, 150)

# ### Aufgabe 2.6.4
#
# Fill the matrix with central differences (5-point Laplacian stencil,
# interior nodes only -- boundary rows are owned by the BC objects):
for i in range(len(mesh)):
    coords = mesh[i]
    pos = index_to_i_j(i, n_x, n_y)
    if (pos[0] == 0 or pos[0] == n_x - 1 or pos[1] == 0 or pos[1] == n_y - 1):
        continue # boundary point
    # horizontal derivative
    west = i_j_to_index(pos[0] - 1, pos[1], n_x, n_y)
    ost = i_j_to_index(pos[0] + 1, pos[1], n_x, n_y)
    systemmatrix[i, west] = systemmatrix[i, west] - 1 / delta_x**2
    systemmatrix[i, ost] = systemmatrix[i, ost] - 1 / delta_x **2
    systemmatrix[i, i] = systemmatrix[i, i] + 2 / delta_x ** 2
    # vertical derivative
    sued = i_j_to_index(pos[0], pos[1] - 1, n_x, n_y)
    nord = i_j_to_index(pos[0], pos[1] + 1, n_x, n_y)
    systemmatrix[i, sued] = systemmatrix[i, sued] - 1 / delta_y **2
    systemmatrix[i, nord] = systemmatrix[i, nord] - 1 / delta_y **2
    systemmatrix[i, i] = systemmatrix[i, i] + 2 / delta_y ** 2
    #print("Set gradient for point {}, index {}. North: {}, East: {}, South: {}, West: {}".format(pos, i, nord, ost, sued, west));

rw.apply(mesh, systemmatrix, rhs, delta_x, delta_y, n_x, n_y)

# ### Aufgabe 2.6.5

# Solve the linear system and visualise the results
print(systemmatrix)
print(rhs)
print(delta_x)
print(delta_y)
result = np.linalg.solve(systemmatrix, rhs)

# +
fig = plt.figure(figsize=(16, 6))
fig.add_subplot(1, 2, 1)
plt.title("Höhenlinien der Potentialfunktion")
plt.xlabel("x-Koordinate")
plt.ylabel("y-Koordinate")
xv, yv = np.linspace(0, L, n_x), np.linspace(0, H, n_y)
x2, y2 = np.meshgrid(xv, yv)
# reshape with order='F' because the flat solution vector is x-major
# (y index varies fastest -- see rectangular_mesh)
plt.contour(np.linspace(0, L, n_x), np.linspace(0, H, n_y), result.reshape((n_y, n_x), order='F'), 50)
plt.colorbar()
ax = fig.add_subplot(1, 2, 2, projection='3d')
z2 = griddata(mesh, result, (x2, y2), method='cubic')
ax.view_init(elev=30., azim=130)
ax.plot_surface(x2, y2, z2, cmap=cm.coolwarm)
plt.title("Potentialfunktion")
plt.show()
# -

plt.figure(figsize=(10, 6))
u, v = np.gradient(result.reshape((n_y, n_x), order='F'))
plt.streamplot(np.linspace(0, L, n_x), np.linspace(0, H, n_y), v, u)
plt.gca().set_xlim([-0.5, L+0.5])
plt.gca().set_ylim([-0.5, H+0.5])
# draw the solid wall segments (the gaps are the in/outflow openings)
plt.plot([0, 0], [0, H/4], 'k', alpha=1, linewidth=2)
plt.plot([0, 0], [H/2, H], 'k', alpha=1, linewidth=2)
plt.plot([L, L], [0, H/2], 'k', alpha=1, linewidth=2)
plt.plot([L, L], [3*H/4, H], 'k', alpha=1, linewidth=2)
plt.plot([0, L], [0, 0], 'k', alpha=1, linewidth=2)
plt.plot([0, L], [H, H], 'k', alpha=1, linewidth=2)
plt.title("Stromlinien")
plt.xlabel("x-Koordinate")
plt.ylabel("y-Koordinate")
plt.show()
Stromfadentheorie.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# +
## VECTORISE DATA

# Build a document-term matrix: one row per document in `dataset`, one
# column per unique lower-cased word taken from `titles` and `contents`.
# NOTE(review): `contents`, `titles` and `dataset`, plus a registered
# foreach/%dopar% parallel backend, must be created elsewhere before this
# cell runs -- they are not defined in this file.

#create wordvector
contentWords <- table(c(contents))
contentWords <- cbind.data.frame(word = tolower(names(contentWords)[1:length(names(contentWords))]), count = contentWords)
wordVector <- unique(c(tolower(unlist(titles)), tolower(unlist(contents))))
wordVector <- wordVector[wordVector != ""]

# One parallel task per document: count term occurrences in its title+body
# and scatter them into a vector aligned with wordVector.
vectorisedData <- foreach(i = 1:nrow(dataset), .combine=rbind) %dopar% {
    #i <- 1
    tVector <- integer(length(wordVector))
    row <- contents[contents[, "id"] == i,]
    countVector <- table(c(titles[[i]], row[2:length(row)]))
    for (j in 1:length(countVector)) {
        index <- match(names(countVector)[j], wordVector)
        tVector[index] <- countVector[j]
    }
    t(tVector)
    #vectorisedData <- rbind.data.frame(vectorisedData, t(as.data.frame(tVector)))
    #tVector
}
names(vectorisedData) <- wordVector
vectorisedData <- as.matrix(vectorisedData)
colnames(vectorisedData) <- wordVector

# +
## Word Bundles

# Group words whose count columns correlate strongly (> 0.9): such words
# tend to co-occur in the same documents.
correlationmatrix <- cor(vectorisedData)
bundles <- as.data.frame(foreach(i = 1:ncol(vectorisedData), .combine = rbind) %do% {
    #i<-1
    vec <- correlationmatrix[, i]
    bundle <- vec[vec > 0.9]
    c(id = i,
      bundle = paste(names(bundle[!is.na(bundle)]), collapse = "-"),
      value = sum(bundle[!is.na(bundle)]),
      first = colnames(correlationmatrix)[i],
      length = length(bundle[!is.na(bundle)]))
})
# keep one representative row per distinct bundle with total correlation > 1
bundles <- bundles[match(unique(bundles$bundle[as.numeric(bundles$value) > 1]), bundles$bundle),]

# +
## Synonyms

# Pairwise edit distances between all words; small distances are treated
# as spelling variants / synonyms.
library(stringdist)
similaritymatrix <- matrix(foreach(i = 1:length(wordVector), .combine = rbind, .packages = "stringdist") %dopar% {
    stringdist(wordVector[i], wordVector)
}, ncol = length(wordVector), nrow = length(wordVector), dimnames = list(wordVector,wordVector))
synonyms <- as.data.frame(foreach(i = 1:ncol(vectorisedData), .combine = rbind) %do% {
    #i<-1
    vec <- similaritymatrix[, i]
    # NOTE(review): length(colnames(similaritymatrix)[i]) is the length of a
    # one-element character vector, i.e. always 1, so the threshold is a
    # constant 0.25 -- nchar(...) (word length) was probably intended.
    synonym <- vec[vec < 0.25 * length(colnames(similaritymatrix)[i])]
    c(id = i, synonym = paste(names(synonym[!is.na(synonym)]), collapse = "-"), value = sum(synonym[!is.na(synonym)]))
})
synonyms <- synonyms[match(unique(synonyms$synonym[as.numeric(synonyms$value) > 1]), synonyms$synonym),]

# +
## SELECT FEATURES

# Per-word summary statistics over the document-term matrix.
analytics <- cbind.data.frame(wordVector,
    count = sapply(1:length(wordVector), function(w) { sum(vectorisedData[, w]) }),
    mean = sapply(1:length(wordVector), function(w) { mean(vectorisedData[, w]) }),
    stdev = sapply(1:length(wordVector), function(w) { sd(vectorisedData[, w]) }),
    max = sapply(1:length(wordVector), function(w) { max(vectorisedData[, w]) }),
    min = sapply(1:length(wordVector), function(w) { min(vectorisedData[, w]) }))
# coefficient of variation per word ("varration" = variation ratio)
analytics$varration <- sapply(1:nrow(analytics), function(a) { analytics$stdev[a] / analytics$mean[a] })
# NOTE(review): top_n is a dplyr function but library(dplyr) is only called
# further down -- this works only if dplyr is already attached; `test` is
# overwritten immediately and never used afterwards.
test <- top_n(analytics, 50, analytics$count)
test <- analytics[rank(analytics$count, ties.method = "random"),]
# total token count per document
lengths <- sapply(1:nrow(vectorisedData), function(i) { sum(vectorisedData[i,]) })
lengths <- cbind.data.frame(length = lengths)
library(dplyr)
#analytics[sort(analytics$count, decreasing = T),]
#selectedFeatures <- as.character(analytics[!is.na(analytics$varration) & analytics$varration > 2,]$wordVector)
#selectedFeatures <- as.character(top_n(analytics, 200, analytics$varration)$wordVector)
#selectedFeatures <- names(vectorisedData)
# active choice: representative words of bundles with more than 2 members
selectedFeatures <- as.character(bundles$first[as.numeric(bundles$length)>2])
featureSet <- matrix(as.numeric(vectorisedData[, selectedFeatures]),nrow = nrow(vectorisedData))#,
    #date = with(dataset, (as.numeric(date) - quantile(as.numeric(date), 0.05)) / (mean(as.numeric(date)) - quantile(as.numeric(date), 0.05))),
    #hash_ = dataset$hash)

# +
## K MEANS CLUSTERING

# Euclidean distance between two row vectors (matrix form; the commented
# line is the element-wise equivalent).
cartesianDistance <- function(v1, v2) {
    #sum(mapply(function(c1, c2) {(c1 - c2) ^ 2 }, v1, v2))^0.5
    ((v1 - v2) %*% t(v1 - v2))[[1]]^0.5
}

# Seed a centroid by copying a randomly chosen document's feature row
# (Forgy-style initialisation).
generateCentroid <- function() {
    centroid <- double(length(selectedFeatures))
    #centroid[sample(1:length(selectedFeatures), round(mean(lengths$length) + 1))] <- 1
    centroid <- as.numeric(featureSet[sample(1:nrow(dataset),1),])
    centroid
}

k <- 10
centroids <- list()
for (i in 1:k) {
    centroids <- rbind.data.frame(centroids, generateCentroid())
}
centroids <- as.matrix(centroids)
colnames(centroids)<- NULL

# Lloyd iterations: assignment, centroid update, and per-cluster mean
# distance bookkeeping. NOTE(review): runs all G generations with no
# convergence check (lastCentroids is saved but never compared).
G <- 100
history <- list()
g <- 1
while (g <= G) {
    # assignment step: nearest centroid per document
    dataset$cluster <- foreach(i = 1:nrow(featureSet), .combine = rbind) %dopar% {
        #i <- 1
        distances <- sapply(1:k, function(c) {
            #c <- 2
            V1 <- as.matrix(centroids[c,])
            V2 <- as.matrix(featureSet[i, ])
            (t(V1-V2) %*% (V1-V2))[[1]]^0.5
        })
        match(min(distances), distances)
    }
    # update step: mean of members, or reseed an (almost) empty cluster
    newcentroids <- centroids
    for (i in 1:k) {
        #i <- 2
        if (nrow(dataset[dataset$cluster == i, ]) > 1) {
            #i <- 1
            members <- as.data.frame(featureSet[dataset$cluster == i,])
            for (j in 1:length(selectedFeatures)) {
                newcentroids[i, j] = mean(members[, j])
            }
        } else {
            newcentroid <- generateCentroid()
            for (j in 1:length(selectedFeatures)) {
                newcentroids[i, j] = newcentroid[j]
            }
        }
    }
    lastCentroids <- as.matrix(centroids)
    centroids <- as.matrix(newcentroids)
    # distance of each document to its (updated) centroid
    dataset$distance <- foreach(i = 1:nrow(featureSet), .combine = rbind) %dopar% {
        # i <- 1
        V1 <- as.matrix(centroids[dataset$cluster[i],]) #, nrow = 1)
        V2 <- as.matrix(featureSet[i,])
        (t(V1 - V2) %*% (V1 - V2))[[1]] ^ 0.5
    }
    # mean within-cluster distance, per cluster and total
    distances <- double(k)
    for (i in 1:k) {
        distances[i] <- sum(dataset$distance[dataset$cluster == i]) / length(dataset$distance[dataset$cluster == i])
    }
    history <- rbind.data.frame(history, cbind(g, t(distances), sum(distances)))
    g <- g + 1
}
names(history) <- c("generation", 1:k, "total")
View(history)

# +
# Scatter-plot matrix of 5 random measures, coloured by cluster.
# NOTE(review): this function indexes the global `distances` with rownames
# and column names, but at this point `distances` is a plain length-k
# numeric vector -- it was probably written against a document-by-measure
# distance *matrix* from another version. `corpus`, `results`, ggplot2 and
# the %>% pipe are also not defined/loaded in this file; confirm before use.
plotClusters <- function(corpus,clusterAssignments) {
    sampleMeasures <- sample(names(corpus),5)
    measures <- cbind(Id=rownames(distances), Cluster=clusterAssignments[rownames(distances)], distances[,sampleMeasures])
    foreach(i=sampleMeasures, .combine=rbind) %do% {
        foreach(j=sampleMeasures, .combine=rbind) %do% {
            cbind(measures[,c('Cluster')], Measure1=rep(i,nrow(measures)), Value1=measures[,i], Measure2=rep(j,nrow(measures)), Value2=measures[,j])
        }
    } %>% as.data.frame %>% {
        ggplot(data=., mapping=aes(x=Value1,y=Value2,colour=Cluster)) + geom_point() + facet_grid(Measure1 ~ Measure2)
    }
}
plotClusters(corpus,results$clusterAssignments)
_notebooks/FindingTheWords_1/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Epicurious recipe-rating classification: binarise ratings into High/Low,
# then compare an SVM trained on PCA components against an SVM trained on
# features selected via random-forest importances.

import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA as sklearn_pca
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition
from sklearn.model_selection import cross_val_score, KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score

# Read and import data
raw_data = pd.read_csv('epi_r.csv')

# List the raw features in the dataset
list(raw_data.columns)

# +
# Get a basic understanding of the dataset
raw_data.describe()

# +
# Plot Ratings to see distribution
raw_data.rating.hist(bins=20)
plt.title('Histogram of Recipe Ratings')
plt.show()

# +
# Create bins and group names: ratings up to 4.375 are 'Low', above are 'High'
bins = [-0.1, 4.375, 5]
group_names = ['Low', 'High']

# Adding a column with both classes: High & Low
raw_data['categories'] = pd.cut(raw_data['rating'], bins, labels=group_names)

# Count the number of elements per class
# (pd.value_counts was removed in pandas 2.x; use the Series method instead)
raw_data['categories'].value_counts()

# Integer class code (0 = Low, 1 = High) used as the target below
raw_data['code'] = raw_data.categories.cat.codes
raw_data['code'].value_counts()
# -

# Count nulls
null_count = raw_data.isnull().sum()
null_count[null_count>0]

# +
# Initial features dropped: 'categories', 'rating', 'code', 'title',
# 'calories', 'protein', 'fat', 'sodium' (target leakage / non-binary cols).
# The remaining names are features with random-forest importance == 0
# (produced by the feature-selection section further down).
# Fixes vs. the original cell:
#   * "'buffalo'. 'louisville'" used a period instead of a comma -- a
#     SyntaxError that prevented the whole notebook from running.
#   * the duplicate 'title' entry was removed.
#   * drop(..., 1) now spells axis=1 (positional axis is deprecated).
# NOTE(review): 'cr??me de cacao' / 'bon app??tit' look mojibake-garbled and
# '<NAME>' looks like a scrubbed placeholder -- confirm they match the real
# epi_r.csv column headers, otherwise drop() raises a KeyError.
X = raw_data.drop(
    ['categories', 'rating', 'code', 'title', 'calories', 'protein', 'fat',
     'sodium', 'alaska', 'bulgaria', 'gouda', 'crêpe', 'miami', 'seattle',
     'waffle', 'hollywood', 'pot pie', 'kansas', 'anthony bourdain',
     'portland', 'eau de vie', 'oktoberfest', 'missouri', 'columbus',
     'chile', 'dip', 'georgia', 'guam', 'wisconsin', 'pennsylvania',
     'apple juice', 'pacific palisades', 'france', 'oatmeal',
     'emeril lagasse', 'octopus', 'cr��me de cacao', 'suzanne goin',
     'couscous', 'tennessee', 'north carolina', 'westwood', 'yuca',
     'denver', 'healdsburg', "hors d'oeuvre", 'st. louis', 'hummus',
     'marshmallow', 'sukkot', 'rosé', 'cookbooks', 'costa mesa', 'purim',
     'camping', 'soufflé/meringue', 'custard', 'swordfish', 'labor day',
     'rutabaga', 'philippines', 'bulgur', 'providence', 'slow cooker',
     'bran', 'ice cream', 'pasadena', 'london', 'frittata', 'atlanta',
     'colorado', 'spain', '#wasteless', 'wok', 'flat bread', 'mandoline',
     'beef shank', '<NAME>', 'mezcal', 'kiwi', 'low sugar', 'aspen',
     'beverly hills', 'mississippi', 'west virginia', 'minnesota',
     'butterscotch/caramel', 'sorbet', 'ireland', 'omelet',
     'tested & improved', 'lunar new year', 'stock', 'frankenrecipe',
     'hamburger', 'dorie greenspan', 'iced tea', 'minneapolis', 'las vegas',
     'drinks', 'paris', 'venison', 'new hampshire', 'washington', 'florida',
     'australia', 'san francisco', 'south carolina', 'epi + ushg', 'idaho',
     'vermont', 'nebraska', 'freezer food', 'texas', 'dallas', 'lentil',
     'chicago', 'peru', 'potato salad', 'juicer', 'brie', 'bon app��tit',
     'boston', 'washington, d.c.', 'germany', 'new mexico', 'utah',
     'fritter', 'whole wheat', 'long beach', 'grand marnier', 'quiche',
     'kansas city', 'cranberry sauce', 'new orleans', 'caviar', 'illinois',
     'iced coffee', 'israel', 'maryland', 'lasagna', 'kentucky', 'aperitif',
     'maine', 'macaroni and cheese', '#cakeweek', 'lingonberry',
     'wild rice', 'pizza', 'iowa', 'cookbook critic', 'meatloaf', 'italy',
     'wasabi', 'louisiana', 'cuba', 'windsor', 'rabbit', 'oklahoma',
     'marinade', 'chartreuse', 'lamb shank', 'rub', 'pancake', 'martini',
     'mexico', 'lancaster', 'granola', 'semolina',
     'epi loves the microwave', 'cobbler/crumble', 'persimmon', 'england',
     'jícama', 'haiti', 'halibut', 'meatball', 'orzo', 'cook like a diner',
     'poppy', 'egypt', 'japan', 'arizona', 'jamaica', 'dominican republic',
     'santa monica', 'leftovers', 'sake', 'kwanzaa', 'houston', 'buffalo',
     'louisville', 'house cocktail', 'smoker', 'yonkers', 'cupcake',
     'kitchen olympics', 'marscarpone'],
    axis=1)

# Result column coded
Y = raw_data.code
# -

# View the shape of the dataset
X.shape

# #### PCA and new features based on PCA

# +
# Create a scaler object
sc = StandardScaler()

# Fit the scaler to the features and transform
X_std = sc.fit_transform(X)

# Create a PCA object with 30 components as a parameter
pca = decomposition.PCA(n_components=30)

# Fit the PCA and transform the data
X_std_pca = pca.fit_transform(X_std)

# View the new feature data's shape
X_std_pca.shape

# +
# Create a new dataframe with the new features
X1 = pd.DataFrame(X_std_pca)

# +
# Build heatmap based on the Correlation Matrix
sns.heatmap(X1.corr())
plt.show()

# +
# Call the SV Classifier
svc1 = SVC(kernel='linear', cache_size=1000, probability=True, random_state=0)

# Train the SVClassifier based on the PCA features
svc1.fit(X1, Y)

# +
# Cross validate results. Compute the fold scores once and reuse them
# (the original ran the full 5-fold cross-validation twice).
kf = KFold(5)
svc1_scores = cross_val_score(svc1, X1, Y, cv=kf)
print('Cross Validation Scores:', svc1_scores)
print('Cross Validation Scores:', svc1_scores.mean())
# -

# #### Select features based on a Random Forest Classifier

# Split the data into 30% test and 70% training
# (the original comment said 40/60, which did not match test_size=0.3)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=50)

# +
# Create a random forest classifier
clf = RandomForestClassifier(random_state=0)

# Train the classifier
clf.fit(X_train, y_train)

# Print the name and gini importance of each feature
feats = {}  # a dict to hold feature_name: feature_importance
for feature, importance in zip(X.columns, clf.feature_importances_):
    feats[feature] = importance  # add the name/value pair

importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Importance'}).reset_index()
# Zero-importance features: the candidates dropped in the cell above
drop = importances.loc[importances['Importance'] == 0.0]
drop['index'].tolist()

# +
# Create a selector object that will use the random forest classifier to identify
# features that have an importance of more than 0.007
sfm = SelectFromModel(clf, threshold=0.007)

# Train the selector
sfm.fit(X_train, y_train)

# +
# Print the names of the most important features
for feature_list_index in sfm.get_support(indices=True):
    print(X.columns[feature_list_index])

# +
# Transform the data to create a new dataset containing only the most important features
X_important_train = sfm.transform(X_train)
X_important_test = sfm.transform(X_test)

# +
# New SVClassifier based on a new kernel
svc2 = SVC(cache_size=1000, kernel='rbf', probability=True, random_state=0)

# Train the new classifier on the new dataset containing the most important features
svc2.fit(X_important_train, y_train)
# -

# Cross validation of the results obtained (scores computed once, as above)
kf = KFold(5)
svc2_scores = cross_val_score(svc2, X_important_test, y_test, cv=kf)
print('Cross Validation Scores:', svc2_scores)
print('Cross Validation Scores:', svc2_scores.mean())
Guided+example+%2B+challenge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Name:** \_\_\_\_\_ # # **EID:** \_\_\_\_\_ # # CS5489 - Tutorial 6 # ## Dimensionality Reduction and Face Recognition # # In this tutorial you will use linear and non-linear dimensionality reduction on face images, and then train a classifier for face recognition. In the second part, you will then use dimensionality reduction for de-noising. # # First we need to initialize Python. Run the below cell. # %matplotlib inline import IPython.core.display # setup output image format (Chrome works best) IPython.core.display.set_matplotlib_formats("svg") import matplotlib.pyplot as plt import matplotlib from numpy import * from sklearn import * import glob import os random.seed(100) rbow = plt.get_cmap('rainbow') # ## 1. Loading Data and Pre-processing # We first need to load the images. Download `olivetti_py3.pkz` from Canvas, and place it in in the same directory as this ipynb file. _DO NOT UNZIP IT_. Then run the following cell to load the images. oli = datasets.fetch_olivetti_faces(data_home="./") X = oli.data.astype('float') Y = oli.target img = oli.images imgsize = oli.images[0].shape # Each image is a 64x64 array of pixel values, resulting in a 4096 dimensional vector. Run the below code to show an example: print(img[0].shape) plt.imshow(img[0], cmap='gray', interpolation='nearest') plt.show() # Run the below code to show all the images! tmp = [] for i in range(0,400,20): tmp.append( hstack(img[i:i+20]) ) allimg = vstack(tmp) plt.figure(figsize=(9,9)) plt.imshow(allimg, cmap='gray', interpolation='nearest') plt.gca().xaxis.set_ticklabels([]) plt.gca().yaxis.set_ticklabels([]) plt.show() # Each person is considered as one class, and there are 10 images for each class. In total there are 40 classes (people). 
The data is already vectorized and put into the matrix `X`. The class labels are in vector `Y`. Now we split the data into training and testing sets. # + # randomly split data into 80% train and 20% test set trainX, testX, trainY, testY = \ model_selection.train_test_split(X, Y, train_size=0.80, test_size=0.20, random_state=4487) print(trainX.shape) print(testX.shape) # - # # 2. Linear Dimensionality Reduction - PCA # The dimension of the data is too large (4096) so learning classifiers will take a long time. Instead, our strategy is to use PCA to reduce the dimension first and then use the PCA weights as the representation for each image. Run PCA oon the data using 9 principal components. # + ### INSERT YOUR CODE HERE # - # The below function will plot the basis vectors of PCA. Run the next 2 cells to view the PCs. def plot_basis(model, imgsize): cname = model.__class__.__name__ if cname == 'LDA': KK = model.n_components comps = model.coef_ mn = None elif cname == 'PCA': KK = model.n_components_ comps = model.components_ mn = model.mean_ elif cname == 'NMF': KK = model.n_components_ comps = model.components_ mn = None K = KK if mn is not None: K += 1 nr = int(ceil(K/5.0)) sind = 1 #vmin = comps.flatten().min() #vmax = comps.flatten().max() # plot the mean pcfig = plt.figure(figsize=(8,nr*2)) if mn is not None: plt.subplot(nr,5,sind) plt.imshow(mn.reshape(imgsize), interpolation='nearest') plt.title("mean") plt.gray() plt.gca().xaxis.set_ticklabels([]) plt.gca().yaxis.set_ticklabels([]) sind += 1 # plot the components for j in range(0,KK): plt.subplot(nr,5,sind) v = comps[j,:] I = v.reshape(imgsize) plt.imshow(I, interpolation='nearest') plt.gray() plt.title("basis " + str(j+1)) plt.gca().xaxis.set_ticklabels([]) plt.gca().yaxis.set_ticklabels([]) sind += 1 # run the function plot_basis(pca, imgsize) # _What do the basis images look like? 
Do some basis images correspond to particular facial features?_ # - **INSERT YOUR ANSWER HERE** # ### Face Recognition # Now train a _linear_ classifier to do the face recognition. Use the calculated PCA representation as the new set of inputs. Use cross-validation to set the hyperparameters of the classifier. _You do not need to do cross-validation for the number of components._ Calculate the average training and testing accuracies. Remember to transform the test data into the PCA representation too! # # **NOTE**: It is **NOT** recommended to use `svm.SVC` for the training. This is because the default SVC implements 1-vs-1 multi-class classification, which means it will calculate 40x39=1560 classifiers! All other classifier should be fine since they do 1-vs-all multi-class classification, e.g., LogisticRegressionCV, LinearSVC, etc. If you want to use svm.SVC, make sure to use the OneVsRestClassifier as shown in Lecture 3. # + ### INSERT YOUR CODE HERE # - # ### Finding the best number of components # Now try a range of number of components for PCA to get the best test accuracy. Train a classifier for each one and see which dimension gives the best testing accuracy. Make a plot of PCA dimension vs. test accuracy. # + ### INSERT YOUR CODE HERE # - # _What is the best number of components? View the basis images to see what they look like_ # - **INSERT YOUR ANSWER HERE** # + ### INSERT YOUR CODE HERE # - # # 3. Linear Dimensionality Reduction - NMF # Now we will repeat the experiment using non-negative matrix factorization instead of PCA. Perform NMF with 9 components and visualize the basis images. # + ### INSERT YOUR CODE HERE # - # _What do the basis images look like? What is the main difference with the PCA basis images?_ # - **INSERT YOUR ANSWER HERE** # ### Finding the best number of components # Now find the number of components that gives the best test accuracy. Use the same type of classifier that you used in the previous experiment. 
# Use cross-validation to select the hyperparameters of the classifier. You do not need to do cross-validation for the number of components.

# +
### INSERT YOUR CODE HERE

# -

# _Which number of components give the best test result? How does the accuracy compare to the best PCA result? Why is NMF or PCA better?_
#
# - **INSERT YOUR ANSWER HERE**

# +
### INSERT YOUR CODE HERE

# -

# # Non-Linear Dimensionality Reduction - KPCA
# Now we will use KPCA to perform non-linear dimensionality reduction on the data before training the classifier. Run KPCA on the data using 10 principal components. Use the RBF kernel with gamma=0.001.

# +
### INSERT YOUR CODE HERE ###

# -

# The below function will plot the basis vectors of KPCA. Run the next 2 cells to view the PCs. The kernel PCs are a combination of similarities to points in the training set. The PCs are visualized by showing the top 5 positive and negative training examples, along with their coefficient $\alpha_i$.

def plot_kbasis(model, imgsize, X):
    """Visualize kernel-PCA components via their most-weighted training images.

    For each kernel PC, shows the 5 most positive and 5 most negative
    training examples (ranked by dual coefficient alpha), each titled with
    its coefficient (red = positive, blue = negative).

    model   : fitted sklearn KernelPCA (reads n_components and alphas_).
              NOTE(review): `alphas_` was renamed `eigenvectors_` in newer
              scikit-learn releases -- confirm the installed version.
    imgsize : (rows, cols) to reshape each training row for display.
    X       : training data the model was fitted on (rows are images).
    """
    KK = model.n_components
    alphas = model.alphas_.T   # one row of dual coefficients per component
    minmax = 5                 # how many extreme examples to show per sign
    py = KK
    px = minmax*2
    for i in range(KK):
        # sort alphas
        inds = argsort(alphas[i,:])
        # pick the 5 largest (from the end) then the 5 smallest coefficients
        myi = r_[arange(-1,-minmax-1,-1), arange(minmax-1,-1,-1)]
        myinds = inds[myi]
        for j,jj in enumerate(myinds):
            plt.subplot(py,px,(j+1)+i*px)
            plt.imshow(X[jj,:].reshape(imgsize), interpolation='nearest')
            plt.gray()
            # color the title by the sign of the coefficient
            if alphas[i,jj]<0:
                mycol = 'b'
            else:
                mycol = 'r'
            plt.title("{:.3f}".format(alphas[i,jj]), fontsize=7, color=mycol)
            if (j==0):
                plt.ylabel('PC' + str(i+1))
            plt.xticks([])
            plt.yticks([])

# run the function
plt.figure(figsize=(10,12))
plot_kbasis(kpca, imgsize, trainX)

# _What is the interpretation for the KPCA basis? What kind of faces do some of the PCs prefer?_
#
# - **INSERT YOUR ANSWER HERE**

# ### Finding the best kernel and best number of components
# Now try different kernels (poly, RBF, etc), kernel parameters, and number of components to get the best test accuracy.
Train a classifier for each one and see which dimension gives the best testing accuracy. Make a plots of number of components vs. test accuracy. # + ### INSERT YOUR CODE HERE # - # _What is the best kernel and number of components?_ # - **INSERT YOUR ANSWER HERE** # *Which dimensionality reduction method performs the best for classification? Why?* # - **INSERT YOUR ANSWER HERE** # # De-noising with dimensionality reduction # In this section, we will use dimensionality reduction for denoising a face image. Given a noisy image $\mathbf{x}$, we first compute the low-dimensional representation $\mathbf{w}$, e.g., using PCA. Then given $\mathbf{w}$, we reconstruct the image $\hat{\mathbf{x}}$ from the PCA/NMF basis. In sklearn, the reconstruction step can be achieved using the `inverse_transform()` method of the PCA or NMF classes. # # Train a PCA or NMF model using a large number of components (e.g., 100) on the face dataset, and then test it for denoising. For the noisy images, you can try different types of noise, such as: # - additive noise (using `random.normal`) # - salt & pepper noise (using `random.binomial`) # - masking out part of the image (the masked out part can be replaced with black, gray, or white). # # for evaluation of the reconstruction or denoising result you can use "peak signal to noise ratio", which is implemented in skimage as `skimage.metrics.peak_signal_noise_ratio` or `skimage.measure.compare_psnr` (on older versions). import skimage # + ### INSERT YOUR CODE HERE # - # _Which kind of noise can be handled well by linear dimensionality reduction? Why?_ # - **INSERT YOUR ANSWER HERE**
Machine-Learning/Lecture 6/Homework/Tutorial6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Q7WfzCajCtqu" colab_type="text"
# **[BA2-01]**
#
# Import modules.

# + id="-8IWbaVwQ211" colab_type="code" colab={}
import numpy as np
from pandas import DataFrame
import matplotlib
matplotlib.rcParams['font.size'] = 12

# + [markdown] id="EEN_i3DdDRU1" colab_type="text"
# **[BA2-02]**
#
# Define the Bandit class.

# + id="fQhRTF0kLS_B" colab_type="code" colab={}
class Bandit:
    """A non-stationary multi-armed bandit.

    Pulling an arm pays a reward drawn from N(mean, 1); after every pull
    ALL arm means drift by N(0, 0.01) noise, so old reward samples
    gradually become stale.
    """

    def __init__(self, arms=10):
        self.arms = arms                  # number of arms
        self.means = np.zeros(self.arms)  # hidden mean reward of each arm

    def select(self, arm):
        """Pull `arm` and return the sampled reward; then drift all means."""
        reward = np.random.normal(loc=self.means[arm], scale=1.0)
        # Add random values to the means (random-walk drift -> non-stationary).
        self.means += np.random.normal(loc=0.0, scale=0.01, size=self.arms)
        return reward

# + [markdown] id="rWuXEgyhgJhd" colab_type="text"
# **[BA2-03]**
#
# Define a function to choose an action with the ε-greedy policy.

# + id="96gPBQXwmTd3" colab_type="code" colab={}
def get_action(qs, epsilon):
    """Choose an arm index with the ε-greedy policy.

    With probability `epsilon` a uniformly random arm is chosen (explore);
    otherwise the arm with the largest estimated value wins (exploit).
    """
    if np.random.random() < epsilon:
        # Explore: choose randomly.
        return np.random.randint(len(qs))
    else:
        # Exploit: choose the arm with the max average.
        return np.argmax(qs)

# + [markdown] id="OaY0pRZeOIXE" colab_type="text"
# **[BA2-04]**
#
# Define a function to run an episode.

# + id="Yb9u5WXMK8Lp" colab_type="code" colab={}
def episode(bandit, alpha, steps, epsilon=0.1):
    """Run one episode of `steps` pulls and return cumulative rewards.

    bandit  : Bandit environment to interact with.
    alpha   : 0 -> sample-average updates; >0 -> constant step size,
              which tracks the drifting arm means better.
    steps   : number of pulls in the episode.
    epsilon : exploration rate for the ε-greedy policy
              (generalized from the previously hard-coded 0.1; the
              default preserves the original behavior).

    Returns a list of length steps+1 with the running total reward.
    """
    total_rewards = [0]
    qs = [0] * bandit.arms     # value estimate per arm
    count = [0] * bandit.arms  # pull count per arm (used only when alpha == 0)
    for _ in range(steps):
        arm = get_action(qs, epsilon=epsilon)
        reward = bandit.select(arm)
        # Append total rewards
        total_rewards.append(total_rewards[-1] + reward)
        # Update an estimate of the mean.
        if alpha == 0:
            # Use an average to estimate the mean.
            count[arm] += 1
            qs[arm] += (reward - qs[arm]) / count[arm]
        else:
            # Update an estimate with a constant weight.
            qs[arm] += alpha * (reward - qs[arm])
    return total_rewards

# + [markdown] id="skaKFwecQ7EX" colab_type="text"
# **[BA2-05]**
#
# Run episodes with various alphas.

# + id="K5wNAgN9vFks" colab_type="code" outputId="5035e3f8-ba96-4406-ec11-b971c4ec1f78" colab={"base_uri": "https://localhost:8080/", "height": 285}
rewards = {}
for alpha in [0, 0.1, 0.5, 0.9]:
    bandit = Bandit()
    rewards['α={}'.format(alpha)] = episode(bandit, alpha, steps=2000)

ax = DataFrame(rewards).plot()
_ = ax.set_xlabel('Step')
_ = ax.set_ylabel('Total rewards')

# + [markdown] id="wRoUZGRxggEO" colab_type="text"
# **[BA2-06]**
#
# Define a function to apply the grid search for α.

# + id="QESvfq6eOUED" colab_type="code" colab={}
def hypertune(values, num_samples):
    """Grid-search α: run `num_samples` fresh episodes per value.

    values      : iterable of α values to evaluate.
    num_samples : episodes per α (each on a newly created Bandit).

    Side effect: draws a box plot of final total rewards per α.
    """
    rewards = {}
    for alpha in values:
        scores = []
        for _ in range(num_samples):
            bandit = Bandit()  # Prepare a new environment.
            result = episode(bandit, alpha, steps=2000)
            scores.append(result[-1])  # Append the final (total) reward.
        rewards['{:1.1f}'.format(alpha)] = scores

    ax = DataFrame(rewards).plot(kind='box', figsize=(6, 8))
    _ = ax.set_xlabel('α')
    _ = ax.set_ylabel('Total rewards')

# + [markdown] id="V6qvt2UzgnQT" colab_type="text"
# **[BA2-07]**
#
# Apply the grid search and show results with box plots.

# + id="uubP4o5lBhag" colab_type="code" outputId="b00fcb6b-6922-4641-8be2-ba7168c30e9b" colab={"base_uri": "https://localhost:8080/", "height": 503}
hypertune(np.linspace(0, 1.0, 11), num_samples=500)
Chapter01/02_Bandit_Algorithm_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Day11 import pandas as pd uriage_data = pd.read_csv("csv/uriage.csv") uriage_data.head() kokyaku_data = pd.read_excel("csv/kokyaku_daicho.xlsx") kokyaku_data.head() # ## Day12 uriage_data["item_name"].head() uriage_data["item_price"].head() # ## Day13 uriage_data["purchase_date"] = pd.to_datetime(uriage_data["purchase_date"]) uriage_data["purchase_month"] = uriage_data["purchase_date"].dt.strftime("%Y%m") uriage_data.pivot_table(index = "purchase_month", columns = "item_name", aggfunc = "size", fill_value = 0) uriage_data.pivot_table(index = "purchase_month", columns = "item_name", values = "item_price", aggfunc = "sum", fill_value = 0) len(pd.unique(uriage_data["item_name"])) uriage_data["item_name"] = uriage_data["item_name"].str.upper() uriage_data["item_name"] = uriage_data["item_name"].str.replace(" ", "") uriage_data["item_name"] = uriage_data["item_name"].str.replace(" ", "") uriage_data.sort_values(by = ["item_name"], ascending = True) res = pd.unique(uriage_data["item_name"]) print(res, len(res)) # ## Day15 uriage_data.isnull().any(axis = 0) # + pd.set_option('mode.chained_assignment', 'raise') flg = uriage_data["item_price"].isnull() for trg in uriage_data.loc[flg, "item_name"].unique(): price = uriage_data.loc[(~flg) & (uriage_data["item_name"] == trg), "item_price"].max() uriage_data.loc[(flg) & (uriage_data["item_name"] == trg), "item_price"] = price uriage_data.head() # - uriage_data.isnull().any(axis = 0) for trg in uriage_data["item_name"].sort_values().unique(): C = uriage_data.loc[uriage_data["item_name"] == trg, "item_price"] print(f"{trg}: 最大額 {C.max()}, 最小額 {C.min(skipna = False)}") # min(skipna = True / False)
100 knock/02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Resnet 20 - CIFAR10 # # ## Comparing multiple executions # # * How stable is the ranking of discovered networks? # # # Let's run 4 instances of AMC (ResNet20, DDPG) and then review the results. # + # %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib import csv from matplotlib.ticker import FuncFormatter import ipywidgets as widgets from ipywidgets import interactive, interact, Layout import matplotlib.pylab as pylab import matplotlib.animation as animation from matplotlib import animation, rc from auto_compression_jupyter import * #plt.style.use('seaborn') # pretty matplotlib plots params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 7), 'axes.labelsize': 'x-large', 'axes.titlesize':'xx-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} pylab.rcParams.update(params) # Widen the cells to get entire rows in the screen. from IPython.core.display import display, HTML #display(HTML("<style>.container { width:100% !important; }</style>")) EXPERIMENTS_DIR = os.path.join("/experiments", "amc") # - fpath = os.path.join(EXPERIMENTS_DIR, "resnet20-ddpg-private", "2019.07.30-015225") df_list = load_experiment_instances(fpath) # ## Multiple experiment runs # # Below we plot the execution of the same experiment several times.<br> # We configure AMC as follows: # - Architecture: Resnet20 (CIFAR10) # - Action range: 5% - 100% retain rate # - Target density: 50% # - Don't fine-tune a discovered network before providing a reward. 
# # The command-line is provided below: # # time python3 ../../classifier_compression/multi-run.py ${AMC_EXP_PATH}/resnet20-ddpg-private amc.py --arch=resnet20_cifar ${CIFAR10_PATH} --resume=../ssl/checkpoints/checkpoint_trained_dense.pth.tar --amc-protocol=mac-constrained --amc-action-range 0.05 1.0 --amc-target-density=0.5 -p=50 --etes=0.075 --amc-ft-epochs=0 --amc-prune-pattern=channels --amc-prune-method=fm-reconstruction --amc-agent-algo=DDPG --amc-cfg=auto_compression_channels.yaml --amc-rllib=hanlab -j=1 # # Each colored line represents one experiment execution instance. We plot the search-Top1 score of discovered networks as the RL-based AMC system learns to find better compressed networks. You might be impressed by: # * The variability in behavior, which is typical for RL algorithms. # plot_experiment_comparison(df_list) # Now let's train some networks to convergence. We look at all of the networks that were progressively discovered as "best" networks during AMC's search. Note that these are **not** the best scoring discovered-networks. # # Fine-tune for 60 epochs (using 16 processes): **notice the large LR**. 
# # time python parallel-finetune.py --scan-dir=${AMC_EXP_PATH}/resnet20-ddpg-private/2019.07.30-015225 --arch=resnet20_cifar --lr=0.1 --vs=0 -p=50 --compress=../automated_deep_compression/fine_tune.yaml ${CIFAR10_PATH} -j=1 --epochs=60 --output-csv=ft_60epoch_results.csv --processes=16 # * Bubbles represent discovered networks # * Bubble sizes represent the relative number of FLOPs in each network # * Darker networks were discovered later in the search process # + df_resnet20_ft60 = pd.read_csv(os.path.join(fpath, "ft_60epoch_results.csv")) create_fig("Discovered Networks:\nComparing Search Top1 vs Fine-tuned Top1\n", "Fine-tuned Top1 Accuracy", "Search Top1 Accuracy") shorten_dir_name(df_resnet20_ft60) plot_networks(df_resnet20_ft60, edgecolors='orange') print(len(df_resnet20_ft60.index)) # - # Let's look at the same diagram from a different perspective.<br> # Remember, we ran our AMC experiment 4 times, and we fine-tuned networks discovered in each of these 4 experiments. Now let's separate these networks by color and observe how they cluster. create_fig("Discovered Networks:\nComparing Search Top1 vs Fine-tuned Top1\n", "Fine-tuned Top1 Accuracy", "Search Top1 Accuracy") plot_networks_by_experiment(df_resnet20_ft60, edgecolors='red')#, create_figure=False)
examples/auto_compression/amc/jupyter/amc_resnet20.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mlopt # language: python # name: mlopt # --- # + # %load_ext autoreload # %autoreload 2 import pickle, os import numpy as np import cvxpy as cp from manipulation import Manipulation # + mu_min, mu_max = 0.1, 0.1 h_min, h_max = 2, 2 h = 0.5*(h_min+h_max) r_min, r_max = 1, 1 r = 0.5*(r_min+r_max) w_std = 10 F_max = 1. N_v = 5 #number of points to choose from N_h = 6 num_grasps = 4 prob_params = [N_v, N_h, num_grasps, F_max, \ h_min, h_max, h, \ r_min, r_max, r, \ mu_min, mu_max, w_std] # + #setup filenames relative_path = os.getcwd() dataset_name = 'default' if not os.path.isdir(os.path.join(relative_path, 'data', dataset_name)): os.mkdir(os.path.join(relative_path+'/data/'+dataset_name)) if not os.path.isdir(os.path.join(relative_path, 'config')): os.mkdir(os.path.join(relative_path, 'config')) train_fn = os.path.join(relative_path, 'data', dataset_name, 'train.p') test_fn = os.path.join(relative_path, 'data', dataset_name, 'test.p') #load default parameter values #define all possible params that can be varied all_params = ['N_v', 'N_h', 'num_grasps', 'F_max', \ 'h_min', 'h_max', 'h', 'r_min', 'r_max', 'r', \ 'r_min', 'r_max', 'r', 'mu_min', 'mu_max', \ 'mu', 'w'] ##TODO(pculbert): implement custom param variation. #plan: split all_params into static & sampled params #in Problem object, keep param dict with keys == all_params #on constructing problem, make every param key map to either static value #or a cvxpy.Parameter object. 
### VARY ANY DESIRED PARAM VALUES HERE ###
# Static problem parameters handed to the solver (values defined above).
param_dict = {'N_v': N_v, 'N_h': N_h, 'num_grasps': num_grasps, 'F_max': F_max,
              'h_min': h_min, 'h_max': h_max, 'h': h,
              'r_min': r_min, 'r_max': r_max, 'r': r,
              'mu_min': mu_min, 'mu_max': mu_max, 'w_std': w_std}

# specify which parameters to sample, & their distributions
sampled_params = ['mu', 'w']
sample_dists = {'w': lambda w: np.exp(w) / np.sum(np.exp(w)),   # softmax over 12 logits
                'mu': lambda: np.array([mu_min + (mu_max - mu_min)*np.random.rand()])}

# specify dataset sizes
num_train = 4500; num_test = 500
num_probs = num_train + num_test

# write out config (context manager guarantees the file is closed)
config_fn = os.path.join(relative_path, 'config', dataset_name + '.p')
config = [dataset_name, prob_params, sampled_params]
with open(config_fn, "wb") as outfile:
    pickle.dump(config, outfile)
# -

# +
mp = Manipulation()

# create numpy containers for data: (params, x, u, y, J*, solve_time)
params = {}
if 'mu' in sampled_params:
    params['mu'] = np.zeros((num_probs, 1))
if 'w' in sampled_params:
    params['w'] = np.zeros((num_probs, 12))

Fs = np.zeros((num_probs, 3, 12, N_v*N_h))          # optimal force values per problem
As = np.zeros((num_probs, 12))
Ws = np.zeros((num_probs, 12))
Y = np.zeros((num_probs, 1, N_v*N_h)).astype(int)   # integer (grasp-selection) assignments

# initialize once (this line was accidentally duplicated in the original)
costs = np.zeros(num_probs); solve_times = np.zeros(num_probs)
# -

# big for loop, sampling problem data, solving MICP, & storing
ii = 0
while ii < num_probs:
    # sample the per-problem parameters for this instance
    p_dict = {}
    if 'mu' in sampled_params:
        params['mu'][ii, :] = sample_dists['mu']()
        p_dict['mu'] = params['mu'][ii, :]
    if 'w' in sampled_params:
        params['w'][ii, :] = sample_dists['w'](np.random.rand(12))
        p_dict['w'] = params['w'][ii, :]

    prob_success = False
    try:
        prob_success, cost, solve_time, optvals = mp.solve_micp(p_dict)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        # BUG FIX: the original printed 'mosek failed at '.format(ii), which
        # never interpolated ii because the string had no {} placeholder.
        print('mosek failed at {}'.format(ii))

    if prob_success:
        # keep (and advance past) only successfully solved problems;
        # failed samples are retried with fresh parameters at the same index
        costs[ii] = cost; solve_times[ii] = solve_time
        As[ii, :], F_star, Y[ii, :, :] = optvals
        for jj in range(len(F_star)):
            Fs[ii, :, :, jj] = F_star[jj]
        ii += 1

# +
#post-processing
# write out
# Split the sampled parameters into train / test partitions.
train_params = {}; test_params = {}
if 'mu' in sampled_params:
    train_params['mu'] = params['mu'][:num_train, :]
    test_params['mu'] = params['mu'][num_train:, :]
if 'w' in sampled_params:
    train_params['w'] = params['w'][:num_train, :]
    test_params['w'] = params['w'][num_train:, :]

# Bundle as [params, A, F, Y, costs, solve_times]; the loader below
# unpacks in this exact order.
train_data = [train_params]
train_data += [As[:num_train, :], Fs[:num_train, :, :, :], Y[:num_train, :]]
train_data += [costs[:num_train], solve_times[:num_train]]

test_data = [test_params]
test_data += [As[num_train:, :], Fs[num_train:, :, :, :], Y[num_train:, :]]
test_data += [costs[num_train:], solve_times[num_train:]]

# Use context managers so the files are closed even if pickling fails
# (the original left the handles open on an exception).
with open(train_fn, 'wb') as train_file:
    pickle.dump(train_data, train_file)

with open(test_fn, 'wb') as test_file:
    pickle.dump(test_data, test_file)
# -

# Sanity check: reload the training set.
with open(train_fn, 'rb') as train_file:
    p, a, f, y, c, s = pickle.load(train_file)
manipulation/data_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Latent Position Two-Graph Testing # + import numpy as np np.random.seed(88889999) from graspy.inference import LatentPositionTest from graspy.embed import AdjacencySpectralEmbed from graspy.simulations import sbm, rdpg from graspy.utils import symmetrize from graspy.plot import heatmap, pairplot # %matplotlib inline # - # ## Generate a stochastic block model graph to model as a random dot product graph # To start, we generate a binary stochastic block model graph (SBM). An SBM is composed of 'communities' or 'blocks,' where a node's block membership in a graph determines its probability of connection to the other nodes in the graph. # + n_components = 4 # the number of embedding dimensions for ASE P = np.array([[0.9, 0.11, 0.13, 0.2], [0, 0.7, 0.1, 0.1], [0, 0, 0.8, 0.1], [0, 0, 0, 0.85]]) P = symmetrize(P) csize = [50] * 4 A = sbm(csize, P) X = AdjacencySpectralEmbed(n_components=n_components).fit_transform(A) heatmap(A, title='4-block SBM adjacency matrix') pairplot(X, title='4-block adjacency spectral embedding') # - # In the adjacency matrix above, there is a clearly defined block structrure corresponding to the 4 communities in the graph that we established. On the right, we see the **adjacency spectral embedding (ASE)** of this graph. ASE(A) recovers an estimate of the **latent positions** of $A $. Latent positions refer to the idea of a **random dot product graph (RDPG)** which can be modeled as follows: # # For an adjacency matrix $A \in \mathbb{R}^{n x n}$, the probability of an edge existing between node $i$ and node $j$ (aka whether or not $A_{ij}$ is a 1) is determined by the matrix $P \in \mathbb{R}^{n x n}$ # # $P = XX^T$, where $X \in \mathbb{R}^{n x d} $ and is referred to as the latent positions of the graph. 
$X$ is referred to as the latent positions of the graph because each node $n_i$ is modeled as having a hidden, usually unobserved location in $\mathbb{R}^d$ (we'll call it $x_i$). The probability of an edge existing between $n_i$ and $n_j$ is equal to the dot product $x_i \cdot x_j$ # # ASE is one way to obtain an estimate of the latent positions of a graph, $\hat{X}$ # # In the above embedding, we see 4 clusters of nodes corresponding to the 4 blocks that we prescribed. ASE recovers the fact that all of the nodes in a block have similar latent positions. So, RDPGs can also model an SBM graph. # ## Sample new RDPGs from this latent position # Given the estimate of X, we now sample two new RDPGs from the same latent position above # + A1 = rdpg(X, loops=False, rescale=False, directed=False) A2 = rdpg(X, loops=False, rescale=False, directed=False) Xhat1 = AdjacencySpectralEmbed(n_components=n_components).fit_transform(A1) Xhat2 = AdjacencySpectralEmbed(n_components=n_components).fit_transform(A2) heatmap(A1, title='Sampled RDPG 1 adjacency matrix') heatmap(A2, title='Sampled RDPG 2 adjacency matrix') pairplot(Xhat1, title='Sampled RDPG 1 adjacency spectral embedding') pairplot(Xhat2, title='Sampled RDPG 2 adjacency spectral embedding') # - # Qualitatively, both of the simulated RDPGs above match the behavior we would expect, with 4 clear blocks and the corresponding 4 clusters in the embedded space. But, can we say they were generated from the same latent positions? # ## Latent position test where null is true # Now, we want to know whether the above two graphs were generated from the same latent position. We know that they were, so the test should predict that the differences between Sampled RDPG 1 and 2 (up to a rotation, see below) are no greater than those differences observed by chance. In this case, we will use the `LatentPositionTest` in `GraSPy` because we know the true alignment between the vertices of the two graphs we are testing. 
In other words, node $i$ in graph 1 can be thought of as equivalent to node $i$ in graph 2 because of the way we generated these graphs. # # In other words, we are testing $$ H_0: X_1 = X_2 R$$$$ H_a: X_1 \neq X_2 R$$ # # and want to see that the p-value for the latent position test is high (fail to reject the null) # # Here, R is an orthogonal rotation matrix found from solving the [orthogonal procrustes problem](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.linalg.orthogonal_procrustes.html) (Note: this constraint can be relaxed for other versions of semipar) # # Note that LatentPositionTest.fit() may take several minutes lpt = LatentPositionTest(n_bootstraps=200, n_components=n_components) lpt.fit(A1, A2) print('p = {}'.format(lpt.p_value_)) # We see that the corresponding p-value is high, indicating that the observed differences between latent positions of Sampled RDPG 1 and 2 are likely due to chance # ## Matched test where the null is false # Now, we distort the latent position of one of the sampled graphs by adding noise. The matched test should have a low p-value, indicating that we should reject the null hypothesis # + A3 = rdpg(X, loops=False, rescale=False, directed=False) A4 = rdpg(X + np.random.normal(0.05, 0.02, size=(X.shape)), loops=False, rescale=False, directed=False) Xhat3 = AdjacencySpectralEmbed(n_components=n_components).fit_transform(A3) Xhat4 = AdjacencySpectralEmbed(n_components=n_components).fit_transform(A4) heatmap(A3, title='Sampled RDPG 3 adjacency matrix') heatmap(A4, title='Sampled RDPG 4 (distorted) adjacency matrix') pairplot(Xhat3, title='Sampled RDPG 3 adjacency spectral embedding') pairplot(Xhat4, title='Sampled RDPG 4 (distorted) adjacency spectral embedding') # - lpt = LatentPositionTest(n_bootstraps=200, n_components=n_components) lpt.fit(A3, A4) print('p = {}'.format(lpt.p_value_))
docs/tutorials/inference/latent_position_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Basic Circuit Identities from qiskit import * from qiskit.circuit import Gate # When we program quantum computers, our aim is always to build useful quantum circuits from the basic building blocks. But sometimes, we might not have all the basic building blocks we want. In this section, we'll look at how we can transform basic gates into each other, and how to use them to build some gates that are slightly more complex \(but still pretty basic\). # # Many of the techniques discussed in this chapter were first proposed in a paper by Barenco and coauthors in 1995 [1]. # ### Making a controlled-$Z$ from a CNOT # The controlled-Z or `cz` gate is another well-used two-qubit gate. Just as the CNOT applies an $X$ to its target qubit whenever its control is in state $|1\rangle$, the controlled-$Z$ applies a $Z$ in the same case. In Qasm it can be invoked directly with # # ```python # # a controlled-Z # qc.cz(c,t) # ``` # # where c and t are the control and target qubits. In IBM Q devices, however, the only kind of two-qubit gate that can be directly applied is the CNOT. We therefore need a way to transform one to the other. # # The process for this is quite simple. We know that the Hadamard transforms the states $|0\rangle$ and $|1\rangle$ to the states $|+\rangle$ and $|-\rangle$. We also know that the effect of the $Z$ gate on the states $|+\rangle$ and $|-\rangle$ is the same as that for $X$ on the state $|0\rangle$ and $|1\rangle$. From this reasoning, or from simply multiplying matrices, we find that # # $$ # H X H = Z,\\\\ # H Z H = X. # $$ # # The same trick can be used to transform a CNOT into a controlled-$Z$. All we need to do is precede and follow the CNOT with a Hadamard on the target qubit. 
This will transform any $X$ applied to that qubit into a $Z$. # # ```python # # also a controlled-Z # qc.h(t) # qc.cx(c,t) # qc.h(t) # ``` # # More generally, we can transform a single CNOT into a controlled version of any rotation around the Bloch sphere by an angle $\pi$, by simply preceding and following it with the correct rotations. For example, a controlled-$Y$: # # ```python # # a controlled-Y # qc.sdg(t) # qc.cx(c,t) # qc.s(t) # ``` # # and a controlled-$H$: # # ```python # # a controlled-H # qc.ry(-pi/4,t) # qc.cx(c,t) # qc.ry(pi/4,t) # ``` # ### Swapping qubits # Sometimes we need to move information around in a quantum computer. For some qubit implementations, this could be done by physically moving them. Another option is simply to move the state between two qubits. This is done by the SWAP gate. # # ```python # # swaps states of qubits a and b # qc.swap(a,b) # ``` # # The command above directly invokes this gate, but let's see how we might make it using our standard gate set. For this, we'll need to consider a few examples. # # First, we'll look at the case that qubit a is in state $|1\rangle$ and qubit b is in state $|0\rangle$. For this we'll apply the following gates: # # ```python # # swap a 1 from a to b # qc.cx(a,b) # copies 1 from a to b # qc.cx(b,a) # uses the 1 on b to rotate the state of a to 0 # ``` # # This has the effect of putting qubit b in state $|1\rangle$ and qubit a in state $|0\rangle$. In this case at least, we have done a SWAP. # # Now let's take this state and SWAP back to the original one. As you may have guessed, we can do this with the reverse of the above process: # # ```python # # swap a q from b to a # qc.cx(b,a) # copies 1 from b to a # qc.cx(a,b) # uses the 1 on a to rotate the state of b to 0 # ``` # # Note that in these two processes, the first gate of one would have no effect on the initial state of the other. For example, when we swap the $|1\rangle$ b to a, the first gate is `cx q[b], q[a]`. 
If this were instead applied to a state where no $|1\rangle$ was initially on b, it would have no effect. # # Note also that for these two processes, the final gate of one would have no effect on the final state of the other. For example, the final `cx q[b], q[a]` that is required when we swap the $|1\rangle$ from a to b has no effect on the state where the $|1\rangle$ is not on b. # # With these observations, we can combine the two processes by adding an ineffective gate from one onto the other. For example, # # ```python # qc.cx(b,a) # qc.cx(a,b) # qc.cx(b,a) # ``` # # We can think of this as a process that swaps a $|1\rangle$ from a to b, but with a useless `qc.cx(b,a)` at the beginning. We can also think of it as a process that swaps a $|1\rangle$ from b to a, but with a useless `qc.cx(b,a)` at the end. Either way, the result is a process that can do the swap both ways around. # # It also has the correct effect on the $|00\rangle$ state. This is symmetric, and so swapping the states should have no effect. Since the CNOT gates have no effect when their control qubits are $|0\rangle$, the process correctly does nothing. # # The $|11\rangle$ state is also symmetric, and so needs a trivial effect from the swap. In this case, the first CNOT gate in the process above will cause the second to have no effect, and the third undoes the first. Therefore, the whole effect is indeed trivial. # # We have thus found a way to decompose SWAP gates into our standard gate set of single-qubit rotations and CNOT gates. # # ```python # # swaps states of qubits a and b # qc.cx(b,a) # qc.cx(a,b) # qc.cx(b,a) # ``` # # It works for the states $|00\rangle$, $|01\rangle$, $|10\rangle$ and $|11\rangle$, as well as for all superpositions of them. It therefore swaps all possible two-qubit states. 
# # The same effect would also result if we changed the order of the CNOT gates: # # ```python # # swaps states of qubits a and b # qc.cx(a,b) # qc.cx(b,a) # qc.cx(a,b) # ``` # # This is an equally valid way to get the SWAP gate. # # The derivation used here was very much based on the z basis states, but it could also be done by thinking about what is required to swap qubits in states $|+\rangle$ and $|-\rangle$. The resulting ways of implementing the SWAP gate will be completely equivalent to the ones here. # ### Making the CNOTs we need from the CNOTs we have # The gates in any quantum computer are driven by the physics of the underlying system. In IBM Q devices, the physics behind CNOTs means that they cannot be directly applied to all possible pairs of qubits. For those pairs for which a CNOT can be applied, it typically has a particular orientation. One specific qubit must act as control, and the other must act as the target, without allowing us to choose. # # #### Changing the direction of a CNOT # # Let's deal with the second problem described above: If we have a CNOT with control qubit $c$ and target qubit $t$, how can we make one for which qubit $t$ acts as the control and qubit $c$ is the target? # # This question would be very simple to answer for the controlled-$Z$. For this gate, it doesn't matter which way around the control and target qubits are. # # ```python # qc.cz(c,t) # ``` # # has exactly the same effect as # # ```python # qc.cz(t,c) # ``` # # This means that we can think of either one as the control, and the other as the target. # # To see why this is true, let's remind ourselves of what the Z gate is: # # $$ # Z= \begin{pmatrix} 1&0 \\\\ 0&-1 \end{pmatrix}. # $$ # # We can think of this as multiplying the state by $-1$, but only when it is $|1\rangle$. # # For a controlled-$Z$ gate, the control qubit must be in state $|1\rangle$ for a $Z$ to be applied to the target qubit. 
Given the above property of $Z$, this only has an effect when the target is in state $|1\rangle$. We can therefore think of the controlled-$Z$ gate as one that multiplies the state of two qubits by $-1$, but only when the state is $|11\rangle$. # # This new interpretation is phrased in a perfectly symmetric way, and demonstrates that the labels of 'control' and 'target' are not necessary for this gate. # # This property gives us a way to reverse the orientation of a CNOT. We can first turn the CNOT into a controlled-$Z$ by using the method described earlier: placing a Hadamard both before and after on the target qubit. # # ```python # # a cz # qc.h q[t]; # qc.cx q[c], q[t]; # qc.h q[t]; # ``` # # Then, since we are free to choose which way around to think about a controlled-$Z$'s action, we can choose to think of $t$ as the control and $c$ as the target. We can then transform this controlled-$Z$ into a corresponding CNOT. We just need to place a Hadamard both before and after on the target qubit \(which is now qubit $c$\). # # ```python # # a cx with control qubit t and target qubit c # qc.h(c) # qc.h(t) # qc.cx(c,t) # qc.h(t) # qc.h(c) # ``` # # And there we have it: we've turned around the CNOT. All that is needed is a Hadamard on both qubits before and after. # # The rest of this subsection is dedicated to another explanation of how to turn around a CNOT, with a bit of math (introduced in the 'States for Many Qubits' article of the previous chapter, and the 'Fun with Matrices' article of this chapter), and some different insight. Feel free to skip over it. # # Here is another way to write the CNOT gate: # # $$ # {\rm CX}_{c,t} = |0\rangle \langle0| \otimes I + |1\rangle \langle1| \otimes X. # $$ # # Here the $|1\rangle \langle1|$ ensures that the second term only affects those parts of a superposition for which the control qubit $c$ is in state $|1\rangle$. For those, the effect on the target qubit t is $X$. 
The first terms similarly address those parts of the superposition for which the control qubit is in state $|0\rangle$, in which case it leaves the target qubit unaffected. # # Now let's do a little math. The $X$ gate has eigenvalues $\pm 1$ for the states $|+\rangle$ and $|-\rangle$. The $I$ gate has an eigenvalue of $1$ for all states including $|+\rangle$ and $|-\rangle$. We can thus write them in spectral form as # # $$ # X = |+\rangle \langle+| \, \, - \, \, |-\rangle \langle-|, \, \, \, \, I = |+\rangle \langle+| \, \, + \, \, |-\rangle \langle-| # $$ # # Substituting these into the expression above gives us # # $$ # {\rm CX}_{c,t} = |0\rangle \langle0| \otimes |+\rangle \langle+| \, \, + \, \, |0\rangle \langle0| \otimes |-\rangle \langle-| \, \, + \, \, |1\rangle \langle1| \otimes |+\rangle \langle+| \, \, - \, \, |1\rangle \langle1| \otimes |-\rangle \langle-| # $$ # # Using the states $|0\rangle$ and $|1\rangle$, we can write the $Z$ gate in spectral form, and also use an alternative \(but completely equivalent\) spectral form for $I$: # # $$ # Z = |0\rangle \langle0| ~-~ |1\rangle \langle1|, ~~~ I = |0\rangle \langle0| ~+~ |1\rangle \langle1|. # $$ # # With these, we can factorize the parts of the CNOT expressed with the $|0\rangle$ and $|1\rangle$ state: # # $$ # {\rm CX}_{c,t} = I \otimes |+\rangle \langle+| \, \, + \, \, Z \otimes |-\rangle \langle-| # $$ # # This gives us a whole new way to interpret the effect of the CNOT. The $Z \otimes |-\rangle \langle-| $ term addresses the parts of a superposition for which qubit $t$ is in state $|-\rangle$ and then applies a $Z$ gate to qubit $c$. The other term similarly does nothing to qubit $c$ when qubit $t$ is in state $|+\rangle.$ # # In this new interpretation, it is qubit $t$ that acts as the control. It is the $|+\rangle$ and $|-\rangle$ states that decide whether an action is performed, and that action is the gate $Z$. 
This sounds like a very different gate to our familiar CNOT, and yet it is the CNOT. These are two equally true descriptions of its effects. # # Among the many uses of this property is the method to turn around a CNOT. For example, consider applying a Hadamard to qubit $c$ both before and after this CNOT: # # ```python # h(c) # cx(c,t) # h(c) # ``` # # This transforms the $Z$ in the $Z \otimes |-\rangle \langle-| $ term into an $X$, and leaves the other term unchanged. The combined effect is then a gate that applies an $X$ to qubit $c$ when qubit $t$ is in state $|-\rangle$. This is halfway to what we are wanting to build. # # To complete the process, we can apply a Hadamard both before and after on qubit $t$. This transforms the $|+\rangle$ and $|-\rangle$ states in each term into $|0\rangle$ and $|1\rangle$. Now we have something that applies an $X$ to qubit $c$ when qubit $t$ is in state $|1\rangle$. This is exactly what we want: a CNOT in reverse, with qubit $t$ as the control and $c$ as the target. # # #### CNOT between distant qubits # # Suppose we have a control qubit $c$ and a target qubit $t$, and we want to do a CNOT gate between them. If this gate is directly possible on a device, we can just do it. If it's only possible to do the CNOT in the wrong direction, we can use the method explained above. But what if qubits $c$ and $t$ are not connected at all? # # If qubits $c$ and $t$ are on completely different devices in completely different labs in completely different countries, you may be out of luck. But consider the case where it is possible to do a CNOT between qubit $c$ and an additional qubit $a$, and it is also possible to do one between qubits $a$ and $t$. The new qubit can then be used to mediate the interaction between $c$ and $t$. # # One way to do this is with the SWAP gate. We can simply SWAP $a$ and t, do the CNOT between $c$ and $a$, and then swap $a$ and $t$ back again. 
The end result is that we have effectively done a CNOT between $c$ and $t$. The drawback of this method is that it costs a lot of CNOT gates, with six needed to implement the two SWAPs. # # Another method is to use the following sequence of gates. # # ```python # # a CNOT between qubits c and t, with no end effect on qubit a # qc.cx(a,t) # qc.cx(c,a) # qc.cx(a,t) # qc.cx(c,a) # ``` # # To see how this works, first consider the case where qubit $c$ is in state $|0\rangle$. The effect of the `cx q[c], q[a]` gates in this case are trivial. This leaves only the two `cx q[a], q[t]` gates, which cancel each other out. The net effect is therefore that nothing happens. # # If qubit $c$ is in state $|1\rangle$, things are not quite so simple. The effect of the `cx q[c], q[a]` gates is to toggle the value of qubit $a$; it turns any $|0\rangle$ in the state of qubit $a$ into $|1\rangle$ and back again, and vice versa. # # This toggle effect affects the action of the two `cx q[a], q[t]` gates. It ensures that whenever one is controlled on a $|0\rangle$ and has trivial effect, the other is controlled on a $|1\rangle$ and applies an $X$ to qubit $t$. The end effect is that qubit $a$ is left unchanged, but qubit $t$ will always have had an $X$ applied to it. # # Putting everything together, this means that an $X$ is applied to qubit $t$ only when qubit $c$ is in state $|1\rangle$. Qubit $a$ is left unaffected. We have therefore engineered a CNOT between qubits $c$ and $t$. Unlike when using SWAP gates, this required only four CNOT gates to implement. # # It is similarly possible to engineer CNOT gates when there is a longer chain of qubits required to connect our desired control and target. The methods described above simply need to be scaled up. # ### Controlled rotations # We have already seen how to build controlled $\pi$ rotations from a single CNOT gate. Now we'll look at how to build any controlled rotation. # # First, let's consider arbitrary rotations around the y axis. 
Specifically, consider the following sequence of gates. # # ```python # qc.ry(theta/2) q[t]; # qc.cx q[c], q[t]; # qc.ry(-theta/2) q[t]; # qc.cx q[c], q[t]; # ``` # # If the control qubit is in state $|0\rangle$, all we have here is a $R_y(\theta/2)$ immediately followed by its inverse, $R_y(-\theta/2)$. The end effect is trivial. If the control qubit is in state $|1\rangle$, however, the `ry(-theta/2)` is effectively preceded and followed by an X gate. This has the effect of flipping the direction of the y rotation and making a second $R_y(\theta/2)$. The net effect in this case is therefore to make a controlled version of the rotation $R_y(\theta)$. # # This method works because the x and y axis are orthogonal, which causes the x gates to flip the direction of the rotation. It therefore similarly works to make a controlled $R_z(\theta)$. A controlled $R_x(\theta)$ could similarly be made using CNOT gates. # # We can also make a controlled version of any single-qubit rotation, $U$. For this we simply need to find three rotations A, B and C, and a phase $\alpha$ such that # # $$ # ABC = I, ~~~e^{i\alpha}AZBZC = U # $$ # # We then use controlled-Z gates to cause the first of these relations to happen whenever the control is in state $|0\rangle$, and the second to happen when the control is state $|1\rangle$. An $R_z(2\alpha)$ rotation is also used on the control to get the right phase, which will be important whenever there are superposition states. # # ```python # qc.append(a, [t]) # qc.cz(c,t) # qc.append(b, [t]) # qc.cz(c,t) # qc.append(c, [t]) # qc.u1(alpha,c) # ``` # # ![A controlled version of a gate V](https://s3.us-south.cloud-object-storage.appdomain.cloud/strapi/4efe86a907a64a59a720b4dc54a98a88iden1.png) # # Here `A`, `B` and `C` are gates that implement $A$ , $B$ and $C$, and must be defined as custom gates. 
For example, if we wanted $A$ to be $R_x(\pi/4)$, the custom gate would be defined as # # ```python # qc_a = QuantumCircuit(1, name='A') # qc_a.rx(np.pi/4,0) # A = qc_a.to_instruction() # ``` # ### The Toffoli # The Toffoli gate is a three-qubit gate with two controls and one target. It performs an X on the target only if both controls are in the state $|1\rangle$. The final state of the target is then equal to either the AND or the NAND of the two controls, depending on whether the initial state of the target was $|0\rangle$ or $|1\rangle$. A Toffoli can also be thought of as a controlled-controlled-NOT, and is also called the CCX gate. # # ```python # # Toffoli with control qubits a and b and target t # qc.ccx(a,b,t) # ``` # # To see how to build it from single- and two-qubit gates, it is helpful to first show how to build something even more general: an arbitrary controlled-controlled-U for any single-qubit rotation U. For this we need to define controlled versions of $V = \sqrt{U}$ and $V^\dagger$. In the Qasm code below, we assume that subroutines `cv` and `cvdg` have been defined for these, respectively. The controls are qubits $a$ and $b$, and the target is qubit $t$. # # ```python # qc.cv q[b], q[t]; # qc.cx q[a], q[b]; # qc.cvdg q[b], q[t]; # qc.cx q[a], q[b]; # qc.cv q[a], q[t]; # ``` # # ![A doubly controlled version of a gate V](https://s3.us-south.cloud-object-storage.appdomain.cloud/strapi/693974b222d24dba9111e02ae25e9151iden2.png) # # By tracing through each value of the two control qubits, you can convince yourself that a U gate is applied to the target qubit if and only if both controls are 1. Using ideas we have already described, you could now implement each controlled-V gate to arrive at some circuit for the doubly-controlled-U gate. It turns out that the minimum number of CNOT gates required to implement the Toffoli gate is six [2]. 
# # # ![A Toffoli](https://s3.us-south.cloud-object-storage.appdomain.cloud/strapi/b3cbeb9b7d674d60a75bed351e4f2bcbiden3.png) # # # The Toffoli is not the unique way to implement an AND gate in quantum computing. We could also define other gates that have the same effect, but which also introduce relative phases. In these cases, we can implement the gate with fewer CNOTs. # # For example, suppose we use both the controlled-Hadamard and controlled-$Z$ gates, which can both be implemented with a single CNOT. With these we can make the following circuit: # # ```python # qc.ch(a,t) # qc.cz(b,t) # qc.ch(a,t) # ``` # # For the state $|00\rangle$ on the two controls, this does nothing to the target. For $|11\rangle$, the target experiences a $Z$ gate that is both preceded and followed by an H. The net effect is an $X$ on the target. For the states $|01\rangle$ and $|10\rangle$, the target experiences either just the two Hadamards \(which cancel each other out\) or just the $Z$ \(which only induces a relative phase\). This therefore also reproduces the effect of an AND, because the value of the target is only changed for the $|11\rangle$ state on the controls -- but it does it with the equivalent of just three CNOT gates. # ### Arbitrary rotations from H and T # The qubits in current devices are subject to noise, which basically consists of gates that are done by mistake. Simple things like temperature, stray magnetic fields or activity on neighboring qubits can make things happen that we didn't intend. # # For large applications of quantum computers, it will be necessary to encode our qubits in a way that protects them from this noise. This is done by making gates much harder to do by mistake, or to implement in a manner that is slightly wrong. # # This is unfortunate for the single-qubit rotations $R_x(\theta)$, $R_y(\theta)$ and $R_z(\theta)$. 
It is impossible to implement an angle $\theta$ with perfect accuracy, such that you are sure that you are not accidentally implementing something like $\theta + 0.0000001$. There will always be a limit to the accuracy we can achieve, and it will always be larger than is tolerable when we account for the build-up of imperfections over large circuits. We will therefore not be able to implement these rotations directly in fault-tolerant quantum computers, but will instead need to build them in a much more deliberate manner. # # Fault-tolerant schemes typically perform these rotations using multiple applications of just two gates: $H$ and $T$. # # The T gate is expressed in Qiskit as # # ```python # qc.t(0) # T gate on qubit 0 # ``` # # It is a rotation around the z axis by $\theta = \pi/4$, and so is expressed mathematically as $R_z(\pi/4) = e^{i\pi/8~Z}$. # # In the following we assume that the $H$ and $T$ gates are effectively perfect. This can be engineered by suitable methods for error correction and fault-tolerance. # # Using the Hadamard and the methods discussed in the last chapter, we can use the T gate to create a similar rotation around the x axis. # # ```python # qc.h(0) # qc.t(0) # qc.h(0) # ``` # # Now let's put the two together. Let's make the gate $R_z(\pi/4)~R_x(\pi/4)$. # # ```python # qc.h(0) # qc.t(0) # qc.h(0) # qc.t(0) # ``` # # Since this is a single-qubit gate, we can think of it as a rotation around the Bloch sphere. That means that it is a rotation around some axis by some angle. We don't need to think about the axis too much here, but it clearly won't be simply x, y or z. More important is the angle. # # The crucial property of the angle for this rotation is that it is irrational. You can prove this yourself with a bunch of math, but you can also see the irrationality in action by applying the gate. Repeating it $n$ times results in a rotation around the same axis by a different angle. 
Due to the irrationality, the angles that result from different repetitions will never be the same. # # We can use this to our advantage. Each angle will be somewhere between $0$ and $2\pi$. Let's split this interval up into $n$ slices of width $2\pi/n$. For each repetition, the resulting angle will fall in one of these slices. If we look at the angles for the first $n+1$ repetitions, it must be true that at least one slice contains two of these angles. Let's use $n_1$ to denote the number of repetitions required for the first, and $n_2$ for the second. # # With this, we can prove something about the angle for $n_2-n_1$ repetitions. This is effectively the same as doing $n_2$ repetitions, followed by the inverse of $n_1$ repetitions. Since the angles for these are not equal \(because of the irrationality\) but also differ by no greater than $2\pi/n$ \(because they correspond to the same slice\), the angle for $n_2-n_1$ repetitions satisfies # # $$ # \theta_{n_2-n_1} \neq 0, ~~~~-\frac{2\pi}{n} \leq \theta_{n_2-n_1} \leq \frac{2\pi}{n} . # $$ # # We therefore have the ability to do rotations around small angles. We can use this to rotate around angles that are as small as we like, just by increasing the number of times we repeat this gate. # # By using many small-angle rotations, we can also rotate by any angle we like. This won't always be exact, but it is guaranteed to be accurate up to $2\pi/n$, which can be made as small as we like. We now have power over the inaccuracies in our rotations. # # So far, we only have the power to do these arbitrary rotations around one axis. For a second axis, we simply do the $R_z(\pi/4)$ and $R_x(\pi/4)$ rotations in the opposite order. # # ```python # qc.t(0) # qc.h(0) # qc.t(0) # qc.h(0) # ``` # # The axis that corresponds to this rotation is not the same as that for the gate considered previously. 
We therefore now have arbitrary rotation around two axes, which can be used to generate any arbitrary rotation around the Bloch sphere. We are back to being able to do everything, though it costs quite a lot of $T$ gates. # # It is because of this kind of application that $T$ gates are so prominent in quantum computation. In fact, the complexity of algorithms for fault-tolerant quantum computers is often quoted in terms of how many $T$ gates they'll need. This motivates the quest to achieve things with as few $T$ gates as possible. Note that the discussion above was simply intended to prove that $T$ gates can be used in this way, and does not represent the most efficient method we know. # ### References # # [1] [Barenco, *et al.* 1995](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.52.3457?cm_mc_uid=43781767191014577577895&cm_mc_sid_50200000=1460741020) # # [2] [<NAME>, 2009](http://dl.acm.org/citation.cfm?id=2011799)
ch-gates/basic-circuit-identities.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Iris classification with a decision tree.
#
# Loads the iris dataset, splits it into equal train/test halves, fits a
# DecisionTreeClassifier on the first half and reports accuracy on the
# second half.

# + id="WIsGkMXyvCgo" colab_type="code" colab={}
from sklearn import tree
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score

import numpy as np

# + id="gq34M84xvRzp" colab_type="code" colab={}
iris_dataset = load_iris()

# + id="HVFKsP9HvShg" colab_type="code"
print(iris_dataset)

# + id="3h8gVjWAz4Cl" colab_type="code"
# FIX: load_iris() returns the samples sorted by class (50 setosa, then
# 50 versicolor, then 50 virginica).  Cutting the raw arrays in half
# would train only on classes 0/1 and test mostly on class 2, which the
# model has never seen, so the reported accuracy would be meaningless.
# Shuffle with a fixed seed first so both halves contain all classes and
# the run stays reproducible.
rng = np.random.RandomState(42)
order = rng.permutation(len(iris_dataset.data))
shuffled_data = iris_dataset.data[order]
shuffled_labels = iris_dataset.target[order]

CUT_DATA = .5  # fraction of the data used for training

train_size = int(len(shuffled_data) * CUT_DATA)
print("train.size: {}".format(train_size))

# + id="w2oQ_o7W0Cqr" colab_type="code"
train_data = shuffled_data[:train_size]
train_labels = shuffled_labels[:train_size]
print("train.data:\n{}\n\ntrain.labels:\n{}".format(train_data, train_labels))

# + id="SBCfp1-52W88" colab_type="code"
test_data = shuffled_data[train_size:]
test_labels = shuffled_labels[train_size:]
print("test.data:\n{}\n\ntest.labels:\n{}".format(test_data, test_labels))

# + id="NPaFWB2235sO" colab_type="code"
model = tree.DecisionTreeClassifier()
model.fit(train_data, train_labels)

# + id="3HnwKonl4PgW" colab_type="code"
prediction_labels = model.predict(test_data)
print(prediction_labels)

# + id="Liv6iuoW5iT_" colab_type="code"
accuracy = accuracy_score(test_labels, prediction_labels)
print("Accuracy: {:.4}%".format(accuracy * 100))
ipynb/iris_decision_tree_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Plant disease recognition.
#
# Trains a small CNN on the Kaggle "plant-disease-recognition-dataset"
# twice: once from directory generators with image augmentation
# (one-hot labels -> categorical_crossentropy), and once directly on
# manually loaded arrays (integer labels -> sparse_categorical_crossentropy).

# + id="L8Mf4Sd9gQcL"
# Download the dataset from Kaggle (requires kaggle.json credentials).
# ! mkdir ~/.kaggle
# ! cp kaggle.json ~/.kaggle/
# ! chmod 600 ~/.kaggle/kaggle.json

# + id="2VdVa1LSgQZa"
# ! kaggle datasets download -d rashikrahmanpritom/plant-disease-recognition-dataset

# + id="KKHdwT_zgQUE"
# ! unzip plant-disease-recognition-dataset.zip

# + id="Z3woS9hBeUGZ"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import layers, models
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2

# + id="aIclkEl9jkJ9"
# Class names are the sub-directory names of the training folder; their
# list index is used as the integer class label throughout.
train_directory = '/content/Train/Train'
categories = [category for category in os.listdir(train_directory)]
categories

# + id="h1G6e8NGkca5"
valid_directory = '/content/Validation/Validation'
test_directory = '/content/Test/Test'

# + id="A0tgTGX2eaNP"
# Sanity check: display one sample image from the first category.
for category in categories:
    path = os.path.join(train_directory, category)
    class_label = categories.index(category)
    for image in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, image))
        cvt = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
        plt.imshow(cvt)
        break
    break


# + id="OgdWUOVDr19x"
IMG_SIZE = 100  # every image is resized to IMG_SIZE x IMG_SIZE


def _load_labelled_images(directory, dataset):
    """Append [rgb_image, class_index] pairs from `directory` into `dataset`.

    Files that OpenCV cannot read are skipped (best-effort loading).
    """
    for category in categories:
        path = os.path.join(directory, category)
        # FIX: the label must be computed per category here.  The original
        # create_training_data never assigned class_label and silently
        # reused a stale global leaked by the display cell above, so every
        # training image got the same (wrong) label.
        class_label = categories.index(category)
        for image in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, image))
                cvt = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
                new_img = cv2.resize(cvt, (IMG_SIZE, IMG_SIZE))
                dataset.append([new_img, class_label])
            except Exception:
                # Unreadable/corrupt file: skip it rather than abort loading.
                pass


training_data = []
_load_labelled_images(train_directory, training_data)

# + id="oyCXui8efxip"
validation_data = []
_load_labelled_images(valid_directory, validation_data)

# + id="KC31H_aWsu1U"
# Peek at one (image, label) sample.
for sample in training_data[3]:
    print(sample)

# + id="YrwV85AStvS1"
x_train = []
y_train = []
for features, labels in training_data:
    x_train.append(features)
    y_train.append(labels)

# + id="MIGhgurotMSu"
y_valid = []
x_valid = []
for features, labels in validation_data:
    x_valid.append(features)
    y_valid.append(labels)

# + id="it7otpgFuILU"
x_train = np.array(x_train)
y_train = np.array(y_train)
print(x_train.shape)
y_train.shape

# + id="Wc7vh2sYuLOM"
x_valid = np.array(x_valid)
y_valid = np.array(y_valid)
print(x_valid.shape)
print(y_valid.shape)

# + id="RhmqZov2xHRE"
# Scale pixel values into [0, 1] for the manually loaded arrays.
x_train, x_valid = x_train / 255, x_valid / 255

# + id="GXLaZiagwhx2"
# Augmented generators reading straight from the class directories;
# these produce one-hot encoded labels.
train_data_gen = ImageDataGenerator(zoom_range=1, vertical_flip=True,
                                    width_shift_range=0.2, height_shift_range=0.2,
                                    fill_mode='nearest', shear_range=0.2,
                                    rescale=1./255)
valid_data_gen = ImageDataGenerator(zoom_range=1, vertical_flip=True,
                                    width_shift_range=0.3, height_shift_range=0.1,
                                    fill_mode='nearest', shear_range=0.4,
                                    rescale=1./255)
train_data = train_data_gen.flow_from_directory(directory=train_directory,
                                                target_size=(100, 100),
                                                batch_size=50)
valid_data = valid_data_gen.flow_from_directory(directory=valid_directory,
                                                target_size=(100, 100),
                                                batch_size=50)


# + id="CA0J90YfuhPu"
def _build_model():
    """Small CNN: two conv/pool stages, then a dense head with 3 class outputs."""
    return models.Sequential([
        layers.Conv2D(64, (3, 3), input_shape=(100, 100, 3), activation='relu'),
        layers.MaxPool2D(3, 3),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPool2D(3, 3),
        layers.Flatten(),
        layers.Dense(500, activation='relu'),
        layers.Dense(3, activation='softmax'),
    ])


def _plot_history(history, acc_ylim, loss_ylim):
    """Plot accuracy and loss curves from a Keras History object."""
    plt.figure(figsize=(12, 8))
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend(loc='center right')
    plt.title('Accuracy Vs. Validation Accuracy')
    plt.ylim(0, acc_ylim)
    plt.show()

    plt.figure(figsize=(12, 8))
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='validation_loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend(loc='center right')
    plt.title('Loss Vs. Validation Loss')
    plt.ylim(0, loss_ylim)
    plt.show()


model = _build_model()

# + id="ObxUCpWEvHNa"
# Generator labels are one-hot, so use categorical_crossentropy.
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# + id="Si-_so6MvWsX"
model.summary()

# + id="A1_CGhEqvYMC"
# Model fitting with image augmentation.
hist = model.fit(train_data, validation_data=valid_data, epochs=10)

# + id="a7jrj9r797lU"
_plot_history(hist, acc_ylim=1, loss_ylim=2)

# + id="RlxaaRo0GrxQ"
# FIX: the original cell began with a stray "k" ("k# making the model"),
# which is a syntax error.  Second, identical model trained on the raw
# arrays: labels are plain integers, so use sparse_categorical_crossentropy.
model1 = _build_model()
model1.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
               metrics=['accuracy'])

# + id="A7wgtkAEvhJP"
# Model fitting without image augmentation.
hist1 = model1.fit(x=x_train, y=y_train,
                   validation_data=(x_valid, y_valid), epochs=5)

# + id="MP5Vkm7f2M-_"
_plot_history(hist1, acc_ylim=2, loss_ylim=48)

# + id="SuWx29ym-w10"
Day24/plantdiesease.ipynb