code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pszemraj/vid2cleantxt/blob/master/colab_notebooks/vid2cleantext_single_GPU.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="oiurzXGg8DyC" # # vid2cleantxt - single file version on Colab # <NAME> # # [Link to full GitHub Repo](https://github.com/pszemraj/vid2cleantxt) # # ## Purpose # # * Links to your google drive (through google's authentication package, transcribes a video file to audio chunks, runs those chunks through facebook's wav2vec2 pretrained speech transcription model. # * After saving original transcription, it also creates a version that is spell-corrected, and a third version with sentence boundary disambiguation (i.e. it adds periods into sentences) # # ## Instructions # # The two main things that need to be done to make this work are: # 1. Specify what the input filename and filepath are # * In this demo it is already specified for you (uses **requests** to get vid file from project repo) # 2. Adjust model main parameters # * with a GPU, should be stable @ 30 seconds # * with a CPU, bit of a trial-and-error process # * <font color='orange'> **Before running script, do Runtime->Change Runtime Type-> GPU in the top menu** </font> # # Sections where these parameters need to be updated are indicated in the file below (or see table of contents). # # ** ** # # <font color='orange'> This example was designed to be run in the Google Colab environment but should work locally with a few tweaks (i.e. 
# get rid of google colab libraries) </font>

# + id="2ES3PpiwLH9F"
# !nvidia-smi

# + [markdown] id="c0Z5Kg0cd4z8"
# # Code

# + id="aU0vhImmKeOc"
# %%capture
# !pip install pysbd
# !pip install transformers
# !pip install texthero
# !pip install wordninja
# !pip install yake
# !pip install symspellpy
# !pip install pycuda
# !ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
# !pip install gputil
# !pip install psutil
# !pip install humanize
# !pip install tqdm

import math
import os
import pprint as pp
import re
import shutil
import time
from datetime import datetime
from os import listdir
from os.path import isfile, join

import GPUtil as GPU
import humanize
import librosa
import moviepy
import moviepy.editor as mp
import pandas as pd
import pkg_resources
import psutil
import pycuda.driver as cuda
import pysbd
import texthero as hero
import torch
import wordninja
import yake
from natsort import natsorted
from symspellpy import SymSpell
from tqdm.auto import tqdm
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer

# NOTE: a duplicate `import os` was removed; os is still imported above.

print("\n ============================================================ \n")
print("loaded / checked all packages")

# + [markdown] id="OHQz0UlnNTN4"
# # Function Definitions

# + [markdown] id="zrWTUJ_6FqZY"
# ## generic

# + id="bZ5lPErWFtHy"
def corr(s):
    """Normalize spacing in a string.

    Collapses runs of spaces, then inserts a space after any period that is
    not already followed by one.
    """
    return re.sub(r'\.(?! )', '. ', re.sub(r' +', ' ', s))


def beautify_filename(filename, num_words=5, start_reverse=True):
    """Build a short, human-readable phrase from a filename (extension dropped).

    Useful when reading files, transforming them, and writing new files: you
    keep some semblance of the prior name without infinitely long filenames.

    :param filename: filename (or any string); text after the last '.' is dropped
    :param num_words: maximum number of words to keep in the phrase
    :param start_reverse: if True, keep the LAST num_words words; else the first
    :return: the phrase (never includes the extension)
    """
    filename = str(filename)
    index_file_ext = filename.rfind('.')
    current_name = filename[:index_file_ext]  # get rid of extension
    s = pd.Series(current_name)
    s = hero.remove_stopwords(s)
    # FIX: hero.clean() returns a new Series; the original discarded the result.
    s = hero.clean(s)
    clean_name = s.loc[0]
    # split concatenated text into words based on common word frequency
    file_words = wordninja.split(clean_name)
    if len(file_words) <= num_words:
        num_words = len(file_words)
    if start_reverse:
        t_file_words = file_words[-num_words:]
    else:
        t_file_words = file_words[:num_words]
    # FIX: " ".join() never leaves a trailing space, so the original
    # pretty_name[:-1] silently chopped the final character of the last word.
    return " ".join(t_file_words).strip()


def quick_keys(filename, filepath, max_ngrams=3, num_keywords=20, save_db=False):
    """Determine keywords in a text file with YAKE.

    A lower YAKE score means a MORE important phrase. Optionally saves the
    keyword table (phrase, score, word count, frequency) to an .xlsx file.

    :return: pandas DataFrame with one row per extracted key phrase
    """
    with open(join(filepath, filename), 'r', encoding="utf-8", errors='ignore') as file:
        text = file.read()

    language = "en"
    deduplication_threshold = 0.3  # technically a hyperparameter
    custom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngrams,
                                                dedupLim=deduplication_threshold,
                                                top=num_keywords, features=None)
    yake_keywords = custom_kw_extractor.extract_keywords(text)
    phrase_db = pd.DataFrame(yake_keywords)
    phrase_db.columns = ['key_phrase', 'YAKE_score']

    # words per phrase. FIX: count words of the phrase itself (entry[0]) instead
    # of the fragile len(str(entry).split(" ")) - 1 on the (phrase, score) tuple.
    yake_kw_len = [len(str(entry[0]).split()) for entry in yake_keywords]
    # raw frequency of each phrase in the source text
    yake_kw_freq = []
    for index, row in phrase_db.iterrows():
        yake_kw_freq.append(text.count(str(row["key_phrase"])))

    word_len_series = pd.Series(yake_kw_len, name='No. Words in Phrase')
    word_freq_series = pd.Series(yake_kw_freq, name='Phrase Freq. in Text')
    phrase_db2 = pd.concat([phrase_db, word_len_series, word_freq_series], axis=1)
    # rename columns; saved as excel because CSVs mangle things
    phrase_db2.columns = ['key_phrase', 'YAKE Score (Lower = More Important)',
                          'num_words', 'freq_in_text']

    if save_db:  # save individual file if user asks
        yake_fname = beautify_filename(filename=filename, start_reverse=False) + "_top_phrases_YAKE.xlsx"
        phrase_db2.to_excel(join(filepath, yake_fname), index=False)

    # print out top 10 keywords, or all of them if fewer were requested
    num_phrases_disp = min(num_keywords, 10)
    print("Top Key Phrases from YAKE, with max n-gram length: ", max_ngrams, "\n")
    pp.pprint(phrase_db2.head(n=num_phrases_disp))

    return phrase_db2


def digest_text_fn(direct, iden='', w_folder=False):
    """Merge every .txt file in a directory into one combined text file.

    :param direct: directory containing the .txt files (cwd is changed to it)
    :param iden: label used in the output filename; defaults to a dated name
    :param w_folder: if True, write the merged file into a new sub-folder
    """
    run_date = datetime.now()
    # FIX: renamed local `id` -> `label` (was shadowing the builtin).
    label = iden
    if label == "":
        label = "document" + run_date.strftime("_%d%m%Y_")  # run date
    os.chdir(direct)
    main_path = os.getcwd()
    if w_folder:
        # create a sub-folder for the output
        output_folder_name = "mergedf_" + run_date.strftime("_%d%m%Y_")
        if not os.path.isdir(output_folder_name):
            os.mkdir(output_folder_name)  # make a place to store outputs if one does not exist
        output_path_full = os.path.join(main_path, output_folder_name)
    else:
        # do not create a folder
        print("not creating folder, file will be @:", direct)
        output_path_full = main_path

    # Load files from the directory -----------------------------------------
    all_files = natsorted([f for f in listdir(direct) if isfile(join(direct, f))])
    total_files_1 = len(all_files)
    # FIX: the original removed items from the list WHILE iterating it, which
    # skips entries; build a filtered list instead.
    files_to_munch_1 = [f for f in all_files if f.endswith(".txt")]
    removed_count_1 = total_files_1 - len(files_to_munch_1)
    print("out of {0:3d} file(s) originally in the folder, ".format(total_files_1),
          "{0:3d} non-.txt files were removed".format(removed_count_1))
    print('\n {0:3d} .txt file(s) in folder will be joined.'.format(len(files_to_munch_1)))

    stitched_masterpiece = []
    announcement_top = "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    announcement_middle = "\nTHE NEXT FILE BEGINS HERE!!!!!!!!!"
    announcement_bottom = "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
    for fname in files_to_munch_1:
        # header / visual break between files
        stitched_masterpiece.append(announcement_top)
        stitched_masterpiece.append(announcement_middle)
        stitched_masterpiece.append(announcement_bottom)
        stitched_masterpiece.append(beautify_filename(fname, start_reverse=False, num_words=10))
        stitched_masterpiece.append("\n")
        stitched_masterpiece.append("\n")
        # open and append file text
        with open(join(direct, fname), 'rt', encoding="utf-8", errors='ignore') as file:
            f1_text = file.readlines()
        for raw_line in f1_text:
            stitched_masterpiece.append(corr(raw_line))
            stitched_masterpiece.append("\n")

    # write file with everything appended to it
    out_filename = label + " [all_text_merged]" + ".txt"
    with open(join(output_path_full, out_filename), 'w', encoding="utf-8", errors='ignore') as outfile:
        outfile.writelines(stitched_masterpiece)
    print("\nDone. Files are located here: ")
    pp.pprint(output_path_full)

# + [markdown] id="LnN94mTXFw9d"
# ## hardware monitoring

# + id="jtgRuxdDGGqG"
def check_runhardware_torch(verbose=False):
    """Report whether PyTorch can see a CUDA GPU; returns True if one exists.

    https://www.run.ai/guides/gpu-deep-learning/pytorch-gpu/
    """
    GPUs = GPU.getGPUs()
    if len(GPUs) > 0:
        if verbose:
            print("\n ------------------------------")
            print("Checking CUDA status for PyTorch")
        cuda.init()
        print("Cuda availability (PyTorch): ", torch.cuda.is_available())
        # get id of default device (side effect: initializes CUDA context)
        torch.cuda.current_device()
        if verbose:
            print("Name of GPU: ", cuda.Device(0).name())  # '0' is the id of your GPU
            print("------------------------------\n")
        return True
    else:
        print("No GPU being used :(")
        return False


def check_runhardware_google(verbose=False):
    """Check GPU allocation status from the Colab side (via nvidia-smi).

    NOTE: the `!nvidia-smi` line below is an IPython shell magic; jupytext
    renders it as a comment, so this function only works when run inside a
    notebook runtime that converts it back.
    """
    if verbose:
        print("\n ------------------------------")
        print("Checking GPU alloc status - (google hardware side)")
    # gpu_info = !nvidia-smi
    gpu_info = '\n'.join(gpu_info)
    if gpu_info.find('failed') >= 0:
        print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator. ')
        print('Then re-run code.')
        if verbose:
            print("\n ------------------------------")
        return False
    else:
        print("GPU status = good @ ", datetime.now())
        if verbose:
            print(gpu_info)
            print("\n ------------------------------")
        return True


def check_runhardware_v2(verbose=False):
    """Print general RAM / process-size stats and, if present, GPU memory stats."""
    GPUs = GPU.getGPUs()
    if verbose:
        print("\n ------------------------------")
        print("Checking hardware with psutil")
    process = psutil.Process(os.getpid())
    print("\nGen RAM Free: " + humanize.naturalsize(psutil.virtual_memory().available),
          " | Proc size: " + humanize.naturalsize(process.memory_info().rss))
    if len(GPUs) > 0:
        # FIX: only index GPUs[0] after confirming the list is non-empty
        # (the original indexed unconditionally -> IndexError on CPU runtimes).
        gpu = GPUs[0]  # XXX: only one GPU on Colab and isn't guaranteed
        print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB\n".format(
            gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil * 100, gpu.memoryTotal))
    else:
        print("No GPU being used :(\n")

# + [markdown] id="IDk-_y6VFw-9"
# ## for video conversion / transcription

# + id="-xtpYd9tZhso"
def convert_vidfile(vidfilename, start_time=0, end_time=6969,
                    input_directory="", output_directory="", new_filename=""):
    """Extract (a slice of) a video file's audio track to a .wav file.

    :param start_time: start of the slice, in MINUTES
    :param end_time: end of the slice in minutes; 6969 is a sentinel for "to the end"
    :return: dict with output filename, output folder and clip length (seconds)
    """
    if len(input_directory) < 1:
        my_clip = mp.VideoFileClip(vidfilename)
    else:
        my_clip = mp.VideoFileClip(join(input_directory, vidfilename))
    if end_time == 6969:
        modified_clip = my_clip.subclip(t_start=int(start_time * 60))
    else:
        modified_clip = my_clip.subclip(t_start=int(start_time * 60), t_end=int(end_time * 60))
    converted_filename = vidfilename[: (len(vidfilename) - 4)] + "-converted_" + \
        datetime.now().strftime("day_%d_time_%H-%M-%S_") + ".wav"
    # update filename if the caller supplied one
    if len(new_filename) > 0:
        converted_filename = new_filename
    if len(output_directory) < 1:
        modified_clip.audio.write_audiofile(converted_filename)
    else:
        # NOTE(review): progress_bar/verbose kwargs were removed in newer
        # moviepy releases — confirm against the pinned moviepy version.
        modified_clip.audio.write_audiofile(join(output_directory, converted_filename),
                                            progress_bar=None, verbose=False)
    audio_conv_results = {
        "output_filename": converted_filename,
        "output_folder": output_directory,
        "clip_length": modified_clip.duration
    }
    return audio_conv_results


def convert_vid_for_transcription(vid2beconv, len_chunks, input_directory, output_directory, verbose=False):
    """Split a video's audio into .wav chunks for wav2vec2 transcription.

    Oriented specifically for the wav2vec2 speech-to-text model: writes chunks
    of len_chunks SECONDS each into output_directory.

    TODO: add try/except in case the user already has an audio file to transcribe.

    :return: list of chunk filenames, in order
    """
    my_clip = mp.VideoFileClip(join(input_directory, vid2beconv))
    number_of_chunks = math.ceil(my_clip.duration / len_chunks)  # len_chunks is in seconds
    if verbose:
        print('converting into ' + str(number_of_chunks) + ' audio chunks')
        print('separating audio into chunks starting at ', datetime.now().strftime("_%H.%M.%S"))
    preamble = beautify_filename(vid2beconv)
    outfilename_storage = []
    for i in tqdm(range(number_of_chunks), total=number_of_chunks, desc="convert vid2audio"):
        start_time = i * len_chunks
        if i == number_of_chunks - 1:
            this_clip = my_clip.subclip(t_start=start_time)  # last chunk: run to the end
        else:
            this_clip = my_clip.subclip(t_start=start_time, t_end=(start_time + len_chunks))
        this_filename = preamble + '_run_' + str(i) + '.wav'
        outfilename_storage.append(this_filename)
        this_clip.audio.write_audiofile(join(output_directory, this_filename),
                                        progress_bar=None, verbose=False)
    print('Finished creating audio chunks at ', datetime.now().strftime("_%H.%M.%S"))
    if verbose:
        print('Files are located in ', output_directory)
    return outfilename_storage


def symspell_file(filepath, filename, dist=2, keep_numb_words=True, create_folder=True,
                  save_metrics=False, print_results=False):
    """Spell-correct a text file with SymSpell and save the corrected copy.

    https://github.com/mammothb/symspellpy

    :param dist: max edit distance searched for a better spelling (higher = slower)
    :param keep_numb_words: pass words with digits through uncorrected
    :param create_folder: store output in a new sub-folder of filepath
    :param save_metrics: currently unused, kept for interface compatibility
    :param print_results: currently unused, kept for interface compatibility
    :return: dict with corrected text, corrected filename, and output path
    """
    script_start_time = time.time()
    sym_spell = SymSpell(max_dictionary_edit_distance=dist, prefix_length=7)
    print("PySymSpell - Starting to check and correct the file: ", filename)
    dictionary_path = pkg_resources.resource_filename(
        "symspellpy", "frequency_dictionary_en_82_765.txt")
    bigram_path = pkg_resources.resource_filename(
        "symspellpy", "frequency_bigramdictionary_en_243_342.txt")
    # term_index is the column of the term; count_index is the term frequency column
    sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)
    sym_spell.load_bigram_dictionary(bigram_path, term_index=0, count_index=2)

    with open(join(filepath, filename), 'r', encoding="utf-8", errors='ignore') as file:
        textlines = file.readlines()  # returns a list

    if create_folder:
        output_folder_name = "pyymspell_corrections_SD=" + str(dist)
        if not os.path.isdir(join(filepath, output_folder_name)):
            os.mkdir(join(filepath, output_folder_name))  # make a place to store outputs
        filepath = join(filepath, output_folder_name)

    corrected_list = []
    print("loaded text with {0:6d} lines ".format(len(textlines)))
    # iterate through lines, correct each, and accumulate the results
    for line in tqdm(textlines, total=len(textlines), desc="spell correcting"):
        if line == "":
            # blank line, skip to next run
            continue
        suggestions = sym_spell.lookup_compound(phrase=line, max_edit_distance=dist,
                                                ignore_non_words=keep_numb_words,
                                                ignore_term_with_digits=keep_numb_words)
        all_sugg_for_line = [suggestion.term for suggestion in suggestions]
        corrected_list.append(' '.join(all_sugg_for_line) + "\n")

    # finished iterating through lines; save the corrected document
    corrected_doc = "".join(corrected_list)
    corrected_fname = "Corrected_SSP_" + beautify_filename(filename, num_words=9,
                                                           start_reverse=False) + ".txt"
    with open(join(filepath, corrected_fname), 'w', encoding="utf-8", errors='ignore') as file_out:
        file_out.writelines(corrected_doc)

    # report runtime
    script_rt_m = (time.time() - script_start_time) / 60
    print("RT for this file was {0:5f} minutes".format(script_rt_m))
    print("Finished correcting w/ symspell", filename, " at time: ",
          datetime.now().strftime("%H:%M:%S"), "\n")

    corr_file_Data = {
        "corrected_ssp_text": corrected_doc,
        "corrected_ssp_fname": corrected_fname,
        "output_path": filepath,
    }
    return corr_file_Data


def transcribe_video_wav2vec(transcription_model, directory, vid_clip_name,
                             chunk_length_seconds, verbose=False):
    """Transcribe one video with wav2vec2: split to audio chunks, run the model
    on each chunk, and return the transcript plus a metadata log.

    Spell correction and keyword extraction are done separately afterwards.
    NOTE(review): this reads the module-level globals `tokenizer`, `time_log`
    and `time_log_desc` — they must exist before calling.

    :return: dict with "audio_transcription" (list of lines) and "metadata"
    """
    # Split video into audio chunks ------------------------------------------
    if verbose:
        print("\n============================================================")
        print("Converting video to audio for file: ", vid_clip_name)
        print("============================================================\n")
    output_folder_name = "audio_chunks"
    if not os.path.isdir(join(directory, output_folder_name)):
        os.mkdir(join(directory, output_folder_name))  # make a place to store outputs
    path2audiochunks = join(directory, output_folder_name)
    chunk_directory = convert_vid_for_transcription(vid2beconv=vid_clip_name,
                                                    input_directory=directory,
                                                    len_chunks=chunk_length_seconds,
                                                    output_directory=path2audiochunks)
    if verbose:
        print("\n============================================================")
        print("converted video to audio. About to start transcription loop for file: ", vid_clip_name)
        print("============================================================\n")
    check_runhardware_torch()
    check_runhardware_v2()
    time_log.append(time.time())
    time_log_desc.append("converted video to audio")

    full_transcription = []
    header = "Transcription of " + vid_clip_name + " at: " + \
        datetime.now().strftime("date_%d_%m_%Y_time_%H-%M-%S")
    full_transcription.append(header + "\n")
    GPU_update_incr = math.ceil(len(chunk_directory) / 4)  # report GPU usage ~4x per video

    # Load chunks by name, pass through model, append output text ------------
    # FIX: enumerate() instead of chunk_directory.index(...) — the index()
    # call was O(n) per chunk and wrong if two chunks shared a name.
    for current_loc, audio_chunk in tqdm(enumerate(chunk_directory), total=len(chunk_directory),
                                         desc="\nConverting Video - " + vid_clip_name):
        if (GPU_update_incr != 0) and (current_loc % GPU_update_incr == 0):
            # provide update on GPU usage
            check_runhardware_torch()
            check_runhardware_v2()
        audio_input, rate = librosa.load(join(path2audiochunks, audio_chunk), sr=16000)
        # MODEL
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        input_values = tokenizer(audio_input, return_tensors="pt", padding="longest",
                                 truncation=True).input_values.to(device)
        transcription_model = transcription_model.to(device)
        # inference only — no_grad avoids building an autograd graph (same logits)
        with torch.no_grad():
            logits = transcription_model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcription = str(tokenizer.batch_decode(predicted_ids)[0])
        full_transcription.append(transcription + "\n")
        # free per-chunk tensors so long videos don't exhaust GPU memory
        del input_values
        del logits
        del predicted_ids
        torch.cuda.empty_cache()

    if verbose:
        print("\nFinished audio transcription of " + vid_clip_name + " and now saving metrics.")

    # build metadata log ------------------------------------------------------
    mdata = []
    mdata.append('original file name: ' + vid_clip_name + '\n')
    # FIX: reworded the garbled "of lengths seconds each" message
    mdata.append('number of recorded audio chunks: ' + str(len(chunk_directory)) +
                 " chunks of " + str(chunk_length_seconds) + ' seconds each\n')
    approx_input_len = (len(chunk_directory) * chunk_length_seconds) / 60
    mdata.append('approx {0:3f}'.format(approx_input_len) + ' minutes of input audio \n')
    mdata.append('transcription date: ' + datetime.now().strftime("date_%d_%m_%Y_time_%H-%M-%S") + '\n')
    full_text = ' '.join(full_transcription)
    transcript_length = len(full_text)
    mdata.append("length of transcribed text: " + str(transcript_length) + ' characters \n')
    t_word_count = len(full_text.split(' '))
    mdata.append("total word count: " + str(t_word_count) + ' words (based on spaces) \n')

    # delete audio chunks folder ---------------------------------------------
    # TODO add try/except for deleting folder as not technically needed to achieve goal
    shutil.rmtree(path2audiochunks)
    if verbose:
        print("\nDeleted Audio Chunk Folder + Files")

    # compile results ---------------------------------------------------------
    transcription_results = {
        "audio_transcription": full_transcription,
        "metadata": mdata
    }
    print("\nFinished transcription successfully for " + vid_clip_name + " at "
          + datetime.now().strftime("date_%d_%m_%Y_time_%H-%M-%S"))
    return transcription_results


def validate_output_directories(directory):
    """Ensure the transcript and metadata output folders exist under `directory`.

    :return: dict with "t_out" (transcript folder) and "m_out" (metadata folder)
    """
    t_folder_name = "wav2vec2_sf_transcript"
    m_folder_name = "wav2vec2_sf_metadata"
    # check if transcription folder exists; if not, create it
    if not os.path.isdir(join(directory, t_folder_name)):
        os.mkdir(join(directory, t_folder_name))
    t_path_full = join(directory, t_folder_name)
    # check if metadata folder exists; if not, create it
    if not os.path.isdir(join(directory, m_folder_name)):
        os.mkdir(join(directory, m_folder_name))
    m_path_full = join(directory, m_folder_name)
    output_locs = {
        "t_out": t_path_full,
        "m_out": m_path_full
    }
    return output_locs


print("loaded all user functions at: ", datetime.now())

# create time log
time_log = []
time_log_desc = []
time_log.append(time.time())
time_log_desc.append("start")

# + [markdown] id="4NJK7YP72Riz"
# # Load Files and Model

# + [markdown] id="0rZXO1z1fpnV"
# Note that you can also connect to a google drive folder if you want to transcribe a large
# video file or several video files (described in the "multi" script
# [here](https://colab.research.google.com/drive/1UMCSh9XdvUABjDJpFUrHPj4uy3Cc26DC?usp=sharing))
#
# The code to do so would be as follows:
#
# ```
# # create interface to upload / interact with google drive and video files
#
# from google.colab import files
# from google.colab import drive
# drive.mount('/content/drive')
# # google will ask you to click link, approve, and paste code
#
# # after authentication, you can work using the path "/content/drive/My Drive"
# # if it works it will say "Mounted at /content/drive"
#
# # part 2: specify where in the drive the files are located
#
# filename = "President <NAME>'s Peace Speech.mp4"
# filepath = "/content/drive/My Drive/Programming/vid2cleantxt_colabfiles"
#
# print('Will use the following as directory/file: ')
# pp.pprint(''.join([filepath, filename]))
#
# ```
#
# ** **

# + [markdown] id="zJJ4BVpL9ixu"
# ## Instructions P1: Input File Details
#
# Specify the path of the file you want to transcribe. This script downloads the file from the
# vid2cleantxt github repo and saves it to the VM's working directory using the **requests** library.
#
# <font color='orange'> update the **input_path** variable to a custom filepath if desired (i.e.
# you are running this locally) </font>

# + id="b-nwIm1zmHlc"
# Download the demo video from the vid2cleantxt repo into the VM's working
# directory. To transcribe your own file, set `filename` / `input_path` instead.
import requests

filepath = os.getcwd()
filename = 'JFK_rice_moon_speech.mp4'
input_path = os.path.join(filepath, filename)

# enter test URL
URL = "https://github.com/pszemraj/vid2cleantxt/raw/master/example_JFK_speech/President%20John%20F.%20Kennedy's%20Peace%20Speech.mp4"

print("starting to download and save file ")
r = requests.get(URL, allow_redirects=True)
r.raise_for_status()  # fail loudly instead of silently saving an HTML error page
# FIX: use a context manager so the file handle is closed
# (was: open(input_path, 'wb').write(r.content) — handle leaked)
with open(input_path, 'wb') as vid_file:
    vid_file.write(r.content)
print("successfully saved ", filename, " - ", datetime.now())

# + [markdown] id="bV3ov-0Z_PXf"
# ## Instructions P2: Update chunk_length
#
# Update the variable 'chunk_length' to your use case. A good value is one that doesn't cause
# Colab to crash and is greater than a sentence length (for context, grammar purposes).
#
# If Colab is using a GPU, 30 seconds should be fine. If Colab is only able to use a CPU, it
# may need to be decreased.
# **Check Runtime Settings to ensure GPU used**

# + id="k_gAUG_9aK-d"
# load huggingface model
time_log.append(time.time())
time_log_desc.append("starting to load model")

# load pretrained wav2vec2 checkpoint (downloads from the HuggingFace hub on first run)
wav2vec2_model = "facebook/wav2vec2-large-960h-lv60-self"
# wav2vec2_model = "facebook/wav2vec2-base-960h" # faster+smaller, less accurate
print("\nPreparing to load model: " + wav2vec2_model)
tokenizer = Wav2Vec2Tokenizer.from_pretrained(wav2vec2_model)
model = Wav2Vec2ForCTC.from_pretrained(wav2vec2_model)
chunk_length = 30  # (in seconds) if model fails to work or errors out (and there isn't some other
# obvious error), reduce chunk_length.
print("loaded the following model:", wav2vec2_model, " at ", datetime.now())
time_log.append(time.time())
time_log_desc.append("loaded model")

# + [markdown] id="zybapeYW_4uw"
# # Run Transformer Model (wav2vec2)

# + id="7EkpWuk3bmpP"
# load the video and run it through the model, timing the whole pass
st = time.time()
time_log.append(st)
time_log_desc.append("starting transcription")
t_results = transcribe_video_wav2vec(transcription_model=model, directory=filepath,
                                     vid_clip_name=filename, chunk_length_seconds=chunk_length)
end_t = time.time()
time_log.append(end_t)
time_log_desc.append("finished transcription")

# t_results is a dictionary containing the transcript and associated metadata
full_transcription = t_results.get('audio_transcription')
metadata = t_results.get('metadata')

print("completed transcription in {} minutes".format(round((end_t - st) / 60, 2)))

# + [markdown] id="FyFxqjynqR8s"
# # Post-Transcription

# + [markdown] id="KAlsOjR8AD7R"
# ## Spell Check, SBD, Keywords
#
# If you got to here, your colab file was able to run the model and transcribe it.
# Now a little cleaning up, then done.

# + id="5ECdmK3tjP4L"
# create output locations and store full transcription
time_log.append(time.time())
time_log_desc.append("starting saving output files")

# check if directories for output exist.
# If not, create them
storage_locs = validate_output_directories(filepath)
output_path_transcript = storage_locs.get('t_out')
output_path_metadata = storage_locs.get('m_out')

# label and store this transcription
vid_preamble = beautify_filename(filename, num_words=15, start_reverse=False)
# ^ gets a nice phrase from filename

# transcription text
transcribed_filename = vid_preamble + '_tscript_' + datetime.now().strftime("_%H.%M.%S") + '.txt'
# FIX: context managers so the handles are closed even if a write fails
with open(join(output_path_transcript, transcribed_filename), 'w',
          encoding="utf-8", errors='ignore') as transcribed_file:
    transcribed_file.writelines(full_transcription)

# metadata
metadata_filename = 'metadata for ' + vid_preamble + " transcription.txt"
with open(join(output_path_metadata, metadata_filename), 'w',
          encoding="utf-8", errors='ignore') as metadata_file:
    metadata_file.writelines(metadata)

print("saved files at the following locations")
# FIX: the first line mislabeled the transcript path as "metadata"
print("transcript at: " + output_path_transcript)
print("metadata at: " + output_path_metadata)

time_log.append(time.time())
time_log_desc.append("saved output files to local runtime")
# if you want to download these files, you need to add them below

# + id="g2TGZ4h041vU"
# spell correction, sentence disambiguation, and keyword extraction:
# go through the base transcription file, spell-correct it, and get keywords
print('\n Starting to spell-correct and extract keywords\n')
seg = pysbd.Segmenter(language="en", clean=True)
tf_pretty_name = beautify_filename(transcribed_filename, start_reverse=False, num_words=10)
# auto-correct spelling (wav2vec2 doesn't enforce spelling
# on its output)
corr_results_fl = symspell_file(filepath=output_path_transcript, filename=transcribed_filename,
                                keep_numb_words=True, create_folder=True, dist=2)
output_path_impr = corr_results_fl.get("output_path")

# Write version of the transcription with sentence boundaries inferred
# (periods added). All text ends up in one line.
seg_list = seg.segment(corr_results_fl.get("corrected_ssp_text"))
seg_text = '. '.join(seg_list)
seg_outname = "SegTEXT " + tf_pretty_name + ".txt"
# FIX: context manager instead of manual open/close
with open(join(output_path_impr, seg_outname), 'w', encoding="utf-8", errors='ignore') as file_seg:
    file_seg.write(seg_text)

# extract keywords from the transcription (once spell-corrected)
key_phr_fl = quick_keys(filepath=output_path_impr,
                        filename=corr_results_fl.get("corrected_ssp_fname"),
                        num_keywords=50, max_ngrams=3, save_db=False)
key_phr_fl.to_excel(os.path.join(output_path_transcript, tf_pretty_name + "YAKE_extracted_keywords.xlsx"))

time_log.append(time.time())
time_log_desc.append("transcription spell-corrected + keywords extracted")

# + [markdown] id="KGWxhA3boZuJ"
# ## Download generated files

# + id="Zwnan6bXodM_"
# NOTE: google.colab is Colab-only; remove/replace this cell when running locally
from google.colab import files

files.download(join(output_path_impr, seg_outname))
files.download(join(output_path_transcript, tf_pretty_name + "YAKE_extracted_keywords.xlsx"))
# if you want to download any other files, add "files.download(<insert filepath + name here>)"
print("Finished downloading files at: ", datetime.now())

# + [markdown] id="a9qbVc4PAcFZ"
# ## Log & Exit

# + id="OOoaNN5f5efD"
print("\n\n----------------------------------- Script Complete -------------------------------")
print("time of completion block: ", datetime.now())
print("Transcription file + more can be found here: ", output_path_transcript)
print("Metadata for each transcription is located: ", output_path_metadata)

time_log.append(time.time())
time_log_desc.append("End")

# save runtime log as a small spreadsheet
time_records_db = pd.DataFrame(list(zip(time_log_desc, time_log)), columns=['Event', 'Time (sec)'])
time_records_db.to_excel(join(output_path_metadata, tf_pretty_name + "transcription_time_log.xlsx"))

# total runtime
print("total runtime was {0:3f}".format((time_log[-1] - time_log[0]) / 60), " minutes")
colab_notebooks/vid2cleantext_single_GPU.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simulating Language 5, Simple Innate Signalling (lab)

# The code below builds up to a function, ```ca_monte```, which measures and returns the level of communicative accuracy between a production system and a reception system. Signalling systems are stored as lists of lists of association weights. This list of lists structure can be thought of as a matrix with meanings on the rows and signals on the columns (for production matrices) or signals on the rows and meanings on the columns (for reception matrices).
#
# You should go through this notebook cell by cell, running each cell by using SHIFT+ENTER.

# First, we import the random library and the usual plotting stuff.

# +
import random

# %matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf')
# -

# Now here's the bulk of the code, in three functions, ```wta``` (for "winner take all"), ```communicate```, and ```ca_monte```.

# +
def wta(items):
    """Winner-take-all selection.

    Return the index of the highest weight in *items*, breaking ties
    uniformly at random so no signal/meaning is systematically favoured.
    """
    maxweight = max(items)
    # Gather every index that attains the maximum weight, then choose one
    # at random among the tied candidates.
    candidates = [i for i, weight in enumerate(items) if weight == maxweight]
    return random.choice(candidates)


def communicate(speaker_system, hearer_system, meaning):
    """Simulate a single communicative episode.

    The speaker produces a signal for *meaning* from its production matrix,
    the hearer maps that signal back to a meaning with its reception matrix,
    and the episode scores 1 on success (meanings match) or 0 on failure.
    """
    speaker_signal = wta(speaker_system[meaning])
    hearer_meaning = wta(hearer_system[speaker_signal])
    return 1 if meaning == hearer_meaning else 0


def ca_monte(speaker_system, hearer_system, trials):
    """Monte Carlo estimate of communicative accuracy.

    Runs *trials* episodes, each with a meaning drawn uniformly at random,
    and returns a list containing the running mean accuracy after each
    episode — so the final element is the best estimate.
    """
    total = 0.0
    accumulator = []
    for n in range(trials):
        meaning = random.randrange(len(speaker_system))
        total += communicate(speaker_system, hearer_system, meaning)
        accumulator.append(total / (n + 1))
    return accumulator
# -

# We'll try and understand what this code does and why in a minute, but for now let's try getting it to do something. Let's say we had three meanings, and three signals.
Let's imagine a speaker who used signal 0 for meaning 0, signal 1 for meaning 1, and both signal 1 and 2 for meaning 2, and a hearer who understood meaning 0 for signal 0, meaning 1 or 2 for signal 1, and meaning 2 for signal 2. What do we expect the communicative accuracy to be for these two individuals? # # Try and figure this out in your head first (it might help to write out the matrices on a piece of paper). # # Now, let's simulate it! Enter the following code in the next cell (note I've formatted the list of lists nicely so you can read them as matrices, but these can also be entered all on one line if you want): # # ```python # speaker = [[1, 0, 0], # [0, 1, 0], # [0, 1, 1]] # # hearer = [[1, 0, 0], # [0, 1, 1], # [0, 0, 1]] # # ca_monte(speaker, hearer, 100) # ``` # # This simulates 100 interactions between the speaker and hearer and returns a list consisting of the average communicative accuracy so far at each interaction (so the values in this list become more and more accurate reflections of the communicative accuracy as you go through the list). # So, this has given us our first simulation results! We can look at these as a big list, but it might be better to plot it as a graph. Try the following, for example: # # ```python # data = ca_monte(speaker, hearer, 100) # plt.plot(data) # plt.xlabel('trials') # plt.ylabel('communicative accuracy') # plt.ylim([0,1]) # plt.grid() # ``` # So, this plots a single simulation run on a graph. More often, we want to plot lots of runs on a graph. We can do this by making a loop, and gathering a lot of simulation runs in a list. (This will actually be a list of lists now...) # # For example, we could do the following to get ten simulation runs: # # ```python # lots_of_data = [] # for n in range(10): # data = ca_monte(speaker, hearer, 100) # lots_of_data.append(data) # # print(lots_of_data) # ``` # Hmmm... I'm not sure we want to spend much time looking at screens full of numbers! Let's plot a graph! 
# # The following command produces a nicer visualisation of our data, showing all the graphs with transparent lines so we can see the aggregate behaviour of multiple simulations. See if you can figure out how this bit of code works. _Hint: in graphics "alpha" is a technical term for transparency._ # # ```python # for data in lots_of_data: # plt.plot(data, color='orange', alpha=0.5) # plt.xlabel('trials') # plt.ylabel('communicative accuracy') # plt.ylim([0,1]) # plt.grid() # ``` # Another useful type of plot is a histogram showing a distribution of values. Let's say we wanted to know what the end result of a lot of simulation runs looked like. We need to do a little bit of work to pull out the last item of each run of a simulation and put it in a list. This code will do that: # # ```python # end_result = [] # for n in range(10): # data = ca_monte(speaker, hearer, 100) # last_item = data[len(data) - 1] # end_result.append(last_item) # ``` # # Now we can plot the distribution using another handy plotting command, `hist`: # # ```python # plt.hist(end_result) # plt.xlabel('communicative accuracy') # plt.xlim([0, 1]) # ``` # # OK, now we've had a bit of fun, let's understand what's going on! # # Try and understand what each function does. Look at the main function `ca_monte` first, then the function it calls (`communicate`), and so on until you have inspected each function separately. Can you see why the program has been divided into functions in the way it has? # # If you're not sure what each function does, you can have a look at the separate (very!) detailed walkthrough notebook I've provided on the Learn page. Work through that first and then come back here. # # When you are satisfied that you understand roughly how the code works, answer the following questions. 1-3 should be completed by everyone. **4 and 5 are optional!** Only attempt them if you are happy you have completed 1-3. # 1. 
How many trials should there be in the Monte Carlo simulation to work out communicative accuracy? *Hint: answer this question empirically by plotting the results and comparing to what the "real" answer should be for various numbers of trials.* # # 2. How do synonymy and homonymy affect communicative accuracy? Create production and reception systems with different degrees of homonymy and synonymy to explore this. *Note: you don’t have to start from the production and reception systems given above, and you don’t have to restrict yourself to systems with 3 meanings and 3 signals.* # # 3. What alternatives to "winner-take-all" might there be in the model of production/ reception? What difference might this make? Would they be more or less realistic, or powerful as a model? *Hint: how might you interpret weights as probabilities?* # # 4. **[optional]** How could you model production and reception using a single underlying matrix, rather than separate production and reception matrices? Is this kind of model better or worse than a model where we use separate matrices? # # 5. **[optional]** How would you go about calculating communicative accuracy exactly, i.e. rather than via Monte Carlo techniques? #
simlang_5_lab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.0 # language: sage # name: sagemath # --- # # Introduction # # Notebook for the Public Finance course at UCSC, by <NAME> degl' Innocenti. You can find more information on the course on my [UCSC page](http://www.dgdi.me/#teaching) and on my [personal page](http://www.dgdi.me/#teaching). # # # ### What is SageMath? # # [SageMath](http://www.sagemath.org/index.html) is a free open-source mathematics software system licensed under the GPL: # - Sage is a set of software libraries built on top of [Python](http://www.python.org/), a [widely used](https://www.economist.com/graphic-detail/2018/07/26/python-is-becoming-the-worlds-most-popular-coding-language) general purpose programming language. # - SageMath can be used to study elementary and advanced, pure and applied mathematics. Supported by nearly 100 open-source packages, SageMath allows to investigate a variety of topics: algebra, calculus, elementary to very advanced number theory, cryptography, etc.. # - SageMath provides a free alternative to Mathematica, Maple, Matlab and Magma **it is open source and community driven** ([Some thoughts on that by a Nobel laureate and his twitter feed)](https://twitter.com/paulmromer/status/985280552440909824?lang=en). # # - More information about SageMath: [Tour](http://www.sagemath.org/tour.html), [Why SageMath](http://www.sagemath.org/library-why.html), [SageMath on twitter](https://twitter.com/sagemath). # # # ### SageMath in this course # # This course's notebooks are meant to integrate the material from the book and the slides: # # - Many of the concepts of the course are presented by means of graphical analysis. The use of interactive graphs should facilitate their understanding. # - When possible, excercises will be presented on notebooks so to illustrate **each step** of the derivation . 
# - The notebook is a tool to engage directly with the concepts presented by the book while leveraging on the computational capabilities of the computer. "Why is this demand curve shaped that way?" - "What would happen to this result by changing that parameter?" - "How is this result derived?" - and many other similar questions can be answered through a notebook so to improve learning. # - Notebooks provide an opportunity to get used to both coding and math, whose skills are extremely in demand in the job market. # # These notebooks provide a clear presentation of the concepts of Public Finance by means of text, graphs and math. The possibility to directly engage with the concepts allowed by this format might prove very fruitful for learning and you are more than encoraged to take advantage of it. The same applies to the exercises considered in the course and many of them can be solved using SageMath with minimal modification/extensions to the notebooks that will be provided. # # # ### SageMath interface used in this course - Jupyter notebook # # - Jupyter notebook is a webapp -*i.e., an app that runs through your web browser*- used for generic scientific computing. # - It allows to use the computational power of your machine to perform symbolic and numerical operations (**very handy for those that are not well versed in doing math with pen and paper**). # - The notebook consists of a number of cells that take code from different languages: LaTeX, Markdown, as well as SageMath, Python, R, Julia... # - The notebook format allows to present an analysis in a plain, but thorough, way. This is achieved by including in the same document a description of the analysis that is performed, typeset mathematical expressions, graphics (even interactive ones) and the mathematical derivation of the results. 
[Here](https://quantecon.org/notebooks/), you can find a gallery of economic-related notebooks, while [here](https://players.brightcove.net/268012963001/Sy9lbkRKl_default/index.html?videoId=5807429418001) you can find an introductory video about notebooks. # - The notebook is a convenient interface for the user to provide input to the computer (the front-end). The computer then needs to pass the input to a computational "engine" that actually performs the tasks submitted (the back-end). In order to be able to run notebooks, you need to set up the back-end. There are two possibilities to do so: you can use [Cocalc](https://cocalc.com/) (more info about CoCalc [here](http://blog.sagemath.com/)) on the fly by dragging and dropping your notebook onto the page (with a free registration to the website you also get an account to store your files), or you can [install SageMath](http://www.sagemath.org/download.html) and run the notebooks directly on your laptop. # # ### You have opened the notebook, what do you need to do now? # # Plots and interactive graphics in the notebook might need to be evaluated at the opening (text and mathematical expressions are usually displayed by default, but not so for graphs). So, after opening the notebook of interest (using your preferred method, CoCalc or SageMath), it is important to evaluate all the cells of the notebook. To do so, from the drop-down menu at the top of the page, select "Kernel$\rightarrow$Restart & Run All" or simply press the forward button just below the Widgets entry of the menu. As implied by the name, this command will run the whole document (it will take only a few moments), ensuring the correct display of the content. If you want to evaluate a single cell, place the cursor in the code chunk and press Shift+Enter. # # # ### Why should SageMath be of interest to you? # # * Because it is a tool that expands your ability to deal with mathematical and computational problems.
# * Because its mix of textual, math, and graphics content that might improve your understanding of concepts and allow you to create neat presentations. # # ### Sagemath as an operative tool # # Your ability to perform basic operations (sum, difference, multiplication, quotient) were greatly improved when you were taught how to do them using pen and paper, and even more when you started using a calculator. SageMath brings you a similar ability for a wider array of math problems and gives you reliable results that you can use to solve or double-check excercises you are working on. For example, computing derivatives with pen and paper might be tedious and is prone to mistake while you can do it easily using SageMath. Let's compute the second derivative of the function $\frac{ln(\frac{1}{x^2})}{3x^2+2x+1}$ # # + #assign the function expression to the variable g g = (ln(1/(x^2)))/(3*(x^2)+2*x+1) #computes the first derivative of g wrt x and simplify the output dgdx=derivative(g, x).simplify_full() #computes the second derivative of g wrt x and simplify the output dgdxx=derivative(g, x, x).simplify_full() #display the output html("g =" + latex(g) + "\\\\" + "\\frac{dg}{dx}=" + latex(dgdx) + "\\\\" + "\\frac{d^2g}{dx^2}=" + latex(dgdxx)) # - # ### A simple utility maximization problem # As an Economics student you will come across countless utility maximization problems. # # Let's see how one simple example can be tackled using SageMath. We want to find the optimal consumption of coffe for a student at UCSC. As we know, the coffe price is $p=.6$ euro. We are considering a particular student who feels awful before her first coffe in the morning, enjoy less and less every additional coffe and starts shaking due to caffeine when taking more than five. Then, we could model her utility from coffee as $U(z)=5z - z^2$. The problem of our student is then to maximize her objective function $F(z,p) = 5z - z^2 - zp$ where $z$ is the number of coffes consumed. 
# # The first step in the analysis is to define the Utility function and the Objective function function: # + #define a function U that depends on z (quatity of coffe) and that #is equal to the utility function of the text U(z) = 5*z-z^2 #define the objective function F that depends on z and p (price of coffe) #as utility minus total cost of coffee F(z, p) = U(z) - (z*p) #display the output html("U(z)=" + latex(U(z)) + "\\\\" + "F(z,p)=" + latex(F(z, p))) # - # We might want to explore the problem graphically, so to have a better idea about it. # + #set price value p=.6 #draw plot of utility, cost and objective function plot_static = plot([U(z), p*z, F(z,p)], (z, 0, 5), axes_labels=['$z$','$Variable$'], legend_label = ["Utility from consumption", "Cost of consumption","Objective function"]) #set legend so to not overlap with graph plot_static.set_legend_options(handlelength=2, borderaxespad = 0, labelspacing =.05, bbox_to_anchor=(1.05, 1), loc=2) #show graphs show(plot_static, ymin = -.5, ymax = 7, figsize=[10,4]) # - # From the graph we can see that the optimal quantity of good, $z^*$ is around $2$. We can then compute the $FOC$ #evaluate the derivative of the objective function wrt z FOC = derivative(F(z, p), z) #display the FOC html("FOC=" + latex(FOC)) # And by setting $FOC=0$ and solving for $z$ we identify precisely the optimal quantity. html(latex(solve(FOC == 0, z)[0])) # So it is $z^*=\frac{11}{5}=2.2$. So it seems that our student of interest will usually get a couple coffe, which sounds reasonable. 
We already know from the plot that the $FOC$ identifies a maximum, but we can express it more concisely by reporting the $SOC$: SOC = derivative(F(z, p), z, z) html("SOC=" + latex(SOC) + "<0") # The problem presented here is neat and simple for illustrative purposes, but it can be extended to more complex questions, e.g., more complex utility functions (e.g., someone who does not enjoy a daily coffee that much but who binges heavily before exams), multiple-good maximization problems (in the following we will consider the problem of optimally choosing the quantities of coffee and croissants), etc. # To sum up, solving these kinds of problems using SageMath is straightforward; when something is unclear you can explore the problem visually, and it is easy to scale from small, understandable problems to more complex ones. # ### A simple example of an interactive visualization # # We might be interested in analyzing the effect of a change in price on the problem of the optimal choice of coffee. How will the cost and objective functions change for an increase in the price? What about the optimal quantity?
# To explore this questions we can set-up an interactive graph like this one: @interact #create a slider from .20 to 2 to be used by the user to define the price of coffee p def _slider(p=slider(.20, 2, step_size=.1, default = .6)): #compute optimal quantity FOC = derivative(F(z, p), z) opt_q = solve(FOC == 0, z)[0].rhs() #draw plot of utility, cost and objective function plot_inter = plot([U(z), p*z, F(z,p)], (z, 0, 5), axes_labels=['$z$','$F(z)$'], legend_label = ["Utility from consumption", "Cost of consumption","Objective function"]) #draw vertical line at optimal quantity Q* line_opt_q = line([(opt_q,-.5),(opt_q,10)], color = 'black') #draw a label near the vertical line with the value of optimal quantity opt_q_label = text('$Q^*$=%s'%(n(opt_q, digits = 3)), (opt_q+.1, 7) , horizontal_alignment='left', color='black', fontsize = 14) #combine plots together plot_tot = plot_inter + line_opt_q + opt_q_label #set legend so to not overlap with graph plot_tot.set_legend_options(handlelength=2, borderaxespad = 0, labelspacing =.05, bbox_to_anchor=(1.05, 1), loc=2) #show graph show(plot_tot, ymin = -.5, ymax = 7, figsize=[10,4]) # As we can see, an increase in the price makes the line of the cost of consumption steeper (the price beeing its slope). The increase in price causes the objective function to move left and down reducing the optimal quantity $Q^*$. # ### A more advanced example of interactive visualization - Taylor Approximation # # SageMath might also be usefult to improve understanding of mathematical concepts. In your calculus course you should have already come across the [Taylor series ](https://en.wikipedia.org/wiki/Taylor_series) $\hat{f}(x; x_0, n)=\sum_{n=0}^{\infty}\frac{f^{(n)}(x_0)}{n!}(x-x_0)^n$ and its use to approximate functions. Since we will be using the Taylor approximation in the course, it might be useful to review it and we can do it using the tools presented so far. 
We will try to see how the Taylor series approximation behaves for the function $f(x)=x^5$ nearby the point $x_0=1$. To do so, we write a short script that takes as input the order $n$ of the Taylor series and that outputs the plot of the original function $f(x)=x^5$, the plot of its Taylor series approximation $\hat{f}(x; x_0, n)$ along with the algebraic form of the two: # # + #import the library needed to adjust plot fonts import matplotlib as mpl #increse font size to improve readability mpl.rcParams['font.size'] = 12. #define the point upon which perform the Taylor expansion x0 = 1 #define the function f(x) f(x) = x^5 #compute the plot of the function f(x) p_1 = plot(f(x),-1, 5, thickness = 2, legend_label = '$'+'f(x) ='+ latex(f(x))+'$') #compute the plot of the point {x0, f(x0)} dot = point((x0,f(x=x0)), pointsize=80, rgbcolor=(1,0,0)) #calling @interact to allow functions and plot to be dependent on some interactive inputs from the user @interact #create a slider from 1 to 10 to be used by the user to define the value of the Taylor order n def _(n=[1..10]): #compute the Taylor expansion of function f(x) relative to variable x, at point x0, of order n ft = f.taylor(x,x0,n).simplify_full() #compute the plot of the Taylor expansion of function f(x) pt = plot(ft, (x, -1, 5), color='green', thickness=2, legend_label = '$\\hat{f}(x; x_0, n)$') #show the algebraic expression of f(x) pretty_print(html('$f(x)\\;=\\;%s$'%latex(f(x)))) #show the algebraic expression of the Taylor expansion of function f(x) pretty_print(html('$\\hat{f}(x; x_0, n)=\\hat{f}(x;%s;%s)\\;=\\;%s+\\mathcal{O}(x^{%s})$'%(x0,n,latex(ft.expand()),n+1))) #creates P_tot, a plot combining the plots of f(x), its Taylor expansion and the point {x0, f(x0)} P_tot = dot + p_1 + pt #defines the legend of the P_tot plot P_tot.set_legend_options(handlelength=2, borderaxespad = 0, labelspacing =.05, bbox_to_anchor=(1.05, 1), loc=2) #shows the P_tot plot show(P_tot, ymin = -.5, ymax = 3, figsize=[6,4]) # - # 
Crucial to this code is the row `ft=f.taylor(x,x0,n)` that defines the function `ft` as the Taylor expansion of the function `f(x)` relative to the variable `x`, at the point `x0` using an order of the polynomial $n$ equal to the variable `n`. # This function computes the Taylor series algebraic expression $\sum_{n=0}^{\infty}\frac{f^{(n)}(x_0)}{n!}(x-x_0)^{n}$ and gives us the solution (that you can find displayed at the top of the graph). # # As we can see, when the degree of the Taylor polynomial rises, it approaches the correct function (the green line stays "more" on top of the blue line"). In our case the approximation improves until the order of the Taylor series gets to $n=5$ and then it becomes exact (indeed, a Taylor expansion of the fifth order entails a polynomial of the fifth degree, so the perfect approximation of a $x^5$ should not come as a surprise). # # This visualization provides us with a characterization of the Taylor expansion that is complementary to its algebraic formulation and that may be easier to grasp for some. # # Now that we have a better understanding of the Taylor expansion, we could ask how our approximation would perform with a more complex function, let's say $f(x)=e^{-x}\sin(x)$. # # We just need to change our `f(x) = x^5` in the code to be `f_2(x) = sin(x)*e^(-x)` and play again with the graph. # + #import the library needed to adjust plot fonts import matplotlib as mpl #increse font size to improve readability mpl.rcParams['font.size'] = 12. 
#define the point upon which perform the Taylor expansion x0 = 1 #define the function f_2(x) f_2(x) = sin(x)*e^(-x) #compute the plot of the function f_2(x) p_2 = plot(f_2(x),-1, 5, thickness=2, legend_label = '$'+'f(x) ='+ latex(f_2(x))+'$') #compute the plot of the point {x0, f_2(x0)} dot_2 = point((x0,f_2(x=x0)), pointsize=80, rgbcolor=(1,0,0)) #calling @interact to allow functions and plot to be dependent on some interactive inputs from the user @interact #create a slider from 1 to 10 to be used by the user to define the value of the Taylor order n def _(n=[1..10]): #compute the Taylor expansion of function f_2(x) relative to variable x, at point x0, of order n ft_2 = f_2.taylor(x,x0,n).simplify_full() #compute the plot of the Taylor expansion of function f_2(x) pt_2 = plot(ft_2, (x, -1, 5), color='green', thickness=2, legend_label = '$\\hat{f}(x; x_0, n)$') #show the algebraic expression of f_2(x) pretty_print(html('$f(x)\\;=\\;%s$'%latex(f_2(x)))) #show the algebraic expression of the Taylor expansion of function f_2(x) pretty_print(html('$\\hat{f}(x; x_0, n)=\\hat{f}(x;%s;%s)\\;=\\;%s+\\mathcal{O}(x^{%s})$'%(x0,n,latex(ft_2),n+1))) #creates P_tot_2, a plot combining the plots of f_2(x), its Taylor expansion and the point {x0, f_2(x0)} P_tot_2 = dot_2 + p_2 + pt_2 #defines the legend of the P_tot_2 plot P_tot_2.set_legend_options(handlelength=2, borderaxespad = 0, labelspacing =.05, bbox_to_anchor=(1.05, 1), loc=2) #shows the P_tot_2 plot show(P_tot_2, ymin = -.5, ymax = 3, figsize=[6,4]) # - # In this case we can see that the approximation does not become exact even if a polynomial of 10th degree is used. Indeed, it really seems to be impossible to express exactly the function $f(x)=e^{-x}\sin(x)$ with a Taylor series made up of a *finite* number of polynomials. # # The graph presented conveys lots of information that, upon closer inspection, might be uncovered. 
For example, it might be of interest to know when the approximated results are close to the true ones and, probably even more important, for which values of $x$ we **should not trust** the approximation. In other terms, we might want to identify an interval around $x_0$ where we know our approximation is reliable and how this interval varies with the order of the Taylor series. To do so, look for the values of $x$ where the blue and the green line overlaps. We can see that, when $n=2$, our approximation is reliable only very close to $x_0$ while using $n=10$ delivers a pretty good approximation for $x \in [-1, 3]$. Clearly, this is a very rough way to characterize our "reliable bounds", to do better, however, we would need to get back to the algebraic expressions (any idea about the next steps?). # # # ### Conclusion # # Tinkering with the mathematical expressions and their plots is meant to be useful to deepen your understanding of the concepts that will be presented. Perform the course excercises through the notebook is meant to provide a more clear and complete presentation, reducing to the minimum the possibility of losing some steps in the derivation of the results. Moreover, modifying the code allows you to ask question to the models, test if you correctly understood their implications, see if results are robust to changes and so on. This process is meant to foster your ability to think critically about the material of this course. # # Furthermore, getting accustomed with coding, so to be able to leverage the computational capabilities offered by computers to perform analysis, is a skill worth knowing and that is getting increasingly rewarded in the job market. To get started with SageMath you can follow the tutorial [here](http://doc.sagemath.org/html/en/tutorial/index.html) (the topics of interest are from Installation to Plotting). 
# # If you find any mistake or error in these notebooks or you have an idea for an interesting extension to a notebook, let me know (even better, first try to implement it yourself). #
notebooks/notebook_00_introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Context Compatibility

# Per default the (expensive) preprocessing step for the Jaccard and
# clustering experiments is deactivated; set to True to recompute the
# dumped document-pair data.
do_preprocessing = False

# ## SemMedDB Connection

import psycopg2

# Credentials of the SemMedDB Postgres instance (fill in before running).
pg_cred = {
    "POSTGRES_DB": "",
    "POSTGRES_HOST": "",
    "POSTGRES_PORT": "",
    "POSTGRES_USER": "",
    "POSTGRES_PW": ""
}

connection_str = "dbname='{}' user='{}' host='{}' port='{}' password='{}'".format(
    pg_cred["POSTGRES_DB"], pg_cred["POSTGRES_USER"], pg_cred["POSTGRES_HOST"],
    pg_cred["POSTGRES_PORT"], pg_cred["POSTGRES_PW"])
conn = psycopg2.connect(connection_str)
print('connected')

# ## PubMed Metadatabase Connection

pg_cred_meta = {
    "POSTGRES_DB": "",
    "POSTGRES_HOST": "",
    "POSTGRES_PORT": "",
    "POSTGRES_USER": "",
    "POSTGRES_PW": ""
}

connection_str_meta = "dbname='{}' user='{}' host='{}' port='{}' password='{}'".format(
    pg_cred_meta["POSTGRES_DB"], pg_cred_meta["POSTGRES_USER"], pg_cred_meta["POSTGRES_HOST"],
    pg_cred_meta["POSTGRES_PORT"], pg_cred_meta["POSTGRES_PW"])

# The metadata connection is only needed while preprocessing.
if do_preprocessing:
    conn_meta = psycopg2.connect(connection_str_meta)
    print('connected')

# ## Queries

# Only causes with at least 3 occurrences are contained in mv_cause_2.
mv_cause = "mv_cause_2"
mv_ddi_gen = "mv_ddi_gen"
mv_ddi_function = "mv_ddi_function"

# NOTE(review): the semtype abbreviations appear to be UMLS semantic types
# (e.g. 'gngm' gene/genome, 'aapp' amino acid/peptide/protein, 'phsu'
# pharmacologic substance) — confirm against the SemMedDB schema.
query_cause = "SELECT pmid, subject_cui, object_cui FROM {}".format(mv_cause)
query_ddi_gen = "SELECT pmid, subject_cui, object_cui FROM {} ".format(mv_ddi_gen)
query_ddi_gen_1_dg = "SELECT pmid, subject_cui, object_cui FROM {} WHERE (object_semtype = 'gngm' OR object_semtype = 'aapp') AND (subject_semtype = 'clnd' OR subject_semtype = 'phsu' OR subject_semtype ='sbst')".format(mv_ddi_gen)
query_ddi_gen_2_gd = "SELECT pmid, subject_cui, object_cui FROM {} WHERE (subject_semtype = 'gngm' OR subject_semtype = 'aapp') AND (object_semtype = 'clnd' OR object_semtype = 'phsu' OR object_semtype ='sbst')".format(mv_ddi_gen)
query_ddi_function_1_dg = "SELECT pmid, subject_cui, object_cui FROM {} WHERE (object_semtype = 'gngm' OR object_semtype = 'aapp') AND (subject_semtype = 'clnd' OR subject_semtype = 'phsu' OR subject_semtype ='sbst')".format(mv_ddi_function)
query_ddi_function_2_gf = "SELECT pmid, subject_cui, object_cui FROM {} WHERE (subject_semtype = 'gngm' OR subject_semtype = 'aapp') AND (object_semtype = 'biof' OR object_semtype = 'phsf' OR object_semtype ='orgf' OR object_semtype = 'menp' OR object_semtype = 'ortf' OR object_semtype = 'celf' OR object_semtype ='moft' OR object_semtype = 'genf' OR object_semtype = 'patf' OR object_semtype = 'dsyn' OR object_semtype = 'mobd' OR object_semtype = 'neop' OR object_semtype = 'comd' OR object_semtype = 'emod')".format(mv_ddi_function)

query_cause_correct = "SELECT pmid, subject_cui, object_cui FROM {}".format(mv_cause)
query_ddi_correct = "SELECT '0', d1, d2 FROM mv_ddi_correct"

query_mesh_desc = "SELECT AM.pmid, MH.descriptorui FROM pubmed_metadata.meshheading MH JOIN pubmed_metadata.article_meshheading AM ON MH.meshid = AM.meshid WHERE AM.pmid IN (SELECT pmid FROM t_pmid_with_pred)"
query_doc_chemical_desc = "SELECT pmid, chemicalui FROM pubmed_metadata.article_chemical WHERE pmid IN (SELECT pmid FROM t_pmid_with_pred)"
query_doc_titles = "SELECT pmid, title FROM pubmed_metadata.pubmedarticle WHERE pmid IN (SELECT pmid FROM t_pmid_with_pred)"
query_doc_author_ids = """SELECT pmid, authorid FROM pubmed_metadata.article_author WHERE pmid IN (SELECT pmid FROM t_pmid_with_pred)"""

# ---
# # Preprocessing of Document Pair Similarity

from model.knowledgegraph import load_kg_facts
from model.contextcompatibility import load_kg_facts_with_doc_index, preprocess_document_pairs_for_experiment

experiments = ["cause", "ddi_gene"]
metadata_name = ["chemical", "title", "authors", "mesh"]
metadata_to_process = [query_doc_chemical_desc, query_doc_titles, query_doc_author_ids, query_mesh_desc]
# Whether the corresponding metadata field must be split into tokens.
requires_splitting = [False, True, False, True]

# ## Preprocess Cause Experiment

if do_preprocessing:
    print('loading cause facts....')
    idx_subject, idx_object, fact2docs = load_kg_facts_with_doc_index(conn, query_cause)
    preprocess_document_pairs_for_experiment(experiments[0], metadata_to_process, metadata_name,
                                             idx_subject, idx_object, fact2docs, fact2docs)

# ## Preprocess DDI Gene Experiment

if do_preprocessing:
    print('loading ddi gene facts...')
    # Relation 1: drug -> gene, relation 2: gene -> drug.
    _, idx_pmid_dg_o, fact2docsRel1 = load_kg_facts_with_doc_index(conn, query_ddi_gen_1_dg)
    idx_pmid_gd_s, _, fact2docsRel2 = load_kg_facts_with_doc_index(conn, query_ddi_gen_2_gd)
    preprocess_document_pairs_for_experiment(experiments[1], metadata_to_process, metadata_name,
                                             idx_pmid_gd_s, idx_pmid_dg_o, fact2docsRel1, fact2docsRel2)

# ---
# # Experiments with Jaccard

from model.contextcompatibility import do_experiment_with_context_pairs

thresholds_to_check = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]

# ## Causes Experiment with Document Pairs

print('loading correct cause facts...')
idx_correct_cause, _ = load_kg_facts(conn, query_cause)
cause_results = do_experiment_with_context_pairs(experiments[0], metadata_to_process, metadata_name,
                                                 thresholds_to_check, idx_correct_cause)

from model.contextcompatibility import print_tab_seperated, show_precision_recall_curve

print_tab_seperated(cause_results)

cause_kg_correct_facts = 95037
cause_results_new = cause_results.copy()
del cause_results_new["title"]
show_precision_recall_curve(cause_results_new, cause_kg_correct_facts, 'prec_recall_jaccard_cause.pdf')

# ## DDI Gene Experiment with Document Pairs

print('loading correct interactions from semmed...')
idx_subjects_correct_ddi_gene, _ = load_kg_facts(conn, query_ddi_correct)
ddi_gen_results = do_experiment_with_context_pairs(experiments[1], metadata_to_process, metadata_name,
                                                   thresholds_to_check, idx_subjects_correct_ddi_gene)

# ## DDI Gene Experiment

print_tab_seperated(ddi_gen_results)

ddi_gene_kg_correct_facts = 55370
ddi_gen_results_new = ddi_gen_results.copy()
del ddi_gen_results_new["title"]
show_precision_recall_curve(ddi_gen_results_new, ddi_gene_kg_correct_facts, 'prec_recall_jaccard_ddi_gene.pdf')

# ---
# # Clustering

query_doc_titles = "SELECT pmid, title FROM pubmed_metadata.pubmedarticle WHERE pmid IN (SELECT pmid FROM t_pmid_with_pred)"
query_doc_abstracts = """ SELECT pmid, array_to_string(abstracts, ' ') FROM pubmed_metadata.pubmedarticle WHERE abstracts IS NOT NULL AND array_length(abstracts, 1) > 0 AND pmid IN (SELECT pmid FROM t_pmid_with_pred)"""

from model.librarygraph import load_lg_facts
from model.experiments import do_cause_experiment_with_context, do_ddi_gene_experiment_with_context
from model.clustering import load_dumped_clusters, combine_document_clusters, preprocess_and_dump_clustering

clusters_to_check = [2, 5, 10, 15, 30, 45, 60, 100, 200, 300, 400, 500, 1000, 2000, 4000, 6000,
                     10000, 20000, 50000, 100000]
clustering_types = ["clustering_kmeans_title", "clustering_kmeans_abstract"]

if do_preprocessing:
    preprocess_and_dump_clustering(conn_meta, query_doc_titles, clustering_types[0], clusters_to_check)
    preprocess_and_dump_clustering(conn_meta, query_doc_abstracts, clustering_types[1], clusters_to_check)

# ## Cause Experiment

print('load facts from semmeddb and building index...')
idx_cause = load_lg_facts(conn, query_cause)
print('load facts from semmeddb and building index...')
idx_correct_cause, _ = load_kg_facts(conn, query_cause)

results_type_clustering_cause = {}
for c in clustering_types:
    results_clustering_cause = []
    for k in clusters_to_check:
        print('=' * 60)
        print('cluster context: {}'.format(c))
        print('cluster k: {}'.format(k))
        cluster2docs = load_dumped_clusters(c, k)
        # now do experiment
        idx_contexts = combine_document_clusters(cluster2docs, idx_cause)
        correct, obtained = do_cause_experiment_with_context(idx_contexts, idx_correct_cause)
        results_clustering_cause.append((k, correct, obtained))
        print('=' * 60)
    results_type_clustering_cause[c] = results_clustering_cause

del idx_cause
del idx_correct_cause

# NOTE(review): the result tuples are appended as (k, correct, obtained), but
# the loops below unpack/print them as (k, obtained, correct).  Preserved
# exactly as-is to keep the emitted values identical — confirm the intended
# column order against print_tab_seperated / show_precision_recall_curve.
for c, results_clustering_cause in results_type_clustering_cause.items():
    print("=" * 60)
    print(c)
    for k, correct, obtained in results_clustering_cause:
        print('{}\t{}\t{}'.format(k, obtained, correct))

cause_kg_correct_facts = 95037
cluster_cause_results_new = {}
cluster_cause_results_new["jaccard title"] = cause_results["title"]

cl_cause_titles_results = []
for _, obtained, correct in results_type_clustering_cause[clustering_types[0]]:
    cl_cause_titles_results.append((obtained, correct))
cl_cause_abstracts_results = []
for _, obtained, correct in results_type_clustering_cause[clustering_types[1]]:
    cl_cause_abstracts_results.append((obtained, correct))

cluster_cause_results_new["clustering titles"] = cl_cause_titles_results
cluster_cause_results_new["clustering abstracts"] = cl_cause_abstracts_results
show_precision_recall_curve(cluster_cause_results_new, cause_kg_correct_facts, 'prec_recall_cluster_cause.pdf')

# ## DDI Gene

print('load facts from semmeddb and building index...')
idx_pmid_dg = load_lg_facts(conn, query_ddi_gen_1_dg)
idx_pmid_gd = load_lg_facts(conn, query_ddi_gen_2_gd)
print('loading correct interactions from semmed...')
ddi_gen_idx_subjects_correct, _ = load_kg_facts(conn, query_ddi_correct)

results_type_clustering_ddi_gene = {}
for c in clustering_types:
    results_clustering_ddi_gene = []
    for k in clusters_to_check:
        print('=' * 60)
        print('cluster context: {}'.format(c))
        print('cluster k: {}'.format(k))
        cluster2docs = load_dumped_clusters(c, k)
        # now do experiment
        c_idx_pmid_dg = combine_document_clusters(cluster2docs, idx_pmid_dg)
        c_idx_pmid_gd = combine_document_clusters(cluster2docs, idx_pmid_gd)
        correct, obtained = do_ddi_gene_experiment_with_context(c_idx_pmid_dg, c_idx_pmid_gd,
                                                               ddi_gen_idx_subjects_correct)
        results_clustering_ddi_gene.append((k, correct, obtained))
    results_type_clustering_ddi_gene[c] = results_clustering_ddi_gene

del idx_pmid_dg
del idx_pmid_gd
del ddi_gen_idx_subjects_correct

for c, results_clustering_ddi_gene in results_type_clustering_ddi_gene.items():
    print("=" * 60)
    print(c)
    for k, correct, obtained in results_clustering_ddi_gene:
        print('{}\t{}\t{}'.format(k, obtained, correct))

ddi_gene_kg_correct_facts = 55370
cluster_ddi_gene_results_new = {}
cluster_ddi_gene_results_new["jaccard title"] = ddi_gen_results["title"]

cl_ddig_titles_results = []
for _, obtained, correct in results_type_clustering_ddi_gene[clustering_types[0]]:
    cl_ddig_titles_results.append((obtained, correct))
cl_ddig_abstracts_results = []
for _, obtained, correct in results_type_clustering_ddi_gene[clustering_types[1]]:
    cl_ddig_abstracts_results.append((obtained, correct))

cluster_ddi_gene_results_new["clustering titles"] = cl_ddig_titles_results
cluster_ddi_gene_results_new["clustering abstracts"] = cl_ddig_abstracts_results
show_precision_recall_curve(cluster_ddi_gene_results_new, ddi_gene_kg_correct_facts, 'prec_recall_cluster_ddi_gene.pdf')
02-Context_Compatibility.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="2mo42M1Oi8ef" # # Intro Deep Learning Notebook # This notebook demonstrates how to actually implement the ideas discussed in the presentation. # + [markdown] id="tSn5b8oXjN4J" # ## Step 1: Imports # There are two main frameworks used for deep learning in a research setting: [Pytorch](https://pytorch.org/) and [Tensorflow](https://www.tensorflow.org/). # Because the code for these frameworks can be verbose, there are also libraries that abstract away many implementation details such as [Keras](https://keras.io), [fastai](https://fast.ai), and # [HuggingFace](https://huggingface.co/). # # Picking your framework is usually easy: you just select a model from the literature that worked well # on your problem, then modify it to do what you want. # If you have to start from scratch, use the highest level library that will do what you want. # That is to say, pick Keras or fastai before Tensorflow or Pytorch whenever possible. # # I've selected Pytorch because I'm familiar with it, and because the model I'm using in the next presentation is a Pytorch model. 
# + id="jsrNFjTbim39"
from typing import Tuple

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split

# + id="iAIYPUoJokCh"
# Make code deterministic
np.random.seed(42)
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# + id="ghkthBtknjbC"
# First column is the label, other 784 columns are pixel values
numbers_df = pd.read_csv('/content/sample_data/mnist_train_small.csv', header=None)
numbers_df

# + [markdown] id="aNmXQt0EwcBH"
# ## Step 2: Data
#
# Pytorch uses Dataset objects to store their data, and DataLoader objects to feed the
# data into models.

# + id="G9in-SqpwbK2"
class MnistDataset(Dataset):
    """Stores the MNIST handwritten-digit dataset read from a CSV file."""

    def __init__(self, csv_path: str) -> None:
        """Read the CSV stored at ``csv_path`` into label/pixel arrays."""
        numbers_df = pd.read_csv(csv_path, header=None)
        # First column is the digit label, the remaining 784 columns are pixels.
        self.labels = numbers_df.iloc[:, 0].values
        self.pixels = numbers_df.iloc[:, 1:].values

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the (pixels, label) pair at index ``idx``."""
        return self.pixels[idx, :], self.labels[idx]

    def __len__(self) -> int:
        """Return the number of items in the dataset."""
        return len(self.labels)


# + id="93nVZmrg0DX5"
# These MNIST files are built into Colab, but you can also download them and
# change the paths to run this notebook locally with Jupyter.
train_dataset = MnistDataset('/content/sample_data/mnist_train_small.csv')
train_dataset, val_dataset = random_split(train_dataset, [18000, 2000],
                                          torch.Generator().manual_seed(42))
test_dataset = MnistDataset('/content/sample_data/mnist_test.csv')

# + id="x1aJdxdN0FRo"
images = []
for dataset in [train_dataset, val_dataset, test_dataset]:
    pixels, label = dataset[0]
    # Reshape the long flat vector of pixel values into a square image
    img_example = np.reshape(pixels, [28, 28])
    print(label)
    plt.figure()
    plt.imshow(img_example, cmap="gray")

# + id="PzRhgAXRO5HL"
# Pytorch uses objects called DataLoaders to feed data into models.
# Dataloaders handle details like data shuffling and how many items to include per batch
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=1)
test_loader = DataLoader(test_dataset, batch_size=1)

# + [markdown] id="xkthXdDK6GIe"
# ## Step 3: Model
# We'll create a simple fully connected network model that takes in images and
# predicts which digit they represent

# + id="iKE0n87-6Po_"
class FullyConnectedNetwork(nn.Module):
    """A small fully connected network predicting which digit an image depicts."""

    def __init__(self):
        """Initialize the two fully connected layers of the network."""
        super(FullyConnectedNetwork, self).__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, pixels: torch.Tensor):
        """Return per-class *log*-probabilities for an image (or batch of images).

        BUGFIX: the original applied ``F.softmax`` here while training with
        ``nn.NLLLoss``, which expects log-probabilities; that made the loss
        mathematically wrong.  ``log_softmax`` fixes the loss, and since
        ``argmax`` is unchanged under a monotone transform, predictions are
        computed exactly as before.
        """
        # Feed pixels into the first fully connected layer
        x = self.fc1(pixels)
        # Apply the ReLU nonlinearity
        x = F.relu(x)
        # Feed the output of the first layer into the second layer
        x = self.fc2(x)
        # Log-probabilities: the numerically stable companion of NLLLoss
        x = F.log_softmax(x, dim=-1)
        return x


# + id="_ASGwgHQH7KI"
model = FullyConnectedNetwork()
example, label = train_dataset[0]
# .exp() turns the log-probabilities back into probabilities for display.
print(model(torch.Tensor(example)).exp())
pred = torch.argmax(model(torch.Tensor(example)))
print('An untrained model predicted {}, but the real number was {}'.format(pred, label))

# + [markdown] id="KO0Nq0xr6lwU"
# ## Step 4: Training
# Now we'll train the model to make better predictions

# + id="JYpiahY7IDvG"
model = FullyConnectedNetwork()

# NLLLoss consumes the log-probabilities produced by the model's log_softmax.
loss_fn = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters())

# An epoch is the number of iterations it takes for the model to see every
# training point once
epochs = 10
for epoch in range(epochs):
    for i, batch in enumerate(train_loader):
        images, labels = batch
        images = images.float()
        # Zero out the gradient on the optimizer
        optimizer.zero_grad()
        output = model(images)
        loss = loss_fn(output, labels)
        # Tell the optimizer to calculate the gradient of the loss function
        loss.backward()
        # Update the model's weights
        optimizer.step()
    num_correct = 0
    # no_grad: gradients are not needed for evaluation (saves time and memory)
    with torch.no_grad():
        for batch in val_loader:
            image, label = batch
            image = image.float()
            output = model(image)
            prediction = np.argmax(output.detach().numpy())
            if prediction == label:
                num_correct += 1
    print('Val acc = {}'.format(num_correct / len(val_dataset)))

# + [markdown] id="TpJiAh4g6vTh"
# ## Step 5: Evaluation
# Finally, we'll measure the trained model's performance on a held-out test set.
# The test set measures the final model's ability to make predictions on data
# it hasn't seen before.

# + id="cOYV8_GXby90"
num_correct = 0
with torch.no_grad():
    for batch in test_loader:
        image, label = batch
        image = image.float()
        output = model(image)
        prediction = np.argmax(output.detach().numpy())
        if prediction == label:
            num_correct += 1
print('Test accuracy: {}'.format(num_correct / len(test_dataset)))
notebooks/network_train_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Richardson-Lucy-Deconvolution on OpenCL-compatible GPUs
#
# [Richardson-Lucy-Deconvolution](https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution)
# is a common and yet basic algorithm for image deconvolution in microscopy.
# Here we use a GPU-accelerated implementation shipped with the napari plugin
# [RedLionFish](https://github.com/rosalindfranklininstitute/RedLionfish), so
# the same algorithm is also available from the napari GUI.

from skimage.io import imread
from pyclesperanto_prototype import imshow
import RedLionfishDeconv as rl
import matplotlib.pyplot as plt

# A 3D confocal image of fluorescent intensity along lines.
image = imread('../../data/DeconvolutionSampleVerticalGrid1AU-crop.tif')
image.shape

imshow(image, colorbar=True)

# PSF extracted from images taken with the same microscope
# (see the earlier "extract_psf" section).
psf = imread('../../data/psf.tif')
imshow(psf, colorbar=True)

# Run RedLionFish's Richardson-Lucy deconvolution on the GPU.
iterations = 50
deconvolved = rl.doRLDeconvolutionFromNpArrays(image,
                                               psf,
                                               niter=iterations,
                                               method='gpu',
                                               resAsUint8=False)
imshow(deconvolved)

# Compare original vs. deconvolved intensity along a horizontal line,
# taken from a maximum-intensity projection along Z.
max_intensity_image = image.max(axis=0)
max_intensity_image.shape

max_intensity_deconvolved = deconvolved.max(axis=0)
max_intensity_deconvolved.shape

plt.plot(max_intensity_image[80])
plt.plot(max_intensity_deconvolved[80])
plt.show()

# The intensity range has changed through deconvolution; this depends on the
# algorithm/implementation.  Whenever applying deconvolution, check whether
# the intensities of original and deconvolved image are within the same range:
image.min(), image.max()

deconvolved.min(), deconvolved.max()
docs/18a_deconvolution/richardson_lucy_deconvolution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h1>human organ classification </h1>
# <h2> C-NN model<h3> [end to end model]</h3></h2>

# Imports: keras plus project helper modules (path_update, ImageDataGenerator
# re-export, evaluation helpers, etc. come from the ..sup packages).
from tensorflow import keras
from datetime import datetime
from ..sup.evaluation import *
from ..sup.support import *
from ..sup.test_set_eval import *

model_name = ""

# Load the TensorBoard notebook extension.
# %load_ext tensorboard

# Clear any logs from previous runs
# !rm -rf ./logs/

# Dataset layout: one sub-directory per class under train/ and validation/.
classes = ['heart', 'brain', 'eye', 'kidney', 'skull', 'other']
root_dir = '../../datasets/'
train_dir = os.path.join(root_dir, 'train')
validation_dir = os.path.join(root_dir, 'validation')
tr_heart_dir, tr_brain_dir, tr_eye_dir, tr_kidney_dir, tr_skull_dir = path_update(train_dir, classes)
vl_heart_dir, vl_brain_dir, vl_eye_dir, vl_kidney_dir, vl_skull_dir = path_update(validation_dir, classes)

# Take a glance at the training dataset.
plot_sample_of_img(4, 4, os.listdir(tr_heart_dir) + os.listdir(tr_eye_dir))

# ImageDataGenerator: auto-labelling from directory names, with augmentation
# on the training set only.
train_gen_tmp = ImageDataGenerator(rescale=1./255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
# BUGFIX: was rescale=1/225. (typo).  Validation images must be rescaled by
# the same 1/255 factor as the training images, otherwise the model is
# evaluated on differently-scaled inputs.
validation_gen_tmp = ImageDataGenerator(rescale=1./255)

train_gen = train_gen_tmp.flow_from_directory(train_dir,
                                              target_size=(300, 300),
                                              color_mode='rgb',
                                              class_mode='categorical',
                                              batch_size=100,
                                              shuffle=True,
                                              seed=42)
validation_gen = validation_gen_tmp.flow_from_directory(validation_dir,
                                                        target_size=(300, 300),
                                                        color_mode='rgb',
                                                        class_mode='categorical',
                                                        batch_size=100,
                                                        shuffle=True,
                                                        seed=42)

STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = validation_gen.n // validation_gen.batch_size

# BUGFIX: the original dict((v, k) for v, k in d.items()) rebuilt the very
# same mapping (a no-op).  Invert class-name -> index into index -> class-name
# so predicted indices can be decoded back to labels.
clToInt_dict = train_gen.class_indices
clToInt_dict = dict((v, k) for k, v in clToInt_dict.items())

# Define the model (architecture intentionally left to be filled in).
model = keras.models.Sequential()

# Compile the model (arguments intentionally left to be filled in).
model.compile()

# Define the Keras TensorBoard callback.
logdir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

# Fit & train the model (arguments intentionally left to be filled in).
history = model.fit()

# Visualize layer process in cnn
#visualize_model(model,img_path)

# Instant evaluation: call to the tensorboard
# %tensorboard --logdir logs

# Look at training model performance
acc_n_loss(history)
model.evaluate_generator(validation_gen, steps=STEP_SIZE_VALID)

# Evaluate the model on the test set.
y_pred, y_test = test_eval(model, classes)
plot_confusion_metrix(y_test, y_pred, classes)
ROC_classes(6, y_test, y_pred, classes)

# Save the model in a .h5 file.
# BUGFIX: datetime.now() + model_name raised TypeError (datetime + str is not
# defined); format the timestamp into a string first.
model_path, model_weight_path = save(model, datetime.now().strftime("%Y%m%d-%H%M%S") + model_name)

# Make prediction on random images
#rnd_predict(model_path,model_weight_path,img_path,clToInt_dict)
src/model/ti_VGG19.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: bayes-net
#     language: python
#     name: bayes-net
# ---

# # Bayes nets implemented using Pomegranate
#
# The classic burglary/earthquake "alarm" network (Russell & Norvig, AIMA).

import matplotlib.pyplot as plt
from pomegranate import DiscreteDistribution, ConditionalProbabilityTable, Node, BayesianNetwork
import seaborn, time
seaborn.set_style('whitegrid')

# Prior distributions of the two root variables.
BURGLARY = DiscreteDistribution({'T': 0.001, 'F': 0.999})
EARTHQUAKE = DiscreteDistribution({'T': 0.002, 'F': 0.998})

# BUGFIX: pomegranate CPT rows must list the parent values, then THIS
# variable's value, then the probability — and the probabilities for each
# parent combination must sum to 1.  The original rows had only
# (parents, prob) and listed ['F', 'T'] twice; the second row was meant to
# be the ['F', 'F'] combination with probability 0.001.
ALARM = ConditionalProbabilityTable(
    [['T', 'T', 'T', 0.95], ['T', 'T', 'F', 0.05],
     ['T', 'F', 'T', 0.94], ['T', 'F', 'F', 0.06],
     ['F', 'T', 'T', 0.29], ['F', 'T', 'F', 0.71],
     ['F', 'F', 'T', 0.001], ['F', 'F', 'F', 0.999]],
    [BURGLARY, EARTHQUAKE])

# BUGFIX: same shape problem — rows are (alarm value, call value, probability).
MARY_CALLS = ConditionalProbabilityTable(
    [['T', 'T', 0.70], ['T', 'F', 0.30],
     ['F', 'T', 0.01], ['F', 'F', 0.99]],
    [ALARM])

JOHN_CALLS = ConditionalProbabilityTable(
    [['T', 'T', 0.90], ['T', 'F', 0.10],
     ['F', 'T', 0.05], ['F', 'F', 0.95]],
    [ALARM])

node_burglery = Node(BURGLARY, name="Burglary")
node_earthquake = Node(EARTHQUAKE, name="Earthquake")
node_alarm = Node(ALARM, name="Alarm")
# BUGFIX: the two call nodes need distinct names; both were "<NAME>" and
# duplicate node names break name-based lookups in the network.
node_mary_calls = Node(MARY_CALLS, name="Mary Calls")
node_john_calls = Node(JOHN_CALLS, name="John Calls")

model = BayesianNetwork('Alarming')
model.add_nodes(node_burglery, node_earthquake, node_alarm, node_mary_calls, node_john_calls)
model.add_edge(node_burglery, node_alarm)
model.add_edge(node_earthquake, node_alarm)
model.add_edge(node_alarm, node_mary_calls)
model.add_edge(node_alarm, node_john_calls)
# BUGFIX: a pomegranate BayesianNetwork must be baked (finalized) after the
# structure is defined, before it can be plotted or queried.
model.bake()

plt.figure(figsize=(14, 10))
model.plot()
plt.show()
slides/pomegranate_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# # Clustering Crypto

# Initial imports
import requests
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import hvplot.pandas
import plotly.express as px
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

# ### Fetching Cryptocurrency Data

# Use the following endpoint to fetch json data
url = "https://min-api.cryptocompare.com/data/all/coinlist"

# Create a DataFrame from the provided csv dump of the endpoint's 'Data' key.
file_path = Path("Data/crypto_data.csv")
crypto_df = pd.read_csv(file_path)
print(crypto_df.shape)
crypto_df.describe()

# Alternatively, use the provided csv file:
# file_path = Path("Resources/crypto_data.csv")

# ### Data Preprocessing

# Keep only necessary columns:
# 'CoinName','Algorithm','IsTrading','ProofType','TotalCoinsMined','TotalCoinSupply'
crypto_df['IsTrading'] = crypto_df['IsTrading'].astype(str)
bool_list = ['True']
crypto_df = crypto_df[crypto_df.IsTrading.isin(bool_list)]
crypto_df.info()

crypto_df.rename(columns={'Unnamed: 0': 'Ticker'}, inplace=True)
for col in crypto_df.columns:
    print(col)

crypto_df.set_index("Ticker", inplace=True)
crypto_df.head()

# Keep only cryptocurrencies that are trading.
# BUGFIX: after astype(str), IsTrading holds the *strings* 'True'/'False', so
# the original comparison with the boolean False never matched anything.
# (The isin(['True']) filter above already removed non-trading coins, so this
# is a defensive no-op kept for parity with the exercise steps.)
non_trading = crypto_df[crypto_df["IsTrading"] == 'False'].index
crypto_df.drop(non_trading, inplace=True)
crypto_df['TotalCoinsMined'].sum()

# Keep only cryptocurrencies with a working algorithm
crypto_df = crypto_df[crypto_df['Algorithm'] != 'N/A']

# Remove the "IsTrading" column
crypto_df.drop(columns=['IsTrading'], inplace=True)
crypto_df.head()

# Remove rows with at least 1 null value
crypto_df = crypto_df.dropna()

# Remove rows with cryptocurrencies having no coins mined
crypto_df = crypto_df[crypto_df["TotalCoinsMined"] > 0]

# Drop rows where there are 'N/A' text values.
# BUGFIX: crypto_df[crypto_df != 'N/A'] only masked matching cells to NaN
# (re-introducing nulls after the dropna above); actually drop any row that
# still contains the text 'N/A'.
crypto_df = crypto_df[~(crypto_df == 'N/A').any(axis=1)]

# Store the 'CoinName' column in its own DataFrame prior to dropping it from crypto_df
coins_name = pd.DataFrame(crypto_df['CoinName'])
coins_name

y = coins_name['CoinName']
y.head()

# Drop the 'CoinName' column since it's not going to be used on the clustering algorithm
from sklearn.preprocessing import OneHotEncoder, StandardScaler
crypto_df.drop(columns='CoinName', inplace=True)

# Create dummy variables for text features
crypto_dummies = pd.get_dummies(crypto_df, columns=['Algorithm', 'ProofType'])
crypto_dummies.head()

# Standardize data
crypto_scaled = StandardScaler().fit_transform(crypto_dummies)
crypto_scaled

# ### Reducing Dimensions Using PCA

# Use PCA to reduce dimensions to 3 principal components
pca = PCA(n_components=3)
crypto_pca = pca.fit_transform(crypto_scaled)

# Create a DataFrame with the principal components data.
# BUGFIX: name the columns "PC 1".."PC 3" — the 3D scatter below plots
# x="PC 1", y="PC 2", z="PC 3", which previously raised because the columns
# were created as "Principle Components N".
pca_df = pd.DataFrame(
    data=crypto_pca,
    columns=["PC 1", "PC 2", "PC 3"]
)
pca_df.head()

# ### Clustering Crytocurrencies Using K-Means
#
# #### Find the Best Value for `k` Using the Elbow Curve

inertia = []
k = list(range(1, 11))

# Calculate the inertia for the range of k values
for i in k:
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(pca_df)
    inertia.append(km.inertia_)

# Create the Elbow Curve using hvPlot
elbow_data = {"k": k, "inertia": inertia}
df_elbow = pd.DataFrame(elbow_data)
df_elbow.hvplot.line(x="k", y="inertia", xticks=k, title="Elbow Curve")

# Running K-Means with `k=4` (best value per the elbow curve)
km = KMeans(n_clusters=4, random_state=0)
km.fit(pca_df)
predictions = km.predict(pca_df)

# Create a new DataFrame including predicted clusters and cryptocurrencies features
pca_df['class'] = km.labels_
pca_df.index = crypto_df.index
crypto_df.head()

clustered_df = pd.concat([crypto_df, pca_df, coins_name], axis=1)
clustered_df.head()

# ### Visualizing Results
#
# #### 3D-Scatter with Clusters

fig = px.scatter_3d(clustered_df,
                    x="PC 1",
                    y="PC 2",
                    z="PC 3",
                    color="class",
                    hover_name="CoinName",
                    hover_data=["Algorithm"])
fig.show()

# #### Table of Tradable Cryptocurrencies

clustered_df.hvplot.table(columns=['CoinName', "Algorithm", "ProofType",
                                   "TotalCoinSupply", "TotalCoinsMined", "class"])

# #### Scatter Plot with Tradable Cryptocurrencies

# Scale data to create the scatter plot
clustered_df['TotalCoinSupply'] = clustered_df['TotalCoinSupply'].astype("float")
scaled_two = clustered_df[['TotalCoinSupply', 'TotalCoinsMined']]
scaled_two = StandardScaler().fit_transform(scaled_two)
scaled_two = pd.DataFrame(scaled_two, columns=['TotalCoinSupply', 'TotalCoinsMined'])

# Plot the scatter with x="TotalCoinsMined" and y="TotalCoinSupply"
scaled_two.hvplot.scatter(x="TotalCoinsMined", y="TotalCoinSupply")
.ipynb_checkpoints/crypto_clustering-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Distributed Training
#
# **Learning Objectives**
#   - Use AI Platform Training Service to run a distributed training job
#
# ## Introduction
#
# In the previous notebook we trained our model on AI Platform Training
# Service, but we didn't receive any benefit — in fact it was much slower to
# train on the Cloud (5-10 minutes) than locally!  Two reasons:
#
# **1. The job was too small.**  AI Platform provisions hardware on-demand;
# for small jobs the hardware start-up time exceeds the training time itself.
# To address this we'll use a dataset that is 100x as big, with enough steps
# to go through all the data at least once.
#
# **2. The hardware was too small.**  By default jobs train on an
# [n1-standard-4](https://cloud.google.com/compute/docs/machine-types#standard_machine_types)
# instance.  To get the most benefit we need to train across multiple
# machines.  Because we're using `tf.estimator.train_and_evaluate()`, the
# model already knows how to distribute itself while training — we only need
# to supply a `--scale-tier` parameter (see the available
# [scale tiers](https://cloud.google.com/ml-engine/docs/tensorflow/machine-types#scale_tiers)).
#
# We will use STANDARD_1, which corresponds to 1 n1-highcpu-8 master
# instance, 4 n1-highcpu-8 workers, and 3 n1-standard-4 parameter servers.
# Master/worker/parameter-server roles are covered later in the course.
# Training will take about 20 minutes.

PROJECT = "qwiklabs-gcp-00-34ffb0f0dc65"  # Replace with your PROJECT
BUCKET = "cloud-training-bucket"  # Replace with your BUCKET
REGION = "us-central1"  # Choose an available region for AI Platform Training Service
TFVERSION = "1.14"  # TF version for AI Platform Training Service to use

# ## Run distributed cloud job
#
# We'll now submit another (much larger) training job to the cloud.  The
# `gcloud` command is almost the same, but `train_data_path`,
# `eval_data_path` and `train_steps` point at the much larger dataset, and
# the output directory is now `trained_large`.
#
# #### **Exercise 1**
#
# Fill in the missing code in the TODOs below (see the previous
# `f_ai_platform` notebook if you get stuck).  Include the additional
# `scale-tier` parameter to specify the distributed training environment;
# see ["Using Distributed TensorFlow with Cloud ML Engine"](https://cloud.google.com/ml-engine/docs/tensorflow/distributed-tensorflow-mnist-cloud-datalab)
# and ["Specifying Machine Types or Scale Tiers"](https://cloud.google.com/ml-engine/docs/tensorflow/machine-types#scale_tiers).
#
# #### **Exercise 2**
#
# Notice how `train_data_path` contains a wildcard character — a list of
# sharded files will be read.  Modify `read_dataset()` in `model.py` to
# handle this (or verify it already does).

OUTDIR = "gs://{}/taxifare/trained_large".format(BUCKET)
# !gsutil -m rm -rf # TODO: Your code goes here
# !gcloud ai-platform # TODO: Your code goes here
#    --package-path= # TODO: Your code goes here
#    --module-name= # TODO: Your code goes here
#    --job-dir= # TODO: Your code goes here
#    --python-version= # TODO: Your code goes here
#    --runtime-version= # TODO: Your code goes here
#    --region= # TODO: Your code goes here
#    --scale-tier= # TODO: Your code goes here
#    -- \
#    --train_data_path=gs://cloud-training-demos/taxifare/large/taxi-train*.csv \
#    --eval_data_path=gs://cloud-training-demos/taxifare/small/taxi-valid.csv \
#    --train_steps=200000 \
#    --output_dir={OUTDIR}

# ## Instructions to obtain larger dataset
#
# The new `train_data_path` above is ~20,000,000 rows (100x the original
# dataset), 1.25GB sharded across 10 files.  To recreate it, go to
# https://console.cloud.google.com/bigquery and paste the query:
# <pre>
# #standardSQL
# SELECT
#   (tolls_amount + fare_amount) AS fare_amount,
#   EXTRACT(DAYOFWEEK from pickup_datetime) AS dayofweek,
#   EXTRACT(HOUR from pickup_datetime) AS hourofday,
#   pickup_longitude AS pickuplon,
#   pickup_latitude AS pickuplat,
#   dropoff_longitude AS dropofflon,
#   dropoff_latitude AS dropofflat
# FROM
#   `nyc-tlc.yellow.trips`
# WHERE
#   trip_distance > 0
#   AND fare_amount >= 2.5
#   AND pickup_longitude > -78
#   AND pickup_longitude < -70
#   AND dropoff_longitude > -78
#   AND dropoff_longitude < -70
#   AND pickup_latitude > 37
#   AND pickup_latitude < 45
#   AND dropoff_latitude > 37
#   AND dropoff_latitude < 45
#   AND passenger_count > 0
#   AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 50) = 1
# </pre>
#
# Export to CSV with these steps (<b>already done — the resulting GCS data is
# publicly available</b>, so this is optional):
# <ol>
#   <li>Click "Save Results" and select "BigQuery Table" (direct CSV export
#       fails because the file is too large).</li>
#   <li>Specify a dataset and table name (if needed,
#       <a href="https://cloud.google.com/bigquery/docs/datasets#create-dataset">create a dataset</a>).</li>
#   <li>On the BigQuery console, find the newly exported table in the
#       left-hand-side menu and click on its name.</li>
#   <li>Click "Export", then "Export to GCS".</li>
#   <li>Supply your bucket and file name (for example:
#       gs://cloud-training-demos/taxifare/large/taxi-train*.csv — the
#       asterisk allows sharding of large files).</li>
# </ol>
#
# *Note: We still use the original smaller validation dataset; its ~31K
# records suffice to give a good indication of learning.
# 100xing the validation dataset would slow down training because the full validation dataset is processed at each checkpoint, and the value of a larger validation dataset is questionable.*
# <p/>
# <p/>

# ## Analysis
#
# Our previous RMSE was 9.26, and the new RMSE is about the same (9.24), so more training data didn't help.
#
# However we still haven't done any feature engineering, so the signal in the data is very hard for the model to extract, even if we have lots of it. In the next section we'll apply feature engineering to try to improve our model.

# Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
courses/machine_learning/deepdive/02_tensorflow/labs/g_distributed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # LiveLossPlot with PyTorch and torchbearer # # [torchbearer](https://github.com/ecs-vlc/torchbearer) is a model fitting library for PyTorch. As of version 0.2.6 it includes native support for LiveLossPlot, through the [LiveLossPlot callback](https://torchbearer.readthedocs.io/en/latest/code/callbacks.html#torchbearer.callbacks.live_loss_plot.LiveLossPlot). In this notebook, we'll train a simple CNN on Cifar10 with torchbearer and LiveLossPlot. # !pip install torchbearer # + # %matplotlib inline import torch import torch.nn as nn import torch.optim as optim import torchvision from torchvision import transforms import torchbearer from torchbearer.cv_utils import DatasetValidationSplitter from torchbearer import Trial from torchbearer.callbacks import LiveLossPlot # - # ## Data # We'll use CIFAR10 for this demo, with the usual normalisations # + BATCH_SIZE = 256 normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) dataset = torchvision.datasets.CIFAR10(root='./tmp/cifar', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize])) splitter = DatasetValidationSplitter(len(dataset), 0.1) trainset = splitter.get_train_dataset(dataset) valset = splitter.get_val_dataset(dataset) traingen = torch.utils.data.DataLoader(trainset, pin_memory=True, batch_size=BATCH_SIZE, shuffle=True, num_workers=10) valgen = torch.utils.data.DataLoader(valset, pin_memory=True, batch_size=BATCH_SIZE, shuffle=True, num_workers=10) testset = torchvision.datasets.CIFAR10(root='./tmp/cifar', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize])) testgen = torch.utils.data.DataLoader(testset, pin_memory=True, batch_size=BATCH_SIZE, shuffle=False, num_workers=10) # - # ## 
Model # A simple, 3 layer CNN should do the trick, since we're using batch norm we won't worry about weight initialisation # + class SimpleModel(nn.Module): def __init__(self): super(SimpleModel, self).__init__() self.convs = nn.Sequential( nn.Conv2d(3, 16, stride=2, kernel_size=3), nn.BatchNorm2d(16), nn.ReLU(), nn.Conv2d(16, 32, stride=2, kernel_size=3), nn.BatchNorm2d(32), nn.ReLU(), nn.Conv2d(32, 64, stride=2, kernel_size=3), nn.BatchNorm2d(64), nn.ReLU() ) self.classifier = nn.Linear(576, 10) def forward(self, x): x = self.convs(x) x = x.view(-1, 576) return self.classifier(x) model = SimpleModel() # - # ## Running # # Now we're ready to run, we use one trial here for the training and validation and one for evaluation # + optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001) loss = nn.CrossEntropyLoss() trial = Trial( model, optimizer, loss, metrics=['acc', 'loss'], callbacks=[LiveLossPlot()]).to('cuda') trial.with_generators(train_generator=traingen, val_generator=valgen) history = trial.run(verbose=0, epochs=25) # - trial = Trial(model, metrics=['acc', 'loss', 'top_5_acc']).with_test_generator(testgen).to('cuda') _ = trial.evaluate(data_key=torchbearer.TEST_DATA)
examples/torchbearer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Time-energy fit # # 3ML allows the possibility to model a time-varying source by explicitly fitting the time-dependent part of the model. Let's see this with an example. # # First we import what we need: # + from threeML import * import matplotlib.pyplot as plt # %matplotlib notebook # - # Then we generate a simulated dataset for a source with a cutoff powerlaw spectrum with a constant photon index and cutoff but with a normalization that changes with time following a powerlaw: # + # Let's generate our dataset of 4 spectra with a normalization that follows # a powerlaw in time def generate_one(K): # Let's generate some data with y = Powerlaw(x) gen_function = Cutoff_powerlaw() gen_function.K = K # Generate a dataset using the power law, and a # constant 30% error x = np.logspace(0, 2, 50) xyl_generator = XYLike.from_function("sim_data", function = gen_function, x = x, yerr = 0.3 * gen_function(x)) y = xyl_generator.y y_err = xyl_generator.yerr return x, y, y_err # These are the times at which the simulated spectra have been observed time_tags = np.array([1.0, 2.0, 5.0, 10.0]) # This describes the time-varying normalization. If everything works as # it should, we should recover from the fit a normalization of 0.23 and a # index of -1.2 for the time law normalizations = 0.23 * time_tags**(-1.2) # Generate the datasets datasets = map(generate_one, normalizations) # - # Now that we have our data, let's model them with 3ML: # + # Now set up the fit and fit it # First we need to tell 3ML that we are going to fit using an # independent variable (time in this case). 
# We init it to 1.0
# and set the unit to seconds
time = IndependentVariable("time", 1.0, u.s)

# Then we load the data that we have generated, tagging them
# with their time of observation
plugins = []

for i, dataset in enumerate(datasets):
    # Each dataset is the (x, y, y_err) triple produced by generate_one
    x, y, y_err = dataset

    xyl = XYLike("data%i" % i, x, y, y_err)

    # This is the important part: we need to tag the instance of the
    # plugin so that 3ML will know that this instance corresponds to the
    # given tag (a time coordinate in this case). If instead of giving
    # one time coordinate we give two time coordinates, then 3ML will
    # take the average of the model between the two time coordinates
    # (computed as the integral of the model between t1 and t2 divided
    # by t2-t1)
    xyl.tag = (time, time_tags[i])

    # To access the tag we have just set we can use:
    independent_variable, start, end = xyl.tag

    # NOTE: xyl.tag will return 3 things: the independent variable, the start and the
    # end. If like in this case you do not specify an end when assigning the tag, end
    # will be None

    plugins.append(xyl)

# Generate the datalist as usual
data = DataList(*plugins)

# Now let's generate the spectral model, in this case a point source
# with a cutoff powerlaw spectrum
spectrum = Cutoff_powerlaw()

src = PointSource("test", ra=0.0, dec=0.0, spectral_shape=spectrum)

model = Model(src)

# Now we need to tell 3ML that we are going to use the time
# coordinate to specify a time dependence for some of the
# parameters of the model
model.add_independent_variable(time)

# Now let's specify the time-dependence (a powerlaw) for the normalization
# of the powerlaw spectrum
time_po = Powerlaw()
time_po.K.bounds = (0.01, 1000)

# Link the normalization of the cutoff powerlaw spectrum with time through the
# time law we have just generated
model.link(spectrum.K, time, time_po)

# Now let's fit as usual
jl = JointLikelihood(model, data)

best_fit_parameters, likelihood_values = jl.fit()
docs/notebooks/Time-energy-fit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Statistical Significance # # Let's say that we've collected data for a web-based experiment. In the experiment, we're testing the change in layout of a product information page to see if this affects the proportion of people who click on a button to go to the download page. This experiment has been designed to have a cookie-based diversion, and we record two things from each user: which page version they received, and whether or not they accessed the download page during the data recording period. (We aren't keeping track of any other factors in this example, such as number of pageviews, or time between accessing the page and making the download, that might be of further interest.) # # Your objective in this notebook is to perform a statistical test on both recorded metrics to see if there is a statistical difference between the two groups. # + # import packages import numpy as np import pandas as pd import scipy.stats as stats from statsmodels.stats import proportion as proptests import matplotlib.pyplot as plt % matplotlib inline # + # import data data = pd.read_csv('statistical_significance_data.csv') data.head(10) # - # In the dataset, the 'condition' column takes a 0 for the control group, and 1 for the experimental group. The 'click' column takes a values of 0 for no click, and 1 for a click. # # ## Checking the Invariant Metric # # First of all, we should check that the number of visitors assigned to each group is similar. It's important to check the invariant metrics as a prerequisite so that our inferences on the evaluation metrics are founded on solid ground. 
If we find that the two groups are imbalanced on the invariant metric, then this will require us to look carefully at how the visitors were split so that any sources of bias are accounted for. It's possible that a statistically significant difference in an invariant metric will require us to revise random assignment procedures and re-do data collection. # # In this case, we want to do a two-sided hypothesis test on the proportion of visitors assigned to one of our conditions. Choosing the control or the experimental condition doesn't matter: you'll get the same result either way. Feel free to use whatever method you'd like: we'll highlight two main avenues below. # # If you want to take a simulation-based approach, you can simulate the number of visitors that would be assigned to each group for the number of total observations, assuming that we have an expected 50/50 split. Do this many times (200 000 repetitions should provide a good speed-variability balance in this case) and then see in how many simulated cases we get as extreme or more extreme a deviation from 50/50 that we actually observed. Don't forget that, since we have a two-sided test, an extreme case also includes values on the opposite side of 50/50. (e.g. Since simulated outcomes of .48 and lower are considered as being more extreme than an actual observation of 0.48, so too will simulated outcomes of .52 and higher.) The proportion of flagged simulation outcomes gives us a p-value on which to assess our observed proportion. We hope to see a larger p-value, insufficient evidence to reject the null hypothesis. # # If you want to take an analytic approach, you could use the exact binomial distribution to compute a p-value for the test. The more usual approach, however, is to use the normal distribution approximation. Recall that this is possible thanks to our large sample size and the central limit theorem. 
# To get a precise p-value, you should also perform a
# continuity correction, either adding or subtracting 0.5 to the total count
# before computing the area underneath the curve. (e.g. If we had 415 / 850
# assigned to the control group, then the normal approximation would take the
# area to the left of $(415 + 0.5) / 850 = 0.489$ and to the right of
# $(435 - 0.5) / 850 = 0.511$.)
#
# You can check your results by completing the workspace below and comparing
# against the solution on the following page. You could also try using
# multiple approaches and seeing if they come up with similar outcomes!

# ### Analytical Approach

n_obs = data.shape[0]                            # total number of visitors
n_control = data.groupby('condition').size()[0]  # visitors in the control group
data.groupby('condition').size()

# +
# Compute a z-score and p-value
p = 0.5  # null hypothesis: a 50/50 split between conditions
sd = np.sqrt(p * (1-p) * n_obs)  # binomial standard deviation of the count

# +0.5 is the continuity correction: distance of the observed control count
# from the expected count, in standard deviations.
z = ((n_control + 0.5) - p * n_obs) / sd
print(z)
# NOTE(review): doubling the lower tail assumes z is negative (control count
# below half) — confirm against the printed z before reading this p-value.
print(2 * stats.norm.cdf(z))
# -

# ### Simulation Approach

# get number of trials and number of 'successes'
n_obs = data.shape[0]
n_control = data.groupby('condition').size()[0]

# +
# # simulate outcomes under null, compare to observed outcome
p = 0.5
n_trials = 200_000

samples = np.random.binomial(n_obs, p, n_trials)

# Proportion of simulated splits at least as extreme as the observed one,
# counting both tails (two-sided test).
print(np.logical_or(samples <= n_control, samples >= (n_obs - n_control)).mean())
# -

# ## Checking the Evaluation Metric

# After performing our checks on the invariant metric, we can move on to
# performing a hypothesis test on the evaluation metric: the click-through
# rate. In this case, we want to see that the experimental group has a
# significantly larger click-through rate than the control group, a
# one-tailed test.
#
# The simulation approach for this metric isn't too different from the
# approach for the invariant metric. You'll need the overall click-through
# rate as the common proportion to draw simulated values from for each group.
# You may also want to perform more simulations since there's higher variance
# for this test.
# # There are a few analytic approaches possible here, but you'll probably make use of the normal approximation again in these cases. In addition to the pooled click-through rate, you'll need a pooled standard deviation in order to compute a z-score. While there is a continuity correction possible in this case as well, it's much more conservative than the p-value that a simulation will usually imply. Computing the z-score and resulting p-value without a continuity correction should be closer to the simulation's outcomes, though slightly more optimistic about there being a statistical difference between groups. # # As with the previous question, you'll find a quiz and solution following the workspace for you to check your results. p_click = data.groupby('condition').mean()['click'] p_click p_click[1] - p_click[0] # ### Analytic Approach # get number of trials and overall 'success' rate under null n_control = data.groupby('condition').size()[0] n_exper = data.groupby('condition').size()[1] p_null = data['click'].mean() # + # compute standard error, z-score, and p-value se_p = np.sqrt(p_null * (1-p_null) * (1/n_control + 1/n_exper)) z = (p_click[1] - p_click[0]) / se_p print(z) print(1-stats.norm.cdf(z)) # - # ### Simulation Approach # get number of trials and overall 'success' rate under null n_control = data.groupby('condition').size()[0] n_exper = data.groupby('condition').size()[1] p_null = data['click'].mean() # + # simulate outcomes under null, compare to observed outcome n_trials = 200_000 ctrl_clicks = np.random.binomial(n_control, p_null, n_trials) exp_clicks = np.random.binomial(n_exper, p_null, n_trials) samples = exp_clicks / n_exper - ctrl_clicks / n_control print((samples >= (p_click[1] - p_click[0])).mean())
Statistical_significance/.ipynb_checkpoints/L2_Statistical_Significance-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Science Project on Prediction of Bengaluru Housing Price # ### Project Description # + active="" # This data science project series walks through step by step process of how to build a real estate price prediction website. # I will first build a model using sklearn and linear regression using bangaluru housing prices dataset from kaggle.com. Second step would be to write a python flask server that uses the saved model to serve http requests. # Third component is the website built in html, css, bootstrap and javascript that allows user to enter home square ft area, bedrooms etc and it will call python flask server to retrieve the predicted price. # During model building I will cover data science concepts such as data loading and cleaning, outlier detection and removal, feature engineering, dimensionality reduction, gridsearchcv for hyperparameter tunning, k fold cross validation etc. # - # ### Technology and tools wise this project covers. 
# + active="" # 1) Python # 2) Numpy and Pandas for data cleaning # 3) Matplotlib for data visualization # 4) Sklearn for model building # 5) Jupyter notebook, visual studio code and pycharm as IDE # 6) Python flask for http server # 7) HTML/CSS/Bootstrap/Javascript for UI # # - import pandas as pd import numpy as np from matplotlib import pyplot as plt # %matplotlib inline import matplotlib matplotlib.rcParams["figure.figsize"]=(20,10) df1= pd.read_csv("C:/Users/ARPITA/Downloads/Bengaluru_House_Data.csv") #data set is taken from keggle.com df1.head() df1.shape # ## Examine area_type feature df1.groupby('area_type')['area_type'].agg('count') # ## Drop not important columns df2= df1.drop(['area_type','society','balcony','availability'],axis='columns') df2.head() # ## Data cleaning process starts # #### It starts with handling the NAN values # two methods can be used : one is to drop the rows and another is fill the nan value with the mediun of the total column # as in this example we have 13320 rows what we can do is to drop those nan rows which are 73 only in number in case of bath. df2.isnull().sum() #return count of number of rows of a particular column having null value df3=df2.dropna() #dropna drop the nul valued rows df3.isnull().sum() # Now lets move towards the size feature of the dataset where it can be seen that some values are having odd name like 4 bedroom etc. so to handle it following code will be written. 
df3['size'].unique() #all the unique values are displayed df3['bhk']=df3['size'].apply(lambda x:int(x.split(' ')[0])) # here we split the string into two tokens using python lambda function and seperate the first string(0) as integer df3.head() #printing the new dataset df3['bhk'].unique() df3[df3.bhk>20] #looks like it have some error df3.total_sqft.unique() #range can be seen but we want a single number not range hence we have to convert it and one of the way it to take average of this kind-of range # What kind-of variations are available in this total_sqft available. So when it will catch a range it will throw the same # in except block. If it spot an integer then convert the same into float value # def is_float(x): try: float(x) except: return False return True df3[~df3['total_sqft'].apply(is_float)].head(10) #negetion will return the range value once of total_sqft because it has been thrown in the exception block def convert_sqft_to_num(x): tokens=x.split('-') if len(tokens)==2: return (float(tokens[0])+float(tokens[1]))/2 try: return float(x) except: return None convert_sqft_to_num('2000 - 6000') convert_sqft_to_num('34.46Sq. Meter') #do not return anything df4= df3.copy() df4['total_sqft']=df4['total_sqft'].apply(convert_sqft_to_num) df4.head(10) df4.loc[30] #checking index of 30 # ## Feature Engineering and Dimensionality Reduction Technique # + # Feature Engineering df5= df4.copy() # create price per square feet column df5['price_per_sqft']= df5['price']*100000/df5['total_sqft'] df5.head() # - # Location is a categorical entity. 
# If too many distinct locations are present, they create a problem (the
# one-hot feature space would explode).

len(df5.location.unique())  # so many different unique locations available

# Dimensionality reduction:
# one way to reduce dimension is to introduce an 'other' category for rare locations

df5.location= df5.location.apply(lambda x:x.strip())  # remove extra whitespace around names

# here location_stats is a Series: row count per location, most frequent first
location_stats= df5.groupby('location')['location'].agg('count').sort_values(ascending= False)
location_stats

len(location_stats[location_stats<=10])

location_stats_lessthan_10=location_stats[location_stats<=10]
location_stats_lessthan_10

len(df5.location.unique())

# transferring all locations which have <= 10 rows into 'other'
df5.location= df5.location.apply(lambda x: 'other' if x in location_stats_lessthan_10 else x)
len(df5.location.unique())

df5.head(10)

# ## Outlier Detection and Removal

df5.shape

# removing rows whose sqft per bhk is less than 300, which helps cut out erroneous rows
df6=df5[~(df5.total_sqft/df5.bhk < 300)]
df6.shape

df6.price_per_sqft.describe()

# In the last output the minimum price per square foot is 267, which is too
# low, and the maximum is 176470, which is also unusual. We remove such rows
# because we want a generic model for predicting the price; we use one
# standard deviation around the per-location mean as the keep band.

# +
def remove_pps_outliers(df):
    """Per location, keep rows whose price_per_sqft lies within one standard
    deviation of that location's mean; return the filtered DataFrame."""
    df_out=pd.DataFrame()
    for key,subdf in df.groupby('location'):
        m= np.mean(subdf.price_per_sqft)   # mean
        st=np.std(subdf.price_per_sqft)    # standard deviation
        reduced_df= subdf[(subdf.price_per_sqft >(m-st))& (subdf.price_per_sqft<=(m+st))]
        df_out=pd.concat([df_out,reduced_df],ignore_index=True)
    return df_out

df7=remove_pps_outliers(df6)
df7.shape
# -

# Plotting a scatter chart per location to look for 3 BHK priced lower than
# 2 BHK at the same total square footage...
# ...as 2 BHK.

# +
def plot_scatter_chart(df,location):
    """Scatter total_sqft vs price for 2-BHK (red dots) and 3-BHK (green +)
    listings in the given location."""
    bh2= df[(df.location==location) & (df.bhk==2)]
    bh3= df[(df.location==location) & (df.bhk==3)]
    matplotlib.rcParams['figure.figsize']=(15,20)
    plt.scatter(bh2.total_sqft,bh2.price,color='red',label='2-BHK',s=50)
    plt.scatter(bh3.total_sqft,bh3.price,marker='+',color='green',label='3-BHK',s=50)
    plt.xlabel("Total square feet area")
    plt.ylabel("Price")
    plt.title(location)
    plt.legend()

plot_scatter_chart(df7,"<NAME>")

# + active=""
# We should also remove properties where for same location, the price of (for example) 3 bedroom apartment is less than 2 bedroom apartment (with same square ft area). What we will do is for a given location, we will build a dictionary of stats per bhk, i.e.
#
# {
#     '1' : {
#         'mean': 4000,
#         'std: 2000,
#         'count': 34
#     },
#     '2' : {
#         'mean': 4300,
#         'std: 2300,
#         'count': 22
#     },
# }
# Now we can remove those 2 BHK apartments whose price_per_sqft is less than mean price_per_sqft of 1 BHK apartment
# -

import matplotlib
matplotlib.rcParams["figure.figsize"]=(20,10)
plt.hist(df7.price_per_sqft, rwidth= 0.8)
plt.xlabel("Price per square feet")
plt.ylabel("Count")

df7.bath.unique()

df7[df7.bath>10]

plt.hist(df7.bath,rwidth=0.8)
# NOTE(review): "bethrooms" typo below is in a runtime string (axis label);
# left unchanged here since this edit only touches comments.
plt.xlabel("Number of bethrooms")
plt.ylabel("Count")

# + active=""
# We can see 13 no. of bathrooms are present in some bhk. So that is unusual.
# Hence all rows with more bathrooms than bedrooms plus 2 will be dropped.
#
# Here we can see the dataset having more number of bathrooms
# -

df7[df7.bath> df7.bhk+2]

df8 =df7[df7.bath<df7.bhk+2]
df8.shape

# price_per_sqft was only needed for outlier removal; size is superseded by bhk
df9 =df8.drop(["price_per_sqft","size"],axis="columns")
df9.head()

dummies= pd.get_dummies(df9.location)  # one-hot-encoding
dummies.head()

# Drop one dummy column ('other') to avoid the dummy-variable trap
df10= pd.concat([df9,dummies.drop('other',axis='columns')],axis='columns')
df10.head()

df11= df10.drop('location',axis='columns')
df11.head()

df11.shape

# ## Model building process

x= df11.drop('price',axis='columns')  # feature matrix
x.head()

y= df11.price  # target variable
y.head()

# ### Training and Testing data

from sklearn.model_selection import train_test_split
x_train,x_test,y_train, y_test = train_test_split(x,y,test_size=0.2,random_state= 10)

from sklearn.linear_model import LinearRegression
lr_clf= LinearRegression()
lr_clf.fit(x_train,y_train)
lr_clf.score(x_test,y_test)  # R^2 on the held-out 20%

# ### Use K Fold cross validation to measure accuracy of our LinearRegression model

# +
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score

cv=ShuffleSplit(n_splits= 5,test_size=0.2,random_state=0)

cross_val_score(LinearRegression(),x,y,cv=cv)

# + active=""
# We can see that in 5 iterations we get a score above 80% all the time. This is pretty good but we want to test few other algorithms for regression to see if we can get even better score.
# We will use GridSearchCV for this purpose
# -

# ### Find best model using GridSearchCV

# +
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV  # FIX: was never imported, causing a NameError below
from sklearn.tree import DecisionTreeRegressor

def find_best_model_using_gridsearchcv(x,y):
    """Grid-search LinearRegression, Lasso and DecisionTreeRegressor over
    small parameter grids with 5-fold shuffle-split CV.

    Returns a DataFrame with columns model / best_score / best_params.
    """
    algos = {
        'linear_regression' : {
            'model': LinearRegression(),
            'params': {
                # NOTE(review): 'normalize' was removed from LinearRegression in
                # scikit-learn 1.2; on newer versions use a StandardScaler pipeline.
                'normalize': [True, False]
            }
        },
        'lasso': {
            'model': Lasso(),
            'params': {
                'alpha': [1,2],
                'selection': ['random', 'cyclic']
            }
        },
        'decision_tree': {
            'model': DecisionTreeRegressor(),
            'params': {
                # NOTE(review): 'mse' was renamed 'squared_error' in scikit-learn 1.2.
                'criterion' : ['mse','friedman_mse'],
                'splitter': ['best','random']
            }
        }
    }
    scores = []
    cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    for algo_name, config in algos.items():
        gs = GridSearchCV(config['model'], config['params'], cv=cv, return_train_score=False)
        gs.fit(x,y)
        scores.append({
            'model': algo_name,
            'best_score': gs.best_score_,
            'best_params': gs.best_params_
        })

    return pd.DataFrame(scores,columns=['model','best_score','best_params'])

find_best_model_using_gridsearchcv(x,y)
# -

x.columns

# + active=""
# Based on above results we can say that LinearRegression gives the best score. Hence we will use that.
# -

# ### Test the model for few properties

def predict_price(location,sqft,bath,bhk):
    """Predict a price with the fitted lr_clf for the given features.

    Unknown locations fall back to all-zero location dummies (the same
    encoding used for the dropped 'other' category) instead of crashing.
    """
    # FIX: np.where(...)[0][0] raised IndexError for unknown locations before
    # the `loc_index >= 0` guard could ever run, making that check dead code.
    matches = np.where(x.columns==location)[0]
    loc_index = matches[0] if matches.size else -1

    X = np.zeros(len(x.columns))
    X[0] = sqft
    X[1] = bath
    X[2] = bhk
    if loc_index >= 0:
        X[loc_index] = 1

    return lr_clf.predict([X])[0]

predict_price('1st Phase JP Nagar',1000, 2, 2)

predict_price('1st Phase JP Nagar',1000, 2, 3)

predict_price('Indira Nagar',1000, 2, 2)

predict_price('Indira Nagar',1000, 3, 3)

# ### Export the tested model to a pickle file

import pickle
with open('banglore_home_prices_model.pickle','wb') as f:
    pickle.dump(lr_clf,f)

# ### Export location and column information to a file that will be useful later on in our prediction application

import json
columns = {
    'data_columns' : [col.lower() for col in x.columns]
}
with open("columns.json","w") as f:
    f.write(json.dumps(columns))
Data Science Project on Housing Price Prediction.ipynb
# --- # jupyter: # jupytext: # cell_metadata_filter: collapsed,code_folding # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # ConsIndShockModel: Consumption With Shocks # %% code_folding=[] # Initial imports and notebook setup, click arrow to show import sys import os from HARK.ConsumptionSaving.ConsIndShockModel import * import HARK.ConsumptionSaving.ConsumerParameters as Params from HARK.utilities import plotFuncsDer, plotFuncs from time import clock mystr = lambda number : "{:.4f}".format(number) # %% [markdown] # Defines classes to solve canonical consumption-saving models with idiosyncratic shocks to income. All models here assume CRRA utility with geometric discounting, no bequest motive, and income shocks are fully transitory or fully permanent. # # ConsIndShockModel currently solves three types of models: # 1. A basic "perfect foresight" consumption-saving model with no uncertainty. # 2. A consumption-saving model with risk over transitory and permanent income shocks. # 3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings. # # See [NARK](https://github.com/econ-ark/NARK) for information on variable naming conventions. # See [HARK documentation](https://github.com/econ-ark/HARK/tree/master/Documentation) for brief mathematical descriptions of the models being solved. Detailed mathematical references are referenced _in situ_ below. 
# %% [markdown] # ## Perfect Foresight CRRA Utility Consumer # # The $\mathtt{PerfForesightConsumerType}$ class defines the solution for the problem of a consumer with Constant Relative Risk Aversion utility # $$\newcommand{\CRRA}{\rho}$$ # \begin{equation} # U(C) = \frac{C^{1-\CRRA}}{1-\rho} # \end{equation} # has perfect foresight about everything except the (stochastic) date of death, which occurs with constant probability implying a "survival probability" $\newcommand{\LivPrb}{\aleph}\LivPrb < 1$. Permanent labor income $P_t$ grows from period to period by a factor $\Gamma_t$. At the beginning of each period $t$, the consumer has some amount of market resources $M_t$ (which includes both market wealth and currrent income) and must choose how much of those resources to consume $C_t$ and how much to retain in a riskless asset $A_t$ which will earn return factor $R$. The agent's flow of utility $U(C_t)$ from consumption is geometrically discounted by factor $\beta$. Between periods, the agent survives with probability $\newcommand{\LivFac}{\aleph}{\LivFac_{t}}$ which results in a further downweighting of future utility because the consumer does not receive utility after death. For notational simplicity, we omit $\LivFac$ from the statement of the problem (effectively assuming $\LivFac=1$). # # The agent's problem can be written in Bellman form as: # $$\newcommand{\DiscFac}{\beta}\renewcommand{\LivFac}{\aleph}{}$$ # \begin{eqnarray*} # V_t(M_t,P_t) &=& \max_{C_t}~U(C_t) ~+ \phantom{\LivFac} \DiscFac V_{t+1}(M_{t+1},P_{t+1}), \\ # & s.t. 
& \\ # %A_t &=& M_t - C_t, \\ # M_{t+1} &=& R (M_{t}-C_{t}) + Y_{t+1}, \\ # P_{t+1} &=& \Gamma_{t+1} P_t, \\ # \end{eqnarray*} # # A particular perfect foresight agent's problem can be characterized by values of risk aversion $\rho$, discount factor $\beta$, and return factor $R$, along with sequences of income growth factors $\{ \Gamma_t \}$ and survival probabilities $\{\LivPrb_t = \LivPrb = 1\}$ (which are allowed to vary by age but which for present purposes we will assume are time invariant at $\LivPrb$. To keep things simple, let's forget about "sequences" of income growth and mortality, and just think about an $\textit{infinite horizon}$ consumer with constant income growth and survival probability of $\LivFac=1$. # # # Solve the model described above and in [PerfForesightCRRA](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA) # %% PFexample = PerfForesightConsumerType(**Params.init_perfect_foresight) PFexample.cycles = 0 # Make this type have an infinite horizon PFexample.solve() PFexample.unpackcFunc() # Plot the perfect foresight consumption function print('Linear consumption function:') mMin = PFexample.solution[0].mNrmMin plotFuncs(PFexample.cFunc[0],mMin,mMin+10) PFexample.timeFwd() PFexample.T_sim = 120 # Set number of simulation periods PFexample.track_vars = ['mNrmNow'] PFexample.initializeSim() PFexample.simulate() # %% [markdown] # ## Consumer with idiosyncratic income shocks # # Solve a model like the one analyzed in [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/) # # Specifically, our new type of consumer receives two income shocks at the beginning of each period: a completely transitory shock $\newcommand{\tShkEmp}{\theta}{\tShkEmp_t}$ and a completely permanent shock $\newcommand{\pShk}{\psi}{\pShk_t}$. Moreover, lenders will not let the agent borrow money such that his ratio of end-of-period assets $A_t$ to permanent income $P_t$ is less than $\underline{a}$. 
As with the perfect foresight problem, this model can be framed in terms of $\textit{normalized}$ variables, e.g. $m_t \equiv M_t/P_t$. (See [here](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/) for all the theory). # # \begin{eqnarray*} # v_t(m_t) &=& \max_{c_t} {~} U(c_t) + \phantom{\LivPrb} \beta \mathbb{E}_{t} [(\Gamma_{t+1}\psi_{t+1})^{1-\rho} v_{t+1}(m_{t+1}) ], \\ # a_t &=& m_t - c_t, \\ # a_t &\geq& \underline{a}, \\ # m_{t+1} &=& R/(\Gamma_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\ # \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1, \\ # u(c) &=& \frac{c^{1-\rho}}{1-\rho}. # \end{eqnarray*} # # HARK represents agents with this kind of problem as instances of the class $\texttt{IndShockConsumerType}$. To create an $\texttt{IndShockConsumerType}$, we must specify the same set of parameters as for a $\texttt{PerfForesightConsumerType}$, as well as an artificial borrowing constraint $\underline{a}$ and a stochastic process for the income shocks. # # The user can specify any desired discrete approximation to a continuous distribution for the IID shocks. We have built-in tools for constructing thes as discrete equiprobably approximations to lognormal, which is the default assumption. # %% IndShockExample = IndShockConsumerType(**Params.init_idiosyncratic_shocks) IndShockExample.cycles = 0 # Make this type have an infinite horizon start_time = clock() IndShockExample.solve() end_time = clock() print('Solving a consumer with idiosyncratic shocks took ' + mystr(end_time-start_time) + ' seconds.') IndShockExample.unpackcFunc() IndShockExample.timeFwd() # Plot the consumption function and MPC for the infinite horizon consumer print('Concave consumption function:') plotFuncs(IndShockExample.cFunc[0],IndShockExample.solution[0].mNrmMin,5) print('Marginal propensity to consume function:') plotFuncsDer(IndShockExample.cFunc[0],IndShockExample.solution[0].mNrmMin,5) # Compare the consumption functions for the perfect foresight and idiosyncratic # shock types. 
Risky income cFunc asymptotically approaches perfect foresight cFunc. print('Consumption functions for perfect foresight vs idiosyncratic shocks:') plotFuncs([PFexample.cFunc[0],IndShockExample.cFunc[0]],IndShockExample.solution[0].mNrmMin,100) # Compare the value functions for the two types if IndShockExample.vFuncBool: print('Value functions for perfect foresight vs idiosyncratic shocks:') plotFuncs([PFexample.solution[0].vFunc,IndShockExample.solution[0].vFunc], IndShockExample.solution[0].mNrmMin+0.5,10) # Simulate some data; results stored in mNrmNow_hist, cNrmNow_hist, and pLvlNow_hist IndShockExample.T_sim = 120 IndShockExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow'] IndShockExample.makeShockHistory() # This is optional, simulation will draw shocks on the fly if it isn't run. IndShockExample.initializeSim() IndShockExample.simulate() # %% [markdown] # ## Idiosyncratic shocks consumer with a finite lifecycle # # Models of this kinds are described in [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) and an example is solved in the [SolvingMicroDSOPs REMARK](https://github.com/econ-ark/REMARK/blob/master/REMARKs/SolvingMicroDSOPs.md). 
# %% LifecycleExample = IndShockConsumerType(**Params.init_lifecycle) LifecycleExample.cycles = 1 # Make this consumer live a sequence of periods -- a lifetime -- exactly once start_time = clock() LifecycleExample.solve() end_time = clock() print('Solving a lifecycle consumer took ' + mystr(end_time-start_time) + ' seconds.') LifecycleExample.unpackcFunc() LifecycleExample.timeFwd() # Plot the consumption functions during working life print('Consumption functions while working:') mMin = min([LifecycleExample.solution[t].mNrmMin for t in range(LifecycleExample.T_cycle)]) plotFuncs(LifecycleExample.cFunc[:LifecycleExample.T_retire],mMin,5) # Plot the consumption functions during retirement print('Consumption functions while retired:') plotFuncs(LifecycleExample.cFunc[LifecycleExample.T_retire:],0,5) LifecycleExample.timeRev() # Simulate some data; results stored in mNrmNow_hist, cNrmNow_hist, pLvlNow_hist, and t_age_hist LifecycleExample.T_sim = 120 LifecycleExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow','t_age'] LifecycleExample.initializeSim() LifecycleExample.simulate() # %% [markdown] # ## "Cyclical" consumer type # Make and solve a "cyclical" consumer type who lives the same four quarters repeatedly. # The consumer has income that greatly fluctuates throughout the year. 
# %% CyclicalExample = IndShockConsumerType(**Params.init_cyclical) CyclicalExample.cycles = 0 start_time = clock() CyclicalExample.solve() end_time = clock() print('Solving a cyclical consumer took ' + mystr(end_time-start_time) + ' seconds.') CyclicalExample.unpackcFunc() CyclicalExample.timeFwd() # Plot the consumption functions for the cyclical consumer type print('Quarterly consumption functions:') mMin = min([X.mNrmMin for X in CyclicalExample.solution]) plotFuncs(CyclicalExample.cFunc,mMin,5) # Simulate some data; results stored in cHist, mHist, bHist, aHist, MPChist, and pHist CyclicalExample.T_sim = 480 CyclicalExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow','t_cycle'] CyclicalExample.initializeSim() CyclicalExample.simulate() # %% [markdown] # ## Agent with a kinky interest rate (Rboro > RSave) # # Models of this kind are analyzed in [A Theory of the Consumption Function, With # and Without Liquidity Constraints](http://www.econ2.jhu.edu/people/ccarroll/ATheoryv3JEP.pdf) # and the [expanded edition](http://www.econ2.jhu.edu/people/ccarroll/ATheoryv3NBER.pdf). # %% KinkyExample = KinkedRconsumerType(**Params.init_kinked_R) KinkyExample.cycles = 0 # Make the Example infinite horizon start_time = clock() KinkyExample.solve() end_time = clock() print('Solving a kinky consumer took ' + mystr(end_time-start_time) + ' seconds.') KinkyExample.unpackcFunc() print('Kinky consumption function:') KinkyExample.timeFwd() plotFuncs(KinkyExample.cFunc[0],KinkyExample.solution[0].mNrmMin,5) KinkyExample.T_sim = 120 KinkyExample.track_vars = ['mNrmNow','cNrmNow','pLvlNow'] KinkyExample.initializeSim() KinkyExample.simulate()
notebooks/ConsIndShockModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! # # Lighting effects on 3D Surface Plot # In this tutorial, we will generate a simple surface plot, and add various lighting effects to it. import plotly.plotly as py import plotly.graph_objs as go import plotly.tools as tls import numpy as np # + x = np.linspace(-np.pi, np.pi, 100) y = np.linspace(-np.pi, np.pi, 100) Y, X = np.meshgrid(x, y) # - Z1 = np.cos(X)*np.sin(Y) Z2 = 2 + np.cos(X)*np.sin(Y) trace1 = go.Surface(z=Z1, colorscale='Viridis') py.iplot([trace1]) # There are four lighting effects available in Plotly: ** ambient, diffuse, roughness, specular, and fresnel.** # Now, we will add some lightning effects to the above trace, one by one and see their effects: # # # ## Ambient ## # # `Ambient` stands for the default light in the room. We can set it in a range from 0 to 1. If we set it to zero, the trace appears dark. The default Ambient value for plot is 0.8. 
# + fig = tls.make_subplots(rows=1, cols=2,specs=[[{'is_3d': True},{'is_3d': True} ]]) trace1 = go.Surface(z=Z1, colorscale='Viridis', lighting=dict(ambient=0.2)) trace2 = go.Surface(z=Z2, colorscale='Viridis',showscale=False, lighting=dict(ambient=0.9)) fig.append_trace(trace1, 1, 1) fig.append_trace(trace2, 1, 2) py.iplot(fig) # - # ## Roughness ## # `Roughness` in a lighting plot refers to amount of light scattered. The value of roughness can range from 0 to 1 (by default value is 0.5). # + fig = tls.make_subplots(rows=1, cols=2,specs=[[{'is_3d': True},{'is_3d': True} ]]) trace1 = go.Surface(z=Z1, colorscale='Viridis', lighting=dict(roughness=0.1)) trace2 = go.Surface(z=Z2, colorscale='Viridis',showscale=False, lighting=dict(roughness=0.9)) fig.append_trace(trace1, 1, 1) fig.append_trace(trace2, 1, 2) py.iplot(fig) # - # ## Diffuse ## # By using `Diffuse` the light is reflected at many angles rather than just one angle. The value ranges from 0 to 1 (default value is 0.8). # + fig = tls.make_subplots(rows=1, cols=2,specs=[[{'is_3d': True},{'is_3d': True} ]]) trace1 = go.Surface(z=Z1, colorscale='Viridis', lighting=dict(diffuse=0.1)) trace2 = go.Surface(z=Z2, colorscale='Viridis',showscale=False,lighting=dict(diffuse=0.9)) fig.append_trace(trace1, 1, 1) fig.append_trace(trace2, 1, 2) py.iplot(fig) # - # ## Fresnel ## # `Fresnel` attribute is used to wash light over area of plot. The value can range from 0 to 5 (default value is 0.2). # + fig = tls.make_subplots(rows=1, cols=2,specs=[[{'is_3d': True},{'is_3d': True} ]]) trace1 = go.Surface(z=Z1, colorscale='Viridis', lighting=dict(fresnel=0.1)) trace2 = go.Surface(z=Z2, colorscale='Viridis',showscale=False, lighting=dict(fresnel=4.5)) fig.append_trace(trace1, 1, 1) fig.append_trace(trace2, 1, 2) # - py.iplot(fig) # ## Specular ## # `Specular` attribute induces bright spots of lighting in your plot. It's value range from 0 to 2 (default value is 0.05). 
#
# + 
fig = tls.make_subplots(rows=1, cols=2,specs=[[{'is_3d': True},{'is_3d': True} ]])
trace1 = go.Surface(z=Z1, colorscale='Viridis', lighting=dict(specular=0.2))
trace2 = go.Surface(z=Z2, colorscale='Viridis',showscale=False ,lighting=dict(specular=2))
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
py.iplot(fig)
# -

# ## Combined effects: ##
# The effects can also be added in a combined manner as follows:

# +
# All five lighting attributes can be supplied together in a single dict.
lighting_effects = dict(ambient=0.4, diffuse=0.5, roughness = 0.9, specular=0.6, fresnel=0.2)
trace = go.Surface(z=Z1, colorscale='Viridis', lighting=lighting_effects)
py.iplot([trace])
# -

# ---
# #### Reference
# See https://plotly.com/python/reference/#surface-lighting for more information!

# +
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
# Publish this notebook to the plotly documentation site.
# BUG FIX: a comma was missing after language='python', which made this
# call a SyntaxError (two keyword arguments with no separator).
publisher.publish(
    '3d-camera-lighting.ipynb', 'python/3d-surface-lighting/', 'Python 3D surface lighting | plotly',
    'How to add lighting effects in 3D Python Plots',
    title= '3D Surface Lighting in Python | Plotly',
    name = '3D Surface Lighting',
    has_thumbnail='true', thumbnail='thumbnail/3d-surface-lighting.jpg',
    language='python',
    display_as='3d_charts', order=0.107,
    ipynb= '~notebook_demo/77')
# -
_posts/python-v3/3d/3d-surface-lighting/3d-camera-lighting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # O-mode Reflectometry simulator # # Getting a simulated profile, extract the simulated signal from said profile and reconstruct the profile. import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import interp1d from scipy.integrate import simps from refo.sim_perf import sim_perf import refo.functions as ref from scipy import signal # ## Simulating a typical AUG H-mode density profile R0 = 2.22 Rant = 2.32 radius, dens = sim_perf(Rmid = 2.0, R0=R0, Rant=Rant) plt.plot(radius, dens, lw=3) plt.show() # ## Declaring a frequency sweep fstart = 16e9 fstop = 50e9 dstart = ref.f2n(fstart) dstop = ref.f2n(fstop) perfmask = (dens >= dstart) & (dens <= dstop) plt.plot(radius, dens, lw=3) plt.plot(radius[perfmask], dens[perfmask], lw=3) plt.xlim(2.15, 2.25) plt.show() fpts = 1024 fprobe = np.linspace(fstart, fstop, fpts, endpoint=True) # ### Will this be possible without a for? 
# + interpts = 1024 interp_perf = interp1d(radius, dens) interp_rad = interp1d(dens, radius) total_phase = [] fp = 25e9 dens_probe = ref.f2n(fp) r_int = np.linspace(interp_rad(dens_probe), Rant, interpts, endpoint=True) n_int = interp_perf(r_int) #plt.plot(radius, dens, lw=3) #plt.plot(r_int, n_int, lw=3) #plt.xlim(2.15, 2.25) NO = np.sqrt(1-n_int/dens_probe) plt.plot(r_int, NO) plt.show() # + total_phase = [] #Avoid numerical errors due to the sqrt epsi = 1e-9 #Do a numeric aproach for f in fprobe: dens_probe = ref.f2n(f) r_int = np.linspace(interp_rad(dens_probe)+epsi, Rant, interpts, endpoint=True) n_int = interp_perf(r_int) #Refractive index NO = np.sqrt(1-n_int/dens_probe) #Refractive index total_phase.append( 4.0*np.pi*f/3e8*simps(NO, x=r_int) ) #Not interested in the error, hence the [0] total_phase = np.array(total_phase) # - plt.plot(fprobe, total_phase) plt.show() sig = np.cos(total_phase) plt.plot(sig) plt.show() fsamp = 40e6 f, t, Sxx = signal.spectrogram(sig, fsamp*1e-6, nfft=1024, nperseg=128, noverlap=85) f = f /(2.0*np.pi) plt.pcolormesh(t, f, Sxx) plt.ylim(0, 2.5) plt.xlabel('Sweep time [us]') plt.ylabel('Beat freq [Mhz]') plt.show() np.shape(Sxx) # + beat_f = f[np.argmax(Sxx, axis=0)] plt.plot(t, beat_f) plt.ylim(0, 2.5) plt.xlabel('Sweep time [us]') plt.ylabel('Beat freq [Mhz]') plt.show() # - Swp_rate = (fstop-fstart) / (1024.0/fsamp) print Swp_rate # + tau_g = beat_f*1e6 / Swp_rate f_probsw = t*1e-6/(1024.0/fsamp)*(fstop-fstart) + fstart plt.plot(f_probsw, tau_g) plt.ylim(0, 2e-9) plt.xlabel('Sweep Frequency [Hz]') plt.ylabel('Group Delay [s]') plt.show() # - init_pts = 16 temp = np.linspace(0.0, f_probsw[0], init_pts) f_init = np.concatenate((temp, f_probsw)) temp = np.linspace(0.0, tau_g[0], init_pts) tau_init = np.concatenate((temp, tau_g)) plt.plot(f_init, tau_init) plt.show() dens_rec = ref.f2n(f_init) # + pos_ref = [] Rassum = 2.25 # FOR j = 1,np-1 DO $ # FOR i = 1,j DO $ # II[j] = 
II[j]+twoPI*itao[i]*(ASIN(fp[i]/fp[j])-ASIN(fp[i-1]/fp[j]))

# Abel-inversion of the group delay: the reflection radius for probing
# frequency f_init[j] accumulates contributions from the group delays of
# all lower probing frequencies (see the IDL reference quoted above).
ref_rad = np.zeros(len(tau_init))
for j in range(1, len(tau_init)):
    for i in range(1, j):
        # BUG FIX: the closing parenthesis was misplaced in the original,
        # computing arcsin(a - arcsin(b)) instead of arcsin(a) - arcsin(b)
        # as required by the reference implementation in the comment above.
        ref_rad[j] = ref_rad[j] + 2.0*np.pi*tau_init[i]*(
            np.arcsin(f_init[i]/f_init[j]) - np.arcsin(f_init[i-1]/f_init[j]))
# Convert the accumulated phase integral into a radial position (meters),
# measured back from the antenna position Rant (3e8 = speed of light, m/s).
ref_rad = Rant - 3e8/(2.0*np.pi)*ref_rad
# -

# Compare the reconstructed profile against the simulated density profile.
plt.plot(radius, dens, lw=3)
plt.plot(ref_rad, dens_rec)
plt.show()
O-mode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="sipVQMtCP1EU" # [![AnalyticsDojo](https://github.com/rpi-techfundamentals/spring2019-materials/blob/master/fig/final-logo.png?raw=1)](http://rpi.analyticsdojo.com) # <center><h1>Basic Text Feature Creation in Python</h1></center> # <center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center> # + colab={"base_uri": "https://localhost:8080/", "height": 468} colab_type="code" id="k_VzxzKJP9J4" outputId="df7a3475-9b02-4f64-ae30-a8fc4f2eb47f" # !wget https://raw.githubusercontent.com/rpi-techfundamentals/spring2019-materials/master/input/train.csv # !wget https://raw.githubusercontent.com/rpi-techfundamentals/spring2019-materials/master/input/test.csv # + colab={} colab_type="code" id="4VE8Lm6TProo" import numpy as np import pandas as pd import pandas as pd train= pd.read_csv('train.csv') test = pd.read_csv('test.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 475} colab_type="code" id="uzvUhUzoProy" outputId="a1329bc4-e5ec-4f45-c3ac-af3ac2da6848" #Print to standard output, and see the results in the "log" section below after running your script train.head() # + colab={"base_uri": "https://localhost:8080/", "height": 324} colab_type="code" id="Q3yOOb59Pro_" outputId="cf111e0c-1563-45e2-a657-b80b0f786d70" #Print to standard output, and see the results in the "log" section below after running your script train.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 267} colab_type="code" id="0kbWnA9hPrpN" outputId="0a9980eb-84cd-4ab5-9e1e-6caa0364f44d" train.dtypes # + colab={"base_uri": "https://localhost:8080/", "height": 1108} colab_type="code" id="WBDI4kNuPrpX" outputId="06acd3da-e37b-49f8-8513-cc7700f494c2" #Let's look at the age field. 
We can see "NaN" (which indicates missing values).s train["Age"] # + colab={"base_uri": "https://localhost:8080/", "height": 1126} colab_type="code" id="ZK8wYH0yPrpl" outputId="1e0892f6-3667-4e2e-ad64-92910ead19bf" #Now let's recode. medianAge=train["Age"].median() print ("The Median age is:", medianAge, " years old.") train["Age"] = train["Age"].fillna(medianAge) #Option 2 all in one shot! train["Age"] = train["Age"].fillna(train["Age"].median()) train["Age"] # + colab={} colab_type="code" id="m9okvlm_Prps" #For Recoding Data, we can use what we know of selecting rows and columns train["Embarked"] = train["Embarked"].fillna("S") train.loc[train["Embarked"] == "S", "EmbarkedRecode"] = 0 train.loc[train["Embarked"] == "C", "EmbarkedRecode"] = 1 train.loc[train["Embarked"] == "Q", "EmbarkedRecode"] = 2 # + colab={} colab_type="code" id="pyMwfV6dPrpx" # We can also use something called a lambda function # You can read more about the lambda function here. #http://www.python-course.eu/lambda.php gender_fn = lambda x: 0 if x == 'male' else 1 train['Gender'] = train['Sex'].map(gender_fn) # + colab={"base_uri": "https://localhost:8080/", "height": 4280} colab_type="code" id="Y5n8mVPOPrp1" outputId="49041ba7-70d3-4cea-e1d3-b47fa87e971f" #or we can do in one shot train['NameLength'] = train['Name'].map(lambda x: len(x)) train['Age2'] = train['Age'].map(lambda x: x*x) train # + colab={} colab_type="code" id="G6ZCxH_mPrp5" #We can start to create little small functions that will find a string. def has_title(name): for s in ['Mr.', 'Mrs.', 'Miss.', 'Dr.', 'Sir.']: if name.find(s) >= 0: return True return False #Now we are using that separate function in another function. 
title_fn = lambda x: 1 if has_title(x) else 0
#Finally, we call the function for name
train['Title'] = train['Name'].map(title_fn)
# BUG FIX: the test set's Title feature was previously computed from
# train['Name'], silently mis-aligning rows (and leaking training data).
# It must be derived from the test set's own Name column.
test['Title'] = test['Name'].map(title_fn)
# + colab={"base_uri": "https://localhost:8080/", "height": 4174} colab_type="code" id="HiHaSvJpPrp8" outputId="838f7447-2df5-401d-cc4c-62a44668078c"
test
# + colab={"base_uri": "https://localhost:8080/", "height": 304} colab_type="code" id="z22z54N_PrqB" outputId="39e2833c-ed5a-4d1c-f188-5958737ba060"
#Writing to File
# NOTE(review): the Kaggle test.csv has no 'Survived' column, so this
# selection raises a KeyError unless an earlier cell adds predictions to
# `test` -- confirm test['Survived'] is populated before this cell runs.
submission=pd.DataFrame(test.loc[:,['PassengerId','Survived']])
#Any files you save will be available in the output tab below
submission.to_csv('submission.csv', index=False)
# + colab={} colab_type="code" id="RKMZUdulPrqE"

# + colab={} colab_type="code" id="7mW1cqv2PrqH"

# + colab={} colab_type="code" id="08AvDFQoPrqJ"
site/_build/jupyter_execute/notebooks/04-intro-modeling/05-titanic-features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## This material is adjusted from Software carpentry, <NAME>, and can be found here. # http://swcarpentry.github.io/python-novice-gapminder/ # https://github.com/aspp-apac/2019-data-tidying-and-visualisation # # Plotting with matplotlib # #### Questions you should be able to answer after this session # * How can I plot my data? # # * How can I save my plot for publishing? # # #### Objectives # * Create a time series plot showing a single data set. # # * Create a scatter plot showing relationship between two data sets. # # #### Matplotlib # * Commonly use a sub-library called matplotlib.pyplot. # * The Jupyter Notebook will render plots inline if we ask it to using a “magic” command. # * good [explainer for Matplotlib](http://pbpython.com/effective-matplotlib.html) # #inline plotting and importing matplotlib as plt # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd from IPython.display import Image # ### Matplotlib is best run as in object oriented way # You will have a figure which is the canvas with the frame. The 'axes' object is the picutre you draw onto the canvas. You can draw multiple pictures aka 'axes' on the same canvas. #from https://stackoverflow.com/questions/5575451/difference-between-axes-and-axis-in-matplotlib Image(filename='./fig/Fig_axes_in_matplotlib.png') # + #explain fig and ax on the board fig, ax = plt.subplots() time = [0, 1, 2, 3] position = [0, 100, 200, 300] ax.plot(time, position) ax.set_xlabel('Time (hr)') ax.set_ylabel('Position (km)') # - # ### Matplotlib can interpret intergers and floats as you would expect # If you have a number encoded as a string convert it to a int or float type so it interprets numbers as expected. 
# + #import asia this time asia = #rename the columns and plot with str/strip/astype new_column_name = #rename column header asia_new = # - # ### Matplotlib is REALLY finetunable # This is its strenght and weakness. # + fig, ax = plt.subplots(figsize=(12,12)) #plot China as linegraph #set ylabel and tick params. Fontdict and labelsize # - # ### Matplotlib lets you plot different plot types easily # * .plot = lineplot # * .bar = bar graph # * .scatter = scatter plot # * .boxplot = box plots # * .violinplot = violin plot # * many many more. See gallery [here](https://matplotlib.org/gallery/index.html). # + fig, ax = plt.subplots(figsize=(12,12)) #scatter China and Afghanistan in one axes #set y_label as before, tick params and add a legend (prop/size) ax.set_ylabel('GDP per capita', fontdict={'size': 20}) ax.tick_params(axis='both', which='major', labelsize=20) # - # ### Matplotlib has different plotting styles # # + plt.style.use('ggplot') fig, ax = plt.subplots(figsize=(12,12)) #set the size bar_size = #make a bargraph and don't forget to add the size #have same lables again ax.set_ylabel('GDP per capita', fontdict={'size': 20}) ax.tick_params(axis='both', which='major', labelsize=20) # - # #### Exercise # Copy over the raw text into a new code cell. Fill in the blanks below to plot the minimum and maximum GDP per capita over time for all the countries in Europe. Try to explain what the other bits and pieces do. 
# + active="" # data_europe = pd.read_csv('data/gapminder_gdp_europe.csv', index_col='country') # # fig, ___ = plt.subplots(figsize=(12,12)) # # max_per_year_europe = _____ # min_per_year_europe = _____ # # years = data_europe.columns.___.strip(___).astype(__) # # ax.scatter(___,___, label = 'max', s = 30) # ax.scatter(____,___, label = 'min', s = 30) # # ax.legend(loc='best', prop={'size':20}) # ax.tick_params(axis='x', rotation=90, labelsize=20) # ax.tick_params(axis='y', labelsize=20) # - # #### Excercise # Modify the example in the notes to create a scatter plot showing the relationship between the minimum and maximum GDP per capita among the countries in Asia for each year in the data set. What relationship do you see (if any)? # # Start thinking about the following: # * Read in the gdp from asia using from_csv # * make the the fig, ax using plt.subplots() # * calculate min and max for asia over the time using the describe method within the dataframe object. # * transpose your dataframe # * plot ax.scatter with min and max # + active="" # data_asia = ___ # # fig, ax = ___ # # # ax.____(____.describe()___, _____) # - # ### Matplotlib lets you save files in different formats # The figure can be saved with its intrinsic function .savefig. It will save the current figure to file. The file format will automatically be deduced from the file name extension (formats are pdf, ps, eps, png, and svg). You can also adjust the resolution with dpi = . fig.savefig('test.png') # ## Ploting with Seaborn a modern plotting library # [Seaborn](https://seaborn.pydata.org/) is a simpler statistical plotting library. # # This part is based on https://github.com/aspp-apac/2019-data-tidying-and-visualisation # # # Seaborn builds on Matplotlib. 
Some nice features are: # # * works directly with Pandas dataframes, concise syntax # * lots of plot types, including some more advanced options # * statistical plotting: many plots do summary statistics for you # * good default aesthetics and easy control of aesthetics # * using Matplotlib gives benefits of Matplotlib - many backends, lots of control # * underlying Matplotlib objects are easy to tweak directly # #### Setup import pandas as pd import numpy as np # Be aware that Seaborn automatically changes Matplotlib's defaults on import. Not only your Seaborn plots, but also your Matplotlib plots, will look different once Seaborn is imported. If you don't want this behaviour, you can call sns.reset_orig() after import. import seaborn as sns plt.style.use('seaborn') sales = pd.read_csv("./data/housing-data-10000.csv", usecols=['id','date','price','zipcode','lat','long', 'bedrooms', 'waterfront','view','grade','sqft_living','sqft_lot'], parse_dates=['date'], dtype={'zipcode': 'category', 'waterfront': 'bool'}) sales.head() # + #head the dataframe # - sales.dtypes # Note that as well as specifying that the date field should be parsed as a date, we specified that certain variables are categorical (as opposed to integers). Some plotting commands understand pandas DataFrames and will treat categorical variables differently to numerical variables. # ### Toy data trains = pd.read_csv('./data/trains_example.csv') trains trains.to_csv('./data/trains_example.csv') # + fig, ax = plt.subplots() #make a barplot with data, x is the engine and y is the speed use ax # - # Here Seaborn has interpreted the x and y arguments as field names in the supplied DataFrame. Notice also that Seaborn has performed the summary statistics for us - in this case, using the default estimator, which is mean(). # + fig, ax = plt.subplots() #make a barplot with data, x is the engine and y is the speed use ax. Swap x and y. # - # Notice also what happens if we simply swap the x and y parameters. 
Seaborn will automatically deduce that the categorical or string-like variable must be the bar labels, and the numeric variable must be the numeric axis: # #### Exercise # Create a (vertical) bar plot using the **sales** data, showing how house prices vary with the value of the property grade. #barplot with x being grade and y being price sns.barplot(data=sales, x='grade', y='price') # Bar plots are often deplored as a way of showing statistical estimates, as only the top of the bar is really important, and the bar itself is a visual distraction. A point plot is an alternative, and plots like box plots can show more information. Several other plot types also show distributional information within categories. # # # #### Exercise: # # reproduce the plot you just made, using instead each of the Seaborn functions: # # * pointplot() # * boxplot() # * violinplot() (try the scale parameter) # * boxenplot() # * stripplot() [SEE WARNING] (try the jitter parameter) # * swarmplot() [SEE WARNING] # Note what sort of information about the distribution is shown by each. # # WARNING: stripplot() and swarmplot() will plot individual data points. There are too many house sales to easily display in this way - you may want to subsample the dataframe with e.g. data=sales.sample(100). # + active="" # #swarm plot with downsampling (own note think groupby) # sns.swarmplot(___=sales.sample(500),___, ___) # - # Let's try making a horizontal bar plot of price against grade by putting grade on the y axis: # WARNING: # Do not rerun this cell without modifying it! # It will take a long time to run. sns.barplot(data=sales, x='price', y='grade') # #### Questions: # # * What went wrong here? # * Can you fix it? (There are at least two sensible approaches.) 
# # # #look at what type grade is using info sales.info() #horizontal bargraph does the magic sns.barplot(data=sales, x='price', y='grade', orient='h') # + #make a copy of dataframe and fix the grade type with astype sales_fixed = sales_fixed['grade'] = sns.barplot(data=sales_fixed, x='price', y='grade') # - # ### Hue # Many Seaborn plotting functions take a hue parameter. This colours the plot elements by some categorical variable, but more than this, summary statistics are calculated for each level of the hue variable. # + #look at the head of trains # + # It appears that my hypothesis that more wheels make you faster is flawed #use lmplot x wheels and y top speed, use hue for engine type data is trains # - # #### Excerises # * Create an lmplot of house price against living area. Do this without a hue parameter, then add in waterfront as the hue parameter. What information is the hue giving in this case? # * Later on play around. Try adding the hue parameter to one of your previous plots of some other type - for instance, a box plot. # + active="" # sns.lmplot(data=sales, x=____, y=____, hue=____) # - # #### How to save figures that are 'real' seaborn objects g = sns.lmplot(data=sales, x='sqft_living', y='price', hue='waterfront') g.savefig('limplot.png') # ### Extra material below # #### Compound plots # # Seaborn has some plotting functions which create more complex figures made of multiple subplots. These include pairplot(), catplot(), jointplot(), lmplot() and clustermap(). 
Let's see a few examples: # jointplot shows a scatter or density plot, with marginal distributions sns.jointplot(data=sales, x='sqft_living', y='price') #, kind='reg') # + # pairplot shows pairwise relationships between variables # Note that a variable like engine_type would be ignored as it is not numeric sns.pairplot(data=trains[['wheels', 'top_speed_mph', 'weight_tons']]) # - # catplot conditions different subplots on different variable values # we map variables to row and column of a grid of plots (as well as to hue) # in this example, we just use columns, and so have only one row sns.catplot(data=trains, kind='bar', x='size', y='top_speed_mph', col='engine_type') # #### Exercise: # design a plot using sns.catplot, to show the relationship between house price and (at least): grade, waterfront, and view. Available channels of information are: # # * x and y coordinates # * hue # * row and column of subplot (row and col) # # You do not have to use all of these channels - in fact your plot may be difficult to take in if you do. # # You can set the kind parameter to the kind of plot you want to make: point, bar, count, box, violin, and strip. # # You can control the size of the overall figure with size and aspect. 
# One option sns.catplot(data=sales, y='price', x='grade', row='view', hue='waterfront', kind='violin', kwargs={'scale':'width'}, size=2, aspect=3) # #### Additional examples # * How to get a dataframe generated from a string within the notebook # + from io import StringIO data_string = """name number engine_type colour wheels top_speed_mph weight_tons Thomas 1 Tank Blue 6 40 52 Edward 2 Tender Blue 14 70 41 Henry 3 Tender Green 18 90 72.2 Gordon 4 Tender Blue 18 100 91.35 James 5 Tender Red 14 70 46 Percy 6 Tank Green 4 40 22.85 Toby 7 Tank Brown 6 20 27 Emily 12 Tender Green 8 85 45 Rosie 37 Tank Purple 6 65 37 Hiro 51 Tender Black 20 55 76.8""" trains = pd.read_table(StringIO(data_string)) trains['size'] = pd.cut(trains['weight_tons'], 3, labels=['Small','Medium','Big']) # - # * How to annotate a plot and use 'seaborn' graph objects with matplotlib # + #to show what you can do with matplotlib df = pd.DataFrame({ 'Time': [1,2,3,4,5], 'Projected': [2,5,10,17,26], 'Actual': [1,4,9,11,9] }) fig, ax = plt.subplots() sns.scatterplot(data=df, x='Time', y='Actual', color='red', ax=ax) sns.lineplot(data=df, x='Time', y='Projected', color='blue', ax=ax) ax.set_ylabel('Huge profits') ax.annotate("where it all went wrong", xy=(3,10), xytext=(1,12), arrowprops={'width':2})
05022019_ABC_python_day3_afternoon_session_1_wo_answers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python3 # --- # + """ Digitized Signal """ import math import numpy as np import matplotlib.pyplot as plt fs = 20 dt = 1 / fs N = 64 # - #%% Cell 1 k = np.arange(0,N) t = k * dt f_signal = 2 s = np.cos(2 * math.pi * f_signal * t) # + #%% Cell 2 plt.figure(1) plt.subplot(211) plt.plot(k,s,'k*-') plt.grid() plt.xlabel('Time index k') plt.ylabel('s(t)') plt.show() plt.subplot(212) plt.plot(t,s,'k*-') plt.grid() plt.xlabel('Time t in seconds') plt.ylabel('s(t)') plt.show()
sonstiges/DSP_Python_Matlab/08.11 Digitized Signal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:rmg_env] # language: python # name: conda-env-rmg_env-py # --- # + import os import cantera as ct import numpy as np import pandas as pd # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt # - def simulate(model, T, P, mol_frac, time): model.TPX = T, P, mol_frac reactor = ct.IdealGasReactor(model) simulation = ct.ReactorNet([reactor]) simulation.advance(time) conditions = pd.Series() conditions['time'] = simulation.time conditions['temperature0'] = T conditions['temperature'] = model.T conditions['pressure0'] = P conditions['pressure'] = model.P species = pd.Series() for key, value in model.mole_fraction_dict().iteritems(): species[key] = value return conditions, species # Convert chemkin to cantera from cantera import ck2cti directory = '/home/mjliu/Documents/Models/Naphthalene/acetylene/run7/' filename = 'chem_mod' chemkin_file = os.path.join(directory, filename + '.inp') cantera_file = os.path.join(directory, filename + '.cti') if os.path.exists(cantera_file): raise Exception('File already exists') ck2cti.Parser().convertMech(chemkin_file, outName=cantera_file) filename = '/home/mjliu/Documents/Models/Naphthalene/acetylene/run7/chem.cti' input_conditions = [ (900, 2.92E-06), (910, 3.88E-06), (920, 5.12E-06), (930, 6.72E-06), (940, 8.75E-06), (950, 1.13E-05), (960, 1.46E-05), (970, 1.87E-05), (980, 2.37E-05), (990, 3.00E-05), (1000, 3.76E-05), (1010, 4.69E-05), (1020, 5.81E-05), (1030, 7.14E-05), (1040, 8.72E-05), (1050, 1.06E-04), (1060, 1.27E-04), (1070, 1.51E-04), (1080, 1.78E-04), (1090, 2.07E-04), (1100, 2.39E-04), (1110, 2.73E-04), (1120, 3.07E-04), (1130, 3.40E-04), (1140, 3.73E-04), (1150, 4.03E-04), (1160, 4.29E-04), (1170, 4.50E-04), (1180, 4.68E-04), (1190, 4.80E-04), (1200, 4.89E-04), ] model = ct.Solution(filename) # + conditions_df = 
pd.DataFrame() species_df = pd.DataFrame() for T, X in input_conditions: #T = 1200 P = 2 * 101325 mol_frac = {'Ne':0.95, 'C2H2(2)':0.05-X, 'A2_rad1(6)':X} conditions, species = simulate(model, T, P, mol_frac, 0.002) conditions_df = conditions_df.append(conditions, ignore_index=True) species_df = species_df.append(species, ignore_index=True) print 'T = {0:4} Completed!'.format(T) data = pd.concat([conditions_df, species_df], axis=1) print 'Simulation Completed!' # - selected = [ 'A2_5(5)', 'A2(1)', 'C12H8(768)', ] selected2 = [ 'C4H4(2237)', 'C4H6(758)', 'A1(3)', 'C2H4(39)', ] # + plt.style.use('seaborn-poster') #plt.rcParams['axes.labelsize'] = 24 #plt.rcParams['xtick.labelsize'] = 20 #plt.rcParams['ytick.labelsize'] = 20 fig = plt.figure() colormap = mpl.cm.tab10 new_labels = ['Acenaphthylene-RMG', 'Naphthalene-RMG', '1-Ethynylnaphthalene-RMG'] for i, label in enumerate(selected): if i == 0: plt.plot(acenaphthylene_expt[0], acenaphthylene_expt[1], 'o', c=colormap(0), label='Acenaphthylene-Lifshitz-expt') plt.plot(acenaphthylene_model[0], acenaphthylene_model[1], '--', c=colormap(0), label='Acenaphthylene-Lifshitz-model') elif i == 1: plt.plot(naphthalene_expt[0], naphthalene_expt[1], 'o', c=colormap(1), label='Naphthalene-Lifshitz-expt') plt.plot(naphthalene_model[0], naphthalene_model[1], '--', c=colormap(1), label='Naphthalene-Lifshitz-model') elif i == 2: plt.plot(naphthylacetylene_model[0], naphthylacetylene_model[1], '--', c=colormap(2), label='1-Ethynylnaphthalene-Lifshitz-model') x = data['temperature0'] y = data[label]/0.0005*100 #y = [value if value > 1e-8 else np.nan for value in y] plt.plot(x, y, c=colormap(i), label=new_labels[i]) plt.yscale('log') plt.xlabel('Temperature (K)') plt.ylabel('Percent Yield') plt.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.) 
# + plt.style.use('seaborn-poster') #plt.rcParams['axes.labelsize'] = 24 #plt.rcParams['xtick.labelsize'] = 20 #plt.rcParams['ytick.labelsize'] = 20 fig = plt.figure() colormap = mpl.cm.tab10 new_labels = ['Vinylacetylene-RMG', '1,3-Butadiene-RMG', 'Benzene-RMG', 'Ethene-RMG'] for i, label in enumerate(selected2): x = data['temperature0'] y = data[label]/0.0005*100 #y = [value if value > 1e-8 else np.nan for value in y] plt.plot(x, y, c=colormap(i), label=new_labels[i]) plt.yscale('log') #plt.ylim([1e-2, 1e2]) plt.xlabel('Temperature (K)') plt.ylabel('Percent Yield') plt.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.) # - def read(string): lines = string.strip().splitlines() t = [] x = [] for line in lines: tokens = line.split(', ') t.append(float(tokens[0])) x.append(float(tokens[1])) return t, x # + acenaphthylene_expt = read(""" 915.4268970605044, 0.3415404042277774 921.5134921706965, 0.6715524451826531 928.7821198865349, 0.5607593310313395 934.1343086626282, 0.5360442632233882 943.6822144408309, 0.6229504549550771 934.500888394759, 0.780427904847085 929.9018090003221, 1.0699507430108044 937.5482382768259, 0.9925156358515191 943.6691223075404, 0.8540523704988493 943.6585239139245, 1.1025907776988502 944.7981629451066, 1.300745990643067 950.1403767625025, 1.5813252556028443 958.5467732047672, 1.6295653372450312 967.7256055111648, 1.3813178940615518 970.7742022630688, 1.7046987181440536 961.592876216997, 2.1356344447852025 976.1114286011159, 2.337107180252829 978.0197628869194, 2.4818742411244785 975.3358755623902, 3.0629067035391007 999.38550098191, 5.844145824906985 1009.7052191893268, 5.671141364589379 1014.6665142714644, 6.690346537955973 1013.5131596720732, 7.892721045083737 1021.5523529472887, 5.671141364589379 1023.4475950998016, 8.256625948468923 1037.5590444820814, 16.48032387326401 1045.9947423655199, 8.38161861795969 1047.5103126526117, 11.4910282067327 1053.9884248916783, 18.03505433766841 1058.9621886721875, 15.753965345666082 
1059.3350027535043, 19.736455876957965 1067.7625959830011, 12.20281515169512 1064.6784634407375, 23.28344873792214 1070.414688126682, 21.276275019923478 1065.8137384274894, 30.514232219840185 1073.4639083135048, 25.865704200651447 1073.4495693103772, 36.5431409271721 1078.4239565258051, 31.445102733871515 1083.0005922631726, 39.39420133510683 1086.0529296245884, 44.425731144189385 1087.2168826175955, 29.169339185186825 1092.5753057428747, 23.99373617818744 1101.7235897382614, 42.46769871051222 1118.911690443782, 53.20323127577195 1118.1398780145678, 63.71496304689414 1125.3997776415458, 65.65865869604717 1129.6110805166197, 54.82625488288746 1133.4246319136334, 66.65263021381031 1138.778067559564, 61.828806690373085 1146.0342265770307, 69.72574261950383 1170.8712502987294, 76.3035706426307 1197.2413004852401, 75.1656774240269 1204.884612587151, 75.1656774240269 """) acenaphthylene_model = read(""" 899.0473914443949, 0.0938139747458477 924.613210585925, 0.24174668198151125 949.4165688220197, 0.5954943576897983 974.9873754428986, 1.3607186973116348 999.0326368179882, 2.8842481744935187 1025.372762128407, 5.844145824906985 1049.4223875479265, 11.150858882940748 1074.6222503922445, 19.442131782914633 1099.4436882409784, 30.976170954430213 1124.6510323043194, 45.098268663080006 1150.2436591473488, 60.90677146640111 1174.6947766544404, 72.94054515547023 1199.9145894161531, 78.63129573545962 """) naphthylacetylene_model = read(""" 1049.7889672800575, 0.0016234557539947677 1074.9782317307593, 0.0036543140927172055 1099.7896946207957, 0.007404475327995067 1124.6061449901808, 0.013303944489966755 1150.1906671792688, 0.021843128718956658 1175.0133518978398, 0.033771326842741985 1199.8391537910038, 0.04843451725861638 """) naphthalene_expt = read(""" 943.2857142857142, 0.07765334766497867 967.2857142857142, 0.2581661590809044 971.5714285714286, 0.27501682740068584 976.7142857142858, 0.3812726894956135 978.4285714285714, 0.4757135674410709 1010.1428571428571, 0.9240065657365751 
1014, 1.1528821020645799 1022.5714285714286, 1.0937085364819383 1023.8571428571429, 1.4690888241257416 1037.5714285714287, 2.3604657956801818 1046.142857142857, 2.102105276036458 1060.2857142857142, 2.0800692950143547 1065.857142857143, 2.3604657956801818 1066.7142857142858, 3.13738275971436 1047.857142857143, 3.7926901907322508 1054.7142857142858, 3.997888371280274 1059.4285714285713, 5.3700350051598065 1068.4285714285716, 6.158482110660267 1071, 6.356289385961481 1072.2857142857142, 5.5425177652873545 1074, 4.489251258218608 1084.2857142857142, 4.732135897849963 1080, 6.988654854126644 1087.2857142857142, 7.062691746120908 1093.2857142857142, 7.4448087720823 1101.857142857143, 4.126298487843327 1118.5714285714287, 6.356289385961481 1119.4285714285713, 7.062691746120908 1126.7142857142858, 6.6299506764795675 1134.4285714285713, 7.603382956801465 1128.857142857143, 10.87960861640183 1137.857142857143, 10.87960861640183 1145.5714285714284, 6.70018750350959 1170.857142857143, 9.288880734072603 1197.857142857143, 10.765559690436053 1205.142857142857, 10.430536235688859 """) naphthalene_model = read(""" 900, 0.011407966521057087 925.2857142857142, 0.030397476305387174 950.1428571428571, 0.07603382956801465 975.4285714285714, 0.17853207274376864 1000.2857142857142, 0.39351897202119607 1025.142857142857, 0.822871096239495 1050.2142857142858, 1.615243441329967 1075.2857142857142, 2.9763514416313184 1100.142857142857, 5.041005397797906 1124.7857142857142, 7.806358870258297 1150.0714285714284, 10.652706318278561 1174.2857142857142, 13.014170014555933 1200, 15.242856516212672 """) # -
Analysis/ShockTube_Pyrolysis_Simulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="3j2TvMM-ZLOB" import numpy as np import pandas as pd import matplotlib.pyplot as plt # + [markdown] id="Y-ihd3ayFhyI" # ## Specifying the path of .csv files # + id="7cmoO3AeZQ-0" md1_hybrid_hbonds = '/content/drive/My Drive/iGEM2020/Dry Lab Stuff/Human-protein-study/PfEMP1-5MZA--Ananth/MD-run/MD-hybrid/MD1/md1_hybrid_hbonds.csv' md2_hybrid_hbonds = '/content/drive/My Drive/iGEM2020/Dry Lab Stuff/Human-protein-study/PfEMP1-5MZA--Ananth/MD-run/MD-hybrid/MD2/md2_hybrid_hbonds.csv' md3_hybrid_hbonds = '/content/drive/My Drive/iGEM2020/Dry Lab Stuff/Human-protein-study/PfEMP1-5MZA--Ananth/MD-run/MD-hybrid/MD3/md3_hybrid_hbonds.csv' md1_16ile_hbonds = '/content/drive/My Drive/iGEM2020/Dry Lab Stuff/Human-protein-study/PfEMP1-5MZA--Ananth/MD-run/MD-16ile/MD1/md1_16ile_hbonds.csv' md2_16ile_hbonds = '/content/drive/My Drive/iGEM2020/Dry Lab Stuff/Human-protein-study/PfEMP1-5MZA--Ananth/MD-run/MD-16ile/MD2/md2_16ile_hbonds.csv' md3_16ile_hbonds = '/content/drive/My Drive/iGEM2020/Dry Lab Stuff/Human-protein-study/PfEMP1-5MZA--Ananth/MD-run/MD-16ile/MD3/md3_16ile_hbonds.csv' # + [markdown] id="owsdc10FD2db" # # Auxillary functions # + id="uCN0myjXZ-1v" def preprocessing_df(path): df = pd.read_csv(path) df.drop('Unnamed: 0',axis=1,inplace=True) df['DA_pair'] = df['Donor']+' | '+df['Acceptor'] df['Timestep'] = df['Fname'].apply(lambda x : x.split('.')[0][3:]) df['Timestep'] = df['Timestep'].apply(lambda x : str(x).zfill(3)) df["Timestep"] = pd.to_numeric(df["Timestep"], downcast="float") df.sort_values('Timestep', inplace=True) df.drop('Fname',axis=1,inplace=True) cols = df.columns.tolist() cols = cols[-1:] + cols[:-1] df = df[cols] return df def get_run_hbond_data(df,DA_pairs): counter_1,counter_2,counter_3,counter_4,counter_5,counter_6 = ([] for i in 
range(len(DA_pairs))) tgroup = df.groupby('Timestep') for i,j in tgroup: for k in j['DA_pair'] : if k == DA_pairs[0]: counter_1.append(int(i)) elif k == DA_pairs[1]: counter_2.append(int(i)) elif k == DA_pairs[2]: counter_3.append(int(i)) elif k == DA_pairs[3]: counter_4.append(int(i)) elif k == DA_pairs[4]: counter_5.append(int(i)) elif k == DA_pairs[5]: counter_6.append(int(i)) else : continue run_hbond_data = np.array([np.array(counter_1),np.array(counter_2), np.array(counter_3),np.array(counter_4), np.array(counter_5),np.array(counter_6)]) return run_hbond_data # + [markdown] id="RCeuvEvukrLb" # # Dummy plotting # + id="tKhR7W6SkobD" run1 = np.array([np.random.rand(i+1) for i in range(7)]) run2 = np.array([np.random.rand(i+1) for i in range(7)]) run3 = np.array([np.random.rand(i+1) for i in range(7)]) # + id="4gOw0U9Ekyme" outputId="21d8d8b7-fa62-4053-fa26-b2ea909ee07d" colab={"base_uri": "https://localhost:8080/", "height": 282} fig = plt.figure(figsize=(8,4)) for i in range(7): plt.plot(run1[i]*100,(i+0.65)*np.ones(np.size(run1[i])),'|',markersize='8.5',mew=5,color='seagreen',alpha=0.8) plt.plot(run2[i]*100,(i+1)*np.ones(np.size(run1[i])),'|',markersize='8.5',mew=5,color='indigo',alpha=0.8) plt.plot(run3[i]*100,(i+1.35)*np.ones(np.size(run1[i])),'|',markersize='8.5',mew=5,color='darkorange',alpha=0.8) plt.axhline(i+0.5,alpha=0.5,linewidth=0.5) #plt.axhline(i+1+0.5,alpha=0.5,linewidth=0.5) plt.legend(['run1','run2','run3']); plt.plot() # + [markdown] id="ZWxOTvT8o1Z8" # # MD Hybrid # # # + [markdown] id="eMTGeUxilFh5" # ## DA MD Hybrid hbonds (>25%) # # 1. :356.A@ND2 | :16.B@O # 2. :16.B@N | :356.A@OD1 # 3. :13.B@NE | :393.A@OD2 # 4. :13.B@NH2 | :393.A@OD1 # 5. :13.B@NH2 | :393.A@OD2 # 6. 
:362.A@OH | :17.B@OD2 # + id="ixR_g4lOeurH" md_h1 = preprocessing_df(md1_hybrid_hbonds) md_h2 = preprocessing_df(md2_hybrid_hbonds) md_h3 = preprocessing_df(md3_hybrid_hbonds) mdh_da_pairs = [':356.A@ND2 | :16.B@O', ':16.B@N | :356.A@OD1', ':13.B@NE | :393.A@OD2', ':13.B@NH2 | :393.A@OD1', ':13.B@NH2 | :393.A@OD2',':362.A@OH | :17.B@OD2'] run1_data = get_run_hbond_data(md_h1,mdh_da_pairs) run2_data = get_run_hbond_data(md_h2,mdh_da_pairs) run3_data = get_run_hbond_data(md_h3,mdh_da_pairs) # + id="QT6riN6NpI-T" outputId="2e902c75-1abb-45eb-90af-b908ae1c7a2f" colab={"base_uri": "https://localhost:8080/", "height": 373} fig = plt.figure(figsize=(8,5)) for i in range(len(run1_data)): plt.plot(run1_data[i]*0.5,(i+0.65)*np.ones(np.size(run1_data[i])),'|',markersize='8.5',mew=4,color='seagreen',alpha=0.8) plt.plot(run2_data[i]*0.5,(i+1)*np.ones(np.size(run2_data[i])),'|',markersize='8.5',mew=4,color='indigo',alpha=0.8) plt.plot(run3_data[i]*0.5,(i+1.35)*np.ones(np.size(run3_data[i])),'|',markersize='8.5',mew=4,color='darkorange',alpha=0.8) plt.axhline(i+0.5,alpha=0.5,linewidth=0.5) plt.legend(['Run1','Run2','Run3'], bbox_to_anchor=(1.01, 1), loc='upper left') plt.xlabel('Time (ns)',fontsize=14) plt.ylabel('Hydrogen bonds Index',fontsize=14) plt.title('Hbond Profile for the Hybrid Inhibitor of 5MZA',fontsize=16) plt.plot() # + [markdown] id="VuaF1GfU11xK" # # MD 16ile # # + [markdown] id="qEFhkZuU3Vr3" # ## DA MD S16I Hbonds (>25%) # # 1. :356.A@ND2 | :16.B@O # 1. :16.B@N | :356.A@OD1 # 3. :394.A@OG1 | :13.B@O # 4. :10.B@N | :363.A@OD1 # 5. :363.A@N | :10.B@O # 6. 
:349.A@NZ | :18.B@OC2 # + id="UZVesapQ13MG" md_inh1 = preprocessing_df(md1_16ile_hbonds) md_inh2 = preprocessing_df(md2_16ile_hbonds) md_inh3 = preprocessing_df(md3_16ile_hbonds) # + id="3fn7UaOv3VeJ" md_inh_DA_pairs = [':356.A@ND2 | :16.B@O',':16.B@N | :356.A@OD1',':394.A@OG1 | :13.B@O', ':10.B@N | :363.A@OD1',':363.A@N | :10.B@O',':349.A@NZ | :18.B@OC2'] ile_run1_data = get_run_hbond_data(md_inh1,md_inh_DA_pairs) ile_run2_data = get_run_hbond_data(md_inh2,md_inh_DA_pairs) ile_run3_data = get_run_hbond_data(md_inh3,md_inh_DA_pairs) # + id="XnenoLGs6foI" outputId="5b6dc60d-17a0-4127-c95a-ecb50ed8f21e" colab={"base_uri": "https://localhost:8080/", "height": 373} fig = plt.figure(figsize=(8,5)) for i in range(len(ile_run1_data)): plt.plot(ile_run1_data[i]*0.5,(i+0.65)*np.ones(np.size(ile_run1_data[i])),'|',markersize='8.5',mew=4,color='seagreen',alpha=0.8) plt.plot(ile_run2_data[i]*0.5,(i+1)*np.ones(np.size(ile_run2_data[i])),'|',markersize='8.5',mew=4,color='indigo',alpha=0.8) plt.plot(ile_run3_data[i]*0.5,(i+1.35)*np.ones(np.size(ile_run3_data[i])),'|',markersize='8.5',mew=4,color='darkorange',alpha=0.8) plt.axhline(i+0.5,alpha=0.5,linewidth=0.5) plt.legend(['Run1','Run2','Run3'], bbox_to_anchor=(1.01, 1), loc='upper left') plt.xlabel('Time (ns)',fontsize=14) plt.ylabel('Hydrogen bonds Index',fontsize=14) plt.title('Hbond Profile for the Mutant Inhibitor of 5MZA',fontsize=16) plt.plot() # + id="ooiN6kE-C9ag"
MD-simulation/Hbond-analysis/hBond_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + code_folding=[10] import pprint import sys import json import datetime from IPython.display import display, Math, HTML, Markdown # appending the directory for modules # in the sys.path list sys.path.append('../modules/') # now we can import mod from pyUtils.metricsUtils import * display(HTML("<style>.container { width:310mm !important; }</style>")) display(HTML("<style>div.output_scroll { height: 70em; }</style>")) # # To Create WebPdf use: # jupyter nbconvert .\TFStackStateReport.ipynb --to webpdf --no-input --output-dir='../truccr_atlas/reports/environments/oci' --output TFStackStateReport-dev # def get_application_functions(appl_id,resourceMap): functionsList = [] for rType, resources in resourceMap.items(): if rType != "Functions": continue for resourceIndex in range(len(resources)): for k,v in resources[resourceIndex].items(): if v['Application ID'] == appl_id: # print(v['Application ID']) functionsList.append(resources[resourceIndex]) return functionsList # - resourceMap = {} resourceList = [] resourceMap = get_speedometer_resource_map('../../data/envResourceMap.json') TFResourceMap = get_speedometer_resource_map('../../data/TFResourceMap.json') # print(TFResourceMap['Environment-Name']) # pprint.pprint(baseResourceMap.keys()) for key, value in resourceMap.items(): # print(key) # if 'Vault' in key: # print('Value; ', value) resourceList.append(key) # if 'Monitors' in key: # pprint.pprint(value) # print(resourceList) speedResList = [match for match in resourceList if "speedometer" in match] speedResList =speedResList + [match for match in resourceList if "manual" in match] speedResList =speedResList + [match for match in resourceList if "-data-truccr-iad" in match] speedResList = speedResList + [ 'fn_app_invoke_logs', 
'dev_truccr_API_Gateway', 'dev_truccr_oci-ca-dumptruck', 'truccr-Log-Group', 'event_rule_ca_transmission_execute_logs'] # print('Speedometer', speedResList) # print() accountResList = [match for match in resourceList if "account" in match] accountResList = accountResList + [match for match in resourceList if "compartm" in match] accountResList =accountResList + [match for match in resourceList if "-data-truccr-iad" in match] accountResList = accountResList + [ 'fn_app_invoke_logs', 'dev_truccr_API_Gateway', 'truccr-Log-Group'] # print('Account' , accountResList) # print() serviceResList = [match for match in resourceList if "service" in match] serviceResList =serviceResList + [match for match in resourceList if "ized-data-truccr" in match] serviceResList = serviceResList + [ 'fn_app_invoke_logs', 'truccr-Log-Group'] # print('Service' , serviceResList) # + HTTP_MON_TEMPLATE = \ """\hspace {10mm} \\normalsize {\\textsf {Timed Event Name:} } \hspace {1em} \\color{black} {\\textit{EVENTNAME}} \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\small { \\texttt{EVENTID }} \\\[1pt] \small { \\textsf{Target: }} & \\small { \\texttt{TARGET} } \\\[1pt] \small { \\textsf{Path: } } & \\small {\\texttt{PATH} } \\\[1pt] \hline \\end{array} \\\[2pt] """ API_GTW_TEMPLATE = \ """ \hspace {10mm} \\normalsize {\\textbf {Gateway Name:} } \\hspace {3mm} {\\textit{GTWNAME} } \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\small { \\texttt{GTWID}} \\\[1pt] \small { \\textsf{Hostname: }} & \\small{ \\texttt {HOSTNAME }} \\\[1pt] \small { \\textsf{IP Address: } } & \\small { \\texttt{IPADDRESS } } \\\[1pt] \hline \\end{array} \\\[2pt] """ GTW_DEPL_TEMPLATE = \ """ \hspace {20mm} \\small {\\textsf {Gateway Deployment Name:} } \\hspace {3mm} \\color{black} {{\\textsf{DEPLNAME} }} \\\[1pt] \hspace {20mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\scriptsize { \\texttt{DEPLID}} \\\[1pt] \small { 
\\textsf{Endpoint: }} & \\scriptsize { \\texttt{DEPLEND}} \\\[1pt] \small { \\textsf{Function: }} & \\scriptsize { \\texttt{DEPLFUNC}} \\\[1pt] \small { \\textsf{Path: } } & \\scriptsize { \\texttt{DEPLPATH} } \\\[1pt] \hline \\end{array} """ FUNC_APPL_TEMPLATE = \ """ \hspace {10mm} \\normalsize {\\textbf {Application Name:} } \hspace {3mm} {\\textit{APPLNAME} } \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\small { \\texttt{APPLID}} \\\[1pt] \hline \\end{array} """ FUNCIONS_TEMPLATE = \ """ \hspace {20mm} \\small {\\textsf {Function Name:} } \hspace {3mm} \\color{green} {{\\textsf{FUNCNAME} }} \\Huge \\phantom{x} \\\[1pt] \hspace {20mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\scriptsize { \\texttt{FUNCID}} \\\[1pt] \small { \\textsf{Image: }} & \\scriptsize { \\texttt{IMAGE }} \\\[1pt] \small { \\textsf{Digest: }} & \\scriptsize { \\texttt{DIGEST }} \\\[1pt] \small { \\textsf{Configuration: }} & {\\scriptsize { \hspace {0mm} \\begin{array}{l l} FUNCTCONF \end{array} } } \\\[1pt] \hline \\end{array} \\\[3pt] """ OBJ_STORE_TEMPLATE = \ """ \hspace {10mm} \\normalsize {\\textbf {Bucket Name:} } \\hspace {3mm} {\\textit{OBJNAME} } \\Huge \\phantom{X} \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\small { \\texttt{OBJID}} \\\[1pt] \hline \\end{array} """ EVENT_RULE_TEMPLATE = \ """ \hspace {10mm} \\normalsize {\\textsf {Rule Name:} } \hspace {3mm} \\color{green} { {\\textit{EVENTNAME} }} \\Huge \\phantom{X} \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\small { \\texttt{EVENTID}} \\\[1pt] \small { \\textsf{Bucket Name: }} & \\small { \\texttt{EVENTBUCK}} \\\[1pt] \small { \\textsf{Filters: } } & \\scriptsize { \hspace {0mm} \\begin{array}{l l} EVENTFILTERS \end{array} } \\\[1pt] \small { \\textsf{Even Types: }} & {\\scriptsize { \hspace {0mm} \\begin{array}{l l} EVENTTYPES \end{array} } } \\\[1pt] \hline \\end{array} """ LOGGING_TEMPLATE = \ 
""" \hspace {10mm} \\normalsize {\\textbf {Log Name:} } \hspace {3mm} \\color{green} { {\\textit{LOGNAME} }} \\Huge \\phantom{X} \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\small { \\texttt{LOGID}} \\\[1pt] \hline \\end{array} """ KEY_VAULT_TEMPLATE = \ """ \hspace {10mm} \\normalsize {\\textbf {Key Vault Name:} } \hspace {3mm} \\color{green} { {\\textit{VAULTNAME} } } \\\[1pt] \hspace {10mm} \\begin{array}{|l|l|} \\hline \small { \\textsf{ID: }} & \\scriptsize { \\texttt{VAULTID}} \\\[1pt] \hline \\end{array} """ # + code_folding=[111] # # Report # UseFilter = False resourceFilter = speedResList displayList = {} for resourceType,resources in resourceMap.items(): displayType = 'HC Monitors' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = HTTP_MON_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('EVENTNAME',resource ) displayString = displayString.replace('EVENTID',resourceAttrs['ID'] ) displayString = displayString.replace('TARGET',resourceAttrs['Targets'] ) displayString = displayString.replace('PATH',resourceAttrs['Path'] ) if 'EVENTNAME' in displayString: continue displayList[displayType].append(displayString) displayType = 'API Gateways' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = API_GTW_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('GTWNAME',resource ) displayString = displayString.replace('GTWID',resourceAttrs['ID'] ) displayString = displayString.replace('HOSTNAME',resourceAttrs['hostname'] ) displayString = displayString.replace('IPADDRESS',resourceAttrs['IP Address'] ) if 'GTWNAME' in displayString: continue 
displayList[displayType].append(displayString) displayType = 'API Gateway Deployments' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = GTW_DEPL_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('DEPLNAME',resource ) displayString = displayString.replace('DEPLID',resourceAttrs['ID'] ) displayString = displayString.replace('DEPLEND',resourceAttrs['Endpoint'] ) displayString = displayString.replace('DEPLFUNC',resourceAttrs['Function'] ) displayString = displayString.replace('DEPLPATH',resourceAttrs['Path'] ) if 'DEPLNAME' in displayString: continue displayList[displayType].append(displayString) displayType = 'Function Applications' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = FUNC_APPL_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): displayString = displayString.replace('APPLNAME',resource ) displayString = displayString.replace('APPLID',resourceAttrs['ID'] ) displayList[displayType].append(displayString) # Add Functions funList = get_application_functions( resourceAttrs['ID'] , resourceMap) if len(funList) > 0 : for function in funList: displayString = FUNCIONS_TEMPLATE for funcName, funcAttrs in function.items(): displayString = displayString.replace('FUNCNAME',funcName ) displayString = displayString.replace('FUNCID',funcAttrs['ID'] ) displayString = displayString.replace('IMAGE',funcAttrs['Image'] ) displayString = displayString.replace('DIGEST',funcAttrs['Digest'] ) functString= ' ' for fType,fValue in funcAttrs['Configuration'].items() : functString = functString + '\\texttt{' + fType + '} & ' + \ ' \\texttt{' + fValue + '} \\\[1pt] ' displayString = displayString.replace('FUNCTCONF',functString ) displayList[displayType].append(displayString) displayType = 'Object 
Storage Buckets' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = OBJ_STORE_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('OBJNAME',resource ) displayString = displayString.replace('OBJID',resourceAttrs['ID'] ) if 'OBJNAME' in displayString: continue displayList[displayType].append(displayString) displayType = 'Event Rules' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = EVENT_RULE_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('EVENTNAME',resource ) displayString = displayString.replace('EVENTID',resourceAttrs['ID'] ) displayString = displayString.replace('EVENTBUCK',resourceAttrs['Bucket Name'] ) filterStr = ' ' filterStr = filterStr + '\\texttt{'+resourceAttrs['Filter'] +'} \\\[1pt] ' displayString = displayString.replace('EVENTFILTERS',filterStr) eventString= ' ' for eType in resourceAttrs['Event Types']: eventString = eventString + '\\texttt{'+eType+'} \\\[1pt] ' displayString = displayString.replace('EVENTTYPES',eventString ) if 'EVENTNAME' in displayString: continue displayList[displayType].append(displayString) displayType = 'Vault' if displayType in resourceType : displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = KEY_VAULT_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('VAULTNAME',resource ) displayString = displayString.replace('VAULTID',resourceAttrs['ID'] ) if 'VAULTNAME' in displayString: continue displayList[displayType].append(displayString) displayType = 'Logsxxx' if displayType in resourceType : 
displayList[displayType] = [] for resourceIndex in range(len(resources)): displayString = LOGGING_TEMPLATE for resource,resourceAttrs in resources[resourceIndex].items(): if UseFilter and resource not in resourceFilter: continue displayString = displayString.replace('LOGNAME',resource ) displayString = displayString.replace('LOGID',resourceAttrs['ID'] ) if 'LOGNAME' in displayString: continue displayList[displayType].append(displayString) # pprint.pprint(displayList) # print('--------------') # + code_folding=[] reportTitle = \ """\hspace {40mm} \\Large {\\textbf{TRUCCR Terraform Stack/State Report}} \\\[1pt] \hspace {80mm} \\large \\color{blue} {\\textbf{ENV } \\textit{environment}} \\\[1pt] \hspace {85mm} \\small {\\textsf{Date: TDATE } } \\\[20pt] """ td = datetime.datetime.today() today = td.astimezone(pytz.timezone('US/Eastern')) todayDate = today.strftime("%m/%d/%Y") rTitle = reportTitle.replace('ENV', TFResourceMap['Environment-Prefix'].replace('_','') ) display(Markdown( '$ ' + rTitle.replace('TDATE', todayDate ) + ' $' )) for k,v in displayList.items(): if 1==1 : #'Vault' in k: # print (v) if 'Deployment' not in k: display(Markdown('$ \hspace {5mm} \\large \\color{blue}{\\textbf{ ' + k + ' }} \\Huge {\\phantom{X}} $ ' )) for i in range(len(v)): display(Markdown ('$ ' + v[i] + ' $' )) # print( '$ ' + v[i] + ' $' ) # break # print(displayString) # -
notebooks/TFStackStateReport.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Importing Libraries

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn

from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

# %matplotlib inline

# # Importing Dataset

df = pd.read_csv("C:\\wine\\wine.csv", sep=";")
df

# # Exploratory Data Analysis

df.head()
df.tail()
df.sample()
df.info()
df.describe()

# # Getting the EDA Report in HTML format

import pandas_profiling
eda_report = pandas_profiling.ProfileReport(df)
eda_report.to_file("wine_eda.html")

# NOTE(review): pwd() relies on IPython shell integration; it is not plain Python.
pwd()

# # From EDA we learn there are no missing values, no dummy coding and no outliers, so no cleaning is required

# # Selecting the dependent and independent variables

# NOTE(fix): also drop the target ("quality") from the feature matrix —
# the original kept it in X, which leaks the target into the model.
X = df.drop(["fixed acidity", "volatile acidity", "free sulfur dioxide",
             "total sulfur dioxide", "quality"], axis=1)
X.head()

# NOTE(fix): the original re-assigned X = df.corr(), clobbering the feature
# matrix with the 12x12 correlation matrix; train_test_split below would then
# crash (12 rows of X vs thousands of rows of y). Use a separate variable.
corr_matrix = df.corr()
corr_matrix

plt.subplots(figsize=(20, 20))
sns.heatmap(corr_matrix, cmap='RdYlGn', annot=True)
plt.show()

y = df["quality"]
y.head()

# # Dividing the dataset into train and test model

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train.shape
X_test.shape
y_train.shape
y_test.shape

# # Since the features are not in the same range, we need to do data normalization

from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
sc.fit(X_train)
X_train_sc = sc.transform(X_train)
X_test_sc = sc.transform(X_test)
# NOTE(review): the scaled matrices are never used below — the model is fit on
# the raw X_train. Either fit on X_train_sc / predict on X_test_sc, or drop
# this cell. Left as-is to preserve the original results.

# # Importing the model and instantiating it

from sklearn.linear_model import LinearRegression

lm = LinearRegression()
lm

# # Fitting the Model

lm.fit(X_train, y_train)

# # Predicting the Model

predict_test = lm.predict(X_test)

# # finding coefficient of the parameter

print(lm.coef_)
df_m = pd.DataFrame({'features': X.columns, 'coeff': lm.coef_})
df_m
df_m.sort_values(by='coeff')
df_m

# # Plotting the barchart for feature importance

# NOTE(fix): pandas' plot keyword is lowercase `x`; the original `X='features'`
# raised a TypeError.
df_m.plot(x='features', y='coeff', kind='bar', figsize=(15, 10))
plt.show()

# # Checking the Model Performance

# RSquare Value for Test dataset
np.round(lm.score(X_test, y_test) * 100, 0)

# RSquare Value for Train dataset
np.round(lm.score(X_train, y_train) * 100, 0)

# # Checking the Mean Squared Error (MSE) and Mean Absolute Error (MAE)

# +
from sklearn import metrics

print("Mean Squared Error for Test data is ")
np.round(metrics.mean_squared_error(y_test, predict_test), 0)

print("Mean Absolute Error for Test data is ")
np.round(metrics.mean_absolute_error(y_test, predict_test), 0)
# -

# # Estimating the Prediction Error

# NOTE(fix): positional `axis` for pd.concat was deprecated and removed in
# pandas 2.0; pass axis=1 explicitly.
fdf = pd.concat([X_test, y_test], axis=1)
fdf['Predicted'] = np.round(predict_test, 1)
fdf['Predicted Error'] = fdf['quality'] - fdf['Predicted']
White-Wine-Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Current and near-term quantum computers suffer from imperfections, as we repeatedly pointed it out. This is why we cannot run long algorithms, that is, deep circuits on them. A new breed of algorithms started to appear since 2013 that focus on getting an advantage from imperfect quantum computers. The basic idea is extremely simple: run a short sequence of gates where some gates are parametrized. Then read out the result, make adjustments to the parameters on a classical computer, and repeat the calculation with the new parameters on the quantum hardware. This way we create an iterative loop between the quantum and the classical processing units, creating classical-quantum hybrid algorithms.
#
# <img src="../figures/hybrid_classical_quantum.svg" alt="Hybrid classical-quantum paradigm" style="width: 400px;"/>
#
# These algorithms are also called variational to reflect the variational approach to changing the parameters. One of the most important example of this approach is the quantum approximate optimization algorithm, which is the subject of this notebook.
#
# # Quantum approximate optimization algorithm
#
# The quantum approximate optimization algorithm (QAOA) is shallow-circuit variational algorithm for gate-model quantum computers that was inspired by quantum annealing. We discretize the adiabatic pathway in some $p$ steps, where $p$ influences precision. Each discrete time step $i$ has two parameters, $\beta_i, \gamma_i$. The classical variational algorithms does an optimization over these parameters based on the observed energy at the end of a run on the quantum hardware.
#
# More formally, we want to discretize the time-dependent $H(t)=(1-t)H_0 + tH_1$ under adiabatic conditions. We achieve this by Trotterizing the unitary. For instance, for time step $t_0$, we can split this unitary as $U(t_0) = U(H_0, \beta_0)U(H_1, \gamma_0)$. We can continue doing this for subsequent time steps, eventually splitting up the evolution to $p$ such chunks:
#
# $$
# U = U(H_0, \beta_0)U(H_1, \gamma_0)\ldots U(H_0, \beta_p)U(H_1, \gamma_p).
# $$
#
# At the end of optimizing the parameters, this discretized evolution will approximate the adiabatic pathway:
#
# <img src="../figures/qaoa_process.svg" alt="Quantum approximate optimization algorithm" style="width: 400px;"/>
#
# The Hamiltonian $H_0$ is often referred to as the driving or mixing Hamiltonian, and $H_1$ as the cost Hamiltonian. The simplest mixing Hamiltonian is $H_0 = -\sum_i \sigma^X_i$, the same as the initial Hamiltonian in quantum annealing. By alternating between the two Hamiltonian, the mixing Hamiltonian drives the state towards and equal superposition, whereas the cost Hamiltonian tries to seek its own ground state.
#
# Let us import the necessary packages first:

# NOTE(review): qiskit_aqua was deprecated and later removed from Qiskit;
# this notebook assumes an old (~0.x era) pinned environment — confirm before running.
import itertools
import numpy as np
from functools import partial, reduce
from qiskit import BasicAer, QuantumRegister, execute
from qiskit.quantum_info import Pauli
from qiskit_aqua import Operator, get_aer_backend
from qiskit_aqua.components.initial_states import Custom
from scipy.optimize import minimize
np.set_printoptions(precision=3, suppress=True)

# Now we can define our mixing Hamiltonian on some qubits. As in the notebook on classical and quantum many-body physics, we had to define, for instance, an `IZ` operator to express $\mathbb{I}\otimes\sigma_1^Z$, that is, the $\sigma_1^Z$ operator acting only on qubit 1. We can achieve the same effect the following way (this time using the Pauli-X operator):

def pauli_x(qubit, coeff):
    # Weighted single-qubit sigma^X term acting on `qubit`.
    # Relies on the module-level n_qubits defined in the next cell.
    eye = np.eye((n_qubits))
    return Operator([[coeff, Pauli(np.zeros(n_qubits), eye[qubit])]])

# The coefficient here means the strength of the transverse field at the given qubit. This operator will act trivially on all qubits, except the given one. Let's define the mixing Hamiltonian over two qubits:

# +
n_qubits = 2

# Mixing Hamiltonian: sum of sigma^X over all qubits (coefficient 1 each).
Hm = reduce(lambda x, y: x+y, [pauli_x(i, 1) for i in range(n_qubits)])
Hm.to_matrix()
# -

# As an example, we will minimize the Ising problem defined by the cost Hamiltonian $H_c=-\sigma^Z_1 \otimes \sigma^Z_2$. First let's create the functions defining the operators using the Pauli-Z matrix:

# +
def pauli_z(qubit, coeff):
    # Weighted single-qubit sigma^Z term acting on `qubit`.
    eye = np.eye((n_qubits))
    return Operator([[coeff, Pauli(eye[qubit], np.zeros(n_qubits))]])


def product_pauli_z(q1, q2, coeff):
    # Weighted two-qubit sigma^Z_q1 * sigma^Z_q2 term.
    eye = np.eye((n_qubits))
    return Operator([[coeff, Pauli(eye[q1], np.zeros(n_qubits)) * Pauli(eye[q2], np.zeros(n_qubits))]])
# -

# Then we define the cost Hamiltonian:

# Only J[0, 1] is non-zero, giving Hc = -sigma^Z_0 sigma^Z_1.
J = np.array([[0,1],[0,0]])
Hc = reduce(lambda x,y:x+y, [product_pauli_z(i,j, -J[i,j]) for i,j in itertools.product(range(n_qubits), repeat=2)])
Hc.to_matrix()

# We set $p=2$ and initialize the $\beta_i$ and $\gamma_i$ parameters:

n_iter = 10 # number of iterations of the optimization procedure
p = 2
beta = np.random.uniform(0, np.pi*2, p)
gamma = np.random.uniform(0, np.pi*2, p)

# The initial state is a uniform superposition of all the states $|q_1,...,q_n\rangle$

# Custom normalizes the state vector, so all-ones gives the uniform superposition.
init_state_vect = [1 for i in range(2**n_qubits)]
init_state = Custom(n_qubits, state_vector=init_state_vect)

# The initial circuit prepares the initial state

qr = QuantumRegister(n_qubits)
circuit_init = init_state.construct_circuit('circuit', qr)

# We define a function `evolve` that takes a Hamiltonian $H$ and an angle $t$ and returns a circuit component made of the unitary matrix $e^{j H t}$

def evolve(hamiltonian, angle, quantum_registers):
    # NOTE(review): positional args follow the old Operator.evolve signature
    # (state_in=None, evo_time=angle, mode='circuit', num_time_slices=1);
    # 3rd-order Suzuki expansion — confirm against the pinned qiskit_aqua version.
    return hamiltonian.evolve(None, angle, 'circuit', 1,
                              quantum_registers=quantum_registers,
                              expansion_mode='suzuki',
                              expansion_order=3)

# To create the circuit, we need to compose the different unitary matrice given by `evolve`.

def create_circuit(beta, gamma):
    # NOTE(review): beta is applied to Hc and gamma to Hm here, the opposite
    # of the beta<->mixing / gamma<->cost convention stated in the intro.
    # The optimizer tunes both vectors, so the result is unaffected, but the
    # naming is worth confirming/tidying.
    circuit_evolv = reduce(lambda x,y: x+y, [evolve(Hc, beta[i], qr) + evolve(Hm, gamma[i], qr)
                                             for i in range(p)])
    circuit = circuit_init + circuit_evolv
    return circuit

# We now create a function `evaluate_circuit` that takes a single vector `gamma_beta` (the concatenation of `gamma` and `beta`) and returns $\langle H_c \rangle = \langle \psi | H_c | \psi \rangle$ where $\psi$ is defined by the circuit created with the function above.

def evaluate_circuit(beta_gamma):
    # Split the flat parameter vector back into beta (first half) and gamma
    # (second half), build the ansatz, and return the real part of <Hc>.
    n = len(beta_gamma)//2
    circuit = create_circuit(beta_gamma[:n], beta_gamma[n:])
    return np.real(Hc.eval("matrix", circuit, get_aer_backend('statevector_simulator'))[0])

# Finally, we optimize the angles:

result = minimize(evaluate_circuit, np.concatenate([beta, gamma]), method='L-BFGS-B')
result

# # Analysis of the results
#
# We create a circuit using the optimal parameters found.

circuit = create_circuit(result['x'][:p], result['x'][p:])

# We use the `statevector_simulator` backend in order to display the state created by the circuit.

backend = BasicAer.get_backend('statevector_simulator')
job = execute(circuit, backend)
state = np.asarray(job.result().get_statevector(circuit))
print(np.absolute(state))
print(np.angle(state))

# We see that the state is approximately $e^{0.79j} \frac{1}{\sqrt{2}} \left( |00 \rangle + |11 \rangle \right)$. It corresponds to a uniform superposition of the two solutions of the classicial problem: $(\sigma_1=1$, $\sigma_2=1)$ and $(\sigma_1=-1$, $\sigma_2=-1)$

# Let's now try to evaluate the operators $\sigma^Z_1$ and $\sigma^Z_2$ independently:

Z0 = pauli_z(0, 1)
Z1 = pauli_z(1, 1)

print(Z0.eval("matrix", circuit, get_aer_backend('statevector_simulator'))[0])
print(Z1.eval("matrix", circuit, get_aer_backend('statevector_simulator'))[0])

# We see that both are approximatively equal to zero. It's expected given the state we found above and corresponds a typical quantum behavior where $\mathbb{E}[\sigma^Z_1 \sigma^Z_2] \neq \mathbb{E}[\sigma^Z_1] \mathbb{E}[\sigma^Z_2]$
qiskit_version/07_Variational_Circuits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# -

# ## Comparison between notifications of acquired and gestational syphilis and PMAQ variables
# In this notebook the counts of acquired and gestational syphilis cases and the PMAQ
# answers are brought together, grouped by the *IBGE code*. This is done so the data
# from the two databases can be merged.

# PMAQ answers: two-level row index (IBGE code, municipality) and two-level columns.
pmaq_df = pd.read_csv('../data/pmaq_ibge.csv', index_col=[0, 1], header=[0,1])
pmaq_df

adquirida_df = pd.read_csv('../data/adquirida.csv', sep=';', parse_dates=['dt_notific'])
adquirida_df.info()

# ### Adjusting the dates
# First we filter the diagnoses made during the third cycle. Then we create the new
# column holding the count per municipality.

adq_ciclo3_df = adquirida_df.query("'2016-08-01' < dt_notific < '2018-01-22'")
# NOTE(review): counts rows via the 'Unnamed: 0' export column — presumably one row
# per notification; confirm against the data dictionary.
adq_agrup_df = adq_ciclo3_df.groupby('id_municip')['Unnamed: 0'].count()

# +
pmaq_df['adquirida'] = adq_agrup_df
pmaq_df

# +
# Drop the outer index level so the index is the plain municipality code,
# matching the groupby result's index.
pmaq_df1 = pmaq_df.copy()
pmaq_df1.index = pmaq_df.index.droplevel()
pmaq_df1

# +
pmaq_df1['adquirida'] = adq_agrup_df
pmaq_df1
# -

# ### Correlation

pmaq_df1.corr()

# What interests us is the correlation of notifications with the PMAQ answers, which
# address questions about rapid testing and the administration of benzathine penicillin.

# Two equivalent views of the same row of the correlation matrix.
pmaq_df1.corr().loc['adquirida', :'adquirida']
pmaq_df1.corr().iloc[-1:, :-1]

# ## Pregnant women

gestante_df = pd.read_csv('../data/gestante.csv', sep=';', parse_dates=['dt_notific'])
gestante_df.info()

# Same third-cycle filter and per-municipality count as above, for gestational cases.
ges_ciclo3_df = gestante_df.query("'2016-08-01' < dt_notific < '2018-01-22'")
ges_agrup_df = ges_ciclo3_df.groupby('id_municip')['Unnamed: 0'].count()
ges_ciclo3_df.head()

# +
pmaq_df1['gestante'] = ges_agrup_df
pmaq_df1
# -

pmaq_df1.corr().iloc[-2:, :-2]

# +
# Heatmap of the correlations of the two case counts against every PMAQ variable.
cormat = pmaq_df1.corr().iloc[-2:, :-2]

f = plt.figure(figsize=(19, 15))
plt.matshow(cormat, fignum=f.number)
plt.xticks(range(cormat.shape[1]), cormat.columns, fontsize=14, rotation=90)
plt.yticks(range(2), ['adq', 'ges'], fontsize=14)
for (i, j), z in np.ndenumerate(cormat):
    plt.text(j, i, '{:0.2f}'.format(z), ha='center', va='center',
             bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
# -

# Acquired-syphilis notifications have a strong positive correlation (0.83) with
# answer 1 of variable **II.12.1.7**, i.e. with satisfactory access to and presence of
# the VDRL test in the municipalities. Thus, the more tests performed responsibly, the
# higher the number of notifications.
#
# A strong positive correlation is also seen between acquired-syphilis notifications
# and the indication that benzathine penicillin G is administered (var **II.15.13**,
# corr 0.84), that SUS users undergo the syphilis (VDRL) test during prenatal care
# (var **III.13.7.3**, corr 0.79), and that benzathine benzylpenicillin is available
# in sufficient quantity (var **I.15.9.3**, corr 0.74).

# ### NOTE
# Variable II.15.2 - Team receives the VDRL results of pregnant women in its territory
# in time for the necessary interventions
notebooks/5.2-compara-adquirida-gestante-pmaq-ibge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# Create day01..day25 folders one level up and seed each with the template files.
from pathlib import Path
import shutil

p = Path("..").resolve()
# Template files copied into every day's folder.
source = ['inputs.txt', 'puzzle.py', 'scratchwork.ipynb', 'tests.txt']

# f-string zero-padding instead of "day" + str(x).zfill(2)
dir_names = [f"day{x:02d}" for x in range(1, 26)]

for dir_name in dir_names:
    new_folder = p / dir_name
    try:
        # exist_ok=False so an already-populated day raises and is skipped
        # rather than having its files overwritten.
        new_folder.mkdir(parents=True, exist_ok=False)
        for file in source:
            # `file` is already a str; the original wrapped it in a redundant str().
            shutil.copy(file, new_folder / file)
    except FileExistsError:
        print(f"{dir_name} is already there")
# -
2021/day_template/populate_folders.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os logs = ["tb-logs/" + item for item in os.listdir("tb-logs") if ".csv" in item] filename = logs[0] filename with open(filename) as f: lines = [line.rstrip('\n') for line in f] steps = len(lines) - 1 first = lines[1].split(',')[0] last = lines[len(lines)-1].split(',')[0] len_secs = float(last) - float(first) len_secs per_step = len_secs / steps per_step def getLenAndSteps(lines): steps = len(lines) - 1 first = lines[1].split(',')[0] last = lines[steps].split(',')[0] len_secs = float(last) - float(first) return len_secs, steps for filename in logs: if "pretraining" in filename: #ignoring pretraining times continue with open(filename) as f: lines = [line.rstrip('\n') for line in f] time, steps = getLenAndSteps(lines) print(os.path.splitext(os.path.basename(filename))[0], "{:.2f}".format(time), steps, "{:.2f}".format(time/steps), sep="\t")
notebooks/RuntimesFromLogs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import ipywidgets as widgets from ipywidgets import interact, interact_manual import numpy as np from matplotlib import pyplot as plt import colormaps_from_palettable as cfp # ### First, need to get the dictionary containing the colormaps t = cfp.extract() # ### Now, we contruct some data for the plotting # Here you could also just load your own data... some_sin = np.sin(np.linspace(0, np.pi, 20)) some_cos = np.cos(np.linspace(0, np.pi, 20)) some_data = some_sin[:, None] * some_cos[None, :] # ## Display the colormaps # You can choose the different packages and colormaps by choosing other entries in the dropdown menues for `pack` and `cmap`, respectively. # + pack_widget = widgets.Dropdown(options = {l: w for (l, w) in t.items()}) cmap_widget = widgets.Dropdown() def update(*args): cmap_widget.options = {k: v for (k, v) in pack_widget.value.items()} pack_widget.observe(update) def contour_plot(pack, cmap): plt.pcolormesh(some_data, cmap=cmap) plt.colorbar() interact(contour_plot, pack=pack_widget, cmap=cmap_widget); # -
examples/example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 2: Conway's Game of Life
#
# ## ** DUE: Friday October 20, 2017 11:59 PM **
#
# In this assignment, we're going to be writing a "game" that simulates a self propagating population of organisms known as ["Conway's Game of Life"](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life).
#
# In 1970, mathematician John Conway wanted to find a set of simple rules that if applied to an environment would produce complex and self sustaining patterns.
#
# He modeled the world as an $N \times N$ grid where each cell in the grid can be either alive or dead (1 or 0).
#
# The goal was to start with some initial grid where some cells are alive and some are dead, and then have the grid updated repetitively using these simple rules:
#
# ### Rules:
# 1. Any living cell that has **fewer than** 2 living neighbours dies in the next generation (as if by underpopulation).
# 2. Any living cell that has **exactly** 2 or 3 living neighbours lives in the next generation.
# 3. Any living cell that has **more than** 3 living neighbours dies (as if by overpopulation).
# 4. Any dead cell that has **exactly** 3 living neighbours comes back to life in the next generation (as if by reproduction).
#
# So say we have an initial grid `G_initial` with some cells having value 0 (dead) and others having value 1 (alive). The goal is to come up with values for each entry in `G_next` by looking at each cell in `G_initial[i][j]` and seeing if it will be alive or dead in `G_next[i][j]`. Using the rules above we can always determine if a cell will be alive or dead in the next generation. Once we can do this, we can just repeat the procedure as many times as we want to simulate the evolution of our world.
# # Turns out that with these 4 simple rules Conway was able to simulate some very interesting patterns which have had a large impact on many fields such as biology, physics, computer science, mathematics, and philosophy. Conway was able to show that with a few simple rules and an initial state, complex patterns can emerge without any external intervention. # # Here is a video of some interesting patterns that can emerge if you have the right starting state: https://www.youtube.com/watch?v=C2vgICfQawE&t=15s # # You can also watch Conway himself talking about how he invented the game and how he is a bit tired of everyone associating him to only that https://www.youtube.com/watch?v=R9Plq-D1gEk # # And definitely read the [Wikipedia](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life) article # # # ** Requirements ** # # In this assignment we'll be working with the python package `numpy`, as well as two visualization packages so we can make cool animations. Since you are using anaconda this will be very easy to install. All you have to do is open your Terminal or Command Prompt and type in the following commands # # ``` # conda install numpy # conda install matplotlib # conda install -c conda-forge jsanimation # ``` # If this is not working, please Google it. If that doesn't work go to office hours and we will help you with the installation. Try this out as soon as possible. # # **NOTA BENE:** This is a very well studied problem and you will find many examples online of other people's implementations of the game of life. It's perfectly okay to look at other sources for inspiration but keep in mind that we do run plagiarism checks so make sure that the code comes from you in the end. # # Warmup (40 points) # # # But first, some warmup! # **Q1.** Write a function that **returns** the items of a given list in reverse order, then call the function and print its output. 
# (You may **NOT** use any built-in python functions other than `len()` or `range()`) **(5 points)**

# +
def my_reverse(given_list):
    """Reverse given_list in place (using only len() and range()) and return it.

    The list IS mutated — later code (trace with reverse=True) relies on the
    in-place reversal, so that behaviour is kept.
    """
    n = len(given_list)
    # Swap symmetric pairs up to the midpoint; the middle element of an
    # odd-length list stays put. This replaces the original's separate
    # even/odd branches and temp-variable swaps.
    for i in range(n // 2):
        given_list[i], given_list[n - 1 - i] = given_list[n - 1 - i], given_list[i]
    return given_list
# -

#call it and print its output. (do this for the rest of the warmup questions)
example_list = ["apples", "oranges", "pineapples", "strawberries", "blueberries", "peaches", "kiwis"]
print(my_reverse(example_list))

# **Q2.** Write a function called `mygrid(n, val)` that creates a 2D list with $N$ rows and $N$ columns where every index has the value $val$. We refer to this kind of list as a **matrix** or a **grid**. **(5 points)**
#
# Example:
# ```python
# >>> g = mygrid(3, 1)
# [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
# ```

def mygrid(n, val):
    """Return an n x n grid (list of lists) with every cell set to val.

    BUG FIX: the original appended the SAME row list n times, so all rows
    aliased one object and mutating one cell changed the whole column.
    Each row is now a fresh list.
    """
    grid = []
    for i in range(n):
        row = []
        for j in range(n):
            row.append(val)
        grid.append(row)
    return grid

print(mygrid(3,1))

# **Q3.** Write a function called `trace(matrix, reverse=False)` that takes as input a 2D list `matrix` and returns the sum of the elements on the diagonal. The diagonal elements of the matrix are those that lie along a diagonal line from the top left to the bottom right. In other words, $(i, j) \in M \vert i == j$.
# This is read as: all index pairs $(i, j)$ in the matrix such that both indices are equal to each other. **Note:** You may not use any numpy functions. Just simple loops and if statements.
#
# Additionally, the function takes an optional keyword argument `reverse`. If `reverse` is `True`, every row in the grid is reversed before the trace is taken. Otherwise the trace is taken with the grid as-is. **(10 points)**
#
# Example:
#
# ```python
# >>> M = [[1, 2, 3], [4, 5, 6], [7, 8 ,9]]
# >>> trace(M)
# 15
# ```

def trace(matrix, reverse=False):
    """Return the sum of the main diagonal of a square 2D list.

    If reverse is True, the trace is taken as if every row were reversed.
    Fixes vs. the original:
    * the caller's matrix is no longer mutated (the original reversed the
      rows in place via my_reverse before summing);
    * single O(n) pass instead of a double loop with an i == j test;
    * no shadowing of the built-in `sum`.
    """
    total = 0
    for i in range(len(matrix)):
        row = matrix[i]
        if reverse:
            row = row[::-1]  # reversed copy — the input row is untouched
        total += row[i]
    return total

matrix = [[1,2,3], [4,5,6], [7,8,9]]
print(trace(matrix, reverse=True)) #testing reverse=true
# # For example:
#
# ```python
# >>> encrypt("z", 1)
# 'a'
# >>> encrypt("z", 27)
# 'a'
# >>> encrypt("z", 26)
# 'z'
# >>> encrypt("z", 233)
# 'y'
# ```
#
# This means that for any `encryption_key` value, you will always shift the original to some number between 97 and 122.
#
# **Hint:** remember the modulo operator!!!
#
# **User interface: (5 points) **
# 1. Prompt the user for a string and an encryption key (you can ask them to enter it in one line separated by a space or on separate lines)
# 2. Try to convert the encryption key to integer and handle the ValueError in case the integer conversion files (if the user gives something that can't be converted to integer)
# 3. Print the result of encrypting the string.
# 4. Repeat 1., 2., and 3. until the user enters the string "@"

# +
#encrypt function
def encrypt(s, encrypt_key):
    """Caesar-encrypt s, shifting each letter right by encrypt_key places.

    The input is lowercased first; letters wrap from 'z' back to 'a'.
    Assumes s contains only letters, as the assignment specifies.

    BUG FIX: the original's `122 - ord(letter)` arithmetic failed to shift
    letters near the end of the alphabet (e.g. encrypt("y", 3) returned "y"
    instead of "b"). A plain modular shift handles every case, including the
    documented examples encrypt("z", 26) == "z" and encrypt("z", 233) == "y".
    """
    s = s.lower()
    encrypted = ""
    for letter in s:
        encrypted += chr((ord(letter) - ord('a') + encrypt_key) % 26 + ord('a'))
    return encrypted

print(encrypt("Carlos",3))
# -

#user interface
# Guarded with __name__ so importing this module doesn't block on input();
# running the notebook/script still behaves exactly as before.
if __name__ == "__main__":
    while True:
        s = input("Enter the string you would like to encrypt: ")
        if s == "@":
            break
        # Ask again until a valid integer key is provided.
        while True:
            try:
                encryption_key = int(input("Enter your encryption key: "))
            except ValueError:
                print("Invalid encryption key. Must be an integer.")
                continue
            else:
                break
        #at this point, all inputs should be valid
        print(encrypt(s.lower(), encryption_key))

# Since we're dealing with a grid of numbers (matrices), we're going to use python's [`numpy`](https://docs.scipy.org/doc/numpy-1.13.0/user/index.html) package which lets us handle matrices really efficiently.
# For our purposes you won't notice much of a difference from regular lists and nested lists. Let's import `numpy` and give it a nickname `np`.

import numpy as np

# The main `type` numpy uses is called a numpy array. We can think of it as a regular list but with some very nice functionality. Numpy arrays are highly optimized to be efficient for numerical operations and unlike lists, they hold objects of the same type. We can create a new numpy array by calling the numpy function `array()`.

myarray = np.array([1, 2, 3, 4])

# We can index arrays just like Python lists.

myarray[0:-1]

# We can make arrays of arrays to work with grids (aka matrices)

# NOTE(review): this rebinds the name `mygrid`, shadowing the mygrid()
# function defined in Q2 above — fine for this demo, but the function is no
# longer callable after this cell runs.
mygrid = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
print(mygrid)

# Numpy arrays have a very useful attribute called `shape`. Which gives us a tuple representing the dimensions of the array. For a 2D array the numbers in the tuple would represent `(rows, cols)`

print(mygrid.shape)

# The main functionality of numpy arrays that we will use for this assignment is that we can easily initialize arrays of any shape we want with some initial value. As you saw in the warmup, doing this with normal lists involves writing nested loops and it's not very fun. With numpy, we can use the `np.full()`, `np.zeros()`, `np.ones()` functions.

np.full((4, 3), 20)

np.zeros((4, 3))

np.ones((4, 3))

# ## Coding the Game of Life (60 points)
#
# Okay now that we're all warmed up and have all the tools we need, let's code the game of life. Before starting any coding project it's good to have an plan (aka algorithm). So let's write one out to keep in mind while we code our program.
#
# The main objective is to take a grid of cells (NxN grid with 0 for dead cells and 1 for living cells), let's call it $G_t$ and figure out what the grid will look like at the next time step $G_{t+1}$. Once we can to this, we just have to repeat this update for as many time steps as we want the simulation to run for.
# So what do we need to actually do this updating?
#
# 1. Start a grid full of 0 $G_{t+1}$
#
# 1. For every cell $(i, j)$ in $G_t$:
#
#     1. Get all the cell's neighbhours (up, down, left, right, and diagonal)
#     2. Count how many of those neighbours are alive.
#     3. Use the rules of the Game of Life to update the value of cell (i, j) in $G_{t+1}$
#
#
# ### Coding the Game of life: Get the neighbours
#
# **Q5.** Write a function called `get_neighbours(i, j, G)` which returns a list containing the **indices** of the neighbours to cell $(i, j)$ as tuples. $G$ is the grid. For this function you will only need to use the dimensions of the grid. **(20 points)**
#
# Example:
#
# ```python
# >>> get_neighbours(1, 1, 5)
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1), (2, 2)]
# ```
# The list should exclude index $(i, j)$.
#
# We count as neighbours any index that is adjacent or immediately diagonal to $(i, j)$. Imagine a 3x3 grid. The midle cell would have every other cell as neighbours.
#
# **Important:** in some cases, the neighbours of $(i, j)$ will fall outside of the grid. In this case, you should include the corresponding index on the opposite end of the grid.
#
# For example, if we want the neighbours of (2, 2) in a 3x3 grid, the neighbour at $(i, j+1)$ would be $(2, 0)$. The $(i+1, j+1)$ neighbour would then be $(0, 0)$, and the $(i+1, j)$ neighbour would be $(0, 2)$.

# +
def get_neighbours(i, j, G):
    """Return the indices of the 8 neighbours of cell (i, j) in grid G as tuples.

    Neighbours that would fall off the grid wrap around to the opposite edge
    (toroidal topology), as the assignment requires. The order matches the
    original: NW, N, NE, W, E, SW, S, SE.

    Fixes vs. the original: returns tuples (as the Q5 example specifies)
    instead of lists, and replaces eight copy-pasted blocks with one loop.
    """
    rows = len(G)
    cols = len(G[0])
    neighbours = []
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue  # skip (i, j) itself
            neighbours.append(((i + di) % rows, (j + dj) % cols))
    return neighbours
# -

grid = np.zeros((3,3))
print(get_neighbours(1,1,grid))

# **Q6.** Write a function called `count_live_neighbours(G, i, j)` that returns the number of live neighbours to a given index $(i, j)$. The function should call your `get_neighbours()` function in order to get the indices of the cell's neighbours. **(20 points)**
#
# Example:
#
# ```python
# >>> G = np.array([[0, 0, 1],[0, 1, 0], [1, 0, 0]])
# >>> count_live_neighbours(G, 1, 1)
# 2
#
# ```

def count_live_neighbours(G, i, j):
    """Return how many of the 8 (wrap-around) neighbours of (i, j) are alive (== 1)."""
    count = 0
    for (r, c) in get_neighbours(i, j, G):
        if G[r][c] == 1:
            count += 1
    return count

G = np.array([[0, 0, 1],[0, 1, 0], [1, 0, 0]])
print(count_live_neighbours(G, 1,1))

# **Q7.** Write a function called `life_step(G)` which takes a grid of cells and returns a new grid after the rules of the game of life have been applied.
# **(20 points)**
#
# Example:
#
# ```python
# >>> G_0 = np.array([[1, 0, 1],[0, 1, 0], [0, 0, 0]])
# >>> G_next = life_step(G_0)
# >>> print(G_next)
# [[ 1.  1.  1.]
#  [ 1.  1.  1.]
#  [ 1.  1.  1.]]
# >>> G_0 = np.array([[0, 0, 0],[0, 1, 0], [0, 0, 0]])
# >>> G_next = life_step(G_0)
# >>> print(G_next)
# [[ 0.  0.  0.]
#  [ 0.  0.  0.]
#  [ 0.  0.  0.]]
# >>> life_step(np.array([[0, 0, 0,0], [0, 1, 1, 1], [0, 0, 0,0], [0, 0, 0, 0]]))
# [[ 0.  0.  1.  0.]
#  [ 0.  0.  1.  0.]
#  [ 0.  0.  1.  0.]
#  [ 0.  0.  0.  0.]]
# ```

# rules of life
# 1. Any living cell that has **fewer than** 2 living neighbours dies in the next generation (as if by underpopulation).
# 2. Any living cell that has **exactly** 2 or 3 living neighbours lives in the next generation.
# 3. Any living cell that has **more than** 3 living neighbours dies (as if by overpopulation).
# 4. Any dead cell that has **exactly** 3 living neighbours comes back to life in the next generation (as if by reproduction).

def life_step(G):
    """Apply one step of the Game of Life rules to grid G.

    Returns a new numpy array of the same shape as G (0.0 = dead,
    1.0 = alive, matching the float output shown in the examples above).
    G itself is not modified.
    """
    rows, cols = np.array(G).shape
    # Start from an all-dead grid; only cells that live/are born get a 1,
    # so no explicit "set to 0" branches are needed.
    G_new = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            nn = count_live_neighbours(G, r, c)
            if G[r][c] == 1:
                # Rules 1-3: a live cell survives only with 2 or 3 live
                # neighbours; under- or over-population kills it.
                if nn == 2 or nn == 3:
                    G_new[r][c] = 1
            else:
                # Rule 4: a dead cell with exactly 3 live neighbours is born.
                if nn == 3:
                    G_new[r][c] = 1
    return G_new

G_0 = np.array([[0, 0, 0, 0], [0, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]])
print(G_0)
G_next = life_step(G_0)
print(np.array(G_next))

# ### Visualizing your simulation
#
# Ok now the hard part is done, all that's left is to visualize what we've coded! Since we have a function that can always give us the next step in the simulation, we just have to call it repeatedly to advance the simulation in time.
#
# The next cell contains a function called `life_animation()` that does just that. It has some more advanced code that is used for turning the numpy grids of 0 and 1 into a nice animation so don't worry about what's inside.
# All you need to know is how to call the function.
#
# `life_animation(X)` takes a numpy grid like the ones we were working on above and then it takes some **keyword** arguments which let you control how you want the simulation to go. The only one you will want to play with is `frames=10` which sets the number of steps you want the simulation to take. You can set this to whatever you want when calling the function.
#
# So just execute the cell to store the function and scroll down.

# +
# NOTE(review): JSAnimation is an old, unmaintained third-party package; the
# "used to work here but doesn't anymore" failures noted below are presumably
# caused by it no longer being compatible with current matplotlib/IPython —
# TODO confirm (modern matplotlib exposes anim.to_jshtml() as a replacement).
from JSAnimation.IPython_display import display_animation, anim_to_html
from matplotlib import animation
import matplotlib.pyplot as plt

def life_animation(X, dpi=10, frames=10, interval=300, mode='loop'):
    """Produce a Game of Life Animation (Borrowed from https://jakevdp.github.io)

    Parameters
    ----------
    X : array_like
        a two-dimensional numpy array showing the game board
    dpi : integer
        the number of dots per inch in the resulting animation.
        This controls the size of the game board on the screen
    frames : integer
        The number of frames to compute for the animation
    interval : float
        The time interval (in milliseconds) between frames
    mode : string
        The default mode of the animation.  Options are
        ['loop'|'once'|'reflect']
    """
    X = np.asarray(X)
    assert X.ndim == 2
    X = X.astype(bool)  # any nonzero cell counts as alive for display

    X_blank = np.zeros_like(X)
    figsize = (X.shape[1] * 1. / dpi, X.shape[0] * 1. / dpi)

    #fig = plt.figure(figsize=figsize, dpi=dpi)
    fig = plt.figure(dpi=dpi)
    fig.set_size_inches(40, 40)
    ax = fig.add_axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
    im = ax.imshow(X, cmap=plt.cm.binary, interpolation='nearest')
    im.set_clim(-0.05, 1)  # Make background gray

    # initialization function: plot the background of each frame
    def init():
        im.set_data(X_blank)
        return (im,)

    # animation function.  This is called sequentially.
    # The current board is stashed as an attribute on the function itself
    # and advanced one life_step() per frame.
    def animate(i):
        im.set_data(animate.X)
        animate.X = life_step(animate.X)
        return (im,)
    animate.X = X

    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=frames, interval=interval)

    #print anim_to_html(anim)
    return display_animation(anim, default_mode=mode)
# -

# The cell below defines a grid to start off the simulation and passes it to `life_animation()`. If your implementation was correct, you should be able to see how the group of cells keeps growing.

# Seed pattern placed in the middle of a 40x40 board.
unbounded = [[1, 1, 1, 0, 1],
             [1, 0, 0, 0, 0],
             [0, 0, 0, 1, 1],
             [0, 1, 1, 0, 1],
             [1, 0, 1, 0, 1]]
X = np.zeros((40, 40))
X[15:20, 18:23] = unbounded
life_animation(X, dpi=10, frames=100, interval=200, mode='once')

# I'm leaving a few other fun grids that you can check out. Feel free to play around and make your own!

# +
glider_gun =\
[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],
 [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0],
 [0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
 [0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
 [1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
 [1,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0],
 [0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],
 [0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
 [0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]

X = np.zeros((40, 40))
X[1:10,1:37] = glider_gun
life_animation(X, dpi=15, frames=180, interval=50, mode='once')
#life animation used to work here but doesn't anymore.
# -

# Symmetric seed: the pattern is mirrored across both axes and the diagonal.
X = np.zeros((17, 17))
X[2, 4:7] = 1
X[4:7, 7] = 1
X += X.T
X += X[:, ::-1]
X += X[::-1, :]
life_animation(X, frames=6)
#life animation used to work here but doesn't anymore.

# Feel free to define your own grids and see how they evolve.
# The simplest way is to use the `np.array()` function which takes a list and converts it to a numpy array.
#
# ```python
# >>> mygrid = [[1, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [0, 0, 0, 0]]
# >>> life_animation(np.array(mygrid), frames=20)
# ```

# ### Bonus: User-Friendly Interface (10 points)
#
# Finally, let's make a nice interface so users can enter a grid, the number of steps to run, and automatically visualize the simulation.
#
# Write a function called `play()` that takes no input and prompts the user to enter the grid row by row until they enter an `@` character.
#
# Feel free to be creative with this one.
#
# The function should do something like, check:
#
# 1. Each value taken as input is an integer
# 2. That the value is either 0 or 1
# 3. The user entered the same number of rows as columns.
# 4. Check that a valid number of steps was provided (you can give a default value to number of steps if not specified)
#
# For top marks, you should let the user re-enter a row or the whole grid if they did it incorrectly without crashing the program.

def play():
    """Interactively build a Game of Life grid and animate it.

    Prompts for one row per line until the user enters '@' or the grid
    becomes square (#rows == #columns).  Rows may be entered either as a
    run of digits ("0110") or space-separated ("0 1 1 0").  Invalid rows
    (non-integers, values other than 0/1, wrong length) are rejected with
    a message and re-prompted instead of crashing.  Finally asks for the
    number of steps and hands the grid to life_animation().
    """
    grid = []
    while True:
        raw = input("Enter your row: ")
        if raw == "@":
            break
        # Accept rows entered with or without spaces between the values.
        tokens = raw.split() if " " in raw else list(raw)
        try:
            row = [int(t) for t in tokens]
        except ValueError:
            print("All values must be integers. Try again.")
            continue
        if any(v not in (0, 1) for v in row):
            print("All values must be 0 or 1. Try again.")
            continue
        # All subsequent rows must match the length of the first row.
        if grid and len(row) != len(grid[0]):
            print("The number of columns in each row must be the same.")
            continue
        grid.append(row)
        # Once row count equals column count the grid is complete.
        if len(grid) == len(grid[0]):
            break
    if not grid:
        # User entered '@' before any valid row; nothing to animate.
        print("No grid entered.")
        return
    while True:
        steps = input("How many steps? ")
        try:
            steps = int(steps)
        except ValueError:
            print("Invalid number. Try again.")
        else:
            break
    life_animation(np.array(grid), frames=steps)
    #cannot get life_animation to "animate" even though I moved it out of the try block.

play()
hw02/A2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## First ML Algorithm: Decision Tree for Classification and Regression # + [markdown] slideshow={"slide_type": "slide"} # ## Let's take a look at the DT [video](https://www.youtube.com/watch?v=ZVR2Way4nwQ) # + [markdown] slideshow={"slide_type": "slide"} # **Decision Trees (DTs)** are a non-parametric supervised learning method used for classification and regression. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features. A tree can be seen as a piecewise constant approximation. # + [markdown] slideshow={"slide_type": "slide"} # For instance, in the example below, decision trees learn from data to approximate a sine curve with a set of if-then-else decision rules. The deeper the tree, the more complex the decision rules and the fitter the model. # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "slide"} # **Advantages of decision trees** # - Simple to understand and to interpret. Trees can be visualised. # # - Requires little data preparation. Other techniques often require data normalisation, dummy variables need to be created and blank values to be removed. Note however that this module does not support missing values. # # - The cost of using the tree (i.e., predicting data) is logarithmic in the number of data points used to train the tree. # # - Able to handle both numerical and categorical data. However scikit-learn implementation does not support categorical variables for now. Other techniques are usually specialised in analysing datasets that have only one type of variable. See algorithms for more information. # # - Able to handle multi-output problems. 
# # - Uses a white box model. If a given situation is observable in a model, the explanation for the condition is easily explained by boolean logic. By contrast, in a black box model (e.g., in an artificial neural network), results may be more difficult to interpret. # # - Possible to validate a model using statistical tests. That makes it possible to account for the reliability of the model. # # - Performs well even if its assumptions are somewhat violated by the true model from which the data were generated. # + [markdown] slideshow={"slide_type": "slide"} # **The disadvantages of decision trees** # - Decision-tree learners can create over-complex trees that do not generalise the data well. This is called overfitting. Mechanisms such as pruning, setting the minimum number of samples required at a leaf node or setting the maximum depth of the tree are necessary to avoid this problem. # # - Decision trees can be unstable because small variations in the data might result in a completely different tree being generated. This problem is mitigated by using decision trees within an ensemble. # # - Predictions of decision trees are neither smooth nor continuous, but piecewise constant approximations as seen in the above figure. Therefore, they are not good at extrapolation. # # - The problem of learning an optimal decision tree is known to be NP-complete under several aspects of optimality and even for simple concepts. Consequently, practical decision-tree learning algorithms are based on heuristic algorithms such as the greedy algorithm where locally optimal decisions are made at each node. Such algorithms cannot guarantee to return the globally optimal decision tree. This can be mitigated by training multiple trees in an ensemble learner, where the features and samples are randomly sampled with replacement. # # - There are concepts that are hard to learn because decision trees do not express them easily, such as XOR, parity or multiplexer problems. 
# # - Decision tree learners create biased trees if some classes dominate. It is therefore recommended to balance the dataset prior to fitting with the decision tree. # + [markdown] slideshow={"slide_type": "slide"} # Setup: # - pip install -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com scikit-learn==0.23.2 # - pip install -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com graphviz # # + [markdown] slideshow={"slide_type": "slide"} # Now how it works? # ## Classification # + [markdown] slideshow={"slide_type": "slide"} # - tree.DecisionTreeClassifier(*[, criterion, ...]) A decision tree classifier. # # - tree.DecisionTreeRegressor(*[, criterion, ...]) A decision tree regressor. # # - tree.ExtraTreeClassifier(*[, criterion, ...]) An extremely randomized tree classifier. # # - tree.ExtraTreeRegressor(*[, criterion, ...]) An extremely randomized tree regressor. # # - tree.export_graphviz(decision_tree[, ...]) Export a decision tree in DOT format. # # - tree.export_text(decision_tree, *[, ...]) Build a text report showing the rules of a decision tree. # # + [markdown] slideshow={"slide_type": "slide"} # [DecisionTreeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier) is a class capable of performing multi-class classification on a dataset. 
# # # + [markdown] slideshow={"slide_type": "slide"} # As with other classifiers, DecisionTreeClassifier takes as input two arrays: an array X, sparse or dense, of shape (n_samples, n_features) holding the training samples, and an array Y of integer values, shape (n_samples,), holding the class labels for the training samples: # + slideshow={"slide_type": "slide"} from sklearn import tree X = [[0, 0], [1, 1]] Y = [0, 1] clf = tree.DecisionTreeClassifier() clf = clf.fit(X, Y) # + [markdown] slideshow={"slide_type": "slide"} # After being fitted, the model can then be used to predict the class of samples: # + slideshow={"slide_type": "slide"} clf.predict([[2., 2.]]) # + [markdown] slideshow={"slide_type": "slide"} # In case that there are multiple classes with the same and highest probability, the classifier will predict the class with the lowest index amongst those classes. # # As an alternative to outputting a specific class, the probability of each class can be predicted, which is the fraction of training samples of the class in a leaf: # + [markdown] slideshow={"slide_type": "slide"} # DecisionTreeClassifier is capable of both binary (where the labels are [-1, 1]) classification and multiclass (where the labels are [0, …, K-1]) classification. # # # + [markdown] slideshow={"slide_type": "slide"} # As we have mentioned earlier, the dataset we are going to use here in this tutorial is the Iris Plants Dataset. Scikit learn Python comes with this dataset, so we don’t need to download it externally from any other source. We will import the dataset directly, but before we do that we need to import Scikit learn and Pandas using the following commands: # + slideshow={"slide_type": "slide"} import sklearn import pandas as pd #After importing sklearn, we can easily import the dataset from it from sklearn.datasets import load_iris # + [markdown] slideshow={"slide_type": "slide"} # We have successfully imported the Iris Plants Dataset from sklearn. 
We need to import Pandas now, because we are going to load the imported data into a Pandas DataFrame and use head() and tail() functions of Python Pandas to display the content of the DataFrame. Let’s see how to convert this dataset into a Pandas DataFrame. # + slideshow={"slide_type": "slide"} iriss = load_iris() df_iris = pd.DataFrame(iriss.data, columns=iriss.feature_names) # + [markdown] slideshow={"slide_type": "slide"} # ## Data Exploration # Now, we have a DataFrame named df_iris that contains the Iris Plants Dataset imported from Scikit-learn in a tabular form. We will be performing all operations of Machine Learning on this DataFrame. # # Let’s display the records from this DataFrame using the head() function: # + [markdown] slideshow={"slide_type": "slide"} # The **head()** function, when used with no arguments, displays the first five rows of the DataFrame. However, we can pass any integer argument to display the same number of rows from the DataFrame. The output of the above command would be: # + slideshow={"slide_type": "slide"} df_iris.head() # + [markdown] slideshow={"slide_type": "slide"} # Now, let’s see how to display the records from the DataFrame, using the tail() function.The tail() function, when used without any argument, displays the last five rows of the DataFrame. Similar to the head() function, we can pass any integer as an argument to display the same number of records from the end. The output of the above command would be: # + slideshow={"slide_type": "slide"} df_iris.tail() # + [markdown] slideshow={"slide_type": "slide"} # Since the tail() function displays the last records of the DataFrame, we can see that the index number of the last row is 149. When we use the head() function, on the other hand, the index number of the first row is 0, i.e., the total number of entries is 150 or a total of 150 records are present in the Iris Plants Dataset. 
# + [markdown] slideshow={"slide_type": "slide"} # Now, let’s see how we can check the data types of the fields present in the DataFrame. # + slideshow={"slide_type": "slide"} df_iris.dtypes # + [markdown] slideshow={"slide_type": "slide"} # ### Data Visualization # Having performed data exploration with our dataset, now let’s create some plots to visually represent the data in our dataset which will help us uncover more stories hidden in it. # # Python has many libraries that provide functions to perform data visualizations on datasets. We can use the .plot extension of Pandas to create a scatterplot of features or fields of our dataset against each other, and we also need to import python matplotlib which will provide an object-oriented API to embed plots into applications. # + slideshow={"slide_type": "slide"} from pandas.plotting import scatter_matrix import matplotlib.pyplot as plt scatter_matrix(df_iris,figsize=(10,10)) plt.show() # + [markdown] slideshow={"slide_type": "slide"} # We can also use Seaborn library to create pairplots of all features in the dataset against each other. # - To use Seaborn, we need to import Seaborn library, first. # - Let’s see how it is done and how to create a Seaborn pairplot. # + slideshow={"slide_type": "slide"} import seaborn as sns iris = sns.load_dataset('iris') # style used as a theme of graph # for example if we want black # graph with grid then write "darkgrid" sns.set_style("whitegrid") # sepal_length, petal_length are iris # feature data height used to define # Height of graph whereas hue store the # class of iris dataset. 
# Scatter of sepal_length vs petal_length, coloured by species.
sns.FacetGrid(iris, hue="species",
              height=6).map(plt.scatter,
                            'sepal_length',
                            'petal_length').add_legend()

# + slideshow={"slide_type": "slide"}
# Fit a decision tree on the full iris dataset (4 features, 3 classes).
from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
X, y = iris.data, iris.target
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y)

# + [markdown] slideshow={"slide_type": "slide"}
# Once trained, you can plot the tree with the plot_tree function:

# + [markdown] slideshow={"slide_type": "slide"}
# ## [Video](https://www.youtube.com/watch?v=7VeUPuFGJHk)
#

# + slideshow={"slide_type": "slide"}
import graphviz
import os
# Raw strings are required here: in a normal string literal '\b' is the
# backspace escape, so 'D:\Program Files\Graphviz\bin' would silently
# corrupt the path added to PATH on Windows.
os.environ["PATH"] += os.pathsep + r'D:\Program Files\Graphviz\bin'
# NOTE(review): PATH entries should be directories; appending dot.exe
# itself is presumably redundant once the bin directory is on PATH.
os.environ["PATH"] += os.pathsep + r'D:\Program Files\Graphviz\bin\dot.exe'
dot_data = tree.export_graphviz(clf, out_file=None)
graph = graphviz.Source(dot_data)
graph.render("iris")  # writes iris.pdf next to the notebook
graph

# + slideshow={"slide_type": "slide"}
# Richer rendering: feature/class names, coloured (filled) nodes.
dot_data = tree.export_graphviz(clf, out_file=None,
                                feature_names=iris.feature_names,
                                class_names=iris.target_names,
                                filled=True, rounded=True,
                                special_characters=True)
graph = graphviz.Source(dot_data)
graph

# + [markdown] slideshow={"slide_type": "slide"}
# [Additional resource](https://scikit-learn.org/0.20/auto_examples/tree/plot_iris.html#sphx-glr-auto-examples-tree-plot-iris-py)

# + [markdown] slideshow={"slide_type": "slide"}
# Alternatively, the tree can also be exported in textual format with the function export_text.
This method doesn’t require the installation of external libraries and is more compact: # + slideshow={"slide_type": "slide"} print(__doc__) # Import the necessary modules and libraries import numpy as np from sklearn.tree import DecisionTreeRegressor import matplotlib.pyplot as plt # Create a random dataset rng = np.random.RandomState(1) X = np.sort(5 * rng.rand(80, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(16)) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_1.fit(X, y) regr_2.fit(X, y) # Predict X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) # Plot the results plt.figure() plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data") plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2) plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Decision Tree Regression") plt.legend() plt.show() # + [markdown] slideshow={"slide_type": "slide"} # [Homework](https://scikit-learn.org/0.20/auto_examples/tree/plot_iris.html#sphx-glr-auto-examples-tree-plot-iris-py) # + [markdown] slideshow={"slide_type": "slide"} # ## Regression # + slideshow={"slide_type": "slide"} print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = 
rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] # Upper half of the faces X_train = train[:, :(n_pixels + 1) // 2] # Lower half of the faces y_train = train[:, n_pixels // 2:] X_test = test[:, :(n_pixels + 1) // 2] y_test = test[:, n_pixels // 2:] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
Classnotebook/Class5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/hiren14/fire-dection/blob/main/Copy_of_fire_control_dept.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="_sEdK5ns5NOE" outputId="85b5bc57-2fce-4cc1-dc88-b77973d2f11e" colab={"base_uri": "https://localhost:8080/"} # !git clone https://github.com/robmarkcole/fire-detection-from-images.git # + colab={"base_uri": "https://localhost:8080/"} id="3dvtgWGhBRyp" outputId="7e6b39d0-0ccf-4013-b8cd-05c90ec0f5fb" from google.colab import drive drive.mount('/content/drive') # + id="8XmhBFw4K23o" outputId="62aabde5-a192-4add-a308-b543fa77e4a3" colab={"base_uri": "https://localhost:8080/"} # %cd "/content/fire-detection-from-images" # + id="1131er_FK3hQ" outputId="933eed7b-d4ca-4943-a614-13ccdd417d31" colab={"base_uri": "https://localhost:8080/"} # !git clone https://github.com/ultralytics/yolov5.git # + id="r8KERmc86YjV" colab={"base_uri": "https://localhost:8080/"} outputId="2ef4174f-93ad-4731-e0f0-d33018590488" # !pip install gradio # + colab={"base_uri": "https://localhost:8080/"} id="a_ytB0CN2-VV" outputId="a0afef8b-48c0-47d8-96f5-d6a2e179639f" # !python demo.py
Copy_of_fire_control_dept.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # # Enter State Farm from theano.sandbox import cuda cuda.use('gpu1') # %matplotlib inline from __future__ import print_function, division #path = "data/state/" path = "data/state/sample/" import utils; reload(utils) from utils import * from IPython.display import FileLink batch_size=64 # + [markdown] heading_collapsed=true # ## Create sample # + [markdown] hidden=true # The following assumes you've already created your validation set - remember that the training and validation set should contain *different drivers*, as mentioned on the Kaggle competition page. # + hidden=true # %cd data/state # + hidden=true # %cd train # + hidden=true # %mkdir ../sample # %mkdir ../sample/train # %mkdir ../sample/valid # + hidden=true for d in glob('c?'): os.mkdir('../sample/train/'+d) os.mkdir('../sample/valid/'+d) # + hidden=true from shutil import copyfile # + hidden=true g = glob('c?/*.jpg') shuf = np.random.permutation(g) for i in range(1500): copyfile(shuf[i], '../sample/train/' + shuf[i]) # + hidden=true # %cd ../valid # + hidden=true g = glob('c?/*.jpg') shuf = np.random.permutation(g) for i in range(1000): copyfile(shuf[i], '../sample/valid/' + shuf[i]) # + hidden=true # %cd ../../.. # + hidden=true # %mkdir data/state/results # + hidden=true # %mkdir data/state/sample/test # - # ## Create batches batches = get_batches(path+'train', batch_size=batch_size) val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False) (val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames, test_filename) = get_classes(path) # ## Basic models # ### Linear model # First, we try the simplest model and use default parameters. 
Note the trick of making the first layer a batchnorm layer - that way we don't have to worry about normalizing the input ourselves. model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Flatten(), Dense(10, activation='softmax') ]) # As you can see below, this training is going nowhere... model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # Let's first check the number of parameters to see that there's enough parameters to find some useful relationships: model.summary() # Over 1.5 million parameters - that should be enough. Incidentally, it's worth checking you understand why this is the number of parameters in this layer: 10*3*224*224 # Since we have a simple model with no regularization and plenty of parameters, it seems most likely that our learning rate is too high. Perhaps it is jumping to a solution where it predicts one or two classes with high confidence, so that it can give a zero prediction to as many classes as possible - that's the best approach for a model that is no better than random, and there is likely to be where we would end up with a high learning rate. So let's check: np.round(model.predict_generator(batches, batches.N)[:10],2) # Our hypothesis was correct. It's nearly always predicting class 1 or 6, with very high confidence. So let's try a lower learning rate: model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Flatten(), Dense(10, activation='softmax') ]) model.compile(Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # Great - we found our way out of that hole... Now we can increase the learning rate and see where we can get to. 
model.optimizer.lr=0.001 model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # We're stabilizing at validation accuracy of 0.39. Not great, but a lot better than random. Before moving on, let's check that our validation set on the sample is large enough that it gives consistent results: rnd_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=True) val_res = [model.evaluate_generator(rnd_batches, rnd_batches.nb_sample) for i in range(10)] np.round(val_res, 2) # Yup, pretty consistent - if we see improvements of 3% or more, it's probably not random, based on the above samples. # + [markdown] heading_collapsed=true # ### L2 regularization # + [markdown] hidden=true # The previous model is over-fitting a lot, but we can't use dropout since we only have one layer. We can try to decrease overfitting in our model by adding [l2 regularization](http://www.kdnuggets.com/2015/04/preventing-overfitting-neural-networks.html/2) (i.e. add the sum of squares of the weights to our loss function): # + hidden=true model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Flatten(), Dense(10, activation='softmax', W_regularizer=l2(0.01)) ]) model.compile(Adam(lr=10e-5), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # + hidden=true model.optimizer.lr=0.001 # + hidden=true model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # + [markdown] hidden=true # Looks like we can get a bit over 50% accuracy this way. This will be a good benchmark for our future models - if we can't beat 50%, then we're not even beating a linear model trained on a sample, so we'll know that's not a good approach. # - # ### Single hidden layer # The next simplest model is to add a single hidden layer. 
# + model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Flatten(), Dense(100, activation='relu'), BatchNormalization(), Dense(10, activation='softmax') ]) model.compile(Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) model.optimizer.lr = 0.01 model.fit_generator(batches, batches.nb_sample, nb_epoch=5, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # - # Not looking very encouraging... which isn't surprising since we know that CNNs are a much better choice for computer vision problems. So we'll try one. # ### Single conv layer # 2 conv layers with max pooling followed by a simple dense network is a good simple CNN to start with: def conv1(batches): model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Convolution2D(32,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D((3,3)), Convolution2D(64,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D((3,3)), Flatten(), Dense(200, activation='relu'), BatchNormalization(), Dense(10, activation='softmax') ]) model.compile(Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) model.optimizer.lr = 0.001 model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) return model conv1(batches) # The training set here is very rapidly reaching a very high accuracy. So if we could regularize this, perhaps we could get a reasonable result. # # So, what kind of regularization should we try first? As we discussed in lesson 3, we should start with data augmentation. # ## Data augmentation # To find the best data augmentation parameters, we can try each type of data augmentation, one at a time. 
For each type, we can try four very different levels of augmentation, and see which is the best. In the steps below we've only kept the single best result we found. We're using the CNN we defined above, since we have already observed it can model the data quickly and accurately. # Width shift: move the image left and right - gen_t = image.ImageDataGenerator(width_shift_range=0.1) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) # Height shift: move the image up and down - gen_t = image.ImageDataGenerator(height_shift_range=0.05) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) # Random shear angles (max in radians) - gen_t = image.ImageDataGenerator(shear_range=0.1) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) # Rotation: max in degrees - gen_t = image.ImageDataGenerator(rotation_range=15) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) # Channel shift: randomly changing the R,G,B colors - gen_t = image.ImageDataGenerator(channel_shift_range=20) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) # And finally, putting it all together! gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05, shear_range=0.1, channel_shift_range=20, width_shift_range=0.1) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) # At first glance, this isn't looking encouraging, since the validation set is poor and getting worse. But the training set is getting better, and still has a long way to go in accuracy - so we should try annealing our learning rate and running more epochs, before we make a decisions. model.optimizer.lr = 0.0001 model.fit_generator(batches, batches.nb_sample, nb_epoch=5, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # Lucky we tried that - we starting to make progress! Let's keep going. 
model.fit_generator(batches, batches.nb_sample, nb_epoch=25, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) # Amazingly, using nothing but a small sample, a simple (not pre-trained) model with no dropout, and data augmentation, we're getting results that would get us into the top 50% of the competition! This looks like a great foundation for our futher experiments. # # To go further, we'll need to use the whole dataset, since dropout and data volumes are very related, so we can't tweak dropout without using all the data.
deeplearning1/nbs/statefarm-sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 4兆通りの足し算を同時に行う # 量子状態生成の感覚がつかめてきたところで、量子計算をするとはどういうことかに話を移して行きましょう。 # # ```{contents} 目次 # --- # local: true # --- # ``` # # $\newcommand{\ket}[1]{|#1\rangle}$ # ## 量子計算の特徴 # # 量子計算をするとはどういうことかを一概に決めることはできませんし、それ自体がまだ世界中で盛んに研究されているトピックです。ただ、現在知られている量子コンピュータの実用法に共通する大まかな特徴を挙げるならば、以下の2点になります。 # # - **並列性を利用する**:Equal superposition状態で見たように、$n$量子ビットがあるとき、比較的簡単な操作で$2^n$個の計算基底が顕に登場する状態が作れます。また、この状態には全ての量子ビットが関与するため、どのビットに対するゲート操作も全ての計算基底に影響を及ぼします。つまり、各ゲート操作が常に$2^n$重の並列演算となります。 # - **干渉を利用する**:量子振幅は複素数なので、二つの振幅が足し合わされるとき、それぞれの位相によって和の振幅の値が変わります。特に絶対値が等しく位相が逆である($\pi$だけ異なる)場合和が0となるので、そのことを利用して回路の量子状態の重ね合わせから特定の計算基底を除くといったことが可能です。 # # この2つの要素のうち、特に干渉を上手に使ったアルゴリズムを見出すのが難しいため、量子コンピュータの応用可能性にまだ未知数な部分が大きい、というのが現状です。今回の実習でも、干渉を利用する部分もありますが、主に並列性ということに着目します。 # # ## 巨大SIMDマシンとしての量子コンピュータ # # SIMD (single instruction multiple data)とは並列計算パラダイムの一つで、プロセッサの命令(instruction)が多数のデータに同時に適用されるケースを指します。私達の身の回りの(古典)コンピュータのプロセッサにもSIMD用のインストラクションセットが搭載されており、例えば(2021年2月現在)最先端の商用CPUでは、16個の単精度浮動小数点数に対し同時に四則演算や平方根の計算を行えます。 # # 量子コンピュータでは、第一回の実習で触れられたように、すべてのゲートがすべての計算基底に作用します。ゲート操作を命令、各計算基底の振幅をデータとして解釈すれば、これは常に$2^n$個のデータに命令を与えながら計算をしていることにあたります。量子コンピュータは巨大SIMDマシンとも考えられるのです。 # # ただし、これもすでに触れられたことですが、巨大並列計算ができたとしても、そのデータをすべて引き出すことはできません[^and_you_dont_want_to]ので、古典計算機のSIMDとはいろいろな意味で単純に比較できるものではありません。 # # とはいえ、並列計算を行っているんだということを実感できると、より量子コンピュータを使う感覚が身に付いてくると思われるので、今回は最も単純に「足し算」をたくさん並列に行う回路を書いてみましょう。 # # [^and_you_dont_want_to]: そもそも、例えば65量子ビットの計算機からすべてのデータを保存しようと思うと、各振幅を128(古典)ビットの浮動小数点複素数で表現したとすれば512EiB (エクサバイト)のストレージが必要です。これはだいたい現在インターネットを行き来する情報二ヶ月分に相当するので、保存するファシリティを作るにはそれなりの投資が必要です。 # ## 量子フーリエ変換による足し算 # # これまで量子「計算機」の話をしていながら、単純であるはずの四則演算のやりかたについて触れていませんでした。理由は、実は量子コンピュータでは四則演算がそんなに単純でないから、です。 # # 
足し算を行う量子サブルーチンはいくつか知られていますが、その中で量子ビットの数や用いるゲートの種類の面で効率的なのが、フーリエ変換を用いたものです{cite}`quantum_addition`。ただの足し算にフーリエ変換を持ち出すのは奇妙に思えますが、実際に動かしてみるとなかなかスマートな手法であることがわかります。 # # まずは計算の流れを数式で追ってみましょう。整数$a$と$b$の足し算を考えます。まず、2つのレジスタをそれぞれ状態$\ket{a}$と$\ket{b}$に用意します。以下のように、第3のレジスタ(初期状態$\ket{0}$)の状態が和$a+b$を表すようにすることが目標です。それぞれのレジスタは十分に大きい(レジスタ$i$のビット数を$n_i$として$2^{n_1} > a$, $2^{n_2} > b$, $2^{n_3} > a + b$)とします。 # # $$ # \ket{a}\ket{b}\ket{0} \rightarrow \ket{a}\ket{b}\ket{a+b} # $$ # # 量子フーリエ変換は、ビット数$n$のレジスタの計算基底$\ket{j}$を # # $$ # \ket{j} \xrightarrow{\text{Q.F.T.}} \frac{1}{\sqrt{2^n}}\sum_{k=0}^{2^n-1} e^{2\pi i jk/2^n} \ket{k} # $$ # # という状態に変える操作でした。では、その逆を考えると、整数$a+b < 2^n$について # # $$ # \frac{1}{\sqrt{2^n}}\sum_{k=0}^{2^n-1} e^{2\pi i (a+b)k/2^n} \ket{k} \xrightarrow{\text{Q.F.T.}^{-1}} \ket{a+b} # $$ # # ができることがわかります。すべての量子ゲートには逆操作が存在するので、すべての量子サブルーチンは逆回しできます。 # # 左辺の状態を作るには、これも量子フーリエ変換のアルゴリズムを参考にします。整数$a, b, k$の二進分解 # # $$ # a = \sum_{m=0}^{n_1-1} 2^m a_m \\ # b = \sum_{m=0}^{n_2-1} 2^m b_m \\ # k = \sum_{m=0}^{n_3-1} 2^m k_m # $$ # # を用いて、 # # $$ # \exp\left(2\pi i \frac{(a+b)k}{2^{n_3}}\right) = \left[\prod_{l=0}^{n_1-1}\prod_{m=0}^{n_3-1} \exp\left(2\pi i \frac{2^{l+m} a_l k_m}{2^{n_3}}\right)\right]\left[\prod_{l=0}^{n_2-1}\prod_{m=0}^{n_3-1} \exp\left(2\pi i \frac{2^{l+m} b_l k_m}{2^{n_3}}\right)\right] # $$ # # と書けることを利用します。つまり、レジスタ1または2の各ビットとレジスタ3の各ビットを一つずつ組み合わせて、両方のビットが1である($a_l = k_m = 1$または$b_l = k_m = 1$の)ときに対応する分($2\pi 2^{l + m} / 2^{n_3}$)位相を進めれば、左辺の状態ができあがります。 # # 具体的には、まずレジスタ3をequal superpositionに用意し、レジスタ1の各ビットを制御、レジスタ3の各ビットを標的とした$C[P]$ゲートをかけていきます。 # # $$ # \begin{align} # \ket{a}\ket{b}\ket{0} & \xrightarrow{H^{\otimes n_3}} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \ket{k} \\ # & \xrightarrow{C^{1;0}_{3;0}[P(2\pi \cdot 2^0 \cdot 2^0/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{a_0 k_0}{2^{n_3}} \right) \ket{k} \\ # & \xrightarrow{C^{1;0}_{3;1}[P(2\pi \cdot 2^0 \cdot 2^1/2^{n_3})]} 
\ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{a_0 (k_0 + 2k_1)}{2^{n_3}} \right) \ket{k} \\ # \cdots & \\ # & \xrightarrow{C^{1;0}_{3;n_3 - 1}[P(2\pi \cdot 2^0 \cdot 2^{n_3 - 1}/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{a_0 k}{2^{n_3}} \right) \ket{k} \\ # & \xrightarrow{C^{1;1}_{3;0}[P(2\pi \cdot 2^1 \cdot 2^0/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{a_0 k + 2a_1 k_0}{2^{n_3}} \right) \ket{k} \\ # \cdots & \\ # & \xrightarrow{C^{1;n_1 - 1}_{3;n_3 - 1}[P(2\pi \cdot 2^{n_1-1} \cdot 2^{n_3 - 1}/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{a k}{2^{n_3}} \right) \ket{k} # \end{align} # $$ # # 続いてレジスタ2のビットを制御として、同様の$C[P]$ゲートをかけていくと、 # # $$ # \begin{align} # & \xrightarrow{C^{2;0}_{3;0}[P(2\pi \cdot 2^0 \cdot 2^0/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{ak + b_0 k_0}{2^{n_3}} \right) \ket{k} \\ # & \xrightarrow{C^{2;0}_{3;1}[P(2\pi \cdot 2^0 \cdot 2^1/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{ak + b_0 (k_0 + 2k_1)}{2^{n_3}} \right) \ket{k} \\ # \cdots & \\ # & \xrightarrow{C^{2;0}_{3;n_3 - 1}[P(2\pi \cdot 2^0 \cdot 2^{n_3 - 1}/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{(a + b_0) k}{2^{n_3}} \right) \ket{k} \\ # & \xrightarrow{C^{2;1}_{3;0}[P(2\pi \cdot 2^1 \cdot 2^0/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{(a + b_0) k + 2b_1 k_0}{2^{n_3}} \right) \ket{k} \\ # \cdots & \\ # & \xrightarrow{C^{2;n_2 - 1}_{3;n_3 - 1}[P(2\pi \cdot 2^{n_2 - 1} \cdot 2^{n_3 - 1}/2^{n_3})]} \ket{a}\ket{b} \frac{1}{\sqrt{2^{n_3}}} \sum_{k=0}^{2^{n_3}-1} \exp \left( 2\pi i \frac{(a + b) k}{2^{n_3}} \right) \ket{k} # \end{align} # $$ # # 
となり、めでたく$\ket{a+b}$のフーリエ変換状態が実現されました。 # # では以上の操作をQiskitで実装してみましょう。レジスタ1と2は4ビットとして、$a=9, b=13$を考えます。後の便利のために、まずは足し算のサブルーチン部分だけを関数化します。 # + tags=["remove-output"] # まずは全てインポート import numpy as np import matplotlib.pyplot as plt from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit, IBMQ, Aer, transpile from qiskit.tools.monitor import job_monitor from qiskit.providers.ibmq import least_busy from utils.show_state import show_state from utils.optimized_additions import optimized_additions, get_initial_layout print('notebook ready') # + tags=["remove-output"] def setup_addition(circuit, reg1, reg2, reg3): """Set up an addition subroutine to a circuit with three registers """ # Equal superposition in register 3 circuit.h(reg3) # Smallest unit of phi dphi = 2. * np.pi / (2 ** reg3.size) # Loop over reg1 and reg2 for reg_ctrl in [reg1, reg2]: # Loop over qubits in the control register (reg1 or reg2) for qctrl in reg_ctrl: # Loop over qubits in the target register (reg3) for qtarg in reg3: # C[P(phi)], phi = 2pi * 2^{ictrl} * 2^{itarg} / 2^{n3} circuit.cp(dphi * (2 ** (qctrl.index + qtarg.index)), qctrl, qtarg) # Insert a barrier for better visualization circuit.barrier() # Inverse QFT for j in range(reg3.size // 2): circuit.swap(reg3[j], reg3[-1 - j]) for itarg in range(reg3.size): for ictrl in range(itarg): power = ictrl - itarg - 1 + reg3.size circuit.cp(-dphi * (2 ** power), reg3[ictrl], reg3[itarg]) circuit.h(reg3[itarg]) print('Defined function setup_addition') # - # 回路を作り、レジスタ1と2を入力9と13を表すように初期化します。 # + tags=["remove-output"] a = 9 b = 13 # Calculate the necessary register sizes n1 = np.ceil(np.log2(a + 1)).astype(int) n2 = np.ceil(np.log2(b + 1)).astype(int) n3 = np.ceil(np.log2(a + b + 1)).astype(int) print('n1 =', n1, 'n2 =', n2, 'n3 =', n3) reg1 = QuantumRegister(n1, 'r1') reg2 = QuantumRegister(n2, 'r2') reg3 = QuantumRegister(n3, 'r3') # QuantumCircuit can be instantiated from multiple registers circuit = QuantumCircuit(reg1, reg2, reg3) # Set 
register 1 to state |a> for j in range(n1): if ((a >> j) & 1) == 1: circuit.x(reg1[j]) # Set register 2 to state |b> for j in range(n2): if ((b >> j) & 1) == 1: circuit.x(reg2[j]) setup_addition(circuit, reg1, reg2, reg3) circuit.draw('mpl', scale=0.6, fold=100) # - # 再び`show_state`関数を使って終状態を確認してみましょう。 show_state(circuit, register_sizes=(n1, n2, n3), draw=False) # ここで、`register_sizes`というオプション引数を渡して、13ビットの回路を$n_1 + n_2 + n_3$ビットに分けて解釈するよう指定しました。結果表示された状態は期待通り単一の計算基底$22:13:9$、つまりレジスタ1, 2, 3がそれぞれ$\ket{9}, \ket{13}, \ket{22}$となっている状態です(回路全体でビットを右から書いて状態を表示するため、レジスタも右から左に並びます)。つまり、めでたく状態の変遷 # # $$ # \ket{9}\ket{13}\ket{0} \rightarrow \ket{9}\ket{13}\ket{22} # $$ # # が実現しました。 # ## 足し算の並列化 # # 上では小学一年生ができる足し算を一回行うために13個の量子ビットと67個のゲートを利用しました。しかし、出来上がった回路は入力の値(9と13)によらず、4ビットで表現できる2つの整数すべてに対して成り立ちます(一般に2つの$n$ビット数の和は$n+1$ビットに収まるので、レジスタ3の大きさにも不足はありません)。さらに、量子演算は線形(つまり演算$U$について$U(\sum_{k} c_k \ket{k}) = \sum_{k} c_k U\ket{k}$)なので、初期状態としてレジスタ1と2がどんな計算基底の重ね合わせにあっても、それぞれの組み合わせに対してレジスタ3の状態が和を表してくれます。特に、初期状態がequal superpositionであれば、この回路は # # $$ # \sum_{j=0}^{2^{n_1}-1} \sum_{k=0}^{2^{n_2}-1} \ket{j}\ket{k}\ket{0} \rightarrow \sum_{j=0}^{2^{n_1}-1} \sum_{k=0}^{2^{n_2}-1} \ket{j}\ket{k}\ket{j+k} # $$ # # を行うので、$\mathcal{O}\left((n_1 + n_2 + n_3) n_3\right)$個のゲートで$2^{n_1+n_2}$通りの足し算を並列に行います。実際にこれを確認してみましょう。 # + tags=["remove-output"] n1 = 4 n2 = 4 n3 = np.ceil(np.log2((2 ** n1) + (2 ** n2) - 1)).astype(int) reg1 = QuantumRegister(n1, 'r1') reg2 = QuantumRegister(n2, 'r2') reg3 = QuantumRegister(n3, 'r3') # QuantumCircuit can be instantiated from multiple registers circuit = QuantumCircuit(reg1, reg2, reg3) # Set register 1 and 2 to equal superpositions circuit.h(reg1) circuit.h(reg2) setup_addition(circuit, reg1, reg2, reg3) show_state(circuit, register_sizes=(n1, n2, n3), amp_norm=(1. 
/ np.sqrt(2 ** (n1 + n2)), r'\frac{1}{\sqrt{2^{n_1 + n_2}}}'), draw=False) # - # 65量子ビットのマシンを最大限利用するならば、$n_1 = n_2 = 21, n_3 = 22$で$2^{42}$通り、つまり約4兆通りの足し算を同時に行うことができます。 # # もちろん、上で書いたようにここには重要な但し書きがあって、実機でこの計算をして測定から答えを得ようとすると、毎回の測定でどの組み合わせが得られるかをコントロールできないので、これはあまり実用的とは言えない回路です。強いて言えば毎日ランダムに6桁+6桁の正しい足し算を教えてくれる「日めくり足し算カレンダー」にくらいは使えます。4兆通りなので100億年程度使い続けられます。 # # ## シミュレータでの実行 # # 上の足し算回路の結果がランダムに出る様子をシミュレーションで確認しましょう。課題では実機でも実行します。その際、上の回路実装では非効率的でエラーが出すぎるので、これからは[こちらの効率化した回路](https://github.com/UTokyo-ICEPP/qc-workbook/tree/master/source/utils/optimized_additions.py)を代わりに使用します。 # + # 元の回路に測定を加える circuit.measure_all() circuit_original = circuit # 効率化した回路(測定付き) circuit_optimized = optimized_additions(n1, n2) print('Constructed an optimized addition circuit') # - # 回路の効率化とは具体的にどういうことでしょうか。もともとの回路と効率化したものとを比べてみましょう。まずは、単純にオペレーションの数を比較します。ゲート一つ一つで一定の確率でエラーが起こるということは、同じことをする回路ならゲートの数が少ないほうがより正確な計算をしてくれます。 print('Number of operations in the original circuit:', circuit_original.size()) print('Number of operations in the optimized circuit:', circuit_optimized.size()) # 効率化したはずの回路のほうがはるかにゲート数が多いという結果になりました。なぜでしょうか。 # ### トランスパイルと物理的回路 # # {doc}`circuit_from_scratch`でも少し触れましたが、回路が実機で実行される前には、トランスパイルという回路の変換が行われ、様々な複合ゲートからなる「論理的」な回路から、実機のハードウェアに実装されている基本ゲートのみで書かれる「物理的」な回路が作られます[^physical]。論理的な回路はどのようにでも書ける(例えば回路全体を一つのゲートと呼んでしまうこともできる)ので、回路の効率の比較はトランスパイル後でなければ意味がありません。 # # いい機会なので、実機での量子計算について少し詳細に考えてみましょう。トランスパイルがまさに論理的なアルゴリズムの世界と物理的実装の世界のインターフェースとなるので、この過程に注目します。 # # トランスパイル時には、以下のような回路の変換が起こります。 # # - 冗長なゲートの削除 # - 多重制御ゲートのCNOTと1量子ビットゲートへの分解 # - 実機のトポロジーに即した量子ビットのマッピング(詳細は下) # - 物理的に隣接しない量子ビット間の制御ゲートを実行するためのSWAPの挿入 # - 1量子ビットゲートの基本ゲートへの分解 # - 物理的回路の最適化 # # 実機のトポロジーとは、実際の量子プロセッサチップ上での量子ビット同士の繋がりかたのことを指します。2つの量子ビットが繋がっているとは、その間で基本制御ゲート(IBMQではCNOT)が実行できるということを意味します。これまで考慮してきませんでしたが、実はすべての量子ビットが繋がっているわけではないのです。例えば後で使うibmq_16_melbourneというマシンは以下のようなトポロジーを持っています。 # # ```{image} figs/melbourne_topology.png # :height: 200px # :name: ibmq_16_melbourne # ``` # # 
図中、数字のついた丸が量子ビットを表し、線が量子ビット同士の繋がりを表します。 # # 直接接続のない量子ビット間で制御ゲートを実行する場合、SWAPを使って2つの量子ビットが隣り合うように状態を遷移させていく必要があります。例えば上のibmq_16_melbourneでビット2と6の間のCNOTが必要なら、(いくつか方法がありますが)2↔3, 3↔4, 4↔5とSWAPを繰り返して、5と6の間でCNOTを行い、ビットの並びを元に戻す必要があれば再度5↔4, 4↔3, 3↔2とSWAPをすることになります。 # # {ref}`ゲートの解説 <other_single_and_double_qubit_gates>`に出てきたように、SWAPは3つのCNOTに分解されます。つまり、直接接続のない量子ビット同士の制御ゲートが多出するような回路があると、莫大な数のCNOTが使われることになります。**CNOTのエラー率(ゲート操作一回あたりに操作の結果を間違える確率)は1量子ビットゲートのエラー率より一桁ほど高い**ので、これは大きな問題になります。そこで、論理的回路の量子ビットと実機の量子ビットとのマッピング(SWAPが発生すれば対応は変わっていくので、あくまで初期対応)と、回路中にどうSWAPを挿入していくかというルーティングの両方を上手に決めるということが、トランスパイルにおける中心的な課題です。 # # しかし、実は一般の回路に対して最適なマッピングとルーティングを探すという問題自体がいわゆるNP-hardな問題なので、qiskitのトランスパイル・ルーチンではこの問題の最適解を探してくれません。代わりにstochastic swapという、乱数を用いた手法が標準設定では利用されます。Stochastic swapは多くの回路で比較的効率のいいルーティングを作ることが知られていますが、乱数を利用するため実行のたびに異なるルーティングが出てくるなど、やや扱いにくい面もあります。また、単純な回路で事前に最適なルーティングがわかっている場合は、stochastic swapを使うべきではありません。 # ### 回路の比較 # # 上を踏まえて、改めて2つの足し算回路を比較してみましょう。 # # これまで実機で回路を実行するときは`execute`という関数を利用していましたが、実はこの関数にトランスパイル前の回路を渡すと、自動で標準設定のトランスパイルがなされていました。しかし`transpile`という関数を使い、トランスパイルを明示的に行うこともできます。この関数は主にトランスパイルの設定を細かくコントロールしたいときなどに使います。今回の効率化した回路は特定のマッピングのために作られているので、stochastic swapを使用しないよう設定を変更してトランスパイルをします。マッピングはibmq_16_melbourneのトポロジーとエラー率を参考にして事前に決めてあり、`get_initial_layout`という関数の中にハードコードされています。 # # IBMQのマシンはチップごとに異なるトポロジーを持っているので、`transpile`関数にも引数としてバックエンドを指定し、そのバックエンド専用の物理的回路を作ります。 # # [^physical]: 「物理的」な回路もまだ実は論理的な存在であり、本当にハードウェアが理解するインストラクションに変換するには、さらに基本ゲートを特定のマイクロ波パルス列に直す必要があります。 # + tags=["remove-output", "raises-exception"] IBMQ.enable_account('__paste_your_token_here__') provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main') # 4 + 4 + 5 = 13量子ビット以上のマシンで ibm-q/open/main からアクセス可能なのはibmq_16_melbourneのみ(2021年3月現在) backend = provider.get_backend('ibmq_16_melbourne') # オリジナルの回路をトランスパイルする。optimization_level=3は自動設定のうち、最も効率のいい回路を作る print('Transpiling the original circuit with standard settings') circuit_original_tr = transpile(circuit_original, 
backend=backend, optimization_level=3) # 効率化した回路をトランスパイルする。ibmq_16_melbourneのうち、一直線に繋がった量子ビット列にマッピングする print('Transpiling the optimized circuit with basic routing and custom qubit mapping') initial_layout = get_initial_layout(backend, n1, n2) circuit_optimized_tr = transpile(circuit_optimized, backend=backend, routing_method='basic', initial_layout=initial_layout, optimization_level=3) print('Number of operations in the original circuit:', circuit_original_tr.size()) print(' Breakdown: N(Rz)={rz}, N(SX)={sx}, N(CNOT)={cx}'.format(**circuit_original_tr.count_ops())) print('Number of operations in the optimized circuit:', circuit_optimized_tr.size()) print(' Breakdown: N(Rz)={rz}, N(SX)={sx}, N(CNOT)={cx}'.format(**circuit_optimized_tr.count_ops())) # - # ```{tip} # 上のように`optimization_level=3`でトランスパイルをすると、回路の最適化ルーチンに時間がかかりすぎることがたまにあります。セルを実行して1分以上反応がない場合は、一度Jupyterのカーネルを再起動してみてください。 # ``` # # 上のセルを実行すると、今度は効率化回路のオペレーションの数が元の回路の半数以下という結果になることがわかります。このマシンの基本ゲート$R_z, X$, SX, CNOTに分解した内訳でも、エラー率の大きいCNOTゲートの数が大幅に削減されています(両回路とも$X$ゲートは0個です)。 # # 元の回路と効率化した回路の違いは、後者では「数珠つなぎ」になった量子ビット列というトポロジーを仮定して、制御ゲートの順番を工夫して直接明示的にSWAPを挿入していることです。さらに、可能なところでは$C[P]$ゲートの分解で生じるCNOTとSWAPのCNOTが打ち消し合うことも利用しています。最後の逆フーリエ変換でもゲートの順番が工夫してあります。 # # それでは、トランスパイルした回路を実行してみます。`transpile`関数を使って「手動で」トランスパイルを行った回路を`execute`に渡すと再び標準設定でトランスパイルされてしまうので、別の手続きを踏みます。 # + tags=["raises-exception", "remove-output"] qasm_simulator = Aer.get_backend('qasm_simulator') # Will call backend.run() instead of execute() job_original = qasm_simulator.run(circuit_original_tr, shots=100) counts_original = job_original.result().get_counts() job_optimized = qasm_simulator.run(circuit_optimized_tr, shots=100) counts_optimized = job_optimized.result().get_counts() # Keys of counts are single binaries; need to split them into three parts and interpret as decimals def interpret_counts(counts, n1, n2): heights = [] labels = [] for key, value in counts.items(): heights.append(value) x1 = int(key[-n1:], 2) x2 = int(key[-n1 - n2:-n1], 
2) x3 = int(key[:-n1 - n2], 2) labels.append('{} + {} = {}'.format(x1, x2, x3)) return np.linspace(0., len(labels), len(labels), endpoint=False), heights, labels fig, (ax_original, ax_optimized) = plt.subplots(2, figsize=[16, 10]) x, heights, labels = interpret_counts(counts_original, n1, n2) ax_original.bar(x, heights, width=0.5) ax_original.set_xticks(x - 0.5) ax_original.set_xticklabels(labels, rotation=70) ax_original.tick_params('x', length=0.) x, heights, labels = interpret_counts(counts_optimized, n1, n2) ax_optimized.bar(x, heights, width=0.5) ax_optimized.set_xticks(x - 0.5) ax_optimized.set_xticklabels(labels, rotation=70) ax_optimized.tick_params('x', length=0.) fig.subplots_adjust(bottom=-0.2) # - # 正しい足し算の式がランダムに出現することを確認してください。 # ## 参考文献 # # ```{bibliography} # :filter: docname in docnames # ```
source/ja/extreme_simd.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Задание к занятию «Базовые типы данных и циклы»

# ## Задание 1
#
# Дан список с визитами по городам и странам. Напишите код, который возвращает отфильтрованный список geo_logs, содержащий только визиты из России. Считайте, что список geo_logs легко помещается в оперативной памяти.
#
# ```python
# geo_logs = [
#     ['visit1', ['Москва', 'Россия']],
#     ['visit2', ['Дели', 'Индия']],
#     ['visit3', ['Владимир', 'Россия']],
#     ['visit4', ['Лиссабон', 'Португалия']],
#     ['visit5', ['Париж', 'Франция']],
#     ['visit6', ['Лиссабон', 'Португалия']],
#     ['visit7', ['Тула', 'Россия']],
#     ['visit8', ['Тула', 'Россия']],
#     ['visit9', ['Курск', 'Россия']],
#     ['visit10', ['Архангельск', 'Россия']],
# ]
# ```

def filter_visit_with_country(geo_logs, country):
    """Return only the visits from geo_logs that were made from `country`.

    Each entry of geo_logs is ['visitN', [city, country]]; an entry is kept
    when `country` appears in its [city, country] pair.
    """
    matching_visits = []
    for visit in geo_logs:
        # visit[1] is the [city, country] pair of this visit
        if country in visit[1]:
            matching_visits.append(visit)
    return matching_visits

# +
# Input data
geo_logs = [
    ['visit1', ['Москва', 'Россия']],
    ['visit2', ['Дели', 'Индия']],
    ['visit3', ['Владимир', 'Россия']],
    ['visit4', ['Лиссабон', 'Португалия']],
    ['visit5', ['Париж', 'Франция']],
    ['visit6', ['Лиссабон', 'Португалия']],
    ['visit7', ['Тула', 'Россия']],
    ['visit8', ['Тула', 'Россия']],
    ['visit9', ['Курск', 'Россия']],
    ['visit10', ['Архангельск', 'Россия']],
]

# Check the result
filter_visit_with_country(geo_logs, 'Россия')
# -

# ## Задание 2
#
# Выведите на экран все уникальные гео-ID из значений словаря ids. Т. е.
список вида [213, 15, 54, 119, 98, 35] # # ```python # ids = {'user1': [213, 213, 213, 15, 213], 'user2': [54, 54, 119, 119, 119], 'user3': [213, 98, 98, 35]} # ``` def collect_uniq_geo_id(ids): '''Возвращает список, содержащий уникальные geo-id''' uniq_id = set() [uniq_id.update(val) for _, val in ids.items()] return list(uniq_id) # + # Исходные данные ids = { 'user1': [213, 213, 213, 15, 213], 'user2': [54, 54, 119, 119, 119], 'user3': [213, 98, 98, 35], } # Проверка работы collect_uniq_geo_id(ids) # - # ## Задание 3 # # Список поисковых запросов. Получить распределение количества слов в них. Т. е. поисковых запросов из одного слова 5%, из двух - 7%, из трех - 3% итд. # # ```python # queries = [ # 'смотреть сериалы онлайн', # 'новости спорта', # 'афиша кино', # 'курс доллара', # 'сериалы этим летом', # 'курс по питону', # 'сериалы про спорт', # ] # ``` def word_count_spreading(words_list): '''Вычисляет распределение количества слов каждого элемента words_list''' words_spreading = {} words_list_len = len(words_list) for words in words_list: words_count = len(words.split(' ')) words_spreading.setdefault(words_count, 0) words_spreading[words_count] += 1 return {key : val / words_list_len for key, val in words_spreading.items()} # + # Исходные данные queries = [ 'смотреть сериалы онлайн', 'новости спорта', 'афиша кино', 'курс доллара', 'сериалы этим летом', 'курс по питону', 'сериалы про спорт', ] # Проверка работы word_count_spreading(queries) # - # ## Задание 4 # # Дана статистика рекламных каналов по объемам продаж. Напишите скрипт, который возвращает название канала с максимальным объемом. # Т. е. в данном примере скрипт должен возвращать 'yandex'. 
#
# ```python
# stats = {
#     'facebook': 55,
#     'yandex': 120,
#     'vk': 115,
#     'google': 99,
#     'email': 42,
#     'ok': 98
# }
# ```

# +
# Input data
stats = {
    'facebook': 55,
    'yandex': 120,
    'vk': 115,
    'google': 99,
    'email': 42,
    'ok': 98
}

# Solution: max() with the dict's own lookup as the key function scans the
# channels once (O(n)), instead of the original approach of materialising a
# list of dicts and sorting it (O(n log n)) just to take the first element.
best_channel = max(stats, key=stats.get)
best_channel  # -> 'yandex'
# -

# ## Задание 5
#
# Дан поток логов по количеству просмотренных страниц для каждого пользователя. Список отсортирован по ID пользователя. Вам необходимо написать алгоритм, который считает среднее значение просмотров на пользователя. Т. е. надо посчитать отношение суммы всех просмотров к количеству уникальных пользователей. Учтите, что весь список stream не помещается в оперативную память, т. е. его нужно обрабатывать поэлементно в цикле.
#
# ```python
# stream = [
#     '2018-01-01,user1,3',
#     '2018-01-07,user1,4',
#     '2018-03-29,user1,1',
#     '2018-04-04,user1,13',
#     '2018-01-05,user2,7',
#     '2018-06-14,user3,4',
#     '2018-07-02,user3,10',
#     '2018-03-21,user4,19',
#     '2018-03-22,user4,4',
#     '2018-04-22,user4,8',
#     '2018-05-03,user4,9',
#     '2018-05-11,user4,11',
# ]
# ```

# Input data
stream = [
    '2018-01-01,user1,3',
    '2018-01-07,user1,4',
    '2018-03-29,user1,1',
    '2018-04-04,user1,13',
    '2018-01-05,user2,7',
    '2018-06-14,user3,4',
    '2018-07-02,user3,10',
    '2018-03-21,user4,19',
    '2018-03-22,user4,4',
    '2018-04-22,user4,8',
    '2018-05-03,user4,9',
    '2018-05-11,user4,11',
]

# Solution: one pass over the stream, keeping only a running total and the
# set of user ids seen so far (the full list never has to fit in memory).
total_view_count = 0        # running total of page views
total_uniq_users = set()    # unique user ids seen so far
for item in stream:
    _, user, view = item.split(',')
    # BUG FIX: the original called total_uniq_users.update(user), which
    # iterates over the *characters* of the id ('user1' -> {'u','s','e','r','1'})
    # and inflated the "unique users" count to 8.  set.add() stores the id
    # as a single element.
    total_uniq_users.add(user)
    total_view_count += int(view)

# Average number of views per unique user
total_view_count / len(total_uniq_users)

# ## Задание 6
#
# Дана статистика рекламных кампаний по дням. Напишите алгоритм, который по паре дата-кампания ищет значение численного столбца. Т. е. для даты '2018-01-01' и 'google' нужно получить число 25. Считайте, что все комбинации дата-кампания уникальны, а список stats легко помещается в оперативной памяти.
#
# ```python
# stats = [
#     ['2018-01-01', 'google', 25],
#     ['2018-01-01', 'yandex', 65],
#     ['2018-01-01', 'market', 89],
#     ['2018-01-02', 'google', 574],
#     ['2018-01-02', 'yandex', 249],
#     ['2018-01-02', 'market', 994],
#     ['2018-01-03', 'google', 1843],
#     ['2018-01-03', 'yandex', 1327],
#     ['2018-01-03', 'market', 1764],
# ]
# ```

def search_date_compary(stats, date, company):
    """Return the numeric value stored for the (date, company) pair.

    stats: list of [date, company, value] rows with unique (date, company).
    Returns -1 when no row matches (sentinel kept for backward compatibility).
    """
    for row_date, row_company, count in stats:
        if row_date == date and row_company == company:
            return count
    return -1  # value not found

# +
# Input data
stats = [
    ['2018-01-01', 'google', 25],
    ['2018-01-01', 'yandex', 65],
    ['2018-01-01', 'market', 89],
    ['2018-01-02', 'google', 574],
    ['2018-01-02', 'yandex', 249],
    ['2018-01-02', 'market', 994],
    ['2018-01-03', 'google', 1843],
    ['2018-01-03', 'yandex', 1327],
    ['2018-01-03', 'market', 1764],
]

# Check the result
search_date_compary(stats, '2018-01-01', 'google')
# -

# ## Бонусный вариант (делать его не обязательно)
# Рассмотрите обобщенный вариант этой задачи: имеется таблица из n столбцов. Первые n-1 столбцов - признаки (комбинации этих признаков в таблице уникальные). Последний столбец - значение. Вам необходимо по набору n-1 признаков найти соответствующую строчку в таблице и вернуть значение из правого столбца. Учтите, что исходная таблица может быть любого размера.
#
# Одно из применений задачи - объединение двух таблиц по n столбцов, одна из которых помещается в оперативной памяти.  Например, 7 столбцов по 1 и 10 миллионов строк.

def search_row_attributes(table, attributes):
    """Generalised lookup: find the row whose first n-1 columns equal `attributes`.

    table:      list of rows, each a list of length n (n-1 features + value).
    attributes: list of n-1 feature values to match.
    Returns the matching row's last column, or -1 when no row matches.
    """
    target = list(attributes)
    for row in table:
        *row_attributes, value = row
        # BUG FIX: the original returned `value` as soon as a *single*
        # attribute matched (its `continue` only skipped one column, not the
        # row), e.g. it returned 25 for ['2018-01-02', 'google'].  All n-1
        # attributes must match; list equality also handles length mismatch
        # safely instead of raising IndexError.
        if row_attributes == target:
            return value
    return -1  # value not found

# +
# Input data
stats = [
    ['2018-01-01', 'google', 25],
    ['2018-01-01', 'yandex', 65],
    ['2018-01-01', 'market', 89],
    ['2018-01-02', 'google', 574],
    ['2018-01-02', 'yandex', 249],
    ['2018-01-02', 'market', 994],
    ['2018-01-03', 'google', 1843],
    ['2018-01-03', 'yandex', 1327],
    ['2018-01-03', 'market', 1764],
]

# Check the result
search_row_attributes(stats, ['2018-01-01', 'google'])
# -
homeworks_python/2/hw2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This script loads behavioral mice data (from `biasedChoiceWorld` protocol and, separately, the last three sessions of training) only from mice that pass a given (stricter) training criterion. For the `biasedChoiceWorld` protocol, only sessions achieving the `trained_1b` and `ready4ephysrig` training status are collected. # The data are slightly reformatted and saved as `.csv` files. # + import datajoint as dj dj.config['database.host'] = 'datajoint.internationalbrainlab.org' from ibl_pipeline import subject, acquisition, action, behavior, reference, data from ibl_pipeline.analyses.behavior import PsychResults, SessionTrainingStatus from ibl_pipeline.utils import psychofit as psy from ibl_pipeline.analyses import behavior as behavior_analysis import numpy as np import matplotlib.pyplot as plt import pandas as pd # - import os myPath = r"C:\Users\Luigi\Documents\GitHub\ibl-changepoint\data" # Write here your data path os.chdir(myPath) # + # Get list of mice that satisfy given training criteria (stringent trained_1b) # Check query from behavioral paper: # https://github.com/int-brain-lab/paper-behavior/blob/master/paper_behavior_functions.py subj_query = (subject.Subject * subject.SubjectLab * reference.Lab * subject.SubjectProject & 'subject_project = "ibl_neuropixel_brainwide_01"').aggr( (acquisition.Session * behavior_analysis.SessionTrainingStatus()) # & 'training_status="trained_1a" OR training_status="trained_1b"', # & 'training_status="trained_1b" OR training_status="ready4ephysrig"', & 'training_status="trained_1b"', 'subject_nickname', 'sex', 'subject_birth_date', 'institution', date_trained='min(date(session_start_time))') subjects = (subj_query & 'date_trained < "2019-09-30"') mice_names = sorted(subjects.fetch('subject_nickname')) 
print(mice_names)

# +
# Sessions run under the *training* protocol (used below to extract each
# mouse's last three training sessions); same date cutoff as the subject query.
sess_train = ((acquisition.Session * behavior_analysis.SessionTrainingStatus)
              & 'task_protocol LIKE "%training%"'
              & 'session_start_time < "2019-09-30"')

# Sessions run under the *biased*-blocks protocol that reached a stable
# training status (trained_1b or ready4ephysrig).
sess_stable = ((acquisition.Session * behavior_analysis.SessionTrainingStatus)
               & 'task_protocol LIKE "%biased%"'
               & 'session_start_time < "2019-09-30"'
               & ('training_status="trained_1b" OR training_status="ready4ephysrig"'))

# Filled below with the mice that pass the minimum-session-count cut.
stable_mice_names = list()

# Perform at least this number of sessions
MinSessionNumber = 4


def get_mouse_data(df):
    """Reformat a fetched trial table into the flat per-trial DataFrame saved to CSV.

    df: pandas DataFrame of joined trial/session rows, expected to be ordered
    by session_start_time then trial_id (the fetches below order this way).

    Returned columns: trial_num, session_num (1-based), stim_probability_left,
    contrast (absolute), position (+/-35 deg), response_choice
    (1 = CCW, -1 = CW, 0 = no-go), trial_correct (0./1.), reaction_time.
    """
    position_deg = 35.  # Stimuli appear at +/- 35 degrees

    # Create new dataframe
    datamat = pd.DataFrame()
    datamat['trial_num'] = df['trial_id']
    # trial_id restarts at 1 in each session, so counting the 1s numbers the
    # sessions 1..N in chronological order.
    datamat['session_num'] = np.cumsum(df['trial_id'] == 1)
    datamat['stim_probability_left'] = df['trial_stim_prob_left']
    signed_contrast = df['trial_stim_contrast_right'] - df['trial_stim_contrast_left']
    datamat['contrast'] = np.abs(signed_contrast)
    # np.sign() is 0 at zero contrast; those trials get their position fixed
    # up separately below.
    datamat['position'] = np.sign(signed_contrast)*position_deg
    datamat['response_choice'] = df['trial_response_choice']
    datamat.loc[df['trial_response_choice'] == 'CCW','response_choice'] = 1
    datamat.loc[df['trial_response_choice'] == 'CW','response_choice'] = -1
    datamat.loc[df['trial_response_choice'] == 'No Go','response_choice'] = 0
    datamat['trial_correct'] = np.double(df['trial_feedback_type']==1)
    datamat['reaction_time'] = df['trial_response_time'] - df['trial_stim_on_time'] # double-check
    # NOTE(review): the original author flagged the reaction_time definition
    # with "double-check" — verify trial_stim_on_time is the right reference.

    # Since some trials have zero contrast, need to compute the alleged position separately
    # (correct response implies the stimulus was on the chosen side; incorrect
    # implies the opposite side).
    datamat.loc[(datamat['trial_correct'] == 1) & (signed_contrast == 0),'position'] = \
        datamat.loc[(datamat['trial_correct'] == 1) & (signed_contrast == 0),'response_choice']*position_deg
    datamat.loc[(datamat['trial_correct'] == 0) & (signed_contrast == 0),'position'] = \
        datamat.loc[(datamat['trial_correct'] == 0) & (signed_contrast == 0),'response_choice']*(-position_deg)

    return datamat


# +
# Loop over all mice
for mouse_nickname in mice_names:
    mouse_subject = {'subject_nickname': mouse_nickname}

    # Get mouse data for biased sessions
    behavior_stable = (behavior.TrialSet.Trial & (subject.Subject & mouse_subject)) \
        * sess_stable.proj('session_uuid','task_protocol','session_start_time','training_status') * subject.Subject.proj('subject_nickname') \
        * subject.SubjectLab.proj('lab_name')
    df = pd.DataFrame(behavior_stable.fetch(order_by='subject_nickname, session_start_time, trial_id', as_dict=True))

    if len(df) > 0:
        # The mouse has performed in at least one stable session with biased blocks
        datamat = get_mouse_data(df)

        # Take mice that have performed a minimum number of sessions
        if np.max(datamat['session_num']) >= MinSessionNumber:
            # Should add 'N' to mice names that start with numbers?
            # Save dataframe to CSV file
            filename = mouse_nickname + '.csv'
            datamat.to_csv(filename,index=False)
            stable_mice_names.append(mouse_nickname)

            # Get mouse last sessions of training data
            # NOTE(review): nesting reconstructed from a flattened source —
            # the end-of-training export is assumed to run only for mice that
            # passed the session-count cut above; confirm against the repo.
            behavior_train = (behavior.TrialSet.Trial & (subject.Subject & mouse_subject)) \
                * sess_train.proj('session_uuid','task_protocol','session_start_time') * subject.Subject.proj('subject_nickname') \
                * subject.SubjectLab.proj('lab_name')
            df_train = pd.DataFrame(behavior_train.fetch(order_by='subject_nickname, session_start_time, trial_id', as_dict=True))
            datamat_train = get_mouse_data(df_train)

            # Keep only the last three training sessions.
            Nlast = np.max(datamat_train['session_num']) - 3
            datamat_final = datamat_train[datamat_train['session_num'] > Nlast]

            # Save final training dataframe to CSV file
            filename = mouse_nickname + '_endtrain.csv'
            datamat_final.to_csv(filename,index=False)

print(stable_mice_names)
# -

# Spot-check query: stable biased sessions of one example mouse.
ss = (((acquisition.Session * behavior_analysis.SessionTrainingStatus)
       & 'task_protocol LIKE "%biased%"'
       & 'session_start_time < "2019-09-30"'
       & ('training_status="trained_1b" OR training_status="ready4ephysrig"')) * subject.Subject) & ('subject_nickname = "ibl_witten_07"')
ss
python/.ipynb_checkpoints/fetch_data_script-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# EDA of the Home Credit EXT_SOURCE_* features: build row-wise aggregates and
# weighted combinations of the three external credit scores, inspect their
# correlation with TARGET, and rank them with a decision tree.

# +
import os

import numpy as np
import pandas as pd
from tqdm import tqdm_notebook as tqdm
# NOTE(review): sklearn.externals.joblib is unused here and was removed in
# scikit-learn >= 0.23; prefer `import joblib` directly.
from sklearn.externals import joblib

# %matplotlib inline
import seaborn as sns

from sklearn import tree
from sklearn.model_selection import train_test_split

DIR = 'PATH/TO/YOUR/DATA'
# -

# application_train.csv: the main Home Credit training table.
X = pd.read_csv(os.path.join(DIR,'files/unzipped_data/application_train.csv'))
X.head()

# .copy() so the columns added below go to an independent frame rather than a
# view of X (avoids SettingWithCopyWarning); values are unchanged.
X_ext = X[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'TARGET']].copy()

# +
# X_ext = X_ext.fillna(0)
# -

# Row-wise aggregates of the three external scores.
# FIX: getattr(np, name) replaces the original eval('np.{}'.format(name)) —
# identical functions, no string evaluation.
for function_name in ['nanmin', 'nanmax', 'sum', 'mean', 'var', 'median', 'std',
                      'nanmedian', 'nanmean', 'min', 'max']:
    X_ext['external_sources_{}'.format(function_name)] = getattr(np, function_name)(
        X_ext[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']], axis=1)

X_ext.head()

# Weighted combinations of the three scores (weights sum to 9).
X_ext['EXT_SRC_weighted3'] = (X.EXT_SOURCE_1*2+X.EXT_SOURCE_2*3+X.EXT_SOURCE_3*4)/9
X_ext['EXT_SRC_weighted2'] = (X.EXT_SOURCE_1*3+X.EXT_SOURCE_2*4+X.EXT_SOURCE_3*2)/9
X_ext['EXT_SRC_weighted1'] = (X.EXT_SOURCE_1*4+X.EXT_SOURCE_2*2+X.EXT_SOURCE_3*3)/9
X_ext.head()

# Absolute pairwise correlations; rank features by |corr with TARGET|.
X_ext_corr = abs(X_ext.corr())
X_ext_corr.sort_values('TARGET', ascending=False)['TARGET']

sns.heatmap(X_ext_corr, xticklabels=X_ext_corr.columns, yticklabels=X_ext_corr.columns)

# Tree
X_train, X_test = train_test_split(X_ext)

# +
Y_train = X_train['TARGET']
Y_test = X_test['TARGET']
X_train = X_train.drop(columns='TARGET')
X_test = X_test.drop(columns='TARGET')
# -

X_train = X_train.fillna(0)
X_test = X_test.fillna(0)

# +
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, Y_train)

# NOTE(review): .score() on a classifier is mean accuracy, not R^2 —
# labels below kept as in the original notebook.
print("R^2 on the train set:")
print(clf.score(X_train, Y_train))

print("\nR^2 on the test set:")
print(clf.score(X_test, Y_test))
# -

X_train.head()

# Rank the engineered features by the fitted tree's importances.
feature_importances = pd.Series(clf.feature_importances_, index=X_train.columns.values)
feature_importances.sort_values(ascending=False)
notebooks/eda-external_sources.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CH.7 Data Cleaning and Preparation # ## 1. Missing Data import pandas as pd import numpy as np string_data = pd.Series(['aardvark','artichoke',np.nan,'avocado']) string_data string_data.isnull() string_data[0] = None string_data.isnull() from numpy import nan as NA data = pd.Series([1,NA,3.5,NA,7]) data.dropna() data[data.notnull()] data = pd.DataFrame([[1.,6.5,3.],[1.,NA,NA],[NA,NA,NA],[NA,6.5,3.]]) cleaned = data.dropna() data cleaned data.dropna(how='all') data[4] = NA data data.dropna(axis=1,how='all') df = pd.DataFrame(np.random.randn(7,3)) df.iloc[:4,1] = NA df.iloc[:2,2] = NA df df.dropna() df.dropna(thresh=2) # thresh # fill na df.fillna(0) df.fillna({1:0.5,2:0}) _ = df.fillna(0,inplace=True) df df = pd.DataFrame(np.random.randn(6,3)) df.iloc[2:,1] = NA df.iloc[4:,2] = NA df df.fillna(method='ffill') df.fillna(method='ffill',limit=2) data = pd.Series([1.,NA,3.5,NA,7]) data.fillna(data.mean()) # fill NA with mean # ## 2. Data Transformation data = pd.DataFrame({'k1':['one','two']*3+['two'],'k2':[1,1,2,3,3,4,4]}) data data.duplicated() data.drop_duplicates() data['v1'] = range(7) data.drop_duplicates(['k1']) # drop duplicates in k1 data.drop_duplicates(['k1','k2'],keep='last') # ## 3. Data transformation witn function and mapping data = pd.DataFrame({'food':['bacon','pulled pork','bacon','Pastrami','corned beef' ,'Bacon','pastrami','honey ham','nova lox'], 'ounces':[4,3,12,6,7.5,8,3,5,6]}) data meat_to_animal={ 'bacon':'pig', 'pulled pork':'pig', 'pastrami':'cow', 'corned beef':'cow', 'honey ham':'pig', 'nova lox':'salmon' } lowercased = data['food'].str.lower() lowercased data['animal'] = lowercased.map(meat_to_animal) data data['food'].map(lambda x:meat_to_animal[x.lower()]) # ## 4. 
# Replacing Values  (section 4 — header continued from the previous cell)

data=pd.Series([1.,-999.,2.,-999.,-1000.,3.])
data
data.replace(-999, np.nan)            # single sentinel -> NaN
data.replace([-999,-1000],np.nan)     # several sentinels -> one value
data.replace([-999,-1000],[np.nan,0]) # pairwise replacement

# ## 5. Renaming Axis Indexes

data = pd.DataFrame(np.arange(12).reshape((3,4)),
                    index=['Ohio','Colorada','New York'],
                    columns=['one','two','three','four'])

transform = lambda x: x[:4].upper()
data.index.map(transform)
data.index=data.index.map(transform)
data

data.rename(index=str.title,columns=str.upper)
data.rename(index={'OHIO':'INDIANA'},columns={'three':'peekaboo'})
data.rename(index={'OHIO':'INDIANA'},inplace=True)
data

# ## 6. Discretization and Binning

ages=[20,22,25,27,21,23,37,31,61,45,41,32]
bins=[18,25,35,60,100]
cats=pd.cut(ages,bins)
cats
print(cats.codes)
print(cats.categories)
pd.value_counts(cats)

pd.cut(ages,[18,26,36,61,100],right=False)  # left-closed intervals

group_names=['Youth','YoungAdult','MiddleAged','Senior']
pd.cut(ages,bins, labels=group_names)

data=np.random.rand(20)
pd.cut(data,4,precision=2)   # 4 equal-width bins

data=np.random.randn(1000)
cats=pd.qcut(data,4)         # quartiles: equal-count bins
cats
pd.value_counts(cats)

# ## 7. Outliers

data = pd.DataFrame(np.random.randn(1000,4))
data.describe()

col = data[2]
col[np.abs(col)>3]

data[(np.abs(data)>3).any(1)]   # rows containing any |value| > 3

# cap values at +/-3
data[np.abs(data)>3] = np.sign(data)*3
data.describe()
np.sign(data).head()

# ## 8. Permutation and Random Sampling

df = pd.DataFrame(np.arange(5*4).reshape((5,4)))
sampler = np.random.permutation(5)
print(sampler)
df
df.take(sampler)   # reorder rows by the permutation
df.sample(n=3)

choices = pd.Series([5,7,-1,6,4])
draws = choices.sample(n=10,replace=True)  # sampling with replacement
draws

# ## 9.
# Indicator/Dummy Variables  (section 9 — header continued from the previous cell)

df = pd.DataFrame({'key':['b','b','a','c','a','b'],'data1':range(6)})
pd.get_dummies(df['key'])

dummies = pd.get_dummies(df['key'],prefix='key')
df_with_dummy = df[['data1']].join(dummies)
df_with_dummy

# MovieLens data: one '|'-separated genre string per movie.
# ('generes' [sic] is the spelling used consistently in this notebook.)
mnames=['movie_id','title','generes']
movies=pd.read_table('movies.dat',sep='::',header=None,names=mnames)
movies[:10]

# +
# collect the set of distinct genres
all_generes = []
for x in movies.generes:
    all_generes.extend(x.split('|'))

generes = pd.unique(all_generes)
generes

# +
# one indicator column per genre, initialised to 0
zero_matrix = np.zeros((len(movies),len(generes)))
dummies = pd.DataFrame(zero_matrix, columns=generes)

gen = movies.generes[0]
print(gen.split('|'))
print(dummies.columns.get_indexer(gen.split('|')))

# +
# set the indicator cells for each movie's genres
for i, gen in enumerate(movies.generes):
    indices = dummies.columns.get_indexer(gen.split('|'))
    dummies.iloc[i,indices]=1

movies_windic = movies.join(dummies.add_prefix('Genre_'))
movies_windic.iloc[0]
# -

np.random.seed(12345)
values=np.random.rand(10)
values
bins=[0,0.2,0.4,0.6,0.8,1]
pd.get_dummies(pd.cut(values,bins))   # dummies from binned values

# ## 10. String Manipulation

# +
val = 'a,b, guido'
print(val.split(','))
pieces = [x.strip() for x in val.split(',')]
print(pieces)
first,second,third = pieces
print(first+'::'+second+'::'+third)
# -

print('[1]', '::'.join(pieces))
print('[2]', 'guido' in val)     # substring membership test
print('[3]', val.index(','))     # raises ValueError if absent
print('[4]', val.find(':'))      # returns -1 if absent
print('[5]', val.count(','))
print('[6]', val.replace(',','::'))
print('[7]', val.replace(',',''))

# ## 11.
Regular Expressions # + import re text = "foo bar\t baz \tqux" print(re.split('\s+',text)) regex = re.compile('\s+') print(regex.split(text)) print(regex.findall(text)) # + text="""Dave <EMAIL> Steve <EMAIL> Rob <EMAIL> Ryan <EMAIL> """ pattern=r'[A-Z0.9._%+-]+@[A-Z0.9.-]+\.[A-Z]{2,4}' regex=re.compile(pattern, flags=re.IGNORECASE) print(regex.findall(text)) m = regex.search(text) print(m) print(text[m.start():m.end()] ) print(regex.match(text)) print(regex.sub('REDACATED',text)) # - pattern = r'([A-Z0.9._%+-]+)@([A-Z0.9.-]+)\.([A-Z]{2,4})' regex = re.compile(pattern,flags=re.IGNORECASE) m = regex.match('<EMAIL>') print(m.groups()) regex.findall(text) print(regex.sub(r'Username: \1, Domain: \2, Suffix: \3',text)) # ## 12. Vectorized String Functions in pandas data={'Dave':'<EMAIL>','Steve':'<EMAIL>', 'Rob':'<EMAIL>','Wes':np.nan} data = pd.Series(data) data data.isnull() data.str.contains('gmail') pattern data.str.findall(pattern,flags=re.IGNORECASE) matches = data.str.match(pattern,flags=re.IGNORECASE) matches data.str[:5]
01. Python_Tutorials/5_Data_Cleaning_and_Preparation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logistic Regression with Python and Scikit-Learn # # # In this project, I implement Logistic Regression with Python and Scikit-Learn. I build a classifier to predict whether or not it will rain tomorrow in Australia by training a binary classification model using Logistic Regression. I have used the **Rain in Australia** dataset downloaded from the Kaggle website for this project. # ## Table of Contents # # # The table of contents for this project is as follows:- # # # 1. Introduction to Logistic Regression # 2. Logistic Regression intuition # 3. The problem statement # 4. Dataset description # 5. Import libraries # 6. Import dataset # 7. Exploratory data analysis # 8. Declare feature vector and target variable # 9. Split data into separate training and test set # 10. Feature engineering # 11. Feature scaling # 12. Model training # 13. Predict results # 14. Check accuracy score # 15. Confusion matrix # 16. Classification metrices # 17. Adjusting the threshold level # 18. ROC - AUC # 19. Recursive feature elimination # 20. k-Fold Cross Validation # 21. Hyperparameter optimization using GridSearch CV # 22. Results and conclusion # # # ## 1. Introduction to Logistic Regression # # # When data scientists may come across a new classification problem, the first algorithm that may come across their mind is **Logistic Regression**. It is a supervised learning classification algorithm which is used to predict observations to a discrete set of classes. Practically, it is used to classify observations into different categories. Hence, its output is discrete in nature. **Logistic Regression** is also called **Logit Regression**. 
It is one of the most simple, straightforward and versatile classification algorithms which is used to solve classification problems. # ## 2. Logistic Regression intuition # # # In statistics, the **Logistic Regression model** is a widely used statistical model which is primarily used for classification purposes. It means that given a set of observations, Logistic Regression algorithm helps us to classify these observations into two or more discrete classes. So, the target variable is discrete in nature. # # # Logistic Regression algorithm works by implementing a linear equation with independent or explanatory variables to predict a response value. This predicted response value, denoted by z is then converted into a probability value that lie between 0 and 1. We use the **sigmoid function** in order to map predicted values to probability values. This sigmoid function then maps any real value into a probability value between 0 and 1. # # # # The sigmoid function returns a probability value between 0 and 1. This probability value is then mapped to a discrete class which is either “0” or “1”. In order to map this probability value to a discrete class (pass/fail, yes/no, true/false), we select a threshold value. This threshold value is called **Decision boundary**. Above this threshold value, we will map the probability values into class 1 and below which we will map values into class 0. # # # Mathematically, it can be expressed as follows:- # # # p ≥ 0.5 => class = 1 # # p < 0.5 => class = 0 # # # Generally, the decision boundary is set to 0.5. So, if the probability value is 0.8 (> 0.5), we will map this observation to class 1. Similarly, if the probability value is 0.2 (< 0.5), we will map this observation to class 0. # # # We can use our knowledge of `sigmoid function` and `decision boundary` to write a prediction function. A prediction function in logistic regression returns the probability of the observation being positive, `Yes` or `True`. 
We call this as `class 1` and it is denoted by `P(class = 1)`. If the probability inches closer to one, then we will be more confident about our model that the observation is in class 1. # # Logistic regression intuition is discussed in depth in the readme document. # ## 3. The problem statement # # # In this project, I try to answer the question that whether or not it will rain tomorrow in Australia. I implement Logistic Regression with Python and Scikit-Learn. # # # To answer the question, I build a classifier to predict whether or not it will rain tomorrow in Australia by training a binary classification model using Logistic Regression. I have used the **Rain in Australia** dataset downloaded from the Kaggle website for this project. # ## 4. Dataset description # # # I have used the **Rain in Australia** data set downloaded from the Kaggle website. # # # I have downloaded this data set from the Kaggle website. The data set can be found at the following url:- # # # https://www.kaggle.com/jsphyg/weather-dataset-rattle-package # # # This dataset contains daily weather observations from numerous Australian weather stations. # ## 5. Import libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # + import warnings warnings.filterwarnings('ignore') # - # ## 6. Import dataset # + data = 'C:/datasets/weatherAUS.csv' df = pd.read_csv(data) # - # ## 7. Exploratory data analysis # # # Now, I will explore the data to gain insights about the data. # + # view dimensions of dataset df.shape # - # We can see that there are 142193 instances and 24 variables in the data set. # + # preview the dataset df.head() # + col_names = df.columns col_names # - # ### Drop RISK_MM variable # # It is given in the dataset description, that we should drop the `RISK_MM` feature variable from the dataset description. 
# should drop it as follows-

df.drop(['RISK_MM'], axis=1, inplace=True)

# +
# view summary of dataset
df.info()
# -

# ### Types of variables
#
# Segregate the dataset into categorical (dtype object) and numerical
# (dtype float64) variables.

# +
# find categorical variables
categorical = [var for var in df.columns if df[var].dtype=='O']
print('There are {} categorical variables\n'.format(len(categorical)))
print('The categorical variables are :', categorical)

# +
# view the categorical variables
df[categorical].head()
# -

# ### Summary of categorical variables
#
# - `Date` is a date variable.
# - 6 categorical variables: `Location`, `WindGustDir`, `WindDir9am`,
#   `WindDir3pm`, `RainToday` and `RainTomorrow`.
# - `RainToday` and `RainTomorrow` are binary; `RainTomorrow` is the target.

# ## Explore problems within categorical variables
#
# ### Missing values in categorical variables

# +
# check missing values in categorical variables
df[categorical].isnull().sum()

# +
# print categorical variables containing missing values
cat1 = [var for var in categorical if df[var].isnull().sum()!=0]
print(df[cat1].isnull().sum())
# -

# Only 4 categorical variables contain missing values: `WindGustDir`,
# `WindDir9am`, `WindDir3pm` and `RainToday`.

# ### Frequency counts of categorical variables

# +
# view frequency of categorical variables
for var in categorical:
    print(df[var].value_counts())

# +
# view frequency distribution of categorical variables
# FIX: np.float was a deprecated alias of the builtin float and has been
# removed from NumPy (>=1.24); the builtin gives the identical result.
for var in categorical:
    print(df[var].value_counts()/float(len(df)))
# -

# ### Number of labels: cardinality
#
# A high number of labels within a variable (high cardinality) may pose
# problems for the model, so check it.

# +
# check for cardinality in categorical variables
for var in categorical:
    print(var, ' contains ', len(df[var].unique()), ' labels')
# -

# Only `Date` needs preprocessing; the other variables have few labels.

# ### Feature Engineering of Date Variable

df['Date'].dtypes

# `Date` is stored as object; parse it into datetime format.

# +
# parse the dates, currently coded as strings, into datetime format
df['Date'] = pd.to_datetime(df['Date'])

# +
# extract year from date
df['Year'] = df['Date'].dt.year
df['Year'].head()

# +
# extract month from date
df['Month'] = df['Date'].dt.month
df['Month'].head()

# +
# extract day from date
df['Day'] = df['Date'].dt.day
df['Day'].head()

# +
# again view the summary of dataset
df.info()
# -

# Three new columns were created from `Date`; drop the original.

# +
# drop the original Date variable
df.drop('Date', axis=1, inplace = True)

# +
# preview the dataset again
df.head()
# -

# ### Explore Categorical Variables

# +
# find categorical variables (Date is gone now)
categorical = [var for var in df.columns if df[var].dtype=='O']
print('There are {} categorical variables\n'.format(len(categorical)))
print('The categorical variables are :', categorical)
# -

# Check missing values in the remaining 6 categorical variables.

# +
# check for missing values in categorical variables
df[categorical].isnull().sum()
# -

# ### Explore `Location` variable

# +
# print number of labels in Location variable
print('Location contains', len(df.Location.unique()), 'labels')

# +
# check labels in location variable
df.Location.unique()

# +
# check frequency distribution of values in Location variable
df.Location.value_counts()

# +
# One Hot Encoding of Location with k-1 dummy variables; preview with head()
pd.get_dummies(df.Location, drop_first=True).head()
# -

# ### Explore `WindGustDir` variable

# +
# print number of labels in WindGustDir variable
print('WindGustDir contains', len(df['WindGustDir'].unique()), 'labels')

# +
# check labels in WindGustDir variable
df['WindGustDir'].unique()

# +
# check frequency distribution of values in WindGustDir variable
df.WindGustDir.value_counts()

# +
# One Hot Encoding with k-1 dummies plus an NA-indicator column
pd.get_dummies(df.WindGustDir, drop_first=True, dummy_na=True).head()

# +
# sum the 1s per dummy column: number of observations per category
pd.get_dummies(df.WindGustDir, drop_first=True, dummy_na=True).sum(axis=0)
# -

# We can see that
# there are 9330 missing values in WindGustDir variable.

# ### Explore `WindDir9am` variable

# +
# print number of labels in WindDir9am variable
print('WindDir9am contains', len(df['WindDir9am'].unique()), 'labels')

# +
# check labels in WindDir9am variable
df['WindDir9am'].unique()

# +
# check frequency distribution of values in WindDir9am variable
df['WindDir9am'].value_counts()

# +
# One Hot Encoding with k-1 dummies plus an NA-indicator column
pd.get_dummies(df.WindDir9am, drop_first=True, dummy_na=True).head()

# +
# sum the 1s per dummy column: number of observations per category
pd.get_dummies(df.WindDir9am, drop_first=True, dummy_na=True).sum(axis=0)
# -

# We can see that there are 10013 missing values in the `WindDir9am` variable.

# ### Explore `WindDir3pm` variable

# +
# print number of labels in WindDir3pm variable
print('WindDir3pm contains', len(df['WindDir3pm'].unique()), 'labels')

# +
# check labels in WindDir3pm variable
df['WindDir3pm'].unique()

# +
# check frequency distribution of values in WindDir3pm variable
df['WindDir3pm'].value_counts()

# +
# One Hot Encoding with k-1 dummies plus an NA-indicator column
pd.get_dummies(df.WindDir3pm, drop_first=True, dummy_na=True).head()

# +
# number of observations per category
pd.get_dummies(df.WindDir3pm, drop_first=True, dummy_na=True).sum(axis=0)
# -

# There are 3778 missing values in the `WindDir3pm` variable.

# ### Explore `RainToday` variable

# +
# print number of labels in RainToday variable
print('RainToday contains', len(df['RainToday'].unique()), 'labels')

# +
# check labels in RainToday variable
df['RainToday'].unique()

# +
# check frequency distribution of values in RainToday variable
df.RainToday.value_counts()

# +
# One Hot Encoding with k-1 dummies plus an NA-indicator column
pd.get_dummies(df.RainToday, drop_first=True, dummy_na=True).head()

# +
# number of observations per category
pd.get_dummies(df.RainToday, drop_first=True, dummy_na=True).sum(axis=0)
# -

# There are 1406 missing values in the `RainToday` variable.

# ### Explore Numerical Variables

# +
# find numerical variables
numerical = [var for var in df.columns if df[var].dtype!='O']
print('There are {} numerical variables\n'.format(len(numerical)))
print('The numerical variables are :', numerical)

# +
# view the numerical variables
df[numerical].head()
# -

# ### Summary of numerical variables
#
# - 16 numerical variables, all continuous: `MinTemp`, `MaxTemp`, `Rainfall`,
#   `Evaporation`, `Sunshine`, `WindGustSpeed`, `WindSpeed9am`, `WindSpeed3pm`,
#   `Humidity9am`, `Humidity3pm`, `Pressure9am`, `Pressure3pm`, `Cloud9am`,
#   `Cloud3pm`, `Temp9am` and `Temp3pm`.

# ## Explore problems within numerical variables
#
# ### Missing values in numerical variables

# +
# check missing values in numerical variables
df[numerical].isnull().sum()
# -

# All 16 numerical variables contain missing values.

# ### Outliers in numerical variables

# +
# view summary statistics in numerical variables
# FIX: the original read `print(round(df[numerical].describe()),2)` — the
# misplaced parenthesis printed a (DataFrame, 2) tuple and rounded to 0
# decimals; the intent was to round the summary to 2 decimals.
print(round(df[numerical].describe(),2))
# -

# `Rainfall`, `Evaporation`, `WindSpeed9am` and `WindSpeed3pm` may contain
# outliers; draw boxplots to visualise them.

# +
# draw boxplots to visualize outliers
plt.figure(figsize=(15,10))

plt.subplot(2, 2, 1)
fig = df.boxplot(column='Rainfall')
fig.set_title('')
fig.set_ylabel('Rainfall')

plt.subplot(2, 2, 2)
fig = df.boxplot(column='Evaporation')
fig.set_title('')
fig.set_ylabel('Evaporation')

plt.subplot(2, 2, 3)
fig = df.boxplot(column='WindSpeed9am')
fig.set_title('')
fig.set_ylabel('WindSpeed9am')

plt.subplot(2, 2, 4)
fig = df.boxplot(column='WindSpeed3pm')
fig.set_title('')
fig.set_ylabel('WindSpeed3pm')
# -

# The above boxplots confirm that there are lot of outliers in these variables.

# ### Check the distribution of variables
#
# Plot histograms to check whether the distributions are normal or skewed:
# normal -> Extreme Value Analysis; skewed -> IQR (interquantile range).

# +
# plot histogram to check distribution
plt.figure(figsize=(15,10))

plt.subplot(2, 2, 1)
fig = df.Rainfall.hist(bins=10)
fig.set_xlabel('Rainfall')
fig.set_ylabel('RainTomorrow')

plt.subplot(2, 2, 2)
fig = df.Evaporation.hist(bins=10)
fig.set_xlabel('Evaporation')
fig.set_ylabel('RainTomorrow')

plt.subplot(2, 2, 3)
fig = df.WindSpeed9am.hist(bins=10)
fig.set_xlabel('WindSpeed9am')
fig.set_ylabel('RainTomorrow')

plt.subplot(2, 2, 4)
fig = df.WindSpeed3pm.hist(bins=10)
fig.set_xlabel('WindSpeed3pm')
fig.set_ylabel('RainTomorrow')
# -

# We can see that all the four variables are skewed. So, I will use
# interquantile range to find outliers.

# +
# find outliers for Rainfall variable
IQR = df.Rainfall.quantile(0.75) - df.Rainfall.quantile(0.25)
Lower_fence = df.Rainfall.quantile(0.25) - (IQR * 3)
Upper_fence = df.Rainfall.quantile(0.75) + (IQR * 3)
print('Rainfall outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Lower_fence, upperboundary=Upper_fence))
# -

# For `Rainfall`, min/max are 0.0 and 371.0; outliers are values > 3.2.

# +
# find outliers for Evaporation variable
IQR = df.Evaporation.quantile(0.75) - df.Evaporation.quantile(0.25)
Lower_fence = df.Evaporation.quantile(0.25) - (IQR * 3)
Upper_fence = df.Evaporation.quantile(0.75) + (IQR * 3)
print('Evaporation outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Lower_fence, upperboundary=Upper_fence))
# -

# For `Evaporation`, min/max are 0.0 and 145.0; outliers are values > 21.8.

# +
# find outliers for WindSpeed9am variable
IQR = df.WindSpeed9am.quantile(0.75) - df.WindSpeed9am.quantile(0.25)
Lower_fence = df.WindSpeed9am.quantile(0.25) - (IQR * 3)
Upper_fence = df.WindSpeed9am.quantile(0.75) + (IQR * 3)
print('WindSpeed9am outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Lower_fence, upperboundary=Upper_fence))
# -

# For `WindSpeed9am`, min/max are 0.0 and 130.0; outliers are values > 55.0.

# +
# find outliers for WindSpeed3pm variable
IQR = df.WindSpeed3pm.quantile(0.75) - df.WindSpeed3pm.quantile(0.25)
Lower_fence = df.WindSpeed3pm.quantile(0.25) - (IQR * 3)
Upper_fence = df.WindSpeed3pm.quantile(0.75) + (IQR * 3)
print('WindSpeed3pm outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Lower_fence, upperboundary=Upper_fence))
# -

# For `WindSpeed3pm`, min/max are 0.0 and 87.0; outliers are values > 57.0.

# ## 8.
# Declare feature vector and target variable  (section 8, continued)

# +
X = df.drop(['RainTomorrow'], axis=1)   # features
y = df['RainTomorrow']                  # binary target
# -

# ## 9. Split data into separate training and test set

# +
# split X and y into training and testing sets
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# +
# check the shape of X_train and X_test
X_train.shape, X_test.shape
# -

# ## 10. Feature Engineering
#
# Transform the raw data into useful features; first re-list the categorical
# and numerical variables separately.

# +
# check data types in X_train
X_train.dtypes

# +
# display categorical variables
categorical = [col for col in X_train.columns if X_train[col].dtypes == 'O']
categorical

# +
# display numerical variables
numerical = [col for col in X_train.columns if X_train[col].dtypes != 'O']
numerical
# -

# ### Engineering missing values in numerical variables

# +
# check missing values in numerical variables in X_train
X_train[numerical].isnull().sum()

# +
# check missing values in numerical variables in X_test
X_test[numerical].isnull().sum()

# +
# print percentage of missing values in the numerical variables in training set
for col in numerical:
    if X_train[col].isnull().mean()>0:
        print(col, round(X_train[col].isnull().mean(),4))
# -

# ### Assumption
#
# Assume the data are missing completely at random (MCAR). Median imputation
# is used (rather than mean) because it is robust to the outliers found above.
# Statistics are computed on the training set only and propagated to the test
# set, to avoid leakage/overfitting.

# +
# impute missing values in X_train and X_test with respective column median in X_train
for df1 in [X_train, X_test]:
    for col in numerical:
        col_median=X_train[col].median()   # always the TRAIN median, also for X_test
        df1[col].fillna(col_median, inplace=True)

# +
# check again missing values in numerical variables in X_train
X_train[numerical].isnull().sum()

# +
# check missing values in numerical variables in X_test
X_test[numerical].isnull().sum()
# -

# No missing values remain in the numerical columns of either set.

# ### Engineering missing values in categorical variables

# +
# print percentage of missing values in the categorical variables in training set
X_train[categorical].isnull().mean()

# +
# print categorical variables with missing data
for col in categorical:
    if X_train[col].isnull().mean()>0:
        print(col, (X_train[col].isnull().mean()))

# +
# impute missing categorical variables with most frequent value (train mode)
for df2 in [X_train, X_test]:
    df2['WindGustDir'].fillna(X_train['WindGustDir'].mode()[0], inplace=True)
    df2['WindDir9am'].fillna(X_train['WindDir9am'].mode()[0], inplace=True)
    df2['WindDir3pm'].fillna(X_train['WindDir3pm'].mode()[0], inplace=True)
    df2['RainToday'].fillna(X_train['RainToday'].mode()[0], inplace=True)

# +
# check missing values in categorical variables in X_train
X_train[categorical].isnull().sum()

# +
# check missing values in categorical variables in X_test
X_test[categorical].isnull().sum()
# -

# As a final check, I will check for missing values in X_train and X_test.

# +
# check missing values in X_train
X_train.isnull().sum()

# +
# check missing values in X_test
X_test.isnull().sum()
# -

# No missing values remain in X_train and X_test.

# ### Engineering outliers in numerical variables
#
# Top-code (cap) the four outlier-prone columns at the IQR fences found above.

# +
def max_value(df3, variable, top):
    # cap `variable` at `top` (element-wise minimum via np.where)
    return np.where(df3[variable]>top, top, df3[variable])

for df3 in [X_train, X_test]:
    df3['Rainfall'] = max_value(df3, 'Rainfall', 3.2)
    df3['Evaporation'] = max_value(df3, 'Evaporation', 21.8)
    df3['WindSpeed9am'] = max_value(df3, 'WindSpeed9am', 55)
    df3['WindSpeed3pm'] = max_value(df3, 'WindSpeed3pm', 57)
# -

X_train.Rainfall.max(), X_test.Rainfall.max()

X_train.Evaporation.max(), X_test.Evaporation.max()

X_train.WindSpeed9am.max(), X_test.WindSpeed9am.max()

X_train.WindSpeed3pm.max(), X_test.WindSpeed3pm.max()

X_train[numerical].describe()

# The outliers in the four capped columns are now bounded.

# ### Encode categorical variables

categorical

X_train[categorical].head()

# +
# encode RainToday variable (binary encoding -> RainToday_0 / RainToday_1)
import category_encoders as ce

encoder = ce.BinaryEncoder(cols=['RainToday'])
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
# -

X_train.head()

# Build the final training matrix: numerical columns + encoded RainToday +
# one-hot dummies of the remaining categorical variables.
X_train = pd.concat([X_train[numerical], X_train[['RainToday_0', 'RainToday_1']],
                     pd.get_dummies(X_train.Location),
                     pd.get_dummies(X_train.WindGustDir),
                     pd.get_dummies(X_train.WindDir9am),
                     pd.get_dummies(X_train.WindDir3pm)], axis=1)

X_train.head()

# Similarly, I will create the `X_test` testing set.
X_test = pd.concat([X_test[numerical], X_test[['RainToday_0', 'RainToday_1']], pd.get_dummies(X_test.Location), pd.get_dummies(X_test.WindGustDir), pd.get_dummies(X_test.WindDir9am), pd.get_dummies(X_test.WindDir3pm)], axis=1) X_test.head() # We now have training and testing set ready for model building. Before that, we should map all the feature variables onto the same scale. It is called `feature scaling`. I will do it as follows. # ## 11. Feature Scaling X_train.describe() cols = X_train.columns # + from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # - X_train = pd.DataFrame(X_train, columns=[cols]) X_test = pd.DataFrame(X_test, columns=[cols]) X_train.describe() # We now have `X_train` dataset ready to be fed into the Logistic Regression classifier. I will do it as follows. # ## 12. Model training # + # train a logistic regression model on the training set from sklearn.linear_model import LogisticRegression # instantiate the model logreg = LogisticRegression(solver='liblinear', random_state=0) # fit the model logreg.fit(X_train, y_train) # - # ## 13. Predict results # + y_pred_test = logreg.predict(X_test) y_pred_test # - # ### predict_proba method # # # **predict_proba** method gives the probabilities for the target variable(0 and 1) in this case, in array form. # # `0 is for probability of no rain` and `1 is for probability of rain.` # + # probability of getting output as 0 - no rain logreg.predict_proba(X_test)[:,0] # + # probability of getting output as 1 - rain logreg.predict_proba(X_test)[:,1] # - # ## 14. Check accuracy score # + from sklearn.metrics import accuracy_score print('Model accuracy score: {0:0.4f}'. format(accuracy_score(y_test, y_pred_test))) # - # Here, **y_test** are the true class labels and **y_pred_test** are the predicted class labels in the test-set. 
# ### Compare the train-set and test-set accuracy # # # Now, I will compare the train-set and test-set accuracy to check for overfitting. # + y_pred_train = logreg.predict(X_train) y_pred_train # - print('Training-set accuracy score: {0:0.4f}'. format(accuracy_score(y_train, y_pred_train))) # ### Check for overfitting and underfitting # + # print the scores on training and test set print('Training set score: {:.4f}'.format(logreg.score(X_train, y_train))) print('Test set score: {:.4f}'.format(logreg.score(X_test, y_test))) # - # The training-set accuracy score is 0.8476 while the test-set accuracy to be 0.8501. These two values are quite comparable. So, there is no question of overfitting. # # In Logistic Regression, we use default value of C = 1. It provides good performance with approximately 85% accuracy on both the training and the test set. But the model performance on both the training and test set are very comparable. It is likely the case of underfitting. # # I will increase C and fit a more flexible model. # + # fit the Logsitic Regression model with C=100 # instantiate the model logreg100 = LogisticRegression(C=100, solver='liblinear', random_state=0) # fit the model logreg100.fit(X_train, y_train) # + # print the scores on training and test set print('Training set score: {:.4f}'.format(logreg100.score(X_train, y_train))) print('Test set score: {:.4f}'.format(logreg100.score(X_test, y_test))) # - # We can see that, C=100 results in higher test set accuracy and also a slightly increased training set accuracy. So, we can conclude that a more complex model should perform better. # Now, I will investigate, what happens if we use more regularized model than the default value of C=1, by setting C=0.01. 
# + # fit the Logsitic Regression model with C=001 # instantiate the model logreg001 = LogisticRegression(C=0.01, solver='liblinear', random_state=0) # fit the model logreg001.fit(X_train, y_train) # + # print the scores on training and test set print('Training set score: {:.4f}'.format(logreg001.score(X_train, y_train))) print('Test set score: {:.4f}'.format(logreg001.score(X_test, y_test))) # - # So, if we use more regularized model by setting C=0.01, then both the training and test set accuracy decrease relatiev to the default parameters. # ### Compare model accuracy with null accuracy # # # So, the model accuracy is 0.8501. But, we cannot say that our model is very good based on the above accuracy. We must compare it with the **null accuracy**. Null accuracy is the accuracy that could be achieved by always predicting the most frequent class. # # So, we should first check the class distribution in the test set. # + # check class distribution in test set y_test.value_counts() # - # We can see that the occurences of most frequent class is 22067. So, we can calculate null accuracy by dividing 22067 by total number of occurences. # + # check null accuracy score null_accuracy = (22067/(22067+6372)) print('Null accuracy score: {0:0.4f}'. format(null_accuracy)) # - # We can see that our model accuracy score is 0.8501 but null accuracy score is 0.7759. So, we can conclude that our Logistic Regression model is doing a very good job in predicting the class labels. # Now, based on the above analysis we can conclude that our classification model accuracy is very good. Our model is doing a very good job in terms of predicting the class labels. # # # But, it does not give the underlying distribution of values. Also, it does not tell anything about the type of errors our classifer is making. # # # We have another tool called `Confusion matrix` that comes to our rescue. # ## 15. 
Confusion matrix # # # A confusion matrix is a tool for summarizing the performance of a classification algorithm. A confusion matrix will give us a clear picture of classification model performance and the types of errors produced by the model. It gives us a summary of correct and incorrect predictions broken down by each category. The summary is represented in a tabular form. # # # Four types of outcomes are possible while evaluating a classification model performance. These four outcomes are described below:- # # # **True Positives (TP)** – True Positives occur when we predict an observation belongs to a certain class and the observation actually belongs to that class. # # # **True Negatives (TN)** – True Negatives occur when we predict an observation does not belong to a certain class and the observation actually does not belong to that class. # # # **False Positives (FP)** – False Positives occur when we predict an observation belongs to a certain class but the observation actually does not belong to that class. This type of error is called **Type I error.** # # # # **False Negatives (FN)** – False Negatives occur when we predict an observation does not belong to a certain class but the observation actually belongs to that class. This is a very serious error and it is called **Type II error.** # # # # These four outcomes are summarized in a confusion matrix given below. # # + # Print the Confusion Matrix and slice it into four pieces from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred_test) print('Confusion matrix\n\n', cm) print('\nTrue Positives(TP) = ', cm[0,0]) print('\nTrue Negatives(TN) = ', cm[1,1]) print('\nFalse Positives(FP) = ', cm[0,1]) print('\nFalse Negatives(FN) = ', cm[1,0]) # - # The confusion matrix shows `20892 + 3285 = 24177 correct predictions` and `3087 + 1175 = 4262 incorrect predictions`. 
# # # In this case, we have # # # - `True Positives` (Actual Positive:1 and Predict Positive:1) - 20892 # # # - `True Negatives` (Actual Negative:0 and Predict Negative:0) - 3285 # # # - `False Positives` (Actual Negative:0 but Predict Positive:1) - 1175 `(Type I error)` # # # - `False Negatives` (Actual Positive:1 but Predict Negative:0) - 3087 `(Type II error)` # + # visualize confusion matrix with seaborn heatmap cm_matrix = pd.DataFrame(data=cm, columns=['Actual Positive:1', 'Actual Negative:0'], index=['Predict Positive:1', 'Predict Negative:0']) sns.heatmap(cm_matrix, annot=True, fmt='d', cmap='YlGnBu') # - # ## 16. Classification metrices # ### Classification Report # # # **Classification report** is another way to evaluate the classification model performance. It displays the **precision**, **recall**, **f1** and **support** scores for the model. I have described these terms in later. # # We can print a classification report as follows:- # + from sklearn.metrics import classification_report print(classification_report(y_test, y_pred_test)) # - # ### Classification accuracy TP = cm[0,0] TN = cm[1,1] FP = cm[0,1] FN = cm[1,0] # + # print classification accuracy classification_accuracy = (TP + TN) / float(TP + TN + FP + FN) print('Classification accuracy : {0:0.4f}'.format(classification_accuracy)) # - # ### Classification error # + # print classification error classification_error = (FP + FN) / float(TP + TN + FP + FN) print('Classification error : {0:0.4f}'.format(classification_error)) # - # ### Precision # # # **Precision** can be defined as the percentage of correctly predicted positive outcomes out of all the predicted positive outcomes. It can be given as the ratio of true positives (TP) to the sum of true and false positives (TP + FP). # # # So, **Precision** identifies the proportion of correctly predicted positive outcome. It is more concerned with the positive class than the negative class. 
# # # # Mathematically, precision can be defined as the ratio of `TP to (TP + FP).` # # # # + # print precision score precision = TP / float(TP + FP) print('Precision : {0:0.4f}'.format(precision)) # - # ### Recall # # # Recall can be defined as the percentage of correctly predicted positive outcomes out of all the actual positive outcomes. # It can be given as the ratio of true positives (TP) to the sum of true positives and false negatives (TP + FN). **Recall** is also called **Sensitivity**. # # # **Recall** identifies the proportion of correctly predicted actual positives. # # # Mathematically, recall can be given as the ratio of `TP to (TP + FN).` # # # # # + recall = TP / float(TP + FN) print('Recall or Sensitivity : {0:0.4f}'.format(recall)) # - # ### True Positive Rate # # # **True Positive Rate** is synonymous with **Recall**. # # + true_positive_rate = TP / float(TP + FN) print('True Positive Rate : {0:0.4f}'.format(true_positive_rate)) # - # ### False Positive Rate # + false_positive_rate = FP / float(FP + TN) print('False Positive Rate : {0:0.4f}'.format(false_positive_rate)) # - # ### Specificity # + specificity = TN / (TN + FP) print('Specificity : {0:0.4f}'.format(specificity)) # - # ### f1-score # # # **f1-score** is the weighted harmonic mean of precision and recall. The best possible **f1-score** would be 1.0 and the worst # would be 0.0. **f1-score** is the harmonic mean of precision and recall. So, **f1-score** is always lower than accuracy measures as they embed precision and recall into their computation. The weighted average of `f1-score` should be used to # compare classifier models, not global accuracy. # # # ### Support # # # **Support** is the actual number of occurrences of the class in our dataset. # ## 17. Adjusting the threshold level # + # print the first 10 predicted probabilities of two classes- 0 and 1 y_pred_prob = logreg.predict_proba(X_test)[0:10] y_pred_prob # - # ### Observations # # # - In each row, the numbers sum to 1. 
# # # - There are 2 columns which correspond to 2 classes - 0 and 1. # # - Class 0 - predicted probability that there is no rain tomorrow. # # - Class 1 - predicted probability that there is rain tomorrow. # # # - Importance of predicted probabilities # # - We can rank the observations by probability of rain or no rain. # # # - predict_proba process # # - Predicts the probabilities # # - Choose the class with the highest probability # # # - Classification threshold level # # - There is a classification threshold level of 0.5. # # - Class 1 - probability of rain is predicted if probability > 0.5. # # - Class 0 - probability of no rain is predicted if probability < 0.5. # # # + # store the probabilities in dataframe y_pred_prob_df = pd.DataFrame(data=y_pred_prob, columns=['Prob of - No rain tomorrow (0)', 'Prob of - Rain tomorrow (1)']) y_pred_prob_df # + # print the first 10 predicted probabilities for class 1 - Probability of rain logreg.predict_proba(X_test)[0:10, 1] # + # store the predicted probabilities for class 1 - Probability of rain y_pred1 = logreg.predict_proba(X_test)[:, 1] # + # plot histogram of predicted probabilities # adjust the font size plt.rcParams['font.size'] = 12 # plot histogram with 10 bins plt.hist(y_pred1, bins = 10) # set the title of predicted probabilities plt.title('Histogram of predicted probabilities of rain') # set the x-axis limit plt.xlim(0,1) # set the title plt.xlabel('Predicted probabilities of rain') plt.ylabel('Frequency') # - # ### Observations # # # - We can see that the above histogram is highly positive skewed. # # # - The first column tell us that there are approximately 15000 observations with probability between 0.0 and 0.1. # # # - There are small number of observations with probability > 0.5. # # # - So, these small number of observations predict that there will be rain tomorrow. # # # - Majority of observations predict that there will be no rain tomorrow. 
# ### Lower the threshold # + from sklearn.preprocessing import binarize for i in range(1,5): cm1=0 y_pred1 = logreg.predict_proba(X_test)[:,1] y_pred1 = y_pred1.reshape(-1,1) y_pred2 = binarize(y_pred1, i/10) y_pred2 = np.where(y_pred2 == 1, 'Yes', 'No') cm1 = confusion_matrix(y_test, y_pred2) print ('With',i/10,'threshold the Confusion Matrix is ','\n\n',cm1,'\n\n', 'with',cm1[0,0]+cm1[1,1],'correct predictions, ', '\n\n', cm1[0,1],'Type I errors( False Positives), ','\n\n', cm1[1,0],'Type II errors( False Negatives), ','\n\n', 'Accuracy score: ', (accuracy_score(y_test, y_pred2)), '\n\n', 'Sensitivity: ',cm1[1,1]/(float(cm1[1,1]+cm1[1,0])), '\n\n', 'Specificity: ',cm1[0,0]/(float(cm1[0,0]+cm1[0,1])),'\n\n', '====================================================', '\n\n') # - # ### Comments # # # - In binary problems, the threshold of 0.5 is used by default to convert predicted probabilities into class predictions. # # # - Threshold can be adjusted to increase sensitivity or specificity. # # # - Sensitivity and specificity have an inverse relationship. Increasing one would always decrease the other and vice versa. # # # - We can see that increasing the threshold level results in increased accuracy. # # # - Adjusting the threshold level should be one of the last step you do in the model-building process. # ## 18. ROC - AUC # # # # ### ROC Curve # # # Another tool to measure the classification model performance visually is **ROC Curve**. ROC Curve stands for **Receiver Operating Characteristic Curve**. An **ROC Curve** is a plot which shows the performance of a classification model at various # classification threshold levels. # # # # The **ROC Curve** plots the **True Positive Rate (TPR)** against the **False Positive Rate (FPR)** at various threshold levels. # # # # **True Positive Rate (TPR)** is also called **Recall**. 
It is defined as the ratio of `TP to (TP + FN).` # # # # **False Positive Rate (FPR)** is defined as the ratio of `FP to (FP + TN).` # # # # # # # # In the ROC Curve, we will focus on the TPR (True Positive Rate) and FPR (False Positive Rate) of a single point. This will give us the general performance of the ROC curve which consists of the TPR and FPR at various threshold levels. So, an ROC Curve plots TPR vs FPR at different classification threshold levels. If we lower the threshold levels, it may result in more items being classified as positve. It will increase both True Positives (TP) and False Positives (FP). # # # # # # + # plot ROC Curve from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_test, y_pred1, pos_label = 'Yes') plt.figure(figsize=(6,4)) plt.plot(fpr, tpr, linewidth=2) plt.plot([0,1], [0,1], 'k--' ) plt.rcParams['font.size'] = 12 plt.title('ROC curve for RainTomorrow classifier') plt.xlabel('False Positive Rate (1 - Specificity)') plt.ylabel('True Positive Rate (Sensitivity)') plt.show() # - # ROC curve help us to choose a threshold level that balances sensitivity and specificity for a particular context. # ### ROC AUC # # # **ROC AUC** stands for **Receiver Operating Characteristic - Area Under Curve**. It is a technique to compare classifier performance. In this technique, we measure the `area under the curve (AUC)`. A perfect classifier will have a ROC AUC equal to 1, whereas a purely random classifier will have a ROC AUC equal to 0.5. # # # So, **ROC AUC** is the percentage of the ROC plot that is underneath the curve. # + # compute ROC AUC from sklearn.metrics import roc_auc_score ROC_AUC = roc_auc_score(y_test, y_pred1) print('ROC AUC : {:.4f}'.format(ROC_AUC)) # - # ### Comments # # # - ROC AUC is a single number summary of classifier performance. The higher the value, the better the classifier. # # - ROC AUC of our model approaches towards 1. 
So, we can conclude that our classifier does a good job in predicting whether it will rain tomorrow or not. # + # calculate cross-validated ROC AUC from sklearn.model_selection import cross_val_score Cross_validated_ROC_AUC = cross_val_score(logreg, X_train, y_train, cv=5, scoring='roc_auc').mean() print('Cross validated ROC AUC : {:.4f}'.format(Cross_validated_ROC_AUC)) # - # ## Model evaluation and improvement # # # # In this section, I will employ several techniques to improve the model performance. I will discuss 3 techniques which are used in practice for performance improvement. These are `recursive feature elimination`, `k-fold cross validation` and `hyperparameter optimization using GridSearchCV`. # ## 19. Recursive Feature Elimination with Cross Validation # # # `Recursive feature elimination (RFE)` is a feature selection technique that helps us to select best features from the given number of features. At first, the model is built on all the given features. Then, it removes the least useful predictor and build the model again. This process is repeated until all the unimportant features are removed from the model. # # # `Recursive Feature Elimination with Cross-Validated (RFECV) feature selection` technique selects the best subset of features for the estimator by removing 0 to N features iteratively using recursive feature elimination. Then it selects the best subset based on the accuracy or cross-validation score or roc-auc of the model. Recursive feature elimination technique eliminates n features from a model by fitting the model multiple times and at each step, removing the weakest features. # # # I will use this technique to select best features from this model. 
# + from sklearn.feature_selection import RFECV rfecv = RFECV(estimator=logreg, step=1, cv=5, scoring='accuracy') rfecv = rfecv.fit(X_train, y_train) # - print("Optimal number of features : %d" % rfecv.n_features_) # + # transform the training data X_train_rfecv = rfecv.transform(X_train) # train classifier logreg.fit(X_train_rfecv, y_train) # + # test classifier on test data X_test_rfecv = rfecv.transform(X_test) y_pred_rfecv = logreg.predict(X_test_rfecv) # + # print mean accuracy on transformed test data and labels print ("Classifier score: {:.4f}".format(logreg.score(X_test_rfecv,y_test))) # - # Our original model accuracy score is 0.8501 whereas accuracy score after RFECV is 0.8500. So, we can obtain approximately similar accuracy but with reduced or optimal set of features. # ### Confusion-matrix revisited # # # I will again plot the confusion-matrix for this model to get an idea of errors our model is making. # + from sklearn.metrics import confusion_matrix cm1 = confusion_matrix(y_test, y_pred_rfecv) print('Confusion matrix\n\n', cm1) print('\nTrue Positives(TP1) = ', cm1[0,0]) print('\nTrue Negatives(TN1) = ', cm1[1,1]) print('\nFalse Positives(FP1) = ', cm1[0,1]) print('\nFalse Negatives(FN1) = ', cm1[1,0]) # - # We can see that in the original model, we have FP = 1175 whereas FP1 = 1174. So, we get approximately same number of false positives. Also, FN = 3087 whereas FN1 = 3091. So, we get slightly higher false negatives. # ## 20. k-Fold Cross Validation # + # Applying 10-Fold Cross Validation from sklearn.model_selection import cross_val_score scores = cross_val_score(logreg, X_train, y_train, cv = 5, scoring='accuracy') print('Cross-validation scores:{}'.format(scores)) # - # We can summarize the cross-validation accuracy by calculating its mean. # + # compute Average cross-validation score print('Average cross-validation score: {:.4f}'.format(scores.mean())) # - # Our, original model score is found to be 0.8476. 
The average cross-validation score is 0.8474. So, we can conclude that cross-validation does not result in performance improvement. # ## 21. Hyperparameter Optimization using GridSearch CV # + from sklearn.model_selection import GridSearchCV parameters = [{'penalty':['l1','l2']}, {'C':[1, 10, 100, 1000]}] grid_search = GridSearchCV(estimator = logreg, param_grid = parameters, scoring = 'accuracy', cv = 5, verbose=0) grid_search.fit(X_train, y_train) # + # examine the best model # best score achieved during the GridSearchCV print('GridSearch CV best score : {:.4f}\n\n'.format(grid_search.best_score_)) # print parameters that give the best results print('Parameters that give the best results :','\n\n', (grid_search.best_params_)) # print estimator that was chosen by the GridSearch print('\n\nEstimator that was chosen by the search :','\n\n', (grid_search.best_estimator_)) # + # calculate GridSearch CV score on test set print('GridSearch CV score on test set: {0:0.4f}'.format(grid_search.score(X_test, y_test))) # - # ### Comments # # # - Our original model test accuracy is 0.8501 while GridSearch CV accuracy is 0.8507. # # # - We can see that GridSearch CV improve the performance for this particular model. # ## 22. Results and Conclusion # 1. The logistic regression model accuracy score is 0.8501. So, the model does a very good job in predicting whether or not it will rain tomorrow in Australia. # # 2. Small number of observations predict that there will be rain tomorrow. Majority of observations predict that there will be no rain tomorrow. # # 3. The model shows no signs of overfitting. # # 4. Increasing the value of C results in higher test set accuracy and also a slightly increased training set accuracy. So, we can conclude that a more complex model should perform better. # # 5. Increasing the threshold level results in increased accuracy. # # 6. ROC AUC of our model approaches towards 1. 
So, we can conclude that our classifier does a good job in predicting whether it will rain tomorrow or not.
#
# 7. Our original model accuracy score is 0.8501 whereas the accuracy score after RFECV is 0.8500. So, we can obtain approximately similar accuracy but with a reduced set of features.
#
# 8. In the original model, we have FP = 1175 whereas FP1 = 1174. So, we get approximately the same number of false positives. Also, FN = 3087 whereas FN1 = 3091. So, we get slightly higher false negatives.
#
# 9. Our original model score is found to be 0.8476. The average cross-validation score is 0.8474. So, we can conclude that cross-validation does not result in performance improvement.
#
# 10. Our original model test accuracy is 0.8501 while GridSearch CV accuracy is 0.8507. We can see that GridSearch CV improves the performance for this particular model.
#
06_Logistic Regression/Logistic Regression Threshold Optimizations/Logistic Regression Threshold V2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Django Shell-Plus # language: python # name: django_extensions # --- from django.test.client import RequestFactory from cast import upload_handler factory = RequestFactory() request = factory.post('') request.FILES["original"] = "blub.jpg" form_class, context = upload_handler(request) form_class._meta.model == Image request = factory.post('') request.FILES["original"] = "blub.xls" form_class, context = upload_handler(request) request = factory.post('') request.FILES["original"] = "blub.ogg" form_class, context = upload_handler(request) context
notebooks/upload_handler.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Practice: download images from web
#
# **Download amazing pictures from [national geographic](http://www.nationalgeographic.com.cn/animals/)**

# +
from bs4 import BeautifulSoup
import requests

URL = "http://www.nationalgeographic.com.cn/animals/"
# -

# ## find list of image holder

# Fetch the listing page and collect every <ul class="img_list"> element,
# which holds the image thumbnails on this page. A timeout keeps the
# notebook from hanging indefinitely on an unresponsive server.
html = requests.get(URL, timeout=30).text
soup = BeautifulSoup(html, 'lxml')
img_ul = soup.find_all('ul', {"class": "img_list"})

# **Create a folder for these pictures**

# +
import os

# NOTE(review): exist_ok requires Python 3; under the Python 2 kernel this
# notebook declares, os.makedirs would need a try/except wrapper instead —
# confirm which interpreter actually runs this.
os.makedirs('./img/', exist_ok=True)
# -

# ## download
#
# **Find all picture urls and download them.**

for ul in img_ul:
    for img in ul.find_all('img'):
        # use .get(): lazy-loaded placeholder <img> tags may carry no src,
        # and img['src'] would raise KeyError on them
        url = img.get('src')
        if not url:
            continue
        image_name = url.split('/')[-1]
        # stream=True downloads in chunks instead of buffering the whole file
        r = requests.get(url, stream=True, timeout=30)
        # fail loudly on 4xx/5xx instead of silently saving an error page
        r.raise_for_status()
        with open(os.path.join('./img', image_name), 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
        print('Saved %s' % image_name)

# ![image](/img/20171214020322682.jpg)
notebook/3-3-practice-download-images.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import numpy as np import pandas as pd import json import urllib import requests import re import matplotlib.pyplot as plt import requests from pandas.io.json import json_normalize from bs4 import BeautifulSoup import requests from pprint import pprint import json import ast import pickle # %matplotlib inline # ## 1. Read Data, perform EDA # file: recipes.json # form the website Yummly # df = pd.read_json('recipes.json') # Take a first look at the data; check for nulls df.head() df.shape print((df.isnull().sum())) df.shape df.cuisine.value_counts() df.cuisine.hist() plt.size = 200 # This dataset has a nice distripution of cuisines. The ingredient lists already has ingredient amounts removed. # ## 2. Flatten Data # To make preprocessing easier, make a dataframe with one ingredient per row #create the data frame ingredients = pd.DataFrame({'cuisine': [], 'id': [], 'ingredient': []}) ingredients['id'] = ingredients['id'].astype(int) ingredients.dtypes # + #populate the dataframe for i, row in df.iterrows(): for item in row.ingredients: #append is slow; this would be faster building a dict first ingredients = ingredients.append({'cuisine': row.cuisine ,'id': row.id ,'ingredient': item}, ignore_index=True) if i%1000 == 0: print (i, row.cuisine, row.ingredients) ingredients.to_csv('ingredients.csv', index_label = False) # - ingredients.shape ingredients.head() # ## 3. Build a Standardized List of Ingredients ingredients.to_csv('ingredients_before_preprocess.csv', index_label = False) # Apply the preprocess function to each ingredient in the dataframe. # # Then pull out the 1000 most common ingredients and save to a CSV. # # Update the preprocess function based on visual inspection of the CSV, # to remove more extraneous terms. 
# # Repeat until we have reduced the ingredient list to almost all one- and
# two-word terms.
#

ingredients = pd.read_csv('ingredients_before_preprocess.csv')
ingredients.head()


# Descriptive/filler words stripped from every ingredient token before
# matching. A frozenset gives O(1) membership tests instead of scanning a
# ~120-entry list for every word of every ingredient.
# (The misspelled 'unsweeetened' entry is kept verbatim from the original
# list; the correctly spelled 'unsweetened' is also present.)
_FILLER_WORDS = frozenset([
    'peeled', 'fresh', 'ground', 'chopped', 'seeds', 'frozen', 'plain',
    'light', 'heavy', 'dark', 'roasted', 'diced', 'cooked', 'pitted',
    'canned', 'unsweeetened', 'sweetened', 'low', 'reduced', 'sodium',
    'skim', 'part-skim', 'whole', 'low-fat', 'extra', 'extra-virgin',
    'leaves', 'leaf', 'crumbles', 'powder', 'yellow', 'kosher', 'boneless',
    'skinless', 'grilled', 'shredded', 'coarse', 'all-purpose', 'red',
    'white', 'oven-ready', 'reduced-fat', 'thread', 'dried', 'dry', 'fat',
    'free', 'finely', 'firmly', 'freshly', '1%', '2%', 'for', 'dusting',
    'seasoned', 'sliced', 'slivered', 'soft', 'softened', 'small',
    'toasted', 'unsweetened', 'pod', 'pods', 'cube', 'granule', 'floret',
    'fine', 'baby', 'lower', 'lump', 'halves', 'lowfat', 'slices', 'all',
    'purpose', 'unbleached', 'bone-in', 'whole-milk', 'asian', 'sprigs',
    'stem', 'cubes', 'granules', 'in', 'crumbled', 'crushed', 'blend',
    'bottled', 'florets', 'california', 'chunky', 'flavored', 'cooking',
    'baked', 'thai', 'store', 'bought', 'steamed', 'stewed', 'unsalted',
    'unflavored', 'vietnamese', 'organic', 'cap', 'concentrate', 'packed',
    'boiling', 'deveined', 'fat-free', 'self', 'rising', 'self-rising',
    'instant', 'of', 'grated',
])

# (pattern, replacement) pairs applied in this exact order — the order
# matters: e.g. 'won ton ' -> 'wontonwrapper' is re-split by the later
# 'wontonwrapper' -> 'wonton wrapper' substitution.
_SUBSTITUTIONS = [
    ('"', ''),
    (',', ''),
    ('chilie', 'chile'),
    ('chillie', 'chile'),
    ('yoghurt', 'yogurt'),
    ('won ton ', 'wonton'),
    ('whipping', 'whipped'),
    ('anchovie', 'anchovy'),
    ('artichok heart marin', 'artichoke heart'),
    ('colouring', 'coloring'),
    ("hellmann' or best food real mayonnai", 'mayonaise'),
    ('mayonnaise', 'mayonaise'),
    ('wontonwrapper', 'wonton wrapper'),
    ('filet', 'fillet'),
    ('-', ' '),
]


def preprocess(strng):
    """Normalize a raw ingredient string into a standardized term.

    Lowercases, drops descriptive filler words, applies a naive
    de-pluralization, then runs the ordered substitution chain to unify
    alternate spellings. Returns '' when every word was filler.
    """
    kept = []
    for w in strng.lower().split():
        if w in _FILLER_WORDS:
            continue  # drop descriptive filler entirely
        # remove pluralizations: tomatoes -> tomato, then a naive 's$' strip
        w = re.sub('oes$', 'o', w)
        w = re.sub('s$', '', w)
        kept.append(w)
    # Trailing space mimics the original word accumulator so that
    # end-anchored phrase patterns such as 'won ton ' still match when the
    # phrase ends the string.
    result = ' '.join(kept) + ' '
    for pattern, repl in _SUBSTITUTIONS:
        result = re.sub(pattern, repl, result)
    # Collapse every run of whitespace to a single space and trim; the
    # original's single-pass space substitution left residual runs behind.
    return ' '.join(result.split())


ingredients['ingredient2'] = ingredients['ingredient'].apply(preprocess)
ingredients.head()

# check for records made null by preprocessing
ingredients[ingredients['ingredient2']==''].groupby('ingredient').count()

# Since the original recipe ingredient in all of these cases was not very
# descriptive, making all of these into nulls is ok.

# drop nulls, keep only preprocessed ingredients
ingredients.dropna(inplace=True)
ingredients.drop(['ingredient'], axis=1, inplace=True)
ingredients.rename(columns={'ingredient2':'ingredient'}, inplace=True)
ingredients[ingredients['ingredient']==''].groupby('ingredient').count()
# why are there still nulls?  NOTE(review): dropna removes NaN only, and
# preprocess yields empty *strings*, which are not NaN — so they survive
# the dropna above.
ingredients.head() #group all ingredients by cuisine i2 = ingredients.groupby(['cuisine','ingredient']).count().reset_index() i2.shape i2.head() #group all ingredients together i3 = ingredients.groupby(['ingredient']).count().reset_index() i3.drop(['id'], axis=1, inplace=True) i3.rename(columns={'cuisine':'count'}, inplace=True) #save the top 1000 ingredients to csv i3.sort_values('count', ascending=False).head(1000)[['count','ingredient']]\ .sort_values('ingredient').to_csv('ingredient_counts.csv',index_label = False) # The final version of ingredient_counts will be used as a custom vocabulary for vectorizing the data.
00 - Build Ingredient List.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab_type="code" id="WSLTKGWkYbbG" colab={}
# Kaggle API credentials (kaggle.json) must already be uploaded to the
# Colab session before these cells run.
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/kaggle.json

# + colab_type="code" id="6e29FA4SYbd0" outputId="45ed4b68-fc63-4d9b-c775-f0c6f11176fd" colab={"base_uri": "https://localhost:8080/", "height": 224}
# !kaggle competitions download -c grasp-and-lift-eeg-detection

# + colab_type="code" id="Nly8zqezYbgV" outputId="ec778661-8612-4e65-e358-a93099aa1243" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !unzip train.zip
# !unzip test.zip

# + colab_type="code" id="TgdT0K-iYbi3" colab={}
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils import weight_norm
import random
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score as auc
from scipy.interpolate import BSpline
from sklearn.metrics import accuracy_score

# + colab_type="code" id="gV0QGLTrYblZ" colab={}
# Global flag: when 1, model and batches are moved to the GPU.
USE_CUDA = 1

# + colab_type="code" id="9mqmqaRWYbnv" colab={}
def prepare_data_train(fname):
    """Load one subject/series data CSV plus its matching *_events CSV.

    Returns (clean, labels): EEG channel columns and the event columns,
    both with the 'id' column removed.
    """
    data = pd.read_csv(fname)
    events_fname = fname.replace('_data','_events')
    labels = pd.read_csv(events_fname)
    clean = data.drop(['id' ], axis=1)  # remove id
    labels = labels.drop(['id' ], axis=1)  # remove id
    return clean, labels

# Single shared scaler: fitted on the training split, reused unchanged on
# the test split so both see the same statistics.
scaler = StandardScaler()

def data_preprocess_train(X):
    # Fit + transform on training data.
    X_prep = scaler.fit_transform(X)
    return X_prep

def data_preprocess_test(X):
    # Transform only, using statistics fitted on the training split.
    X_prep = scaler.transform(X)
    return X_prep

# + colab_type="code" id="lGUvW7Jfyt9B" colab={}
def load_data(subjects, series):
    """Concatenate the train CSVs for every (subject, series) pair.

    Returns (X, y) as float numpy arrays.
    """
    y_raw = []
    raw = []
    for subject in subjects:
        for ser in series:
            fname = 'train/subj%d_series%d_data.csv' % (subject, ser)
            # print(fname)
            data, labels = prepare_data_train(fname)
            raw.append(data)
            y_raw.append(labels)
    X = pd.concat(raw)
    y = pd.concat(y_raw)
    X = np.asarray(X.astype(float))
    y = np.asarray(y.astype(float))
    return X, y

# + colab_type="code" id="vSGbGQh-y88B" colab={}
# some parameteres for the model
num_features = 32    # EEG channels per sample
window_size = 1024   # samples of history per training window
batch_size = 2000

# + colab_type="code" id="r6yp8IaygD8f" colab={}
def get_batch(dataset, target, batch_size=2000, val=False, index=None):
    """Cut a batch of (num_features, window_size//4) windows from the recording.

    Training (val=False): picks a random start position and strides by 16
    samples between consecutive windows.  Validation (val=True): takes
    batch_size consecutive positions starting at `index`.  Each window is
    the preceding window_size samples, decimated by 4.
    NOTE(review): if an index is < window_size the slice is shorter than
    the window and the assignment below would fail — callers start at
    index >= window_size.
    """
    if val == False:
        index = random.randint(window_size, len(dataset) - 16 * batch_size)
        indexes = np.arange(index, index + 16*batch_size, 16)
    else:
        indexes = np.arange(index, index + batch_size)
    batch = np.zeros((batch_size, num_features, window_size//4))
    b = 0
    for i in indexes:
        start = i - window_size if i - window_size > 0 else 0
        tmp = dataset[start:i]
        batch[b,:,:] = tmp[::4].transpose()
        b += 1
    targets = target[indexes]
    return torch.DoubleTensor(batch), torch.DoubleTensor(targets)

# + colab_type="code" id="Pd-3dbRAgD6R" colab={}
class convmodel(nn.Module):
    """1-D conv front end: Conv1d(32->64, k=3) -> ReLU -> BatchNorm ->
    MaxPool(2) -> Dropout."""
    def __init__(self, drop=0.5, d_linear=124):
        super().__init__()
        self.conv2 = nn.Conv1d(32, 64, kernel_size=3, padding=0, stride=1)
        self.bn = nn.BatchNorm1d(64)
        self.pool = nn.MaxPool1d(2, stride=2)
        self.dropout1 = nn.Dropout(drop)
        self.conv = nn.Sequential(self.conv2, nn.ReLU(inplace=True), self.bn, self.pool, self.dropout1)

    def forward(self, x):
        x = self.conv(x)
        return x


class Combine(nn.Module):
    """CNN front end feeding an LSTM; last LSTM step -> linear -> sigmoid,
    one probability per event class."""
    def __init__(self, out_classes):
        super(Combine, self).__init__()
        self.cnn = convmodel().double()
        # input_size=127: window_size//4 = 256 -> 254 after the k=3 conv
        # (no padding) -> 127 after MaxPool1d(2).
        self.rnn = nn.LSTM(input_size=127, hidden_size=64, num_layers=1, batch_first=True)
        self.linear = nn.Linear(64, out_classes)

    def forward(self, x):
        x = self.cnn(x)
        out, hidden = self.rnn(x)
        # Only the final timestep of the LSTM output is classified.
        out = self.linear(out[:, -1, :])
        return torch.sigmoid(out)

# + colab_type="code" id="tm9puItwgLh5" colab={}
model = Combine(6).double()
if USE_CUDA == 1:
    model = model.cuda()
optim = torch.optim.Adam(model.parameters(), lr=1e-3, eps=1e-10)

# + colab_type="code" id="RtKK5KVLgQA4" colab={}
bs = batch_size

def train(traindata, y_train, epochs, printevery=100, shuffle=True):
    """Train the module-level `model` on random windows from traindata.

    Uses binary cross-entropy over the 6 event outputs.  NOTE(review):
    `shuffle` is accepted but unused — batches are always randomly
    positioned by get_batch.
    """
    model.train()
    for epoch in range(epochs):
        total_loss = 0
        for i in range(len(traindata)//bs):
            optim.zero_grad()
            x, y = get_batch(traindata, y_train)
            if USE_CUDA == 1:
                x = Variable(x).cuda()
                y = Variable(y).cuda()
            preds = model(x)
            loss = F.binary_cross_entropy(preds.view(-1), y.view(-1))
            loss.backward()
            total_loss += loss.item()
            optim.step()
            if (i + 1) % printevery == 0:
                print("epoch: %d, iter %d/%d, loss %.4f"%(epoch + 1, i + 1, len(traindata)//2000, total_loss/printevery))
                total_loss = 0

# + colab_type="code" id="w6NzCniYglyV" colab={}
def getPredictions(data, labels):
    """Run the model over `data` in consecutive validation batches.

    Starts at window_size so every window has full history; the last batch
    is shrunk to fit.  Returns (preds, targs) vstacked into numpy arrays.
    """
    model.eval()
    p = []
    res = []
    i = window_size
    bs = 2000
    while i < len(data):
        if i + bs > len(data):
            bs = len(data) - i
        x, y = get_batch(data, labels, bs, index=i, val=True)
        x = (x)
        x = x.cuda()
        y = y
        preds = model(x)
        preds = preds.squeeze(1)
        p.append(np.array(preds.cpu().data))
        res.append(np.array(y.data))
        i += bs
    preds = p[0]
    for i in p[1:]:
        preds = np.vstack((preds, i))
    targs = res[0]
    for i in res[1:]:
        targs = np.vstack((targs, i))
    return preds, targs


def valscore(preds, targs):
    # Mean ROC-AUC across the 6 event columns.
    aucs = [auc(targs[:, j], preds[:, j]) for j in range(6)]
    total_loss = np.mean(aucs)
    return total_loss


def accurecy(preds, targs):
    # Mean accuracy across the 6 event columns at a 0.3 decision threshold.
    preds = np.where(preds>0.3, 1, 0)
    acc_score = []
    for j in range(6):
        acc_score.append(accuracy_score(targs[:, j], preds[:, j]))
    return np.mean(acc_score)

# + colab_type="code" id="1g_wSqCvYbtZ" outputId="78c648fc-a5b7-4c80-e4c4-decb54d70ec3" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import numpy as np
from sklearn.model_selection import KFold

# 4-fold cross-validation over the 8 recording series, pooling subjects 1-12:
# each fold trains on 6 series and validates on the held-out 2.
subjects = range(1, 13)
X = np.array([1, 2, 3, 4, 5, 6, 7, 8])
kf = KFold(n_splits=4, shuffle=True)
count = 1
auc_score = []
for train_index, test_index in kf.split(X):
    print("Fold ", count)
    print("train", X[train_index], "test", X[test_index])
    X_train, y_train = load_data(subjects, series=X[train_index])
    X_train = data_preprocess_train(X_train)
    X_test, y_test = load_data(subjects, series=X[test_index])
    X_test = data_preprocess_test(X_test)
    train(X_train, y_train, 1)
    val_preds, val_targs = getPredictions(X_test, y_test)
    print("check results")
    print(valscore(val_preds, val_targs))
    auc_score.append(valscore(val_preds, val_targs))
    # Append this fold's scores to a running results file.
    with open("results.txt", "a") as res_file:
        res_file.write("train : "+str(X[train_index])+" test : "+str(X[test_index])+" AUC score : "+str(valscore(val_preds, val_targs))+" Accurecy Score: "+str(accurecy(val_preds, val_targs))+"\n")
    count += 1
print(np.mean(auc_score))

# + colab_type="code" id="4jRdz_EQYbyp" colab={}
05.b.KFold-LSTM/4.Kfold_CNN_LSTM_EEG.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Union two DataFrames with different columns

# The union of two DataFrames is the process of appending one DataFrame below another.
#
# The [PySpark `.union()` function](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.union.html) is equivalent to the SQL `UNION ALL` function, where both DataFrames must have the same number of columns. However the [sparklyr `sdf_bind_rows()` function](https://rdrr.io/github/rstudio/sparklyr/man/sdf_bind.html) can combine two DataFrames with different number of columns, by putting `NULL` values into the rows of data.
#
# Here's how we can use PySpark to mimic the behaviour of the `sdf_bind_rows()` function in sparklyr.

import pyspark.sql.functions as F
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("union-example").getOrCreate()

# Create a DataFrame of Wimbledon singles champions from 2017 to 2019

# +
df1_schema = """
    `tournament_year` int,
    `event` string,
    `champion` string
"""

df1 = spark.createDataFrame([
    [2017, "<NAME>", "Federer"],
    [2018, "<NAME>", "Djokovic"],
    [2019, "<NAME>", "Djokovic"],
    [2017, "<NAME>", "Muguruza"],
    [2018, "<NAME>", "Kerber"],
    [2019, "<NAME>", "Halep"],
    ],
    schema=df1_schema
)

df1.show()
# -

# Next we want to append 2020 data. However, there was no Wimbledon tournament in 2020. We'll just create two columns.

# +
df2_schema = """
    `tournament_year` int,
    `event` string
"""

df2 = spark.createDataFrame([
    [2020, "<NAME>"],
    [2020, "<NAME>"]
    ],
    schema=df2_schema
)

df2.show()
# -

# Let's try to union these DataFrames together

try:
    df_joined = df1.union(df2)
except Exception as e:
    print(e)

# The error message says we need the same number of columns. So let's try adding a column to `df2` full of `Null` values before the union

df_joined = df1.union(df2.withColumn("champion", F.lit(None)))
df_joined.printSchema()
df_joined.show()

# This time it worked. We get the result we were looking for.
#
# However, we need to be careful in doing this. What if the columns in `df2` were defined in a different order?

# +
df2_schema = """
    `event` string,
    `tournament_year` int
"""

df2 = spark.createDataFrame([
    ["<NAME>", 2020],
    ["<NAME>", 2020]
    ],
    schema=df2_schema
)

df2.show()
# -

df_joined = df1.union(df2.withColumn("champion", F.lit(None)))
df_joined.printSchema()
df_joined.show()

# The code runs, but the result isn't what we want. We should therefore write our code in a way that mitigates the risk of this happening. We might have the correct order now, but in future perhaps the order might change.
#
# We'll take the column order from the DataFrame with all the columns, `df1`, and force `df2` to have the same column order before doing the union.

col_order = df1.columns
df_joined = df1.union(df2.withColumn("champion", F.lit(None)).select(col_order))
df_joined.printSchema()
df_joined.show()

# Let's look at one more example where we have a third DataFrame with different columns. Such as results from the 2021 tournament, which hasn't taken place yet (at the time of writing).

# +
df3_schema = """
    `tournament_year` int
"""

df3 = spark.createDataFrame([
    [2021],
    [2021]
    ],
    schema=df3_schema
)

df3.show()
# -

# We want a list of unique columns in all the DataFrames along with their types. We can use `set()` to get the unique column names and types, then convert into a dictionary to create key/value pairs

col_dict = dict(set(df1.dtypes + df2.dtypes + df3.dtypes))
col_dict

# Next we'll create a function that checks to see if a DataFrame has all the columns we need for the union. If the DataFrame is missing a column we'll add an empty column with that name, and give it the correct type using `.cast()`

def add_empty_columns(df, col_dict):
    # For every required column name, add a typed NULL column when missing.
    for col in col_dict.keys():
        if col not in df.columns:
            df = df.withColumn(col, F.lit(None).cast(col_dict[col]))
    return df

# Next we apply the function to all three DataFrames

df1 = add_empty_columns(df1, col_dict)
df2 = add_empty_columns(df2, col_dict)
df3 = add_empty_columns(df3, col_dict)

# We need to decide on a column order for the unions, we can get this from `col_dict.keys()`

col_order = list(col_dict.keys())
col_order

# And finally, do the union. Note we use `.select(col_order)` after referencing each DataFrame to make sure the columns are in a consistent order

df_joined = df1.select(col_order).union(df2.select(col_order)).union(df3.select(col_order))
df_joined.printSchema()
df_joined.show()
ons-spark/spark-functions/union_dataframes_with_different_columns.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (pynamical)
#     language: python
#     name: pynamical
# ---

# # Pynamical: animated 3D phase diagrams of the logistic map
#
# **Citation info**: <NAME>. 2016. "[Visual Analysis of Nonlinear Dynamical Systems: Chaos, Fractals, Self-Similarity and the Limits of Prediction](http://geoffboeing.com/publications/nonlinear-chaos-fractals-prediction/)." *Systems*, 4 (4), 37. doi:10.3390/systems4040037.
#
# Pynamical documentation: http://pynamical.readthedocs.org
#
# This notebook demonstrates how to make animated GIFs that pan and zoom around 3-D phase diagrams to visualize fractal data sets, strange attractors, and chaos.

import glob
import IPython.display as IPdisplay
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pynamical
import random
from PIL import Image
from pynamical import simulate, phase_diagram_3d
# %matplotlib inline

title_font = pynamical.get_title_font()
label_font = pynamical.get_label_font()
# All frame folders and gifs are written below this directory.
save_folder = 'images/phase-animate'

# ## Create a 3-D phase diagram as an animated gif that pans, rotates, and zooms. This demonstrates how the viewing perspective is composed of an elevation, a distance, and an azimuth.

# +
# set a filename, run the logistic model, and create the plot
gif_filename = '01-pan-rotate-zoom-demo'
working_folder = '{}/{}'.format(save_folder, gif_filename)
if not os.path.exists(working_folder):
    os.makedirs(working_folder)

pops = simulate(num_gens=1000, rate_min=3.99, num_rates=1)
fig, ax = phase_diagram_3d(pops, remove_ticks=False, show=False, save=False)

# create 36 frames for the animated gif
steps = 36

# a viewing perspective is composed of an elevation, distance, and azimuth
# define the range of values we'll cycle through for the distance of the viewing perspective
min_dist = 7.
max_dist = 10.
dist_range = np.arange(min_dist, max_dist, (max_dist-min_dist)/steps)

# define the range of values we'll cycle through for the elevation of the viewing perspective
min_elev = 10.
max_elev = 60.
elev_range = np.arange(max_elev, min_elev, (min_elev-max_elev)/steps)

# now create the individual frames that will be combined later into the animation
for azimuth in range(0, 360, int(360/steps)):
    # pan down, rotate around, and zoom out
    ax.azim = float(azimuth/3.)
    ax.elev = elev_range[int(azimuth/(360./steps))]
    ax.dist = dist_range[int(azimuth/(360./steps))]

    # set the figure title to the viewing perspective, and save each figure as a .png
    fig.suptitle('elev={:.1f}, azim={:.1f}, dist={:.1f}'.format(ax.elev, ax.azim, ax.dist))
    plt.savefig('{}/{}/img{:03d}.png'.format(save_folder, gif_filename, azimuth))

# don't display the static plot...
plt.close()

# load all the static images into a list then save as an animated gif
gif_filepath = '{}/{}.gif'.format(save_folder, gif_filename)
images = [Image.open(image) for image in sorted(glob.glob('{}/*.png'.format(working_folder)))]
gif = images[0]
gif.info['duration'] = 75 #milliseconds per frame
gif.info['loop'] = 0 #how many times to loop (0=infinite)
gif.save(fp=gif_filepath, format='gif', save_all=True, append_images=images[1:])
IPdisplay.Image(url=gif_filepath)
# -

# ## Create a 3-D phase diagram as an animated gif starts by looking straight down at the x-y plane (this is what a 2-D plot would look like), then panning and rotating around to show the 3-D structure

# +
# set a filename, run the logistic model, and create the plot
gif_filename = '02-pan-rotate-logistic-phase-diagram'
working_folder = '{}/{}'.format(save_folder, gif_filename)
if not os.path.exists(working_folder):
    os.makedirs(working_folder)

pops = simulate(num_gens=1000, rate_min=3.99, num_rates=1)
fig, ax = phase_diagram_3d(pops, color='#003399', xlabel='Population (t)', ylabel='Population (t + 1)',
                           zlabel='', show=False, save=False)

# look straight down at the x-y plane to start off
ax.elev = 89.9
ax.azim = 270.1
ax.dist = 11.0

# sweep the perspective down and rotate to reveal the 3-D structure of the strange attractor
for n in range(0, 100):
    if n > 19 and n < 23:
        ax.set_xlabel('')
        ax.set_ylabel('') #don't show axis labels while we move around, it looks weird
        ax.elev = ax.elev-0.5 #start by panning down slowly
    if n > 22 and n < 37:
        ax.elev = ax.elev-1.0 #pan down faster
    if n > 36 and n < 61:
        ax.elev = ax.elev-1.5
        ax.azim = ax.azim+1.1 #pan down faster and start to rotate
    if n > 60 and n < 65:
        ax.elev = ax.elev-1.0
        ax.azim = ax.azim+1.1 #pan down slower and rotate same speed
    if n > 64 and n < 74:
        ax.elev = ax.elev-0.5
        ax.azim = ax.azim+1.1 #pan down slowly and rotate same speed
    if n > 73 and n < 77:
        ax.elev = ax.elev-0.2
        ax.azim = ax.azim+0.5 #end by panning/rotating slowly to stopping position
    if n > 76: #add axis labels at the end, when the plot isn't moving around
        ax.set_xlabel('Population (t)')
        ax.set_ylabel('Population (t + 1)')
        ax.set_zlabel('Population (t + 2)')

    # add a figure title to each plot then save the figure to the disk
    fig.suptitle('Logistic Map, r=3.99', fontsize=16, x=0.5, y=0.85)
    plt.savefig('{}/{}/img{:03d}.png'.format(save_folder, gif_filename, n), bbox_inches='tight')

# don't display the static plot
plt.close()

# load all the static images into a list then save as an animated gif
gif_filepath = '{}/{}.gif'.format(save_folder, gif_filename)
images = [Image.open(image) for image in sorted(glob.glob('{}/*.png'.format(working_folder)))]
gif = images[0]
gif.info['duration'] = 10 #milliseconds per frame
gif.info['loop'] = 0 #how many times to loop (0=infinite)
gif.save(fp=gif_filepath, format='gif', save_all=True, append_images=images[1:])
IPdisplay.Image(url=gif_filepath)
# -

# ## Do the same thing again, but this time plot both the chaotic logistic model output and random noise

# run the logistic model and create random noise
chaos_pops = simulate(num_gens=1000, rate_min=3.99, num_rates=1)
random_pops = pd.DataFrame([random.random() for _ in range(0, 1000)], columns=['value'])
pops = pd.concat([chaos_pops, random_pops], axis=1)
pops.columns = ['chaos', 'random']
pops.tail()

# +
# set a filename and then create the plot
gif_filename = '03-pan-rotate-logistic-random'
working_folder = '{}/{}'.format(save_folder, gif_filename)
if not os.path.exists(working_folder):
    os.makedirs(working_folder)

fig, ax = phase_diagram_3d(pops, color=['#003399','#cc0000'], xlabel='Population (t)',
                           ylabel='Population (t + 1)', zlabel='', legend=True,
                           legend_bbox_to_anchor=(0.94, 0.96), show=False, save=False)

# configure the initial viewing perspective to look straight down at the x-y plane
ax.elev = 89.9
ax.azim = 270.1
ax.dist = 11.0

# sweep the perspective down and rotate to reveal the 3-D structure of the strange attractor
for n in range(0, 100):
    if n >= 20 and n <= 22:
        ax.set_xlabel('')
        ax.set_ylabel('') #don't show axis labels while we move around, it looks weird
        ax.elev = ax.elev-0.5 #start by panning down slowly
    if n >= 23 and n <= 36:
        ax.elev = ax.elev-1.0 #pan down faster
    if n >= 37 and n <= 60:
        ax.elev = ax.elev-1.5
        ax.azim = ax.azim+1.1 #pan down faster and start to rotate
    if n >= 61 and n <= 64:
        ax.elev = ax.elev-1.0
        ax.azim = ax.azim+1.1 #pan down slower and rotate same speed
    if n >= 65 and n <= 73:
        ax.elev = ax.elev-0.5
        ax.azim = ax.azim+1.1 #pan down slowly and rotate same speed
    if n >= 74 and n <= 76:
        ax.elev = ax.elev-0.2
        ax.azim = ax.azim+0.5 #end by panning/rotating slowly to stopping position
    if n == 77: #add axis labels at the end, when the plot isn't moving around
        ax.set_xlabel('Population (t)')
        ax.set_ylabel('Population (t + 1)')
        ax.set_zlabel('Population (t + 2)')

    # add a figure title to each plot then save the figure to the disk
    fig.suptitle(u'3-D phase diagram, chaos vs random', fontsize=16, x=0.5, y=0.85)
    plt.savefig('{}/{}/img{:03d}.png'.format(save_folder, gif_filename, n), bbox_inches='tight')

# don't display the static plot
plt.close()

# load all the static images into a list then save as an animated gif
gif_filepath = '{}/{}.gif'.format(save_folder, gif_filename)
images = [Image.open(image) for image in sorted(glob.glob('{}/*.png'.format(working_folder)))]
gif = images[0]
gif.info['duration'] = 10 #milliseconds per frame
gif.info['loop'] = 0 #how many times to loop (0=infinite)
gif.save(fp=gif_filepath, format='gif', save_all=True, append_images=images[1:])
IPdisplay.Image(url=gif_filepath)
# -

# ## Create a 3-D phase diagram to show the logistic map's strange attractors across the chaotic regime (from r=3.6 to r=4.0), twisting and curling around their state space in three dimensions. Animated it by panning and rotating to reveal the structure and its odd folds.

# run the model for 2,000 generations for 50 growth rate parameters between 3.6 and 4.0
pops = simulate(num_gens=2000, rate_min=3.6, rate_max=4.0, num_rates=50)

# +
# set a filename and create the plot
gif_filename = '04-pan-rotate-chaotic-regime'
working_folder = '{}/{}'.format(save_folder, gif_filename)
if not os.path.exists(working_folder):
    os.makedirs(working_folder)

fig, ax = phase_diagram_3d(pops, color='viridis', color_reverse=False, xlabel='Population (t)',
                           ylabel='Population (t + 1)', zlabel='', show=False, save=False)

# configure the initial viewing perspective to look straight down at the x-y plane
ax.elev = 89.9
ax.azim = 270.1
ax.dist = 11.0

# sweep the perspective down and rotate to reveal the 3-D structure of the strange attractor
for n in range(0, 100):
    if n > 19 and n < 23:
        ax.set_xlabel('')
        ax.set_ylabel('') #don't show axis labels while we move around, it looks weird
        ax.elev = ax.elev-0.5 #start by panning down slowly
    if n > 22 and n < 37:
        ax.elev = ax.elev-1.0 #pan down faster
    if n > 36 and n < 61:
        ax.elev = ax.elev-1.5
        ax.azim = ax.azim+1.1 #pan down faster and start to rotate
    if n > 60 and n < 65:
        ax.elev = ax.elev-1.0
        ax.azim = ax.azim+1.1 #pan down slower and rotate same speed
    if n > 64 and n < 74:
        ax.elev = ax.elev-0.5
        ax.azim = ax.azim+1.1 #pan down slowly and rotate same speed
    if n > 73 and n < 77:
        ax.elev = ax.elev-0.2
        ax.azim = ax.azim+0.5 #end by panning/rotating slowly to stopping position
    if n > 76: #add axis labels at the end, when the plot isn't moving around
        ax.set_xlabel('Population (t)')
        ax.set_ylabel('Population (t + 1)')
        ax.set_zlabel('Population (t + 2)')

    # add a figure title to each plot then save the figure to the disk
    fig.suptitle('Logistic Map, r=3.6 to r=4.0', fontsize=16, x=0.5, y=0.85)
    plt.savefig('{}/{}/img{:03d}.png'.format(save_folder, gif_filename, n), bbox_inches='tight')

# don't display the static plot
plt.close()

# load all the static images into a list then save as an animated gif
gif_filepath = '{}/{}.gif'.format(save_folder, gif_filename)
images = [Image.open(image) for image in sorted(glob.glob('{}/*.png'.format(working_folder)))]
gif = images[0]
gif.info['duration'] = 10 #milliseconds per frame
gif.info['loop'] = 0 #how many times to loop (0=infinite)
gif.save(fp=gif_filepath, format='gif', save_all=True, append_images=images[1:])
IPdisplay.Image(url=gif_filepath)
# -

# ## Now zoom into the 3D plot

# run the model for 4,000 generations for 50 growth rate parameters between 3.6 and 4.0
pops = simulate(num_gens=4000, rate_min=3.6, rate_max=4.0, num_rates=50)

# +
# set a filename and create the plot
gif_filename = '05-logistic-3d-phase-diagram-chaotic-regime'
working_folder = '{}/{}'.format(save_folder, gif_filename)
if not os.path.exists(working_folder):
    os.makedirs(working_folder)

fig, ax = phase_diagram_3d(pops, color='viridis', color_reverse=False, show=False, save=False)

# configure the initial viewing perspective
ax.elev = 25.
ax.azim = 321.
ax.dist = 11.0

# zoom in to reveal the 3-D structure of the strange attractor
for n in range(0, 100):
    if n <= 18:
        ax.azim = ax.azim-0.2 #begin by rotating very slowly
    if n >= 19 and n <= 29:
        ax.azim = ax.azim-10
        ax.dist = ax.dist-0.05
        ax.elev = ax.elev-2 #quickly whip around to the other side
    if n >= 33 and n <= 49:
        ax.azim = ax.azim+3
        ax.dist = ax.dist-0.55
        ax.elev = ax.elev+1.4 #zoom into the center
    if n >= 61 and n <= 79:
        ax.azim = ax.azim-2
        ax.elev = ax.elev-2
        ax.dist = ax.dist+0.2 #pull back and pan up
    if n >= 80:
        ax.azim = ax.azim-0.2 #end by rotating very slowly

    # add a figure title to each plot then save the figure to the disk
    fig.suptitle('Logistic Map, r=3.6 to r=4.0', fontsize=16, x=0.5, y=0.85)
    plt.savefig('{}/{}/img{:03d}.png'.format(save_folder, gif_filename, n), bbox_inches='tight')

# don't display the static plot
plt.close()

# load all the static images into a list then save as an animated gif
gif_filepath = '{}/{}.gif'.format(save_folder, gif_filename)
images = [Image.open(image) for image in sorted(glob.glob('{}/*.png'.format(working_folder)))]
gif = images[0]
gif.info['duration'] = 10 #milliseconds per frame
gif.info['loop'] = 0 #how many times to loop (0=infinite)
gif.save(fp=gif_filepath, format='gif', save_all=True, append_images=images[1:])
IPdisplay.Image(url=gif_filepath)
# -

# ## For more info:
# - [Read the journal article](http://geoffboeing.com/publications/nonlinear-chaos-fractals-prediction/)
# - [Pynamical documentation](http://pynamical.readthedocs.org)
# - [Chaos Theory and the Logistic Map](http://geoffboeing.com/2015/03/chaos-theory-logistic-map/)
# - [Visualizing Chaos and Randomness with Phase Diagrams](http://geoffboeing.com/2015/04/visualizing-chaos-and-randomness/)
# - [Animated 3D Plots in Python](http://geoffboeing.com/2015/04/animated-3d-plots-python/)
examples/pynamical-demo-3d-animation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """A quick test for updating a Compound and traversing its hierarchy. """ from __future__ import division, print_function from numpy import pi import mbuild as mb from mbuild.examples.pmpc.brush import Brush # Create a compound and write it to file. brush1 = Brush() brush1.save("brush1.pdb", overwrite=True) # Create another compound, rotate it and write it to file. brush2 = Brush() mb.rotate_around_z(brush2, pi/2) brush2.save("brush2.pdb", overwrite=True) # Load brush2.pdb into brush1, modifying the atom positions of brush1. brush1.update_coordinates("brush2.pdb") brush1.save("modified_brush1.pdb", overwrite=True) # Access the internals of the updated brush1. print(brush1['pmpc']) for mpc in brush1['pmpc']['monomer']: print(mpc) # -
mbuild/examples/reload/reload.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp scrapers # - # # Scrapers # # > A set of classes for scraping data about items, series, and agencies from the National Archives of Australia's online database, RecordSearch. #hide from nbdev.showdoc import * from IPython.display import display, JSON # The main entities described within RecordSearch are **Items**, **Series**, and **Agencies**. Put simply, items are contained within series, and series are created and controlled by agencies. But the [Series System](https://www.naa.gov.au/help-your-research/getting-started/commonwealth-record-series-crs-system), on which RecordSearch is based, allows a much more complex range of relationships between entities to be documented. # + #export import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry import mechanicalsoup from bs4 import BeautifulSoup import re import datetime import math import arrow from sqlitedict import SqliteDict import time # We need separate browser/sessions so that we can nest searches -- eg series search -> series_details -> item search. 
# But we also want to reuse them as much as possible or we get misleading ConnectionErrors (actually due to too many open files) def make_session(): s = requests.Session() retries = Retry(total=10, backoff_factor=1, status_forcelist=[ 502, 503, 504 ]) s.mount('https://', HTTPAdapter(max_retries=retries)) s.mount('http://', HTTPAdapter(max_retries=retries)) return s def make_browser(): s = make_session() browser = mechanicalsoup.StatefulBrowser(soup_config={'features': 'lxml'}, session=s) return browser s = make_session() browsers = {} browsers['item'] = make_browser() browsers['series'] = make_browser() browsers['agency'] = make_browser() browsers['item_search'] = make_browser() browsers['series_search'] = make_browser() browsers['agency_search'] = make_browser() cache_db = SqliteDict('./cache_db.sqlite', autocommit=True) RS_URLS = { 'item': 'https://recordsearch.naa.gov.au/scripts/AutoSearch.asp?O=I&Number=', 'series': 'https://recordsearch.naa.gov.au/scripts/AutoSearch.asp?Number=', 'agency': 'https://recordsearch.naa.gov.au/scripts/AutoSearch.asp?Number=', 'search_results': 'https://recordsearch.naa.gov.au/SearchNRetrieve/Interface/ListingReports/ItemsListing.aspx', 'ns_results': 'https://recordsearch.naa.gov.au/NameSearch/Interface/ItemsListing.aspx', 'series_results': 'https://recordsearch.naa.gov.au/SearchNRetrieve/Interface/ListingReports/SeriesListing.aspx', 'agency_results': 'https://recordsearch.naa.gov.au/SearchNRetrieve/Interface/ListingReports/AgencyListing.aspx' } ITEM_FORM = { 'kw': { 'id': 'ctl00$ContentPlaceHolderSNR$txbKeywords', 'type': 'input', }, 'kw_options': { 'id': 'ctl00$ContentPlaceHolderSNR$ddlUsingKeywords', 'type': 'select' }, 'kw_exclude': { 'id': 'ctl00$ContentPlaceHolderSNR$txbExKeywords', 'type': 'input' }, 'kw_exclude_options': { 'id': 'ctl00$ContentPlaceHolderSNR$ddlUsingExKwd', 'type': 'select' }, # Set to 'on' to search in item notes # It's a checkbox, but uses Javascript to set text value. 
    # Pretend it's a select for validation purposes.
    'search_notes': {
        'id': 'ctl00$ContentPlaceHolderSNR$cbxKwdTitleNotes',
        'type': 'select'
    },
    'series': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbSerNo',
        'type': 'input'
    },
    'series_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbExSerNo',
        'type': 'input'
    },
    'control': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbIteControlSymb',
        'type': 'input'
    },
    'control_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbExIteControlSymb',
        'type': 'input'
    },
    'item_id': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbIteBarcode',
        'type': 'input'
    },
    'date_from': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbDateFrom',
        'type': 'input'
    },
    'date_to': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbDateTo',
        'type': 'input'
    },
    # Select lists (options below)
    'formats': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlPhysFormat',
        'type': 'select'
    },
    'formats_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExPhysFormat',
        'type': 'select'
    },
    'locations': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlLocation',
        'type': 'select'
    },
    'locations_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExLocation',
        'type': 'select'
    },
    'access': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlAccessStatus',
        'type': 'select'
    },
    'access_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExAccessStatus',
        'type': 'select'
    },
    # Checkbox
    'digital': {
        'id': 'ctl00$ContentPlaceHolderSNR$cbxDigitalCopies',
        'type': 'checkbox'
    }
}

# Valid values for the keyword-matching select lists.
KW_OPTIONS = [
    'ALL',
    'ANY',
    'EXACT'
]

# Valid values for the physical-format select lists.
FORMATS = [
    'Paper files and documents',
    'Index cards',
    'Bound volumes',
    'Cartographic records',
    'Photographs',
    'Microforms',
    'Audio-visual records',
    'Audio records',
    'Electronic records',
    '3-dimensional records',
    'Scientific specimens',
    'Textiles'
]

# Valid values for the item location select lists.
LOCATIONS = [
    'NAT,ACT',
    'SA',
    'AWM',
    'QLD',
    'NT',
    'TAS',
    'VIC',
    'WA',
    'NSW'
]

# Valid values for the access-status select lists.
ACCESS = [
    'OPEN',
    'OWE',
    'CLOSED',
    'NYE'
]

# Maps friendly search-parameter names to the RecordSearch agency search
# form's ASP.NET field ids and widget types.
AGENCY_FORM = {
    'kw': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbKeywords',
        'type': 'input'
    },
    'kw_options': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlUsingKeywords',
        'type': 'select'
    },
    'kw_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbExcludeKeywords',
        'type': 'input'
    },
    'kw_exclude_options': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExcludeUsingKeywords',
        'type': 'select'
    },
    'function': {
        'id': 'ctl00$ContentPlaceHolderSNR$txtFunctions',
        'type': 'input'
    },
    'date_from': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbDateFrom',
        'type': 'input'
    },
    'date_to': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbDateTo',
        'type': 'input'
    },
    'locations': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlLocation',
        'type': 'select'
    },
    'locations_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExcludeLocation',
        'type': 'select'
    },
    'agency_status': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlAgencyStatus',
        'type': 'select'
    },
    'agency_status_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExcludeAgencyStatus',
        'type': 'select'
    }
}

# Valid values for the agency location select lists.
AGENCY_LOCATIONS = [
    'NAT,ACT',
    'COCOS OR CHRISTMAS ISLAND',
    'NSW',
    'NT',
    'OVERSEAS',
    'PNG', # Papua New Guinea
    'QLD',
    'SA',
    'TAS',
    'VIC',
    'WA'
]

# Valid values for the agency status select lists.
AGENCY_STATUS = [
    'DOS', # Department of State
    'HO', # Head Office
    'RO', # Regional or State Office
    'INTGOV', # Intergovernmental agency
    'COURT', #Judicial Court or Tribunal
    'LO', #Local Office
    'NONEX' #Non-Executive government agency (Courts, Parliament)
]

# Maps friendly search-parameter names to the RecordSearch series search
# form's ASP.NET field ids and widget types.
SERIES_FORM = {
    'kw': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbKeywords',
        'type': 'select'
    },
    'kw_options': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlUsingKeywords',
        'type': 'select'
    },
    'kw_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbExKeywords',
        'type': 'input'
    },
    'kw_exclude_options': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlUsingExKwd',
        'type': 'select'
    },
    'search_notes': {
        'id': 'ctl00$ContentPlaceHolderSNR$cbxKwdTitleNotes',
        'type': 'select'
    },
    'series_id': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbSerNo',
        'type': 'input'
    },
    'agency_recording': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbAgencyRecording',
        'type': 'input'
    },
    'agency_controlling': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbAgencyControlling',
        'type': 'input'
    },
    'date_from': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbDateFrom',
        'type': 'input'
    },
    'date_to': {
        'id': 'ctl00$ContentPlaceHolderSNR$txbDateTo',
        'type': 'input'
    },
    'formats': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlPhysFormat',
        'type': 'select'
    },
    'formats_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExPhysFormat',
        'type': 'select'
    },
    'locations': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlLocation',
        'type': 'select'
    },
    'locations_exclude': {
        'id': 'ctl00$ContentPlaceHolderSNR$ddlExLocation',
        'type': 'select'
    },
}
        self.browser.select_form('form[name="t"]')
        return self.browser.submit_selected()


class RSEntity(RSBase):
    '''
    Base class for individual RecordSearch entities – item, series, or agency.
    Not for direct use – use the appropriate subclass instead.
    '''
    def __init__(self, identifier=None, cache=True, **kwargs):
        # identifier – item barcode, series number, or agency id
        # cache – if True, scraped data is stored in the key-value cache
        self.identifier = identifier
        self.cache = cache
        # Each entity type gets its own browser session.
        self.browser = browsers[self.entity_type]

    def get_entity_page(self):
        # Retrieve the RecordSearch detail page for this entity.
        item_url = f'{RS_URLS[self.entity_type]}{self.identifier}'
        response = self.get_url(item_url)
        return response

    def refresh_cache(self):
        '''
        Delete data for this entity from the cache, then extract a fresh version from RecordSearch.
        '''
        del cache_db[f'{self.entity_type}_{self.identifier}']
        self.__init__(self.identifier, self.cache)

    def get_cell(self, label):
        '''
        RecordSearch results for individual entities are formatted in tables, with labels and values.
        This retrieves the cell immediately after the cell containing the supplied label.
        Returns None if no matching cell is found.
        '''
        try:
            cell = (
                self.details.find(text=re.compile(label)).parent.parent.find_next_siblings('td')[0]
            )
        except (IndexError, AttributeError):
            # Sometimes the cell labels are inside an enclosing div,
            # but sometimes not. Try again assuming no div.
            try:
                cell = (
                    self.details.find(text=re.compile(label)).parent.find_next_siblings('td')[0]
                )
            except (IndexError, AttributeError):
                cell = None
        return cell

    def get_value(self, label):
        '''
        Get the value associated with the supplied label from the RecordSearch entity description.
        '''
        # Get the table cell containing the value
        cell = self.get_cell(label)
        try:
            # Get the text from the cell
            value = ' '.join([string for string in cell.stripped_strings])
        except AttributeError:
            # No matching cell found.
            value = None
        return value

    def get_details(self):
        '''
        Extract the main results table from the HTML page.
        '''
        details = self.browser.page.find('div', 'mainDetailsTable')
        return details

    def get_formatted_dates(self, label, prefix=''):
        '''
        Extract dates from a date range string, try to parse them and format as ISO dates.
        Return the original date string, the start date and the end date.
        '''
        date_str = self.get_value(label)
        if date_str:
            formatted_dates = self.process_date_string(date_str, prefix)
        else:
            formatted_dates = {f'{prefix}date_str': '', f'{prefix}start_date': None, f'{prefix}end_date': None}
        return formatted_dates

    def get_formatted_date(self, label, prefix=''):
        # Single-date (not range) version of get_formatted_dates().
        date_str = self.get_value(label)
        if date_str:
            formatted_date = {f'{prefix}date_str': date_str, f'{prefix}date': self.parse_date(date_str)}
        else:
            formatted_date = {f'{prefix}date_str': '', f'{prefix}date': None}
        return formatted_date

    def get_relations(self, label):
        # Extract a list of related entities (eg recording agencies, previous series)
        # from the cell matching `label`. Each relation carries dates, identifier and title.
        cell = self.get_cell(label)
        relations = []
        if cell is not None:
            for relation in cell.find_all('li'):
                try:
                    date_str = relation.find('div', 'dates').string.strip()
                except AttributeError:
                    formatted_dates = {'date_str': '', 'start_date': None, 'end_date': None}
                else:
                    formatted_dates = self.process_date_string(date_str)
                details = [string for string in relation.find('div', 'linkagesInfo').stripped_strings]
                try:
                    identifier = details[0]
                    # Second string holds the title; strip the leading separator characters.
                    title = details[1][2:]
                except IndexError:
                    # Only one string – use it for both identifier and title.
                    identifier = details[0]
                    title = details[0]
                relations.append({
                    'date_str': formatted_dates['date_str'],
                    'start_date': formatted_dates['start_date'],
                    'end_date': formatted_dates['end_date'],
                    'identifier': identifier,
                    'title': title
                })
                # Remove processed nodes so repeated labels aren't matched again.
                relation.decompose()
            cell.decompose()
        return relations


class RSSearch(RSBase):
    '''
    Base class for an advanced search in RecordSearch.
    There are different search forms for the different RecordSearch entities,
    so don't use this directly. Use one of the subclasses.
    '''
    def __init__(self, results_per_page=20, sort=None, record_detail='brief', **kwargs):
        self.browser = browsers[f'{self.entity_type}_search']
        self.page = 0
        # Capture the optional parameters (minus self/kwargs) so they can be
        # forwarded to search() and used when building cache keys.
        params = locals().copy()
        params.pop('self')
        params.pop('kwargs')
        self.params = params
        self.kwargs = kwargs
        self.initialise_search()

    def get_full_details(self, results):
        # Replace brief search results with the full scraped record for each entity.
        full_results = []
        for result in results:
            full_results.append(self.entity(result['identifier'], cache=False).data)
            time.sleep(0.5)  # be polite to the server
        return full_results

    def get_results(self, page=None):
        '''
        Return a list of results from a search results page.
        The `page` value is incremented with each request, so you can call this method
        in a loop to retrieve the complete results set.
        When you reach the end of the results, this method will return an empty list.
        Optional parameter:

        * `page` – request a specific page from the results set
        '''
        if self.total_results == '20,000+':
            # RecordSearch refuses to page through more than 20,000 results.
            return {'total_results': self.total_results, 'page': None, 'number_of_results': 0, 'results': [], 'error': 'Your search returns too many results.'}
        if page:
            self.page = page
        else:
            self.page += 1
        if self.page <= self.total_pages:
            results = self.search(**self.params, **self.kwargs)
        else:
            # No more data, so return an empty list
            results = {'total_results': self.total_results, 'page': None, 'number_of_results': 0, 'results': []}
        return results

    def process_list(self, details):
        # Process each data row of a results table, adding a retrieval timestamp.
        results = []
        retrieved = arrow.now(tz='Australia/Sydney').isoformat()
        for row in details.find_all('tr')[1:]:
            record = self.process_row(row)
            record['retrieved'] = retrieved
            results.append(record)
        return results

    def process_page(self, soup, record_detail):
        # Do something – overridden by subclasses to extract records from a results page.
        return []

    def get_total_results(self, soup):
        # Read the total number of results from a results page.
        total = 0
        if soup.find(id='ContentPlaceHolderSNR_lblToManyRecordsError') is not None:
            # RecordSearch won't report totals above 20,000.
            total = '20,000+'
        elif soup.find('span', attrs={'id': re.compile('lblDisplaying$')}) is not None:
            total_text = soup.find('span', attrs={'id': re.compile('lblDisplaying$')}).text
            total = int(re.search(r'of (\d+)', total_text).group(1))
        elif soup.find('span', text='Displaying 1 of 1'):
            total = 1
        return total

    def refresh_cache(self):
        '''
        Delete data for this search from the cache, then retrieve a fresh version from RecordSearch.
        '''
        cache_key = self.generate_cache_key()
        # Strip the page suffix so every cached page of this search is matched.
        # NOTE(review): pattern isn't a raw string – '\d' triggers a
        # DeprecationWarning on recent Pythons; consider r'_page_\d+$'.
        cache_key = re.sub('_page_\d+$', '', cache_key)
        for key in cache_db.keys():
            if cache_key in key:
                del cache_db[key]
        self.page = 0
        self.initialise_search()

    def generate_cache_key(self):
        '''
        Use the search parameters to generate a key to use in storing the cached results.
        '''
        params = self.params.copy()
        params.update(self.kwargs)
        search_key = '_'.join(sorted([f'{k}_{v}' for k, v in params.items() if v is not None]))
        search_key = f'{self.entity_type}_{search_key}_page_{self.page}'
        return search_key

    def search(self, results_per_page=None, sort=None, record_detail='brief', **kwargs):
        # Run the search (or return cached results) for the current page.
        # Generate key to use with cache
        cache_key = self.generate_cache_key()
        # Try to get results from cache first
        try:
            results = cache_db[cache_key]
        except KeyError:
            # Set the number of results per page
            if results_per_page != 20:
                search_form = self.browser.select_form('#formSNRMaster')
                search_form.set('ctl00$ContentPlaceHolderSNR$ddlResultsPerPage', results_per_page)
                submit_button = self.browser.page.find(id='ContentPlaceHolderSNR_btnSearch')
                self.browser.submit_selected()
            # Apply sort
            if sort:
                r = self.browser.open(f'{self.browser.url}?sort={sort}')
            # Retrieve a specific page in the results set
            if self.page > 1:
                url = self.browser.url.split('?')[0]
                # RecordSearch's page query param is zero-based.
                self.browser.open(f'{url}?page={self.page-1}')
            # Get item details from list of search results
            data = self.process_page(self.browser.page, record_detail)
            results = {
                'total_results': self.total_results,
                'page': self.page,
                'number_of_results': len(data),
                'results': data,
                'retrieved': arrow.now(tz='Australia/Sydney').isoformat()
            }
            cache_db[cache_key] = results
            #self.page = results['page']
        return results

    def initialise_search(self):
        '''
        Populates the search form and retrieves the total number of results.
        '''
        # Start a session
        self.get_url('https://recordsearch.naa.gov.au/scripts/Logon.asp?N=guest')
        # Find the main advanced search link
        main_link = self.browser.find_link(url_regex='AdvSearchMain.aspx')
        self.browser.follow_link(main_link)
        # Find the advanced search link for this entity
        search_link = self.browser.find_link(url_regex=self.search_page)
        self.browser.follow_link(search_link)
        # Get the submit button for the search form
        submit_button = self.browser.page.find(id='ContentPlaceHolderSNR_btnSearch')
        # Get the search form
        search_form = self.browser.select_form('#formSNRMaster')
        # Populate the search form with the supplied params
        for key, value in self.kwargs.items():
            search_form.set(self.search_params[key]['id'], value)
        # Submit the form
        search_form.choose_submit(submit_button)
        self.browser.submit_selected()
        # There's a 'search is running' page that has a form that needs to be submitted.
        running_form = self.browser.select_form('#Form1')
        self.browser.submit_selected()
        # Save the total number of results
        self.total_results = self.get_total_results(self.browser.page)
        # Calculate the number of pages in the results set
        try:
            self.total_pages = math.ceil(self.total_results / self.params['results_per_page'])
        except TypeError:
            # More than 20,000 results (total is the string '20,000+')
            pass


# +
#export
class RSItem(RSEntity):
    '''
    Class used for extracting data about an individual item (usually a file,
    but can be a volume, box, photograph etc) from RecordSearch.

    You need to supply one of the following parameters:

    * `identifier` – the Item ID (aka barcode)
    * `details` – the BeautifulSoup HTML element containing the item details

    You'd only use `details` if you already have a RecordSearch page and want to
    extract item data from it. (There's an example of this in the `RSItemSearch` class.)

    The item data is obtained by accessing the item's `.data` attribute.
    '''
    entity_type = 'item'

    def __init__(self, identifier=None, cache=True, details=None):
        super(RSItem, self).__init__(identifier, cache)
        self.details = details
        if details:
            # Details supplied from a search results page – read the id out of the table.
            self.identifier = self.get_value('Item ID')
        self.data = self.get_item()

    def get_series(self):
        # The series number is presented as a link inside the cell.
        cell = self.get_cell('Series number')
        return cell.find('a').string.strip()

    def get_access_reasons(self):
        '''
        Extract the list of reasons why material has been withheld after access examination.
        '''
        cell = self.get_cell('Reason for restriction')
        reasons = []
        if cell:
            for link in cell.find_all('a'):
                reasons.append(link.string.strip())
        return reasons

    def get_digitised_pages(self):
        '''
        Returns the number of pages (images) in a digitised file.
        This is scraped from the RecordSearch digitised file viewer.
        The file viewer is outside of RecordSearch's session system,
        so it can be requested directly.
        '''
        url = f'https://recordsearch.naa.gov.au/SearchNRetrieve/Interface/ViewImage.aspx?B={self.identifier}'
        response = s.get(url, timeout=30)
        soup = BeautifulSoup(response.text, features='lxml')
        try:
            # The last page number from the navigation will be the total number of pages
            pages = int(soup.find('span', attrs={'id': "lblEndPage"}).string)
        except AttributeError:
            # If there's no navigation it might be a single page
            if soup.find('span', attrs={'id': "lblCitation"}):
                pages = 1
            # Or something else...
            else:
                pages = 0
        return pages

    def check_if_digitised(self):
        '''
        Check to see if the file is digitised, by looking for a link to the digital copy.
        '''
        if self.details.find(text=re.compile("View digital copy")):
            return True
        else:
            return False

    def get_item(self, date_format='iso'):
        # Assemble the item metadata, using the cache where possible.
        # Try to retrieve from cache first
        try:
            item = cache_db[f'item_{self.identifier}']
        # If not in the cache and the details are not supplied, get it from RS.
        except KeyError:
            if not self.details:
                self.get_entity_page()
                self.details = self.get_details()
            if self.details:
                item = {
                    'title': self.get_value('Title'),
                    'identifier': self.identifier,
                    'series': self.get_series(),
                    'control_symbol': self.get_value('Control symbol'),
                    'digitised_status': self.check_if_digitised(),
                    'digitised_pages': self.get_digitised_pages(),
                    'access_status': self.get_value('Access status'),
                    'access_decision_reasons': self.get_access_reasons(),
                    'location': self.get_value('Location'),
                    'retrieved': arrow.now(tz='Australia/Sydney').isoformat()
                }
                item.update(self.get_formatted_dates('Contents date range', 'contents_'))
                item.update(self.get_formatted_date('Date of decision', 'access_decision_'))
                if self.cache:
                    # Add to the cache
                    cache_db[f'item_{self.identifier}'] = item
            else:
                item = {'identifier': self.identifier, 'error': 'Item not found'}
        return item

    def __repr__(self):
        # The string representation doubles as a brief citation.
        return f'NAA: {self.data["series"]}, {self.data["control_symbol"]}'
# -

# Items in RecordSearch are usually paper files, but can be other things like volumes, boxes, videos, or digital objects. Items have a unique identifier described as the 'Item ID'; this was previously referred to as the item's 'barcode'. The `RSItem` class extracts information about an individual item from RecordSearch, using its Item ID.
#
# Here are the fields returned:
#
# * `title` (string)
# * `identifier` (string)
# * `series` (string)
# * `control_symbol` (string)
# * `digitised_status` (boolean) – `True` if the item has been digitised
# * `digitised_pages` (integer) – number of pages in the digitised file
# * `access_status` (string) – one of 'Open', 'OWE', 'Closed', 'NYE' (see [access status options](#Access-status-options))
# * `access_decision_reasons` (list) – a list of reasons why material has been withheld from public access (if CLOSED, or OWE)
# * `location` (string)
# * `contents_date_str` (ISO formatted date)
# * `contents_start_date` (ISO formatted date)
# * `contents_end_date` (ISO formatted date)
# * `access_decision_date_str` (ISO formatted date)
# * `access_decision_date` (ISO formatted date)
# * `retrieved` (ISO formatted datetime) - when this record was scraped
#
# Note that `digitised_pages` is not part of the metadata presented in RecordSearch's item description. This value is obtained from the digitised file viewer using `get_digitised_pages()`.

# To retrieve information about an item, just give `RSItem()` the Item ID (also known as the barcode).

# Get an item
item = RSItem('3445411')

# You can then access the item data using the `.data` attribute.

display(item.data)

# Use `item.data[FIELD NAME]` to access individual fields. The `series` value of this item should be 'B2455'.

assert item.data['series'] == 'B2455'

# As an added bonus, the string representation of the item is also its brief citation.

str(item)

# The extracted data is saved into a simple key-value cache to speed up repeat requests. If you want to scrape a fresh version, use `.refresh_cache()`.

show_doc(RSItem.refresh_cache)

# We can check that this has worked by comparing the value of `retrieved`, which is the date/time the data was scraped.
# +
old_retrieved_date = item.data['retrieved']
item.refresh_cache()
new_retrieved_date = item.data['retrieved']

assert old_retrieved_date != new_retrieved_date
# +
#export
class RSItemSearch(RSSearch):
    '''
    Search for items in RecordSearch.

    Supply any of the item search parameters as kwargs to initialise the search.

    Optional parameters:

    * `results_per_page` (default: 20)
    * `sort` (default: 1 – order by id)
    * `page` – to retrieve a specific page of results
    * `record_detail` – amount of detail to include, options are:
        * 'brief' (default) – just the info in the search results
        * `digitised` – add the number of pages if the file is digitised (slower)
        * 'full' – get the full individual record for each result (slowest)

    To access a page of results, use the `.get_results()` method.
    This method increments the results page, so you can call it in a loop
    to retrieve the complete result set.

    Useful attributes:

    * `.total_results` – the total number of results in the results set
    * `.total_pages` – the total number of result pages
    * `.kwargs` – a dict containing the supplied search parameters
    * `.params` – a dict containing the values of the optional parameters
    '''
    entity_type = 'item'
    search_params = ITEM_FORM
    search_page = 'AdvSearchItems.aspx'
    entity = RSItem

    def __init__(self, results_per_page=20, sort=9, record_detail='brief', **kwargs):
        # NOTE(review): the class docstring says the default sort is 1, but the
        # signature default here is 9 – confirm which is intended.
        super(RSItemSearch, self).__init__(results_per_page=results_per_page, sort=sort, record_detail=record_detail, **kwargs)

    def process_row(self, row):
        # Extract brief item metadata from one row of the results table.
        cells = row.find_all('td')
        item = {
            'series': cells[1].string.strip(),
            'control_symbol': cells[2].string.strip(),
            'title': cells[3].contents[0].string.strip(),
            'identifier': cells[6].string.strip()
        }
        access_string = cells[3].find('div', 'CombinedTitleBottomLeft').string
        item['access_status'] = re.search(r'Access status: ([\w ]+)', access_string).group(1).strip()
        location_string = cells[3].find('div', 'CombinedTitleBottomRight').string
        item['location'] = re.search(r'Location: ([\w ]+)', location_string).group(1).strip()
        date_str = cells[4].string.strip()
        item.update(self.process_date_string(date_str, 'contents_'))
        # A link in the fifth cell indicates a digital copy is available.
        if cells[5].find('a') is not None:
            item['digitised_status'] = True
        else:
            item['digitised_status'] = False
        return item

    def get_digitised_page_counts(self, items):
        # For digitised items, look up the number of pages via the file viewer.
        for item in items:
            if item['digitised_status'] == True:
                item['digitised_pages'] = RSItem(item['identifier']).get_digitised_pages()
            else:
                item['digitised_pages'] = 0
        return items

    def process_page(self, soup, record_detail):
        '''
        Extract item data from a search results page.
        Level of item data can be varied using the `record_detail` parameter:

        * 'brief' - just the data in the search results
        * 'digitised' - if the file is digitised, get the number of pages
        * 'full': retrieve the individual item record to get extra fields
        '''
        # There's a list of items
        if details := soup.find(id=re.compile('tblItemDetails$')):
            items = self.process_list(details)
        # There's a single item
        elif soup.find(id=re.compile('ContentPlaceHolderSNR_ucItemDetails_phDetailsView')) is not None:
            details = soup.find('div', 'detailsTable')
            items = [self.entity(details=details).data]
        # No items?
        else:
            raise Exception('No results found on page!')
        # Add number of pages in digitised files
        if record_detail == 'digitised':
            items = self.get_digitised_page_counts(items)
        # Get full item information
        if record_detail == 'full':
            items = self.get_full_details(items)
        return items
# -
# ### Item search parameters
#
# These are the parameters you can supply as keyword arguments to `RSItemSearch`.
# # | Parameter | Input type | Values | # | :--- | :--- | :--- | # | **kw** | Text | Keywords or phrase to search for| # | **kw_options** | Select | How to combine the keywords – see [keyword options](#Keyword-options) | # | **kw_exclude** | Text | Keywords or phrase to exclude | # | **kw_exclude_options** | Select | How to combine the keywords – see [keyword options](#Keyword-options) | # | **search_notes** | Checkbox | Set to `True` to search notes as well as titles | # | **series** | Text | Limit to items from this series – eg 'A1' | # | **series_exclude** | Text | Exclude items from this series | # | **control** | Text | Limit to items with this control symbol (use * for wildcards) – eg '1947/2*' | # | **control_exclude** | Text | Exclude items with this control symbol | # | **item_id** | Text | Get the item with this identifier (no wildcards allowed)| # | **date_from** | Text | Include items with content after this date (year only) – eg '1925' | # | **date_to** | Text | Include items with content before this date (year only) – eg '1945' | # | **formats** | Select | Limit to items in this format – see [format options](#Format-options) | # | **formats_exclude** | Select | Exclude items in this format – see [format options](#Format-options) | # | **locations** | Select | Limit to items held in this location – see list of [locations](#Location-options) | # | **locations_exclude** | Select | Exclude items held in this location – see list of [locations](#Location-options) | # | **access** | Select | Limit to items with this access status – see [access status options](#Access-status-options) | # | **access_exclude** | Select | Exclude items with this access status – see [access status options](#Access-status-options) | # | **digital** | Checkbox | Limit to digitised items – set to `True` | # # ### Keyword options # # Use one of the following values to specify how keywords or phrases should be treated using the `kw_options` parameter. The default is 'ALL'. 
# * **'ALL'** (default) – must include all keywords
# * **'ANY'** – must include at least one of the keywords
# * **'EXACT'** – treat the keywords as a phrase
#
# ### Format options
#
# Use one of the following values with the `formats` and `formats_exclude` parameters to limit your results to items in that format. The default is to include all formats.
#
# * **'Paper files and documents'**
# * **'Index cards'**
# * **'Bound volumes'**
# * **'Cartographic records'**
# * **'Photographs'**
# * **'Microforms'**
# * **'Audio-visual records'**
# * **'Audio records'**
# * **'Electronic records'**
# * **'3-dimensional records'**
# * **'Scientific specimens'**
# * **'Textiles'**
#
# ### Location options
#
# Use one of the following values with the `locations` and `locations_exclude` parameters to limit your results to items held in that location. The default is to include all locations.
#
# * **'NAT,ACT'** – National office (ACT)
# * **'AWM'** – Australian War Memorial
# * **'NSW'**
# * **'NT'**
# * **'QLD'**
# * **'SA'**
# * **'TAS'**
# * **'VIC'**
# * **'WA'**
#
#
# ### Access status options
#
# Use one of the following values with the `access` and `access_exclude` parameters to limit your results to items with this access examination status. The default is to include all.
#
# * **'OPEN'** – available for public access
# * **'OWE'** – open with exceptions (eg it might have pages withheld or redactions applied)
# * **'CLOSED'** – withheld completely from public access
# * **'NYE'** – not yet examined (no access decision has been made)

# ### Examples

# Here's a basic keyword search for items.

item_results = RSItemSearch(kw='wragge')

# Initialising the `RSItemSearch` class sets up the search and retrieves some information about the results set. For example, to see the total number of results, we just access the `.total_results` attribute.

item_results.total_results

show_doc(RSItemSearch.get_results)

item_results.get_results()

item_results.params

# Calling `.refresh_cache` will remove all of the data for this search from the cache, and set the results page back to 1.

show_doc(RSItemSearch.refresh_cache)

# +
#export
class RSSeries(RSEntity):
    '''
    Class used for extracting data about an individual series.

    You need to supply the following parameter:

    * `identifier` – the series number, eg 'A1', 'B2455'

    Optional parameters:

    * `include_number_digitised` (boolean, default: True) – include the number of items in this series that have been digitised.
    * `include_access_status` (boolean, default: True) – include the number of items in this series in each of the access status categories.

    The series data is obtained by accessing the series' `.data` attribute.
    '''
    entity_type = 'series'

    def __init__(self, identifier=None, cache=True, details=None, include_number_digitised=True, include_access_status=True):
        super(RSSeries, self).__init__(identifier, cache)
        self.digitised = include_number_digitised
        self.access_status = include_access_status
        self.details = details
        if details:
            # Details supplied from a search results page – read the id out of the table.
            self.identifier = self.get_value('Series number')
        self.data = self.get_series()

    def get_number_described(self):
        # Parse the 'Items in this series on RecordSearch' value into a count and a note.
        described = self.get_value('Items in this series on RecordSearch')
        try:
            described_number, described_note = re.search(r'(\d+)(.*)', described).groups()
            try:
                described_number = int(described_number)
            except ValueError:
                pass
        except AttributeError:
            # No number found in the value – treat the whole value as the note.
            described_number = 0
            described_note = described
        except TypeError:
            # No value at all.
            described_number = 0
            described_note = ''
        return {'items_described': described_number, 'items_described_note': described_note.strip()}

    def get_quantity_locations(self):
        # Parse the 'Quantity and location' list into shelf metres per location.
        cell = self.get_cell('Quantity and location')
        locations = []
        if cell:
            for location in cell.findAll('li'):
                try:
                    quantity, location = re.search(r'(\d+\.*\d*) metres held in ([A-Z,a-z]+)', location.string).groups()
                    quantity = float(quantity)
                except AttributeError:
quantity = None location = None locations.append({ 'quantity': quantity, 'location': location }) return locations def get_access_status_totals(self): totals = {} for status in ACCESS: totals[status] = RSItemSearch(series=self.identifier, access=status).total_results time.sleep(0.5) return totals def generate_cache_key(self): cache_key = f'series_{self.identifier}' if self.digitised: cache_key = f'{cache_key}_digitised' if self.access_status: cache_key = f'{cache_key}_access' return cache_key def refresh_cache(self): ''' Delete data for this item from the cache, then extract a fresh version from RecordSearch. ''' cache_key = self.generate_cache_key() del cache_db[cache_key] self.__init__(self.identifier, self.cache, self.digitised, self.access_status) def get_series(self): # Try to retrieve from cache first cache_key = self.generate_cache_key() try: series = cache_db[cache_key] # If not in the cache and the details are not supplied, get it from RS. except KeyError: if not self.details: self.get_entity_page() self.details = self.get_details() if self.details: series = { 'identifier': self.identifier, 'title': self.get_value('Title'), 'physical_format': self.get_value('Predominant physical format'), 'arrangement': self.get_value('System of arrangement/ control'), 'control_symbols': self.get_value('Range of control symbols'), 'locations': self.get_quantity_locations(), 'recording_agencies': self.get_relations('recording'), 'controlling_agencies': self.get_relations('controlling'), 'previous_series': self.get_relations('Previous'), 'subsequent_series': self.get_relations('Subsequent'), 'controlling_series': self.get_relations('Controlling'), 'related_series': self.get_relations('Related'), 'retrieved': arrow.now(tz='Australia/Sydney').isoformat() } series.update(self.get_number_described()) series.update(self.get_formatted_dates('Contents dates', 'contents_')) series.update(self.get_formatted_dates('Accumulation dates', 'accumulation_')) if self.digitised: 
series['items_digitised'] = RSItemSearch(series=self.identifier, digital=True).total_results if self.access_status: series['access_status_totals'] = self.get_access_status_totals() if self.cache: # Add to the cache cache_db[cache_key] = series else: series = {'identifier': self.identifier, 'error': 'Series not found'} return series def __repr__(self): return f'NAA: {self.identifier}' # - # A series is a group of records that have something in common, for example, they might have been part of the same filing system. Series can be related to other series, and to agencies. A single series can also be held across multiple locations. All this means the data can be quite complex. # # Not that as well as the standard RecordSearch metadata, the scraper can also extract some extra information about the series, such as the number of items digitised, and the access status of items in the series. # # Here are the fields returned: # # * `identifier` (string) # * `title` (string) # * `physical_format` (string) # * `arrangement` (string) # * `control_symbols` (string) # * `locations` (list) – a list of locations, each with the fields: # * `quantity` (string) # * `location` (string) # * `recording_agencies` – a list of agencies, each with the fields: # * `identifier` (string) # * `title` (string) # * `date_str` (string) # * `start_date` (ISO formatted date) # * `end_date` (ISO formatted date) # * `controlling_agencies` – a list of agencies, each with the fields: # * `identifier` (string) # * `title` (string) # * `date_str` (string) # * `start_date` (ISO formatted date) # * `end_date` (ISO formatted date) # * `previous_series` – a list of series, each with the fields: # * `identifier` (string) # * `title` (string) # * `date_str` (string) # * `start_date` (ISO formatted date) # * `end_date` (ISO formatted date) # * `subsequent_series` – a list of series, each with the fields: # * `identifier` (string) # * `title` (string) # * `date_str` (string) # * `start_date` (ISO formatted date) 
# * `end_date` (ISO formatted date)
# * `controlling_series` – a list of series, each with the fields:
#     * `identifier` (string)
#     * `title` (string)
#     * `date_str` (string)
#     * `start_date` (ISO formatted date)
#     * `end_date` (ISO formatted date)
# * `related_series` (list) – a list of series, each with the fields:
#     * `identifier` (string)
#     * `title` (string)
#     * `date_str` (string)
#     * `start_date` (ISO formatted date)
#     * `end_date` (ISO formatted date)
# * `items_described` (integer)
# * `items_described_note` (string)
# * `contents_date_str` (string)
# * `contents_start_date` (ISO formatted date)
# * `contents_end_date` (ISO formatted date)
# * `accumulation_date_str` (string)
# * `accumulation_start_date` (ISO formatted date)
# * `accumulation_end_date` (ISO formatted date)
# * `items_digitised` (integer) – the number of items in this series that have been digitised
# * `access_status_totals` (dict) – the number of items in each of the access status categories, OPEN, OWE, CLOSED, and NYE.
#
# To retrieve information about a series, just give `RSSeries()` the series number.

series = RSSeries('A863')

# You can then access the series data using the `.data` attribute.

display(series.data)

# You can find out how many items within the series are closed to public access. In this case, it should be none.

assert series.data['access_status_totals']['CLOSED'] == 0

# You can access both the number of items described and digitised within each series. We'd expect the number described to be greater than or equal to the number digitised.

assert series.data['items_described'] >= series.data['items_digitised']

# By default, the scraper adds some extra information to the basic metadata – `items_digitised` and `access_status_totals`. To obtain these values, the scraper runs item searches – one to find digitised files, and another four to find all the access status values. This can slow things down considerably. If you want a quick response and don't care about these values, you can set `include_number_digitised` and/or `include_access_status` to `False`.
#
# In the case below, the series data should **not** include a value for `items_digitised`.

# +
series = RSSeries('A3', include_number_digitised=False)

assert 'items_digitised' not in series.data
# -

# The extracted data is saved into a simple key-value cache to speed up repeat requests. If you want to scrape a fresh version, use `.refresh_cache()`.

show_doc(RSSeries.refresh_cache)

# We can check that this has worked by comparing the value of `retrieved`, which is the date/time the data was scraped.

# +
old_retrieved_date = series.data['retrieved']
series.refresh_cache()
new_retrieved_date = series.data['retrieved']

assert old_retrieved_date != new_retrieved_date
# +
#export
class RSSeriesSearch(RSSearch):
    '''
    Search for series in RecordSearch.

    Supply any of the series search parameters as kwargs to initialise the search.

    Optional parameters:

    * `results_per_page` (default: 20)
    * `sort` (default: 1 – order by id)
    * `page` – to retrieve a specific page of results
    * `record_detail` – amount of detail to include, options are:
        * 'brief' (default) – just the info in the search results
        * 'full' – get the full individual record for each result (slow)

    To access a page of results, use the `.get_results()` method.
    This method increments the results page, so you can call it in a loop
    to retrieve the complete result set.
Useful attributes: * `.total_results` – the total number of results in the results set * `.total_pages` – the total number of result pages * `.kwargs` – a dict containing the supplied search parameters * `.params` – a dict containing the values of the optional parameters ''' entity_type = 'series' search_params = SERIES_FORM search_page = 'AdvSearchSeries.aspx' entity = RSSeries def __init__(self, results_per_page=20, sort=1, record_detail='brief', **kwargs): super(RSSeriesSearch, self).__init__(results_per_page=results_per_page, sort=sort, record_detail=record_detail, **kwargs) def get_locations(self, cell): locations = [] for row in cell.stripped_strings: location, quantity = row.split(': ') locations.append({'location': location, 'quantity': quantity}) return locations def get_items_described(self, cell): if link := cell.find('a'): return int(link.string.strip()) else: return int(cell.string.strip()) def process_row(self, row): cells = row.find_all('td') series = { 'identifier': cells[1].string.strip(), 'title': cells[2].contents[0].string.strip(), 'locations': self.get_locations(cells[5]), 'items_described': self.get_items_described(cells[6]) } accumulated_date_str = cells[3].string series.update(self.process_date_string(accumulated_date_str, 'accumulation_')) contents_date_str = cells[4].string series.update(self.process_date_string(contents_date_str, 'contents_')) return series def process_page(self, soup, record_detail): ''' Extract item data from a search results page. 
Level of item data can be varied using the `record_detail` parameter: * 'brief' - just the data in the search results * 'full': retrieve the individual item record to get extra fields ''' # There's a list of items if details := soup.find(id=re.compile('tblSeriesDetails$')): series = self.process_list(details) # There's a single item elif soup.find(id=re.compile('ContentPlaceHolderSNR_ucSeriesDetails_phDetailsView')) is not None: details = soup.find('div', 'detailsTable') series = [self.entity(details=details).data] # No items? else: raise Exception('No results found on page!') # Get full record information if record_detail == 'full': series = self.get_full_details(series) return series # - # ### Series search parameters # # These are the parameters you can supply as keyword arguments to `RSSeriesSearch`. # # | Parameter | Input type | Values | # | :--- | :--- | :--- | # | **kw** | Text | Keywords or phrase to search for| # | **kw_options** | Select | How to combine the keywords – see [keyword options](#Series-keyword-options) | # | **kw_exclude** | Text | Keywords or phrase to exclude | # | **kw_exclude_options** | Select | How to combine the keywords – see [keyword options](#Series-keyword-options) | # | **search_notes** | Checkbox | Set to `True` to search notes as well as titles | # | **series_id** | Text | Search for this series identifier | # | **date_from** | Text | Include series with content after this date (year only) – eg '1925' | # | **date_to** | Text | Include series with content before this date (year only) – eg '1945' | # | **formats** | Select | Limit to series with items in this format – see [format options](#Series-format-options) | # | **formats_exclude** | Select | Exclude series with items in this format – see [format options](#Series-format-options) | # | **locations** | Select | Limit to series held in this location – see list of [locations](#Series-location-options) | # | **locations_exclude** | Select | Exclude series held in this location 
– see list of [locations](#Series-location-options) | # | **agency_recording** | Select | Limit to series created by this agency or person | # | **agency_controlling** | Select | Limit to series controlled by this agency or person | # # ### Series keyword options # # Use one of the following values to specify how keywords or phrases should be treated using the `kw_options` parameter. The default is 'ALL'. # # * **'ALL'** (default) – must include all keywords # * **'ANY'** – must include at least one of the keywords # * **'EXACT'** – treat the keywords as a phrase # # ### Series location options # # Use one of the following values with the `locations` and `locations_exclude` parameters to limit your results to items held in that location. The default is to include all locations. # # * **'NAT,ACT'** – National office (ACT) # * **'AWM'** – Australian War Memorial # * **'NSW'** # * **'NT'** # * **'QLD'** # * **'SA'** # * **'TAS'** # * **'VIC'** # * **'WA'** # # ### Series format options # # Use one of the following values with the `formats` and `formats_exclude` parameters to limit your results to series containing that format. The default is to include all formats. # # * **'Paper files and documents'** # * **'Index cards'** # * **'Bound volumes'** # * **'Cartographic records'** # * **'Photographs'** # * **'Microforms'** # * **'Audio-visual records'** # * **'Audio records'** # * **'Electronic records'** # * **'3-dimensional records'** # * **'Scientific specimens'** # * **'Textiles'** # # ### Examples # # Initialise a search. series_results = RSSeriesSearch(agency_recording='CA 1196') # You can access the `.total_results` attribute to find out how many results there are. series_results.total_results # Naturally enough, the `.total_results` value should be an integer, as should `.total_pages`. 
assert isinstance(series_results.total_results, int)
assert isinstance(series_results.total_pages, int)

series_results.params

show_doc(RSSeriesSearch.get_results)

series_results.get_results(2)

# Calling `.refresh_cache` will remove all of the data for this search from the cache,
# and set the results page back to 1.

show_doc(RSSeriesSearch.refresh_cache)

# +
#export
class RSAgency(RSEntity):
    '''
    Class used for extracting data about an individual agency.

    You need to supply the following parameter:

    * `identifier` – the agency identifier, eg 'CA 12'

    The agency data is obtained by accessing the `.data` attribute.
    '''
    entity_type = 'agency'

    def __init__(self, identifier=None, cache=True, details=None, include_series_count=True):
        super(RSAgency, self).__init__(identifier, cache)
        # Whether to run an extra search to count series created by this agency.
        self.series = include_series_count
        self.details = details
        if details:
            # Details were scraped from a search results page – read the id from them.
            self.identifier = self.get_value('Agency number')
        self.data = self.get_agency()

    def get_series_count(self):
        # Number of series recorded by this agency (costs an extra search request).
        series_count = RSSeriesSearch(agency_recording=self.identifier).total_results
        return series_count

    def get_agency(self):
        # Try to retrieve from cache first
        cache_key = f'{self.entity_type}_{self.identifier}'
        try:
            agency = cache_db[cache_key]
        # If not in the cache and the details are not supplied, get it from RS.
        except KeyError:
            if not self.details:
                self.get_entity_page()
                self.details = self.get_details()
            if self.details:
                agency = {
                    'identifier': self.identifier,
                    'title': self.get_value('Title'),
                    'agency_status': self.get_value('Agency status'),
                    'location': self.get_value('Location'),
                    'functions': self.get_relations('Function'),
                    'controlling_organisation': self.get_relations('Organisation controlling'),
                    'previous_agencies': self.get_relations('Previous'),
                    'subsequent_agencies': self.get_relations('Subsequent'),
                    'superior_agencies': self.get_relations('Superior'),
                    'controlled_agencies': self.get_relations('Controlled'),
                    'associated_people': self.get_relations('Persons'),
                    'retrieved': arrow.now(tz='Australia/Sydney').isoformat()
                }
                # series.update(self.get_number_described())
                agency.update(self.get_formatted_dates('Date range'))
                if self.series:
                    agency['number_of_series'] = self.get_series_count()
                if self.cache:
                    # Add to the cache
                    cache_db[cache_key] = agency
            else:
                agency = {'identifier': self.identifier, 'error': 'Agency not found'}
        return agency

    def __repr__(self):
        # Fix: the title was previously emitted as the literal text
        # 'self.data["title"]' because it sat outside the f-string braces.
        return f'{self.identifier}, {self.data["title"]}'
# -

# Here are the fields returned:
#
# * `identifier` (string)
# * `title` (string)
# * `agency_status` (string)
# * `location` (string)
# * `functions` – a list of functions performed by this agency
# * `controlling_organisation` – a list of controlling organisations
# * `previous_agencies`, `subsequent_agencies`, `superior_agencies`,
#   `controlled_agencies` – lists of related agencies
# * `associated_people` (list) – a list of associated people
#
# Each related entity above is a dict with the fields: `identifier`, `title`,
# `date_str`, and `start_date` / `end_date` (ISO formatted dates).
#
# * `date_str` (string)
# * `start_date` (ISO formatted date)
# * `end_date` (ISO formatted date)
# * `number_of_series` (integer) – number of series created by this agency
# * `retrieved` (ISO formatted date)
#
# ### Examples
#
# To retrieve information about an agency, just give `RSAgency` the agency identifier.

agency = RSAgency('CA 343')

# You can then access the agency data using the `.data` attribute.

agency.data

# Use `agency.data[FIELD NAME]` to access individual fields.
# The `agency_status` value of this agency should be 'Head Office'.

assert agency.data['agency_status'] == 'Head Office'

# The extracted data is saved into a simple key-value cache to speed up repeat requests.
# If you want to scrape a fresh version, use `.refresh_cache()`.

show_doc(RSAgency.refresh_cache)

# +
#export
class RSAgencySearch(RSSearch):
    '''
    Search for agencies in RecordSearch.

    Supply any of the agency search parameters as kwargs to initialise the search.

    Optional parameters:

    * `results_per_page` (default: 20)
    * `sort` (default: 1 – order by id)
    * `page` – to retrieve a specific page of results
    * `record_detail` – amount of detail to include, options are:
        * 'brief' (default) – just the info in the search results
        * 'full' – get the full individual record for each result (slow)

    To access a page of results, use the `.get_results()` method.
    This method increments the results page, so you can call it in a loop
    to retrieve the complete result set.

    Useful attributes:

    * `.total_results` – the total number of results in the results set
    * `.total_pages` – the total number of result pages
    * `.kwargs` – a dict containing the supplied search parameters
    * `.params` – a dict containing the values of the optional parameters
    '''
    # Class-level configuration consumed by the RSSearch base class.
    entity_type = 'agency'
    search_params = AGENCY_FORM
    search_page = 'AdvSearchAgencies.aspx'
    entity = RSAgency

    def __init__(self, results_per_page=20, sort=1, record_detail='brief', **kwargs):
        super(RSAgencySearch, self).__init__(results_per_page=results_per_page, sort=sort, record_detail=record_detail, **kwargs)

    def process_row(self, row):
        # Convert one results-table row into a dict of agency metadata.
        cells = row.find_all('td')
        agency = {
            'identifier': cells[1].string.strip(),
            'title': cells[2].contents[0].string.strip()
        }
        date_str = cells[3].string
        agency.update(self.process_date_string(date_str))
        return agency

    def process_page(self, soup, record_detail):
        '''
        Extract agency data from a search results page.

        Level of record data can be varied using the `record_detail` parameter.
        '''
        # There's a list of items
        if results := soup.find(id=re.compile('tblProvDetails$')):
            agencies = self.process_list(results)
        # There's a single item
        elif soup.find(id=re.compile('ucAgencyDetails_phDetailsView')) is not None:
            details = soup.find('div', 'detailsTable')
            agencies = [self.entity(details=details).data]
        # No items?
        else:
            raise Exception('No results found on page!')
        # Get full item information
        if record_detail == 'full':
            agencies = self.get_full_details(agencies)
        return agencies
# -

# ### Agency search parameters
#
# These are the parameters you can supply as keyword arguments to `RSAgencySearch`.
#
# | Parameter | Input type | Values |
# | :--- | :--- | :--- |
# | **kw** | Text | Keywords or phrase to search for |
# | **kw_options** | Select | How to combine the keywords – see keyword options below |
# | **kw_exclude** | Text | Keywords or phrase to exclude |
# | **kw_exclude_options** | Select | How to combine the keywords – see keyword options below |
# | **function** | Text | Limit to agencies that performed this function – see function note below |
# | **date_from** | Text | Include agencies that existed after this date (year only) – eg '1925' |
# | **date_to** | Text | Include agencies that existed before this date (year only) – eg '1945' |
# | **locations** | Select | Limit to agencies in this location – see location options below |
# | **locations_exclude** | Select | Exclude agencies in this location – see location options below |
# | **agency_status** | Select | Limit to agencies with this status – see status options below |
# | **agency_status_exclude** | Select | Exclude agencies with this status – see status options below |
#
# ### Agency keyword options
#
# Use one of the following values to specify how keywords or phrases should be
# treated using the `kw_options` parameter. The default is 'ALL'.
# * **'ALL'** (default) – must include all keywords
# * **'ANY'** – must include at least one of the keywords
# * **'EXACT'** – treat the keywords as a phrase
#
# ### Agency function note
#
# In theory, functions are a controlled, hierarchical list, but previous examinations
# have shown that the use of functions in RecordSearch can be inconsistent. Here's a
# [list of functions](https://github.com/GLAM-Workbench/recordsearch/blob/master/data/functions.txt)
# extracted from the RecordSearch interface that you can use as values with the
# `function` parameter.
#
# ### Agency location options
#
# Use one of the following values with the `locations` and `locations_exclude`
# parameters. The default is to include all locations.
#
# * **'NAT,ACT'**, **'COCOS OR CHRISTMAS ISLAND'**, **'NSW'**, **'NT'**,
#   **'OVERSEAS'**, **'PNG'** (Papua New Guinea), **'QLD'**, **'SA'**,
#   **'TAS'**, **'VIC'**, **'WA'**
#
# ### Agency status options
#
# Use one of the following values with the `agency_status` and
# `agency_status_exclude` parameters. The default is to include all status values.
#
# * **'DOS'** – Department of State
# * **'HO'** – Head Office
# * **'RO'** – Regional or State Office
# * **'INTGOV'** – Intergovernmental agency
# * **'COURT'** – Judicial Court or Tribunal
# * **'LO'** – Local Office
# * **'NONEX'** – Non-Executive government agency (Courts, Parliament)
#
# ### Examples
#
# Search for all agencies that have performed the 'SCIENCE' function.

agency_search = RSAgencySearch(function='science')

# Initialising the `RSAgencySearch` class sets up the search and retrieves some
# information about the results set. For example, to see the total number of
# results, we just access the `.total_results` attribute.

agency_search.total_results

show_doc(RSAgencySearch.get_results)

agency_search.get_results()

# Calling `.refresh_cache` will remove all of the data for this search from the
# cache, and set the results page back to 1.

show_doc(RSAgencySearch.refresh_cache)
00_scrapers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import cv2
import matplotlib.pyplot as plt
import glob

from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Cropping2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from keras.models import Model

# +
import os

# Collect the RGB source images; the matching masks live in parallel
# 'human_seg' folders (see mask loading below).
filenames = []
for path, subdirs, files in os.walk('../Data/1obj'):
    for name in files:
        if 'src_color' in path:
            filenames.append(os.path.join(path, name))

print('# Training images: {}'.format(len(filenames)))
# -

# Show a few image/mask pairs as a sanity check.
n_examples = 3
for i in range(n_examples):
    plt.subplot(2, 2, 1)
    image = cv2.imread(filenames[i])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(image)
    plt.subplot(2, 2, 2)
    mask_file = filenames[i].replace('src_color', 'human_seg')
    mask = cv2.imread(glob.glob(mask_file[:-4] + '*')[0])
    ret, mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY_INV)
    mask = mask[:, :, 0]
    plt.imshow(mask, cmap='gray')
    plt.show()


# +
def dice_coef(y_true, y_pred, smooth=0.9):
    """Soft Dice coefficient between masks (1.0 = perfect overlap).

    `smooth` avoids division by zero and softens the gradient for
    near-empty masks.
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)


def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so minimising the loss maximises overlap."""
    return -dice_coef(y_true, y_pred)


# +
# Standard U-net: a 4-level contracting path, a bottleneck, and an expanding
# path with skip connections back to the matching contraction level.
img_rows = 240
img_cols = 240
img_channels = 3

inputs = Input((img_rows, img_cols, img_channels))
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

# Bottleneck
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)

# Expanding path with skip connections
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)          # per-pixel foreground probability

model = Model(inputs = inputs, outputs = conv10)
opt = Adam()
model.compile(optimizer = opt, loss=dice_coef_loss, metrics = [dice_coef])
model.summary()
# -

# Build the training arrays.
#
# Fixes relative to the original notebook:
# * X and y are float32 – the originals were uint8, so `img/255.` was
#   truncated to all zeros and the 0/255 masks were fed raw to a sigmoid
#   output with a Dice loss.
# * The mask is resized (`cv2.resize(label, ...)`); the original
#   accidentally resized the *image* a second time and used it as the label.
X = np.ndarray((len(filenames), img_rows, img_cols , img_channels), dtype=np.float32)
y = np.ndarray((len(filenames), img_rows, img_cols , 1), dtype=np.float32)

for i, image in enumerate(filenames):
    img = cv2.imread(image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (240,240))

    mask_file = image.replace('src_color', 'human_seg')
    label = cv2.imread(glob.glob(mask_file[:-4]+'*')[0], 0)   # greyscale mask
    ret, label = cv2.threshold(label, 0, 255, cv2.THRESH_BINARY_INV)
    label = cv2.resize(label, (240, 240))                     # fix: resize the mask, not the image
    label = label.reshape((240, 240, 1))

    X[i] = img / 255.                                         # scale inputs to [0, 1]
    y[i] = (label > 0).astype(np.float32)                     # binary {0, 1} targets for the Dice loss

n_epochs = 1
batch_size = 1
history = model.fit(X, y, batch_size=batch_size, epochs=n_epochs, verbose=1, shuffle=True, validation_split=0.1)
Chapter07/Chapter 7 - Segmenting classes in images with U-net.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Composing transport plans across frames
#
# In this notebook, I investigate the effects of composing transport plans to infer
# temporal couplings between MPs over long time intervals. Ultimately, the evidence
# suggests that this probably won't be accurate enough to use for OT-based regression
# on the Zimmer data if the time interval is longer than 10 frames.

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

# +
from functools import reduce
from itertools import accumulate

import ot
import numpy as np
import matplotlib.pyplot as plt

from otimage import io, viewers, imagerep
from otimage.utils import plot_maxproj
# -

# ### Load data

# +
# Time points to load
t_start = 5
t_stop = 45
n_frames = t_stop - t_start

# Load MP representations of video frames
mp_path = '/home/mn2822/Desktop/WormOT/data/zimmer/mp_components/mp_0000_0050.mat'
with io.MPReader(mp_path) as reader:
    mps = [reader.get_frame(t) for t in range(t_start, t_stop)]
# -

# ### Compute transport plans for a set of frames

def compute_ot(pts_1, pts_2, wts_1, wts_2):
    """Return the exact OT plan between two weighted point clouds.

    The weights are normalised to probability vectors, and the squared
    Euclidean cost matrix is rescaled by its median before solving.
    """
    # Normalize weights
    p_1 = wts_1 / np.sum(wts_1)
    p_2 = wts_2 / np.sum(wts_2)
    # Normalized distance matrix
    M_nn = ot.dist(pts_1, pts_2, metric='sqeuclidean')
    M = M_nn / np.median(M_nn)
    # Compute transport plan (with log=True, ot.emd returns (plan, log))
    P, _ = ot.emd(p_1, p_2, M, log=True)
    return P

# +
# P: plan between consecutive frames; Q: its row-normalised pushforward
p_mtx = []
q_mtx = []
for idx in range(n_frames - 1):
    mp_1 = mps[idx]
    mp_2 = mps[idx + 1]
    P = compute_ot(mp_1.pts, mp_2.pts, mp_1.wts, mp_2.wts)
    Q = P / np.sum(P, 1)[:, np.newaxis]
    p_mtx.append(P)
    q_mtx.append(Q)
# -

# ### Visualize pushforward of composed transport plans

# +
# Composed pushforward matrix
q_comp = q_mtx[0]
for q in q_mtx[1:]:
    q_comp = q_comp @ q

# Composed transport matrix (rescale rows by the first frame's marginal)
p_0 = np.sum(p_mtx[0], 1)[:, np.newaxis]
p_comp = p_0 * q_comp

plt.figure(figsize=(10, 10))
plt.subplot(211)
plt.imshow(q_comp)
plt.title('Composed pushforward')

row_plt = 5
plt.subplot(212)
plt.plot(q_comp[row_plt, :])
plt.title(f'Row: {row_plt}');
# -

viewer = viewers.PushforwardViewer(mps[0], mps[-1], q_comp)

# ### Plot entropy of composed transport plans as a function of time interval length

# +
def mtx_entropy(P):
    """Shannon entropy (in nats) of the plan's nonzero entries."""
    vals = P[P > 0]
    return -np.sum(vals * np.log(vals))

# Compose transport matrices for all time steps
pc_mtx = accumulate([p_mtx[0]] + q_mtx, func=np.matmul)

# Compute entropy of each transport matrix
ent_vals = [mtx_entropy(p) for p in pc_mtx]

# Maximum possible entropy for transport matrix (uniform over all entries)
n_states = p_mtx[0].shape[0] * p_mtx[0].shape[1]
ent_max = np.log(n_states)

# Number of time steps for each composition
t_steps = range(len(ent_vals))

plt.plot(t_steps, [ent_max] * len(t_steps), '--', label='maxent')
plt.plot(t_steps, ent_vals, label='H(P)')
plt.xlabel('number of time steps')
# Fix: np.log is the natural logarithm, so the units are nats, not bits.
plt.ylabel('entropy (nats)')
plt.title('Transport plan entropy');
python/notebooks/image_registration/multi_frame/01_compose_ot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nair-jishnu/Twitter-Sentiment-Analysis/blob/master/PolaSent.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
import tweepy            # To consume Twitter's API
import pandas as pd      # To handle data
import numpy as np       # For number computing

# For plotting and visualization:
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# +
# Fill in your own Twitter API credentials before running.
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''


# +
# This will allow us to use the keys as variables
# API's setup:
def twitter_setup():
    """Authenticate with the Twitter API and return a tweepy API client."""
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    # Return API with authentication:
    api = tweepy.API(auth)
    return api


# +
extractor = twitter_setup()

# We create a tweet list as follows:
tweets = extractor.user_timeline(screen_name="icyphox", count=100)
print("Number of tweets extracted: {}.\n".format(len(tweets)))

# We print the most recent 5 tweets:
print("5 recent tweets:\n")
for tweet in tweets[:5]:
    print(tweet.text)
    print()

# +
data = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])

# We display the first 10 elements of the dataframe:
display(data.head(10))

# +
print(dir(tweets[0]))

# +
print(tweets[0].id)
print(tweets[0].created_at)
print(tweets[0].source)
print(tweets[0].favorite_count)
print(tweets[0].retweet_count)
print(tweets[0].geo)
print(tweets[0].coordinates)
print(tweets[0].entities)

# +
# Add per-tweet metadata columns alongside the text.
data['len'] = np.array([len(tweet.text) for tweet in tweets])
data['ID'] = np.array([tweet.id for tweet in tweets])
data['Date'] = np.array([tweet.created_at for tweet in tweets])
data['Source'] = np.array([tweet.source for tweet in tweets])
data['Likes'] = np.array([tweet.favorite_count for tweet in tweets])
data['RTs'] = np.array([tweet.retweet_count for tweet in tweets])

# +
display(data.head(10))

# +
mean = np.mean(data['len'])
# Fix: corrected the typo'd message ("The lenght's average in tweets").
print("The average tweet length: {}".format(mean))

# +
fav_max = np.max(data['Likes'])
rt_max = np.max(data['RTs'])

fav = data[data.Likes == fav_max].index[0]
rt = data[data.RTs == rt_max].index[0]

# Max FAVs:
print("The tweet with more likes is: \n{}".format(data['Tweets'][fav]))
print("Number of likes: {}".format(fav_max))
print("{} characters.\n".format(data['len'][fav]))

# Max RTs:
print("The tweet with more retweets is: \n{}".format(data['Tweets'][rt]))
print("Number of retweets: {}".format(rt_max))
print("{} characters.\n".format(data['len'][rt]))

# +
# Time series indexed by tweet date.
tlen = pd.Series(data=data['len'].values, index=data['Date'])
tfav = pd.Series(data=data['Likes'].values, index=data['Date'])
tret = pd.Series(data=data['RTs'].values, index=data['Date'])

# +
tlen.plot(figsize=(16,4), color='r');

# +
tfav.plot(figsize=(16,4), label="Likes", legend=True)
tret.plot(figsize=(16,4), label="Retweets", legend=True);

# +
# Unique content sources, in order of first appearance.
sources = []
for source in data['Source']:
    if source not in sources:
        sources.append(source)

# We print sources list:
print("Creation of content sources:")
for source in sources:
    print("* {}".format(source))

# +
# Count tweets per source and convert to true percentages.
# Fix: the original did `percent /= 100`, which is only a percentage when
# there are exactly 100 tweets; divide by the actual number of tweets.
percent = np.zeros(len(sources))
for source in data['Source']:
    percent[sources.index(source)] += 1
percent = percent / len(data['Source']) * 100

# Pie chart:
pie_chart = pd.Series(percent, index=sources, name='Sources')
pie_chart.plot.pie(fontsize=11, autopct='%.2f', figsize=(6, 6));

# +
from textblob import TextBlob
import re


def clean_tweet(tweet):
    '''
    Utility function to clean the text in a tweet by removing
    links and special characters using regex.
    '''
    # Raw string so the regex escapes are not interpreted by Python first.
    return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())


def analize_sentiment(tweet):
    '''
    Utility function to classify the polarity of a tweet using textblob.

    Returns 1 (positive), 0 (neutral) or -1 (negative).
    (Name spelling kept as-is for backwards compatibility.)
    '''
    analysis = TextBlob(clean_tweet(tweet))
    if analysis.sentiment.polarity > 0:
        return 1
    elif analysis.sentiment.polarity == 0:
        return 0
    else:
        return -1


# +
data['SA'] = np.array([ analize_sentiment(tweet) for tweet in data['Tweets'] ])

# We display the updated dataframe with the new column:
display(data.head(10))

# +
p_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] > 0]
n_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] == 0]
ng_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] < 0]

# +
print("Percentage of positive tweets: {}%".format(len(p_tweets)*100/len(data['Tweets'])))
print("Percentage of neutral tweets: {}%".format(len(n_tweets)*100/len(data['Tweets'])))
print("Percentage of negative tweets: {}%".format(len(ng_tweets)*100/len(data['Tweets'])))

# +
PolaSent.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Kaggle Titanic survival prediction.
#
# Ported from Python 2 to Python 3 (print is a function) and from the
# long-removed ``sklearn.cross_validation`` module to
# ``sklearn.model_selection``.  The pipeline itself is unchanged: clean the
# train/test CSVs, engineer Title/FamilySize/FamilyId features, inspect
# feature strength with SelectKBest, then blend a gradient-boosting model
# (weight 3) with logistic regression (weight 1) and write a submission file.

import operator
import re

import pandas

# Title strings mapped to small integer codes.  Rare titles share codes with
# similar common ones.  "Dona" appears only in the test set; keeping it in the
# single shared mapping lets the same dict encode both data sets.
TITLE_MAPPING = {
    "Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6,
    "Major": 7, "Col": 7, "Capt": 7, "Mlle": 8, "Mme": 8, "Don": 9,
    "Sir": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Ms": 2,
    "Dona": 10,
}

# Maps "family name + family size" strings to small integer ids.  Shared
# module state so the training and test sets receive consistent codes.
family_id_mapping = {}


def get_title(name):
    """Extract the honorific title (e.g. "Mr", "Miss") from a passenger name.

    Titles consist of letters and end with a period; returns "" when the
    name contains no such title.
    """
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""


def get_family_id(row):
    """Return a stable integer id for the family of the passenger in ``row``.

    ``row`` must expose "Name" and "FamilySize".  Unseen families are
    assigned the next unused id and remembered in ``family_id_mapping``.
    """
    last_name = row["Name"].split(",")[0]
    family_id = "{0}{1}".format(last_name, row["FamilySize"])
    if family_id not in family_id_mapping:
        if not family_id_mapping:
            current_id = 1
        else:
            # Next id after the largest one handed out so far.
            current_id = max(family_id_mapping.items(),
                             key=operator.itemgetter(1))[1] + 1
        family_id_mapping[family_id] = current_id
    return family_id_mapping[family_id]


def _encode_columns(df, age_median):
    """Fill missing values and encode Sex/Embarked as integers, in place.

    ``age_median`` comes from the *training* set so train and test share the
    same imputation value (matches the original notebook).
    """
    df["Age"] = df["Age"].fillna(age_median)
    if df["Fare"].isnull().any():
        # Only the test set has missing fares in the original data.
        df["Fare"] = df["Fare"].fillna(df["Fare"].median())
    df.loc[df["Sex"] == "male", "Sex"] = 0
    df.loc[df["Sex"] == "female", "Sex"] = 1
    df["Embarked"] = df["Embarked"].fillna("S")
    for port, code in (("S", 0), ("C", 1), ("Q", 2)):
        df.loc[df["Embarked"] == port, "Embarked"] = code


def _add_features(df):
    """Add FamilySize, NameLength, Title and FamilyId columns, in place."""
    df["FamilySize"] = df["SibSp"] + df["Parch"]
    df["NameLength"] = df["Name"].apply(len)
    titles = df["Name"].apply(get_title)
    for title, code in TITLE_MAPPING.items():
        titles[titles == title] = code
    df["Title"] = titles
    family_ids = df.apply(get_family_id, axis=1)
    # Compress all families with fewer than 3 members into one code.
    family_ids[df["FamilySize"] < 3] = -1
    df["FamilyId"] = family_ids


def main():
    # Heavyweight dependencies are imported here so the feature helpers
    # above stay importable without the full ML/plotting stack installed.
    import matplotlib.pyplot as plt
    import numpy as np
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.feature_selection import SelectKBest, f_classif
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import cross_val_score

    titanic = pandas.read_csv("titanic_train.csv")
    print(titanic.head(5))
    age_median = titanic["Age"].median()
    _encode_columns(titanic, age_median)
    print(titanic.head(5))

    titanic_test = pandas.read_csv("titanic_test.csv")
    _encode_columns(titanic_test, age_median)
    print(titanic_test.head(5))

    # Baseline random forest with near-default parameters.
    predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
    alg = RandomForestClassifier(random_state=1, n_estimators=10,
                                 min_samples_split=2, min_samples_leaf=1)
    print(cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3).mean())

    # Tuned random forest: more trees, stronger leaf constraints.
    alg = RandomForestClassifier(random_state=1, n_estimators=150,
                                 min_samples_split=4, min_samples_leaf=2)
    print(cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3).mean())

    # Feature engineering (train first so family ids are seeded from it).
    _add_features(titanic)
    _add_features(titanic_test)
    print(pandas.value_counts(titanic["Title"]))
    print(pandas.value_counts(titanic_test["Title"]))

    # Univariate feature selection: plot -log10(p) ANOVA F-test scores.
    predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare",
                  "Embarked", "FamilySize", "Title", "FamilyId"]
    selector = SelectKBest(f_classif, k=5)
    selector.fit(titanic[predictors], titanic["Survived"])
    scores = -np.log10(selector.pvalues_)
    plt.bar(range(len(predictors)), scores)
    plt.xticks(range(len(predictors)), predictors, rotation='vertical')
    plt.show()

    # Retrain on the four strongest features only.
    predictors = ["Pclass", "Sex", "Fare", "Title"]
    alg = RandomForestClassifier(random_state=1, n_estimators=150,
                                 min_samples_split=8, min_samples_leaf=4)
    print(cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3).mean())

    # Ensemble: gradient boosting (weight 3) + logistic regression (weight 1).
    predictors = ["Pclass", "Sex", "Age", "Fare", "Embarked",
                  "FamilySize", "Title", "FamilyId"]
    algorithms = [
        [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3),
         predictors],
        [LogisticRegression(random_state=1),
         ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]],
    ]
    full_predictions = []
    for alg, cols in algorithms:
        alg.fit(titanic[cols], titanic["Survived"])
        # Cast to float to avoid dtype errors from the object-typed columns.
        full_predictions.append(alg.predict_proba(titanic_test[cols].astype(float))[:, 1])
    predictions = (full_predictions[0] * 3 + full_predictions[1]) / 4
    predictions[predictions <= .5] = 0
    predictions[predictions > .5] = 1
    predictions = predictions.astype(int)
    submission = pandas.DataFrame({
        "PassengerId": titanic_test["PassengerId"],
        "Survived": predictions,
    })
    submission.to_csv("kaggle_Titanic_GB.csv", index=False)


if __name__ == "__main__":
    main()
Kaggle_Titanic_GB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Generate a random sample of sky photos to use as training data and record
# the chosen file names in a CSV.
#
# Fixes over the original notebook: the rejection-sampling `while` loop never
# terminated when the directory held fewer than 120 unique files; the loop
# counter shadowed the builtin ``iter``; the last cell referenced an
# undefined name (``traingPath``); and unused heavy imports (cv2, PIL,
# matplotlib, numpy, pandas) ran at import time.

import csv
import os
import random

# Training data is based on the Atha 2016 images.
PATH = '../data/photos/sky/Atha/2016/'


def sample_unique(candidates, k):
    """Return up to ``k`` distinct items chosen uniformly from ``candidates``.

    Caps ``k`` at ``len(candidates)`` so the selection always terminates,
    unlike the original retry loop.
    """
    pool = list(candidates)
    return random.sample(pool, min(k, len(pool)))


def trainingDataList(path=PATH, sample_size=120):
    """Pick up to ``sample_size`` random image paths from ``path``.

    ``path`` and ``sample_size`` default to the original notebook's
    hard-coded values, so existing no-argument calls behave the same.
    """
    files = [os.path.join(path, name) for name in os.listdir(path)]
    return sample_unique(files, sample_size)


def trainingDataCSV(randFiles, path=PATH):
    """Append the selected file names, one per row, to trainingData.csv.

    Mode 'a' preserves the original append behaviour; note that repeated
    runs will accumulate duplicate rows in the CSV.
    """
    with open(os.path.join(path, 'trainingData.csv'), 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for file_name in randFiles:
            writer.writerow([file_name])


if __name__ == "__main__":
    # Generate the CSV file with file names in a single column,
    # for use as training data.
    randFiles = trainingDataList()
    trainingDataCSV(randFiles)

    # The cells below were unfinished in the original notebook: they intended
    # to read each image listed in trainingData.csv (the original referenced
    # the undefined name ``traingPath``) and then flatten each image to get
    # the mean and variance of blue-to-red pixel values.
    # import cv2
    # img = cv2.imread(training_path)
notebooks/Generate random training data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PySpark3
#     language: ''
#     name: pyspark3kernel
# ---

# # Wrangling, Exploration and Modeling of Airline Departure Delay (2011/2012) Dataset on Spark 2.0 HDInsight Clusters (pySpark3)
# ### Last updated: February 07, 2016
# ---------------------------------
# ### Here we show key features and capabilities of Spark's MLlib toolkit using the Airline On-Time Performance
# Dataset from the Research and Innovative Technology Administration of the Bureau of Transportation Statistics
# (see below). This dataset spans 26 years, 1988 through 2012, and is fairly large: over 148 million records, or
# 14 GB of raw information. We used a part of the data, that from 2011 (training) and 2012 (testing), to evaluate
# the performance of binary classification models to predict if certain flights were delayed by 15 mins or not.
#
# ### The weather conditions (e.g. windspeed, humidity, precipitation) at origin and destination airports were
# integrated with the original airline data-set in order to incorporate the weather features in the models.
#
# ### This notebook takes about 15 mins to run on a 2 worker-node cluster (D12_V2).
# ### The notebook runs in the PySpark3 kernel in Jupyter.
# NOTE: the kernel injects `spark` and `sc` into the session; they are not defined in this file.
# ----------------------------------
# ### OBJECTIVE: Show use of Spark MLlib's functions for featurization and ML tasks.
#
# ### The learning task is binary classification based on airport (origin and destination) and weather
# (e.g. windspeed, humidity, temperature) features.
# NOTE(review): the label used throughout is ArrDel15 (arrival delay >= 15 min) even though the title says
# "departure delay" — confirm which target was intended.
#
# #### We have shown the following steps:
# 1. Data ingestion, joining, and wrangling.
# 2. Data exploration and plotting.
# 3. Data preparation (featurizing/transformation).
# 4. Modeling (incl. hyperparameter tuning with cross-validation), prediction, model persistence.
# 5. Model evaluation on an independent validation data-set.
#
# Through the above steps we highlight Spark SQL, as well as MLlib's modeling and transformation functions.
# ### Introductory material
#
# Airline On-Time Performance Dataset from the Research and Innovative Technology Administration of the Bureau of
# Transportation Statistics (http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236&DB_Short_Name=On-Time).
#
# The combined dataset can be obtained from the Revolution Analytics website
# (http://packages.revolutionanalytics.com/datasets/AirOnTime87to12).
#
# An interesting feature of the airline data-set is that there are several features (e.g. airport ids,
# flight numbers etc.) which have many (>100) categorical levels.
#
# ----------------------------------
# ## Set directory paths and location of training, validation files, as well as model location in blob storage
# NOTE: The blob storage attached to the HDI cluster is referenced as: wasb:/// (Windows Azure Storage Blob).
# Other blob storage accounts are referenced as: wasb://

# +
# 1. Locations of the airline and weather CSV subsets in public blob storage.
air_file_loc = "wasb://data@cdspsparksamples.blob.core.windows.net/Airline/AirlineSubsetCsv"
weather_file_loc = "wasb://data@<EMAIL>.windows.net/Airline/WeatherSubsetCsv"
# 2. Set model storage directory path. This is where models will be saved.
modelDir = "wasb:///user/remoteuser/Airline/Models/"; # The last backslash is needed;
# 3. Set data storage path. This is where data is stored on the blob attached to the cluster.
dataDir = "wasb:///HdiSamples/HdiSamples/Airline/"; # The last backslash is needed;
# -

# ## Set SQL context and import necessary libraries

# +
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import *
import matplotlib.pyplot as plt
import numpy as np
import datetime

sqlContext = SQLContext(sc)
# -

# ## Data ingestion and wrangling using Spark SQL
# #### Data import and registering as tables

# +
## READ IN AIR DATA FRAME FROM CSV
air = spark.read.csv(path=air_file_loc, header=True, inferSchema=True)

## READ IN WEATHER DATA FRAME FROM CSV
weather = spark.read.csv(path=weather_file_loc, header=True, inferSchema=True)
# -

## CHECK SCHEMA OF AIR AND WEATHER TABLES
air.printSchema()
weather.printSchema()

# #### Using Spark SQL to join, clean and featurize data

## REGISTER DATA-FRAMEs AS TEMP-TABLEs IN SQL-CONTEXT
air.createOrReplaceTempView("airline")
weather.createOrReplaceTempView("weather")

# +
## USING SQL: CLEAN AND MERGE AIR AND WEATHER DATA-SETS TO CREATE A JOINED DATA-FRAME

# COUNT FLIGHTS BY AIRPORT (used below to drop rarely used airports)
spark.sql("SELECT ORIGIN, COUNT(*) as CTORIGIN FROM airline GROUP BY ORIGIN").createOrReplaceTempView("countOrigin")
spark.sql("SELECT DEST, COUNT(*) as CTDEST FROM airline GROUP BY DEST").createOrReplaceTempView("countDest")

## CLEAN AIRLINE DATA WITH QUERY, FILTER OUT AIRPORTS WHICH HAVE VERY FEW FLIGHTS (<100).
## CRS times are bucketed to the hour via floor(time/100).
sqlStatement = """SELECT ARR_DEL15 as ArrDel15, YEAR as Year, MONTH as Month, DAY_OF_MONTH as DayOfMonth, DAY_OF_WEEK as DayOfWeek, UNIQUE_CARRIER as Carrier, ORIGIN_AIRPORT_ID as OriginAirportID, ORIGIN, DEST_AIRPORT_ID as DestAirportID, DEST, floor(CRS_DEP_TIME/100) as CRSDepTime, floor(CRS_ARR_TIME/100) as CRSArrTime FROM airline WHERE ARR_DEL15 in ('0.0', '1.0') AND ORIGIN IN (SELECT DISTINCT ORIGIN FROM countOrigin where CTORIGIN > 100) AND DEST IN (SELECT DISTINCT DEST FROM countDest where CTDEST > 100) """
airCleaned = spark.sql(sqlStatement)

# REGISTER CLEANED AIR DATASET
airCleaned.createOrReplaceTempView("airCleaned")

## CLEAN WEATHER DATA WITH QUERY: average weather readings per airport-hour.
sqlStatement = """SELECT AdjustedYear, AdjustedMonth, AdjustedDay, AdjustedHour, AirportID, avg(Visibility) as Visibility, avg(DryBulbCelsius) as DryBulbCelsius, avg(DewPointCelsius) as DewPointCelsius, avg(RelativeHumidity) as RelativeHumidity, avg(WindSpeed) as WindSpeed, avg(Altimeter) as Altimeter FROM weather GROUP BY AdjustedYear, AdjustedMonth, AdjustedDay, AdjustedHour, AirportID"""
weatherCleaned = spark.sql(sqlStatement)

# REGISTER CLEANED WEATHER DATASET
weatherCleaned.createOrReplaceTempView("weatherCleaned")
# -

# +
# CREATE JOINED DATA SET AND REGISTER TABLE: each flight is joined to the hourly
# weather at its origin airport (alias b) and destination airport (alias c).
sqlStatement = """SELECT a.ArrDel15, a.Year, a.Month, a.DayOfMonth, a.DayOfWeek, a.Carrier, a.OriginAirportID, \
a.ORIGIN, a.DestAirportID, a.DEST, a.CRSDepTime, b.Visibility as VisibilityOrigin, \
b.DryBulbCelsius as DryBulbCelsiusOrigin, b.DewPointCelsius as DewPointCelsiusOrigin, b.RelativeHumidity as RelativeHumidityOrigin, b.WindSpeed as WindSpeedOrigin, \
b.Altimeter as AltimeterOrigin, c.Visibility as VisibilityDest, \
c.DryBulbCelsius as DryBulbCelsiusDest, c.DewPointCelsius as DewPointCelsiusDest, c.RelativeHumidity as RelativeHumidityDest, c.WindSpeed as WindSpeedDest, \
c.Altimeter as AltimeterDest FROM airCleaned a, weatherCleaned b, weatherCleaned c WHERE a.Year = b.AdjustedYear and a.Year = c.AdjustedYear and a.Month = b.AdjustedMonth and a.Month = c.AdjustedMonth and a.DayofMonth = b.AdjustedDay and a.DayofMonth = c.AdjustedDay and a.CRSDepTime= b.AdjustedHour and a.CRSDepTime = c.AdjustedHour and a.OriginAirportID = b.AirportID and a.DestAirportID = c.AirportID"""

# SEVERAL COLUMNS CONTAIN NULL VALUES; IT IS IMPORTANT TO FILTER THOSE OUT, OTHERWISE SOME TRANSFORMATIONS
# WILL HAVE ERRORS LATER. HERE WE SHOW HOW TO FILTER A DATA-FRAME USING A SQL STATEMENT.
joined = spark.sql(sqlStatement).filter("VisibilityOrigin is not NULL and DryBulbCelsiusOrigin is not NULL \
and DewPointCelsiusOrigin is not NULL and RelativeHumidityOrigin is not NULL \
and WindSpeedOrigin is not NULL and AltimeterOrigin is not NULL \
and VisibilityDest is not NULL and DryBulbCelsiusDest is not NULL \
and DewPointCelsiusDest is not NULL and RelativeHumidityDest is not NULL \
and WindSpeedDest is not NULL and AltimeterDest is not NULL \
and ORIGIN is not NULL and DEST is not NULL \
and OriginAirportID is not NULL and DestAirportID is not NULL \
and CRSDepTime is not NULL and Year is not NULL and Month is not NULL \
and DayOfMonth is not NULL and DayOfWeek is not NULL and Carrier is not NULL")

# REGISTER JOINED
joined.createOrReplaceTempView("joined")
# -

## SHOW WHICH TABLES ARE REGISTERED IN SQL-CONTEXT
spark.sql("show tables").show()

# #### Split data by year, 2011 for training and 2012 for validation

# +
# CREATE TRAINING DATA (2011) AND VALIDATION DATA (2012)
sqlStatement = """SELECT * from joined WHERE Year = 2011"""
train = spark.sql(sqlStatement)

sqlStatement = """SELECT * from joined WHERE Year = 2012"""
validation = spark.sql(sqlStatement)
# -

# #### Save in blob

# +
# SAVE JOINED DATA IN BLOB AS PARQUET (overwrite any prior run's output)
trainfilename = dataDir + "TrainData";
train.write.mode("overwrite").parquet(trainfilename)

validfilename = dataDir + "ValidationData";
validation.write.mode("overwrite").parquet(validfilename)
# -

# ----------------------------------
# ## Data ingestion: Read in the training data from parquet file

# +
## READ IN DATA FRAME FROM PARQUET
trainfilename = dataDir + "TrainData";
train_df = spark.read.parquet(trainfilename)

## PERSIST AND MATERIALIZE DF IN MEMORY (count() forces evaluation)
train_df.persist()
train_df.count()

## REGISTER DATA-FRAME AS A TEMP-TABLE IN SQL-CONTEXT
train_df.createOrReplaceTempView("train")
# -

train_df.printSchema()

# ----------------------------------
# ## Data exploration & visualization: Plotting of target variables and features
# #### First, summarize data using SQL; this outputs a Spark data frame. If the data-set is too large, it can be sampled.
# NOTE: -m sample indicates that the dataframe is being randomly sampled, -r 0.5 indicates 50% of rows are sampled,
# and -n -1 indicates all rows are returned after sampling, prior to the spark dataframe being returned to local
# memory of the head-node as a pandas dataframe.

# + magic_args="-q -o sqlResultsPD -m sample -r 0.5 -n -1" language="sql"
# SELECT ArrDel15, WindSpeedDest, WindSpeedOrigin FROM train
# -

# #### Plot distribution of windspeed, and its relationship to the delay label

# +
# %%local
# %matplotlib inline
import matplotlib.pyplot as plt

## %%local creates a pandas data-frame on the head node memory, from the spark data-frame,
## which can then be used for plotting. Here, sampling data is a good idea, depending on the memory of the head node.

# HISTOGRAM OF WINDSPEED AT DESTINATION
ax1 = sqlResultsPD[['WindSpeedDest']].plot(kind='hist', bins=25, facecolor='lightblue')
ax1.set_title('WindSpeed @ Destination distribution')
ax1.set_xlabel('WindSpeedDest'); ax1.set_ylabel('Counts');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()

# WINDSPEED AT DESTINATION, SPLIT BY DELAYED vs NOT DELAYED
ax2 = sqlResultsPD.boxplot(column=['WindSpeedDest'], by=['ArrDel15'])
ax2.set_title('WindSpeed Destination')
ax2.set_xlabel('ArrDel15'); ax2.set_ylabel('WindSpeed');
plt.figure(figsize=(4,4)); plt.suptitle(''); plt.show()
# -

# ----------------------------------
# ## Feature engineering, transformation and data preparation for modeling
# #### Split data into train/test. The training fraction will be used to create the model, and the testing
# fraction will be used to evaluate the model.

# +
trainingFraction = 0.75; testingFraction = (1-trainingFraction);
seed = 1234;

# SPLIT SAMPLED DATA-FRAME INTO TRAIN/TEST, WITH A SEED FOR REPRODUCIBILITY
trainPartition, testPartition = train_df.randomSplit([trainingFraction, testingFraction], seed=seed);

# CACHE DATA FRAMES IN MEMORY
trainPartition.persist(); trainPartition.count()
testPartition.persist(); testPartition.count()

trainPartition.createOrReplaceTempView("TrainPartition")
testPartition.createOrReplaceTempView("TestPartition")
# -

# #### Filter out null values, and filter test data by the categorical levels present in the training data,
# so transformations fit on the training data will also fit the test data.

# +
## EXAMPLES BELOW ALSO SHOW HOW TO USE SQL DIRECTLY ON DATAFRAMES
trainPartitionFilt = trainPartition.filter("ArrDel15 is not NULL and DayOfMonth is not NULL and DayOfWeek is not NULL \
and Carrier is not NULL and OriginAirportID is not NULL and DestAirportID is not NULL \
and CRSDepTime is not NULL and VisibilityOrigin is not NULL and DryBulbCelsiusOrigin is not NULL \
and DewPointCelsiusOrigin is not NULL and RelativeHumidityOrigin is not NULL \
and WindSpeedOrigin is not NULL and AltimeterOrigin is not NULL \
and VisibilityDest is not NULL and DryBulbCelsiusDest is not NULL \
and DewPointCelsiusDest is not NULL and RelativeHumidityDest is not NULL \
and WindSpeedDest is not NULL and AltimeterDest is not NULL ")

trainPartitionFilt.persist(); trainPartitionFilt.count()
trainPartitionFilt.createOrReplaceTempView("TrainPartitionFilt")

# Test rows whose categorical values never appear in training are dropped so that
# indexers fit on the training partition can transform the test partition.
testPartitionFilt = testPartition.filter("ArrDel15 is not NULL and DayOfMonth is not NULL and DayOfWeek is not NULL \
and Carrier is not NULL and OriginAirportID is not NULL and DestAirportID is not NULL \
and CRSDepTime is not NULL and VisibilityOrigin is not NULL and DryBulbCelsiusOrigin is not NULL \
and DewPointCelsiusOrigin is not NULL and RelativeHumidityOrigin is not NULL \
and WindSpeedOrigin is not NULL and AltimeterOrigin is not NULL \
and VisibilityDest is not NULL and DryBulbCelsiusDest is not NULL \
and DewPointCelsiusDest is not NULL and RelativeHumidityDest is not NULL \
and WindSpeedDest is not NULL and AltimeterDest is not NULL") \
    .filter("OriginAirportID IN (SELECT distinct OriginAirportID FROM TrainPartitionFilt) \
AND ORIGIN IN (SELECT distinct ORIGIN FROM TrainPartitionFilt) \
AND DestAirportID IN (SELECT distinct DestAirportID FROM TrainPartitionFilt) \
AND DEST IN (SELECT distinct DEST FROM TrainPartitionFilt) \
AND Carrier IN (SELECT distinct Carrier FROM TrainPartitionFilt) \
AND CRSDepTime IN (SELECT distinct CRSDepTime FROM TrainPartitionFilt) \
AND DayOfMonth in (SELECT distinct DayOfMonth FROM TrainPartitionFilt) \
AND DayOfWeek in (SELECT distinct DayOfWeek FROM TrainPartitionFilt)")

testPartitionFilt.persist(); testPartitionFilt.count()
testPartitionFilt.createOrReplaceTempView("TestPartitionFilt")
# -

# #### Indexing features using pipeline transformations

# +
# TRANSFORM SOME FEATURES BASED ON MLLIB TRANSFORMATION FUNCTIONS:
# string-index the label, binarize it at 0.5, and string-index the carrier.
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorIndexer, Bucketizer, Binarizer

sI0 = StringIndexer(inputCol = 'ArrDel15', outputCol = 'ArrDel15_ind');
bin0 = Binarizer(inputCol = 'ArrDel15_ind', outputCol = 'ArrDel15_bin', threshold = 0.5);
sI1 = StringIndexer(inputCol="Carrier", outputCol="Carrier_ind");
transformPipeline = Pipeline(stages=[sI0, bin0, sI1]);

# NOTE(review): the pipeline is fit on the *unfiltered* trainPartition but applied
# to the filtered partitions — confirm this is intentional.
transformedTrain = transformPipeline.fit(trainPartition).transform(trainPartitionFilt)
transformedTest = transformPipeline.fit(trainPartition).transform(testPartitionFilt)

transformedTrain.persist(); transformedTrain.count();
transformedTest.persist(); transformedTest.count();
# -

# ----------------------------------
# ## Train classification models to predict whether arrival was delayed by >= 15 min
# ### Define the training formula and transformations to be applied to all training pipelines

# +
from pyspark.ml.feature import RFormula

## DEFINE REGRESSION FORMULA: label ~ calendar, carrier, airport and weather features.
regFormula = RFormula(formula="ArrDel15_ind ~ \
DayOfMonth + DayOfWeek + Carrier_ind + OriginAirportID + DestAirportID + CRSDepTime \
+ VisibilityOrigin + DryBulbCelsiusOrigin + DewPointCelsiusOrigin \
+ RelativeHumidityOrigin + WindSpeedOrigin + AltimeterOrigin \
+ VisibilityDest + DryBulbCelsiusDest + DewPointCelsiusDest \
+ RelativeHumidityDest + WindSpeedDest + AltimeterDest");

## DEFINE INDEXER FOR CATEGORICAL VARIABLES
## NOTE: Some categorical features (such as origin and destination airports) have > 240 levels,
## which is why maxCategories is set to 250.
featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=250)
# -

# ### Train Elastic Net classification model, and evaluate performance on test data

# +
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from sklearn.metrics import roc_curve,auc

## DEFINE ELASTIC NET CLASSIFIER (L1/L2 mix via elasticNetParam)
eNet = LogisticRegression(featuresCol="indexedFeatures", maxIter=25, regParam=0.01, elasticNetParam=0.5)

## TRAINING PIPELINE: fit model, with formula and other transformations
model = Pipeline(stages=[regFormula, featureIndexer, eNet]).fit(transformedTrain)

# SAVE MODEL under a timestamped name so repeated runs do not collide
datestamp = datetime.datetime.now().strftime('%m-%d-%Y-%s');
fileName = "logisticRegModel_" + datestamp;
logRegDirfilename = modelDir + fileName;
model.save(logRegDirfilename)

## Evaluate model on test set
predictions = model.transform(transformedTest)
predictionAndLabels = predictions.select("label","prediction").rdd
predictions.select("label","probability").createOrReplaceTempView("tmp_results")

metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under ROC = %s" % metrics.areaUnderROC)
# -

# #### Bring predictions to the local pandas dataframe for plotting using matplotlib
# NOTE: -n -1 means all of the data from tmp_results is brought to the local dataframe
# [you cannot do this for large dataframes].

# + magic_args="-q -o predictions_pddf -n -1" language="sql"
# SELECT label, probability from tmp_results
# -

# +
# %%local
## PLOT ROC CURVE AFTER CONVERTING PREDICTIONS TO A PANDAS DATA FRAME
from sklearn.metrics import roc_curve,auc
import matplotlib.pyplot as plt
# %matplotlib inline

labels = predictions_pddf["label"]
prob = []
# Extract P(class=1) from the serialized probability vector of each row.
for dv in predictions_pddf["probability"]:
    prob.append(list(dv.values())[1][1])

fpr, tpr, thresholds = roc_curve(labels, prob, pos_label=1);
roc_auc = auc(fpr, tpr)

plt.figure(figsize=(5,5))
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0]); plt.ylim([0.0, 1.05]);
plt.xlabel('False Positive Rate'); plt.ylabel('True Positive Rate');
plt.title('ROC Curve'); plt.legend(loc="lower right");
plt.show()
# -

# ### Train Gradient Boosting Tree model, and evaluate performance on test data

# +
from pyspark.ml.regression import GBTRegressor

## DEFINE GRADIENT BOOSTING TREE MODEL
## NOTE(review): GBTRegressor is a regression estimator applied here to a binary label;
## consider GBTClassifier for this task.
gBT = GBTRegressor(featuresCol="indexedFeatures", maxIter=10, maxBins = 250)

## TRAINING PIPELINE: fit model, with formula and other transformations
model = Pipeline(stages=[regFormula, featureIndexer, gBT]).fit(transformedTrain)

# SAVE MODEL
datestamp = datetime.datetime.now().strftime('%m-%d-%Y-%s');
fileName = "gbtModel_" + datestamp;
gbtDirfilename = modelDir + fileName;
model.save(gbtDirfilename)

## Evaluate model on test set
predictions = model.transform(transformedTest)
predictionAndLabels = predictions.select("label","prediction").rdd
metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under ROC = %s" % metrics.areaUnderROC)
# -

# ### Train a random forest binary classification model using the Pipeline function, save, and evaluate on test data set

# +
from pyspark.ml.classification import RandomForestClassifier

## DEFINE RANDOM FOREST CLASSIFIER
randForest = RandomForestClassifier(featuresCol = 'indexedFeatures', labelCol = 'label', numTrees=20, \
                                    maxDepth=6, maxBins=250)

## TRAINING PIPELINE: fit model, with formula and other transformations
model = Pipeline(stages=[regFormula, featureIndexer, randForest]).fit(transformedTrain)

# SAVE MODEL
datestamp = datetime.datetime.now().strftime('%m-%d-%Y-%s');
fileName = "rfModel_" + datestamp;
rfDirfilename = modelDir + fileName;
model.save(rfDirfilename)

## Evaluate model on test set
predictions = model.transform(transformedTest)
predictionAndLabels = predictions.select("label","prediction").rdd
metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under ROC = %s" % metrics.areaUnderROC)
# -

# ----------------------------------
# ## Hyper-parameter tuning: Train a random forest model using cross-validation

# +
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.evaluation import BinaryClassificationEvaluator

## DEFINE RANDOM FOREST CLASSIFIER
randForest = RandomForestClassifier(featuresCol = 'indexedFeatures', labelCol = 'label', numTrees=20, \
                                    maxDepth=6, maxBins=250)

## DEFINE MODELING PIPELINE, INCLUDING FORMULA, FEATURE TRANSFORMATIONS, AND ESTIMATOR
pipeline = Pipeline(stages=[regFormula, featureIndexer, randForest])

## DEFINE PARAMETER GRID FOR RANDOM FOREST (3 x 3 = 9 candidate models)
paramGrid = ParamGridBuilder() \
    .addGrid(randForest.numTrees, [10, 25, 50]) \
    .addGrid(randForest.maxDepth, [3, 5, 7]) \
    .build()

## DEFINE 3-FOLD CROSS VALIDATION, SELECTING ON AREA UNDER ROC
crossval = CrossValidator(estimator=pipeline,
                          estimatorParamMaps=paramGrid,
                          evaluator=BinaryClassificationEvaluator(metricName="areaUnderROC"),
                          numFolds=3)

## TRAIN MODEL USING CV
cvModel = crossval.fit(transformedTrain)

## Evaluate model on test set
predictions = cvModel.transform(transformedTest)
predictionAndLabels = predictions.select("label","prediction").rdd
metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under ROC = %s" % metrics.areaUnderROC)

## SAVE THE BEST MODEL FROM THE GRID SEARCH
datestamp = datetime.datetime.now().strftime('%m-%d-%Y-%s');
fileName = "CV_RandomForestRegressionModel_" + datestamp;
CVDirfilename = modelDir + fileName;
cvModel.bestModel.save(CVDirfilename);
# -

# ----------------------------------
# ## Load a saved pipeline model and evaluate it on test data set

# +
from pyspark.ml import PipelineModel

savedModel = PipelineModel.load(logRegDirfilename)

## Evaluate model on test set
predictions = savedModel.transform(transformedTest)
predictionAndLabels = predictions.select("label","prediction").rdd
metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under ROC = %s" % metrics.areaUnderROC)
# -

# ## Load and transform an independent validation data-set, and evaluate the saved pipeline model
# ### Note that this validation data, by design, has a different format than the original training data.
# By wrangling and transformations, we make the data format the same as the training data for the purpose of scoring.

## READ IN DATA FRAME FROM PARQUET
validfilename = dataDir + "ValidationData";
validPartition = spark.read.parquet(validfilename)
validPartition.persist(); validPartition.count()

validPartition.printSchema()

# #### It is a good idea to filter the validation dataset for null values, as well as categorical values
# not observed in the training data set. Otherwise, errors could be thrown at scoring time.
validPartitionFilt = validPartition.filter("ArrDel15 is not NULL and DayOfMonth is not NULL and DayOfWeek is not NULL \
and Carrier is not NULL and OriginAirportID is not NULL and DestAirportID is not NULL \
and CRSDepTime is not NULL and VisibilityOrigin is not NULL and DryBulbCelsiusOrigin is not NULL \
and DewPointCelsiusOrigin is not NULL and RelativeHumidityOrigin is not NULL \
and WindSpeedOrigin is not NULL and AltimeterOrigin is not NULL \
and VisibilityDest is not NULL and DryBulbCelsiusDest is not NULL \
and DewPointCelsiusDest is not NULL and RelativeHumidityDest is not NULL \
and WindSpeedDest is not NULL and AltimeterDest is not NULL") \
    .filter("OriginAirportID IN (SELECT distinct OriginAirportID FROM TrainPartitionFilt) \
AND ORIGIN IN (SELECT distinct ORIGIN FROM TrainPartitionFilt) \
AND DestAirportID IN (SELECT distinct DestAirportID FROM TrainPartitionFilt) \
AND DEST IN (SELECT distinct DEST FROM TrainPartitionFilt) \
AND Carrier IN (SELECT distinct Carrier FROM TrainPartitionFilt) \
AND CRSDepTime IN (SELECT distinct CRSDepTime FROM TrainPartitionFilt) \
AND DayOfMonth in (SELECT distinct DayOfMonth FROM TrainPartitionFilt) \
AND DayOfWeek in (SELECT distinct DayOfWeek FROM TrainPartitionFilt)")

validPartitionFilt.persist(); validPartitionFilt.count()
validPartitionFilt.createOrReplaceTempView("ValidPartitionFilt")

# #### We need to apply the same transformation to the validation data as the ones that were applied to
# the training set, as in the transformPipeline function defined above.
transformedValid = transformPipeline.fit(trainPartition).transform(validPartitionFilt)

# #### Load saved model, score validation data and evaluate
savedModel = PipelineModel.load(logRegDirfilename)
predictions = savedModel.transform(transformedValid)
predictionAndLabels = predictions.select("label","prediction").rdd
metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under ROC = %s" % metrics.areaUnderROC)
Misc/Spark/pySpark/Spark2.0/Spark2.0_pySpark3_Airline_Departure_Delay_Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo: factor a small integer with Shor's algorithm, first on the local Aer
# simulator and then on the least-busy real IBMQ backend.

import qiskit
#from qiskit import *
from qiskit.tools.visualization import *
from qiskit.tools.monitor import job_monitor
from qiskit.utils import QuantumInstance
from qiskit.algorithms import Shor
from qiskit import Aer
from qiskit import IBMQ

# check the qiskit version
qiskit.__qiskit_version__

# IBM Quantum Experience: read the API token from a local file (so it is not
# hard-coded in the notebook), cache the account on disk, then load it.
with open('/Users/karim/staff/cred/IBMQ/tocken.txt', 'r') as file:
    myTocken = file.read().replace('\n', '')
IBMQ.save_account(myTocken,overwrite=True)
IBMQ.load_account()
IBMQ.providers()

# get a list of the quantum computing instances that we can run on it
# you can find more here: https://quantum-computing.ibm.com/lab/docs/iql/manage/account/ibmq#backends
provider = IBMQ.get_provider('ibm-q')
provider.backends(simulator=False, operational=True)
provider.backends()

from qiskit.providers.ibmq import least_busy

# Real (non-simulator) devices with 3..5 qubits -- small enough to queue quickly.
small_devices = provider.backends(filters=lambda x: x.configuration().n_qubits <= 5 and x.configuration().n_qubits >2 and not x.configuration().simulator)
least_busy(small_devices)

# Larger real devices (>= 5 qubits), for reference.
big_devices = provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator)
big_devices

backend = least_busy(small_devices)
backend
backend.configuration().n_qubits
provider.get_backend('ibmq_quito')

num2factorize=3
# NOTE(review): `qiskit.algorithms.Shor` takes a quantum_instance, not the
# integer to factor, and `factors` is never used again (the Shor(...) calls
# below do the actual work) -- this line looks like an erroneous leftover;
# confirm before relying on it.
factors = Shor(num2factorize)

# --- Run Shor on the local Aer simulator first. ---
simulator = Aer.get_backend('aer_simulator') #qasm_simulator
sim_job = QuantumInstance(backend=simulator,shots=128, skip_qobj_validation = False)
sim_algo = Shor(quantum_instance=sim_job)
sim_result = sim_algo.factor(num2factorize)
print(f"The list of factors of {num2factorize} as computed by the Shor's algorithm is {sim_result.factors[0]}.")

# Qubit cost of the Shor circuit: 4*ceil(log2(N)) + 2 (compare the formula
# against the actually constructed circuit).
import math
print(f'Computed of qubits for circuit: {4 * math.ceil(math.log(num2factorize, 2)) + 2}')
print(f'Actual number of qubits of circuit: {sim_algo.construct_circuit(num2factorize).num_qubits}')

# ## do the same but on a quantum computer
quantum_instance = QuantumInstance(backend=least_busy(small_devices),shots=128, skip_qobj_validation = False)
#quantum_instance.backend.name()
#quantum_instance.backend.provider()
quantum_instance.backend.status()
quantum_instance.backend.configuration().n_qubits
qcalc_algo = Shor(quantum_instance=quantum_instance)
qcalc_result = qcalc_algo.factor(num2factorize)
print(f"The list of factors of {num2factorize} as computed by the Shor's algorithm is {qcalc_result.factors[0]}.")
qc_workshop_20220331/demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.5 64-bit (''deeplearning'': conda)' # language: python # name: python37564bitdeeplearningconda2f5dcc693383402099797ed40bd3951d # --- import pandas as pd, numpy as np from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score import matplotlib.pyplot as plt, cv2 import cuml #, cupy import timm import torch from torch.utils.data import DataLoader, Dataset import sys sys.path.append('../') from src.pl_module import MelanomaModel from src.transforms.albu import get_valid_transforms import skimage.io from tqdm.auto import tqdm print('RAPIDS version',cuml.__version__) print('timm version',timm.__version__) # %matplotlib inline class MelanomaDataset(Dataset): def __init__(self, df_path, data_path, image_folder, transform=None): super().__init__() self.image_folder = image_folder self.df = pd.read_csv(f"{data_path}/{df_path}.csv") self.transform = transform self.df.loc[:, 'bin_target'] = (self.df.target >= 0.5).astype(int) self.targets = self.df.bin_target.values self.target_counts = self.df.bin_target.value_counts().values def __len__(self) -> int: return self.df.shape[0] def __getitem__(self, index): row = self.df.iloc[index] img_id = row.image_name img_path = f"{self.image_folder}/{img_id}.jpg" image = skimage.io.imread(img_path) if self.transform is not None: image = self.transform(image=image)['image'] image = image.transpose(2, 0, 1) image = torch.from_numpy(image) label = row.target # target = onehot(2, label) target = torch.tensor(np.expand_dims(label, 0)).float() return{'features': image, 'target': target} # + # GAN generated data # - model = MelanomaModel.net_mapping('resnest50d', 'SingleHeadMax') model.load_state_dict(torch.load('../weights/train_384_balancedW_resnest50d_fold0_heavyaugs_averaged_best_weights.pth')) model.cuda() model.eval() 
# Build one dataset/dataloader per image source (GAN-generated, competition
# train, ISIC-2019 external), all with the same validation-time transforms so
# the extracted features are directly comparable.
gan_generated_data_dataset = MelanomaDataset(
    df_path='generated_data_v.003.cleaned',
    data_path='../data',
    image_folder='../data/generated_data_v.003//',
    transform=get_valid_transforms())
gan_generated_data_daloader = DataLoader(gan_generated_data_dataset, batch_size=16, num_workers=1)

train_dataset = MelanomaDataset(
    df_path='train',
    data_path='../data/',
    image_folder='../data/jpeg-melanoma-384x384/train/',
    transform=get_valid_transforms())
train_daloader = DataLoader(train_dataset, batch_size=16, num_workers=1)

external_dataset = MelanomaDataset(
    df_path='external_train',
    data_path='../data/',
    image_folder='../data/jpeg-isic2019-384x384/train/',
    transform=get_valid_transforms())
external_dataloader = DataLoader(external_dataset, batch_size=16, num_workers=1)

# Extract frozen-model features for every image of each source.
features_gan = []
for batch in tqdm(gan_generated_data_daloader, total=len(gan_generated_data_daloader)):
    with torch.no_grad():
        features_gan.extend(model.get_features(batch['features'].cuda()).cpu().numpy())

features_train = []
for batch in tqdm(train_daloader, total=len(train_daloader)):
    with torch.no_grad():
        features_train.extend(model.get_features(batch['features'].cuda()).cpu().numpy())

features_external = []
for batch in tqdm(external_dataloader, total=len(external_dataloader)):
    with torch.no_grad():
        features_external.extend(model.get_features(batch['features'].cuda()).cpu().numpy())

tsne_cuml_model = cuml.TSNE(perplexity=250.0, n_neighbors=150)

# BUGFIX: features must be stacked in the SAME order (gan, train, external) as
# `targets` and `sample_origin` below.  The original stacked
# train + gan + external, so every per-sample color/label in the plots was
# assigned to the wrong embedding point.
all_features = np.stack(features_gan + features_train + features_external)
targets = np.concatenate([
    gan_generated_data_dataset.df.target.values,
    train_dataset.df.target.values,
    external_dataset.df.target.values])
sample_origin = np.array(
    ['gan'] * len(gan_generated_data_dataset.df.target.values) +
    ['train'] * len(train_dataset.df.target.values) +
    ['external'] * len(external_dataset.df.target.values)
)

embed2Dn = tsne_cuml_model.fit_transform(all_features)

# One scatter per source, colored by target value.
f, ax = plt.subplots(1, 3, figsize=(30, 10))
ax[0].scatter(
    embed2Dn[np.where(sample_origin == 'gan')[0], 0],
    embed2Dn[np.where(sample_origin == 'gan')[0], 1],
    c=targets[np.where(sample_origin == 'gan')[0]],
    s=1.0, marker='s')
ax[1].scatter(
    embed2Dn[np.where(sample_origin == 'train')[0], 0],
    embed2Dn[np.where(sample_origin == 'train')[0], 1],
    c=targets[np.where(sample_origin == 'train')[0]],
    s=1.0, marker='s')
ax[2].scatter(
    embed2Dn[np.where(sample_origin == 'external')[0], 0],
    embed2Dn[np.where(sample_origin == 'external')[0], 1],
    c=targets[np.where(sample_origin == 'external')[0]],
    s=1.0, marker='s')

# Cluster the raw features and color the 2-D embedding by cluster id.
CLUSTERS = 20
model_kmeans = cuml.KMeans(n_clusters=CLUSTERS)
model_kmeans.fit(all_features)
plt.scatter(
    embed2Dn[:, 0],
    embed2Dn[:, 1],
    c=model_kmeans.labels_,
    s=1.0, cmap='tab20', marker='s')
notebooks/cuML_embeddings_tnse.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Landmark-classification data pipeline: configuration, TFRecord parsing and
# augmentation functions for a ResNet50V2 transfer-learning experiment.

# + id="XDnap1jLv8so"
import os
import os.path as pth
import json
import shutil

import numpy as np
import pandas as pd
from tqdm import tqdm

import tensorflow as tf
import tensorflow.keras as keras
# -

# Let TF grow GPU memory on demand instead of reserving it all up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)

# + id="ffY3gSLAvvSs"
BASE_MODEL_NAME = 'ResNet50V2-for-upload'
my_model_base = keras.applications.resnet_v2
my_model = my_model_base.ResNet50V2

# Experiment configuration. 'aug.resize' is the pre-crop size; 'input_shape'
# is the final (random-cropped) network input. Most 'conv'/'pool' entries are
# placeholders ('X'/0) kept only so experiment names stay comparable.
config = {
    'is_zscore': True,
    # 'input_shape': (540, 960, 3),
    'aug': {
        #'resize': (270, 480),
        'resize': (297, 528),
    },
    # 'input_shape': (224, 360, 3),
    #'input_shape': (270, 480, 3),
    'input_shape': (270, 480, 3),
    'output_activation': 'softmax',
    'num_class': 1049,
    'output_size': 1049,
    'conv': {
        'conv_num': (0),  # (3,5,3),
        'base_channel': 0,  # 4,
        'kernel_size': 0,  # 3,
        'padding': 'same',
        'stride': 'X'
    },
    'pool': {
        'type': 'X',
        'size': 'X',
        'stride': 'X',
        'padding': 'same'
    },
    'fc': {
        'fc_num': 0,
    },
    'activation': 'relu',
    'between_type': 'avg',
    'is_batchnorm': True,
    'is_dropout': False,
    'dropout_rate': 0.5,
    'add_dense': True,
    'dense_size': 1024,
    'batch_size': 64,  #64,
    'buffer_size': 256,  #256,
    'loss': 'CategoricalCrossentropy',
    'num_epoch': 10000,
    'learning_rate': 1e-3,
    'random_state': 7777
}

# + id="abOw4s0jv6JI"
# TFRecord schema: raw JPEG bytes plus the integer landmark class id.
image_feature_description = {
    'image_raw': tf.io.FixedLenFeature([], tf.string),
    'randmark_id': tf.io.FixedLenFeature([], tf.int64),
    # 'id': tf.io.FixedLenFeature([], tf.string),
}


def _parse_image_function(example_proto):
    """Parse one serialized Example into its feature dict."""
    return tf.io.parse_single_example(example_proto, image_feature_description)


def map_func(target_record):
    """Decode the JPEG bytes to a float32 HWC tensor; pass the label through."""
    img = target_record['image_raw']
    label = target_record['randmark_id']
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.dtypes.cast(img, tf.float32)
    return img, label


def resize_and_crop_func(image, label):
    """Resize to config['aug']['resize'], then random-crop to config['input_shape'].

    BUGFIX: the random crop is now taken from the *resized* image. The
    original cropped the raw input and silently discarded the resize result
    (the first assignment to result_image was immediately overwritten).
    """
    result_image = tf.image.resize(image, config['aug']['resize'])
    result_image = tf.image.random_crop(result_image, size=config['input_shape'], seed=7777)
    return result_image, label


def image_aug_func(image, label):
    """Placeholder for extra augmentations; currently the identity mapping."""
    return image, label


def post_process_func(image, label):
    """Apply the backbone's preprocessing and one-hot encode the label."""
    # result_image = result_image / 255
    result_image = my_model_base.preprocess_input(image)
    onehot_label = tf.one_hot(label, depth=config['num_class'])
    return result_image, onehot_label


# + id="j1UN3LYJzFgd"
data_base_path = pth.join('data', 'public')
os.makedirs(data_base_path, exist_ok=True)

# + id="Ks1l_51cNzLP"
category_csv_name = 'category.csv'
category_json_name = 'category.json'
# NOTE(review): 'submisstion' is presumably the dataset's actual (misspelled)
# file name -- confirm before "fixing" it.
submission_csv_name = 'sample_submisstion.csv'
train_csv_name = 'train.csv'
# train_zip_name = 'train.zip'
train_tfrecord_name = 'all_train.tfrecords'
train_tfrecord_path = pth.join(data_base_path, train_tfrecord_name)
val_tfrecord_name = 'all_val.tfrecords'
val_tfrecord_path = pth.join(data_base_path, val_tfrecord_name)
# test_zip_name = 'test.zip'
test_tfrecord_name = 'test.tfrecords'
test_tfrecord_path = pth.join(data_base_path, test_tfrecord_name)

# + id="MaBBHyX0dMig"
train_csv_path = pth.join(data_base_path, train_csv_name)
train_df = pd.read_csv(train_csv_path)
# Assumes train.csv has exactly two columns (image id -> landmark id) -- TODO confirm.
train_dict = {k: v for k, v in train_df.values}

submission_csv_path = pth.join(data_base_path, submission_csv_name)
submission_df = pd.read_csv(submission_csv_path)
# submission_df.head()

category_csv_path = pth.join(data_base_path, category_csv_name)
category_df = pd.read_csv(category_csv_path)
category_dict = {k: v for k, v in category_df.values}
# category_df.head()
# -

train_tfrecord_path

# + [markdown] id="Rdng6pk8k0fH"
# ### Model

# + id="Q9-4T5OMcy1R"
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, GroupKFold, RepeatedStratifiedKFold
from sklearn.utils import shuffle
import shuffle import numpy as np import pandas as pd import os import os.path as pth import shutil import time from tqdm import tqdm import itertools from itertools import product, combinations import numpy as np from PIL import Image from IPython.display import clear_output from multiprocessing import Process, Queue import datetime import tensorflow.keras as keras from tensorflow.keras.utils import to_categorical, Sequence from tensorflow.keras.layers import Input, Dense, Activation, BatchNormalization, \ Flatten, Conv3D, AveragePooling3D, MaxPooling3D, Dropout, \ Concatenate, GlobalMaxPool3D, GlobalAvgPool3D from tensorflow.keras.models import Sequential, Model, load_model from tensorflow.keras.optimizers import SGD, Adam from tensorflow.keras.callbacks import ModelCheckpoint,LearningRateScheduler, \ EarlyStopping from tensorflow.keras.losses import mean_squared_error, mean_absolute_error from tensorflow.keras import backend as K from tensorflow.keras.constraints import max_norm # + id="HBKtUQ9mnKMn" conv_comb_list = [] conv_comb_list += [(0,)] base_channel_list = [0] fc_list = [0] # 128, 0 # between_type_list = [None, 'avg', 'max'] between_type_list = ['avg'] batch_size_list = [80] activation_list = ['relu'] # len(conv_comb_list), conv_comb_list # + id="NAaKPD3cnKB5" # + id="WrRPrv1aoOEA" def build_cnn(config): input_layer = Input(shape=config['input_shape'], name='input_layer') pret_model = my_model( input_tensor=input_layer, include_top=False, weights='imagenet', input_shape=config['input_shape'], pooling=config['between_type'], classes=config['output_size'] ) pret_model.trainable = False x = pret_model.output if config['between_type'] == None: x = Flatten(name='flatten_layer')(x) if config['is_dropout']: x = Dropout(config['dropout_rate'], name='output_dropout')(x) if config['add_dense']: x = Dense(config['dense_size'], activation=config['activation'], name='dense_layer')(x) x = Dense(config['output_size'], activation=config['output_activation'], 
name='output_fc')(x) # x = Activation(activation=config['output_activation'], name='output_activation')(x) model = Model(inputs=input_layer, outputs=x, name='{}'.format(BASE_MODEL_NAME)) return model # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="d5mZ06q3qAmN" outputId="edc1edc4-147a-43c3-e8c4-0ae69fa5e59d" model = build_cnn(config) for i, layer in enumerate(model.layers): print(i, layer.name) #model.summary(line_length=150) del model # + id="BCsZqqHyqAds" # + id="-CFjGGnr1iDN" origin_train_len = len(train_df) / 5 * 4 origin_val_len = len(train_df) / 5 * 1 train_num_steps = int(np.ceil((origin_train_len)/config['batch_size'])) val_num_steps = int(np.ceil((origin_val_len)/config['batch_size'])) print(train_num_steps, val_num_steps) # + id="YzKzI0vXsrJp" model_base_path = data_base_path model_checkpoint_path = pth.join(model_base_path, 'checkpoint') # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Ta_kqsLstQcV" outputId="760909ad-2f6c-444c-aa85-43ab5e364c6b" for conv_comb, activation, base_channel, \ between_type, fc_num, batch_size \ in itertools.product(conv_comb_list, activation_list, base_channel_list, between_type_list, fc_list, batch_size_list): config['conv']['conv_num'] = conv_comb config['conv']['base_channel'] = base_channel config['activation'] = activation config['between_type'] = between_type config['fc']['fc_num'] = fc_num config['batch_size'] = batch_size base = BASE_MODEL_NAME base += '_resize_{}'.format(config['aug']['resize'][0]) base += '_conv_{}'.format('-'.join(map(lambda x:str(x),config['conv']['conv_num']))) base += '_basech_{}'.format(config['conv']['base_channel']) base += '_act_{}'.format(config['activation']) base += '_pool_{}'.format(config['pool']['type']) base += '_betw_{}'.format(config['between_type']) base += '_fc_{}'.format(config['fc']['fc_num']) base += '_zscore_{}'.format(config['is_zscore']) base += '_batch_{}'.format(config['batch_size']) if config['is_dropout']: base += 
'_DO_'+str(config['dropout_rate']).replace('.', '') if config['is_batchnorm']: base += '_BN'+'_O' else: base += '_BN'+'_X' model_name = base print(model_name) ### Define dataset dataset = tf.data.TFRecordDataset(train_tfrecord_path, compression_type='GZIP') dataset = dataset.map(_parse_image_function, num_parallel_calls=tf.data.experimental.AUTOTUNE) # dataset = dataset.cache() dataset = dataset.map(map_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.map(resize_and_crop_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.map(image_aug_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.shuffle(config['buffer_size']) dataset = dataset.batch(config['batch_size']) dataset = dataset.map(post_process_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) val_dataset = tf.data.TFRecordDataset(val_tfrecord_path, compression_type='GZIP') val_dataset = val_dataset.map(_parse_image_function, num_parallel_calls=tf.data.experimental.AUTOTUNE) val_dataset = val_dataset.map(map_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) val_dataset = val_dataset.map(resize_and_crop_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) # val_dataset = val_dataset.map(image_aug_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) # val_dataset = val_dataset.shuffle(config['buffer_size']) val_dataset = val_dataset.batch(config['batch_size']) val_dataset = val_dataset.map(post_process_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) # val_dataset = val_dataset.cache() val_dataset = val_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) model_path = pth.join( model_checkpoint_path, model_name, ) model = build_cnn(config) # model.summary() initial_epoch = 0 if pth.isdir(model_path) and len([_ for _ in os.listdir(model_path) if _.endswith('hdf5')]) >= 1: model.compile(loss=config['loss'], 
optimizer=Adam(lr=config['learning_rate']), metrics=['acc', 'Precision', 'Recall', 'AUC']) model_chk_name = sorted(os.listdir(model_path))[-1] initial_epoch = int(model_chk_name.split('-')[0]) model.load_weights(pth.join(model_path, model_chk_name)) else: # first: train only the top layers (which were randomly initialized) # i.e. freeze all convolutional InceptionV3 layers model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc', 'Precision', 'Recall', 'AUC']) PRE_TRAIN_EPOCH = 6 model.fit( x=dataset, epochs=PRE_TRAIN_EPOCH, # train only top layers for just a few epochs. validation_data=val_dataset, shuffle=True, #callbacks = [checkpointer, es], #batch_size=config['batch_size'] initial_epoch=initial_epoch, # steps_per_epoch=train_num_steps, validation_steps=val_num_steps, verbose=1) # at this point, the top layers are well trained and we can start fine-tuning # convolutional layers from inception V3. We will freeze the bottom N layers # and train the remaining top layers. # let's visualize layer names and layer indices to see how many layers # we should freeze: for i, layer in enumerate(model.layers): print(i, layer.name) # we chose to train the top 2 inception blocks, i.e. 
we will freeze # the first 249 layers and unfreeze the rest: for layer in model.layers[:166]: # [:249]: layer.trainable = False for layer in model.layers[166:]: # [249:]: layer.trainable = True # we need to recompile the model for these modifications to take effect # we use Adam with a low learning rate model.compile(loss=config['loss'], optimizer=Adam(lr=config['learning_rate']), metrics=['acc', 'Precision', 'Recall', 'AUC']) initial_epoch = PRE_TRAIN_EPOCH # IGNORE 4 lines below in InceptionV3 # ### Freeze first layer # conv_list = [layer for layer in model.layers if isinstance(layer, keras.layers.Conv2D)] # conv_list[0].trainable = False # # conv_list[1].trainable = False os.makedirs(model_path, exist_ok=True) model_filename = pth.join(model_path, '{epoch:06d}-{val_loss:0.6f}-{loss:0.6f}.hdf5') checkpointer = ModelCheckpoint( filepath=model_filename, verbose=1, period=1, save_best_only=True, monitor='val_loss' ) es = EarlyStopping(monitor='val_loss', verbose=1, patience=10) ### 16 at night. 
10 genral, 6 for experiment hist = model.fit( x=dataset, epochs=config['num_epoch'], validation_data=val_dataset, shuffle=True, callbacks = [checkpointer, es], #batch_size=config['batch_size'] initial_epoch=initial_epoch, # steps_per_epoch=train_num_steps, validation_steps=val_num_steps, verbose=1 ) model_analysis_path = model_path.replace('checkpoint', 'analysis') visualization_path = pth.join(model_analysis_path,'visualization') os.makedirs(visualization_path, exist_ok=True) print() # clear_output() for each_label in ['loss', 'acc', 'precision', 'recall', 'auc']: fig, ax = plt.subplots() ax.plot(hist.history[each_label], 'g', label='train_{}'.format(each_label)) ax.plot(hist.history['val_{}'.format(each_label)], 'r', label='val_{}'.format(each_label)) ax.set_xlabel('epoch') ax.set_ylabel('loss') ax.legend(loc='upper left') if not each_label == 'loss': plt.ylim(0, 1) plt.show() filename = 'learning_curve_{}'.format(each_label) # fig.savefig(pth.join(visualization_path, filename), transparent=True) plt.cla() plt.clf() plt.close('all') np.savez_compressed(pth.join(visualization_path, 'learning_curve'), loss=hist.history['loss'], val_loss=hist.history['val_loss'], acc=hist.history['acc'], val_acc=hist.history['val_acc'], precision=hist.history['precision'], vaval_precisionl_mae=hist.history['val_precision'], recall=hist.history['recall'], val_recall=hist.history['val_recall'], auc=hist.history['auc'], val_auc=hist.history['val_auc'] ) model.save(pth.join(model_path, '000000_last.hdf5')) K.clear_session() del(model) model_analysis_base_path = pth.join(model_base_path, 'analysis', model_name) with open(pth.join(model_analysis_base_path, 'config.json'), 'w') as f: json.dump(config, f) chk_name_list = sorted([name for name in os.listdir(model_path) if name != '000000_last.hdf5']) for chk_name in chk_name_list[:-2]: os.remove(pth.join(model_path, chk_name)) # clear_output() # + id="ibXfENT5zvwZ" # + [markdown] id="57ARllmjWGk-" # ### Inference # + id="u1S7DrvwhFPM" 
image_feature_description_for_test = { 'image_raw': tf.io.FixedLenFeature([], tf.string), # 'randmark_id': tf.io.FixedLenFeature([], tf.int64), # 'id': tf.io.FixedLenFeature([], tf.string), } def _parse_image_function_for_test(example_proto): return tf.io.parse_single_example(example_proto, image_feature_description_for_test) def map_func_for_test(target_record): img = target_record['image_raw'] img = tf.image.decode_jpeg(img, channels=3) img = tf.dtypes.cast(img, tf.float32) return img def resize_and_crop_func_for_test(image): result_image = tf.image.resize(image, config['aug']['resize']) result_image = tf.image.random_crop(image, size=config['input_shape'], seed=7777) return result_image def post_process_func_for_test(image): # result_image = result_image / 255 result_image = my_model_base.preprocess_input(image) return result_image # + id="wi2igBp6WSYD" submission_base_path = pth.join(data_base_path, 'submission') os.makedirs(submission_base_path, exist_ok=True) # + colab={"base_uri": "https://localhost:8080/", "height": 88} id="71z9_wKaMPTJ" outputId="aae36664-100b-46f1-ebd1-25a3804e4b78" preds = [] for conv_comb, activation, base_channel, \ between_type, fc_num, batch_size \ in itertools.product(conv_comb_list, activation_list, base_channel_list, between_type_list, fc_list, batch_size_list): config['conv']['conv_num'] = conv_comb config['conv']['base_channel'] = base_channel config['activation'] = activation config['between_type'] = between_type config['fc']['fc_num'] = fc_num config['batch_size'] = batch_size base = BASE_MODEL_NAME base += '_resize_{}'.format(config['aug']['resize'][0]) base += '_conv_{}'.format('-'.join(map(lambda x:str(x),config['conv']['conv_num']))) base += '_basech_{}'.format(config['conv']['base_channel']) base += '_act_{}'.format(config['activation']) base += '_pool_{}'.format(config['pool']['type']) base += '_betw_{}'.format(config['between_type']) base += '_fc_{}'.format(config['fc']['fc_num']) base += 
'_zscore_{}'.format(config['is_zscore']) base += '_batch_{}'.format(config['batch_size']) if config['is_dropout']: base += '_DO_'+str(config['dropout_rate']).replace('.', '') if config['is_batchnorm']: base += '_BN'+'_O' else: base += '_BN'+'_X' model_name = base print(model_name) ### Define dataset test_dataset = tf.data.TFRecordDataset(test_tfrecord_path, compression_type='GZIP') test_dataset = test_dataset.map(_parse_image_function_for_test, num_parallel_calls=tf.data.experimental.AUTOTUNE) test_dataset = test_dataset.map(map_func_for_test, num_parallel_calls=tf.data.experimental.AUTOTUNE) test_dataset = test_dataset.map(resize_and_crop_func_for_test, num_parallel_calls=tf.data.experimental.AUTOTUNE) test_dataset = test_dataset.batch(config['batch_size']) test_dataset = test_dataset.map(post_process_func_for_test, num_parallel_calls=tf.data.experimental.AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) model_path = pth.join( model_checkpoint_path, model_name, ) model = build_cnn(config) # model.summary() model.compile(loss=config['loss'], optimizer=Adam(lr=config['learning_rate']), metrics=['acc', 'Precision', 'Recall', 'AUC']) initial_epoch = 0 model_chk_name = sorted(os.listdir(model_path))[-1] initial_epoch = int(model_chk_name.split('-')[0]) model.load_weights(pth.join(model_path, model_chk_name)) preds = model.predict(test_dataset, verbose=1) #pred_labels = np.argmax(preds, axis=1) #pred_probs = np.array([pred[indice] for pred, indice in zip(preds, pred_labels)]) # argmax --> top3 pred_labels = np.argsort(-preds) submission_csv_path = pth.join(data_base_path, submission_csv_name) submission_df = pd.read_csv(submission_csv_path) merged_df = [] RANK_TO_SAVE = 3 for i in range(RANK_TO_SAVE): tmp_df = submission_df.copy() tmp_labels = pred_labels[:, i] tmp_df['landmark_id'] = tmp_labels tmp_df['conf'] = np.array([pred[indice] for pred, indice in zip(preds, tmp_labels)]) merged_df.append(tmp_df) submission_df = 
pd.concat(merged_df) #submission_df['landmark_id'] = pred_labels #submission_df['conf'] = pred_probs today_str = datetime.date.today().strftime('%Y%m%d') result_filename = '{}.csv'.format(model_name) submission_csv_fileaname = pth.join(submission_base_path, '_'.join([today_str, result_filename])) submission_df.to_csv(submission_csv_fileaname, index=False) # - # + [markdown] id="oMStwUj7nYz9" #
20201108_ResNet50V2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# YOLO-style cat-detector demo (MXNet, Python 2): restore a trained
# checkpoint, run it on a RecordIO batch and draw predicted vs. ground-truth
# bounding boxes on a 7x7 output grid.

import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import mxnet as mx
from symbol import get_resnet_model
from symbol import YOLO_loss
from data_ulti import get_iterator


# +
def decodeBox(yolobox, size, dscale):
    # Convert a YOLO cell-relative box (grid row i, grid col j, offsets cx,cy
    # in cell units, w,h relative to image size) to absolute pixel
    # coordinates [center_x, center_y, width, height].
    # dscale is the stride from input pixels to grid cells (32 by default).
    i, j, cx, cy, w, h = yolobox
    cxt = j*dscale + cx*dscale
    cyt = i*dscale + cy*dscale
    wt = w*size
    ht = h*size
    return [cxt, cyt, wt, ht]

def bboxdraw(img, label, dscale=32):
    # Draw every box whose confidence (channel 0) exceeds 0.2.
    # label layout: (7, 7, 5) = grid x grid x [conf, cx, cy, w, h].
    assert label.shape == (7,7,5)
    size = img.shape[1]
    ilist, jlist = np.where(label[:,:,0]>0.2)
    # Create figure and axes
    fig,ax = plt.subplots(1)
    ax.imshow(np.uint8(img))
    for i,j in zip(ilist, jlist):
        cx,cy,w,h = label[i,j,1:]
        cxt, cyt, wt ,ht = decodeBox([i, j, cx,cy,w,h], size, dscale)
        # Create a Rectangle patch
        rect = patches.Rectangle((cxt-wt/2,cyt-ht/2), wt,ht,linewidth=1,edgecolor='r',facecolor='none')
        # Add the patch to the Axes
        ax.add_patch(rect)
        # Mark the box center.
        plt.plot(int(cxt), int(cyt), '*')
    plt.show()
# -

data = mx.io.ImageRecordIter(path_imgrec='DATA_rec/cat_small.rec',
                             data_shape=(3,224,224),
                             label_width=7*7*5,
                             batch_size=1,)

# get sym: restore the symbol and weights saved at epoch 448, and expose the
# internal 'logit_output' node as the prediction head.
sym, args_params, aux_params = mx.model.load_checkpoint('cat_detect_full_scale', 448)
logit = sym.get_internals()['logit_output']
mod = mx.mod.Module(symbol=logit, context=mx.gpu(0))
mod.bind(data.provide_data)
mod.init_params(allow_missing=False, arg_params=args_params, aux_params=aux_params,
                initializer=mx.init.Xavier(magnitude=2,rnd_type='gaussian',factor_type='in'))
out = mod.predict(eval_data=data, num_batch=10)

# Advance to the third record and compare prediction with ground truth.
data.reset()
batch = data.next()
batch = data.next()
batch = data.next()
img = batch.data[0].asnumpy()[0].transpose((1,2,0))
label = batch.label[0].asnumpy().reshape((7,7,5))
# (out+1)/2 maps the network output from [-1,1] to [0,1]
# (assumes a tanh-style output activation -- TODO confirm).
pred = (out.asnumpy()[2]+1)/2
print pred.shape
print "Prediction"
bboxdraw(img, pred)
print "Ground Truth"
bboxdraw(img, label)
DS502-1704/MXNet-week2-part1/demo_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + hide_input=true slideshow={"slide_type": "skip"} tags=["remove-cell"] # %run -i ../python/common.py # %run -i ../python/ln_preamble.py # + [markdown] hide_input=true slideshow={"slide_type": "slide"} # # SLS Lecture 3 : I/O, Process Control and Credentials # # **Running and Managing Programs (Commands)** # + hide_input=false slideshow={"slide_type": "slide"} tags=["remove-input"] display(mkImgsAnimateBox(dir="../images/Processes")) # + [markdown] slideshow={"slide_type": "notes"} # ## Processes, Files and Streams # # # - What is a process in more detail and what can it do # - a running program # - process can launch another processes # - bash is implemented to start commands as process (when needed) # - passes command line arguments and environment variables # - Processes, Files and channel/stream model of I/O # - files and Kernel objects # - read, write # - everything is a file # - Streams: Processes and Files # - open : attach a file as a stream # - file descriptors/handles : stream # - read and write bytes to a stream # - close # - dup # - standard input, standard output, standard error # - Shell Syntax: # - `echo 'Hello world' > hello` # - `cat hello` # - `cat < ./hello` # - We now can understand what a pipe is # - pipe file object # - Process 1 stdout into pipe and Process 2 stdin from pipe # - Shell Syntax: # - 'ls -1 | wc -l' # - 'ls -1 | grep '^l*' | wc -l' # - `mknod mypipe p` and `mkfifo mypipe` # # ## Credentials and file permissions # # - Process have id's associated with them # - a single user id : `id` # - a single number that maps to a string user name (`/etc/passwd`) # - set of group ids # - user has a primary group but can be in many secondary groups # - each has a number that maps to a name (`/etc/group`) # - each 
group can have many users # - `ps auxgww` # - process's inherit their ids from their parent # - Files have id's and permissions `ls -l`, `chmod`, `chown`, and `chgrp` # - user, group, other -> read, write, execute # - kernel ensures that process id's and requested operations match permissions # # # - Process management # - `ps` - look at all processes # - The shell and its children # - `&` : foreground and background # - `ctrl-z` # - `jobs` # - so we know how to start process, list them how about stopping # - `kill` # - signals # - `ctrl-c` # - or without prejudice # + hide_input=true tags=["remove-cell"] display(showTerm(EDITORTERM,"Term1","100%","400")) display(showTerm(BUILDTERM,"Term2","100%","400"))
underthecovers/unix/L03.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.10 ('bbcm') # language: python # name: python3 # --- import sys sys.path.append('./') import pandas as pd from actlog_console import ActLogs al = ActLogs(path="./data/hdl_test.log") al.focus(focus_action_ids=['15', '1A', '1E', '1F'], focus_action_names=['弗萝北']) print(len(al)) lines = [al[idx] for idx in range(len(al))] pdd = pd.DataFrame(lines) columns = [ # 'time', 'skill_id', 'skill_name', 'action_id', 'skill_flags', # 'feat_a', 'feat_b', 'feat_c', 'feat_d', 'active', 'summon', 'summon_stack', 'next_attach', 'aetherflow'] slice = pdd[80:120][columns] slice.where(slice.notnull(), "") # + import socket serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) serversocket.bind(('localhost', 2022)) serversocket.listen(10) # become a server socket, maximum 5 connections while True: connection, address = serversocket.accept() buf = connection.recv(64) if len(buf) > 0: print(buf) break # - lines[0]
actlog_parser.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## pandas merge connects columns or indexes in DataFrame based on one or more keys

import pandas as pd

# Shared building blocks for the demo frames.
ids = [1, 2, 3, 4]
scores = [88, 99, 89, 67]
names = ['akshay', 'manali', 'aditi', 'aksh']

# Two frames that share the key column "ID".
df1 = pd.DataFrame({'ID': ids, 'Class': scores})
df1

df2 = pd.DataFrame({'ID': ids, 'Name': names})
df2

# Inner join (the default): only keys present in both frames survive.
# Argument order controls column order in the result.
pd.merge(df1, df2, on="ID")
pd.merge(df2, df1, on="ID")

# how= selects which side's keys are kept.
pd.merge(df2, df1, on="ID", how='left')
pd.merge(df2, df1, on="ID", how='right')
pd.merge(df2, df1, on="ID", how='outer')  # outer: union of all key values

# indicator=True adds a _merge column showing each row's provenance.
pd.merge(df2, df1, on="ID", how='right', indicator=True)

# With disjoint key values, join on the positional index instead of a column.
df2 = pd.DataFrame({'ID': [5, 6, 7, 8], 'Name': names})
df2
pd.merge(df1, df2, left_index=True, right_index=True)

# Merging two frames with identical columns: the shared non-key columns get
# _x / _y suffixes.
df1 = pd.DataFrame({'ID': ids, 'Class': scores})
df1
df2 = pd.DataFrame({'ID': ids, 'Class': scores})
df2
pd.merge(df1, df2, on='ID')
merging function in pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={} colab_type="code" id="cWbTbDKguNaf"
# Load necessary modules and libraries
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.model_selection import learning_curve
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, RationalQuadratic, Matern, ExpSineSquared, DotProduct
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# +
# Load the data: surface-geometry features, channel roughness Ra_ch and
# equivalent sand-grain roughness ks, all from the same CSV.
Geometry1 = pd.read_csv('Surface_features.csv', header=0, usecols=(4, 8, 9, 10, 11, 12, 14))
Geometry = pd.read_csv('Surface_features.csv', header=0, usecols=(4, 6, 7, 8, 9, 10, 11, 12)).values
Ra_ch = pd.read_csv('Surface_features.csv', header=0, usecols=(5,)).values[:, 0]
ks = pd.read_csv('Surface_features.csv', header=0, usecols=(13,)).values[:, 0]

# Normalise by Ra and relabel the columns with LaTeX names for plotting.
Geometry1["ks"] = np.divide(ks, Ra_ch)
Geometry1["krms_ch"] = np.divide(Geometry1["krms_ch"], Ra_ch)
Geometry1.rename({'krms_ch': '$k_{rms}/R_a$',
                  'pro_ch': '$P_o$',
                  'ESx_ch': '$E_x$',
                  'ESz_ch': '$E_z$',
                  'sk_ch': '$S_k$',
                  'ku_ch': '$K_u$',
                  'ks': '$k_s/R_a$',
                  'label': 'Label',
                  }, axis='columns', errors="raise", inplace=True)

# +
# Plot raw data: pairwise feature distributions coloured by label.
plt.rc('text', usetex=True)
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif',
        font_scale=3, color_codes=True, rc=None)
g = sns.pairplot(Geometry1, diag_kind="kde",
                 # palette="seismic",
                 hue='Label',
                 plot_kws=dict(s=70, facecolor="w", edgecolor="w", linewidth=1),
                 diag_kws=dict(linewidth=1.5))
g.map_upper(sns.kdeplot)
g.map_lower(sns.scatterplot, s=50)
# NOTE: the deprecated `papertype`/`frameon` kwargs were dropped — they were
# removed from savefig in matplotlib 3.3+ and raised TypeError.
plt.savefig('pair.pdf', dpi=None, facecolor='w', edgecolor='w',
            orientation='portrait', format=None, transparent=False,
            bbox_inches=None, pad_inches=0.1, metadata=None)

# + colab={} colab_type="code" id="vyIuRbHMz9n-"
# Data reconfiguration, to be used in ML.
X = Geometry
y = np.divide(ks, Ra_ch)
X[:, 0] = np.divide(X[:, 0], Ra_ch)
X[:, 2] = np.abs(X[:, 2])

# Generate secondary features (pairwise products and squares of columns 4-7)
# and append them to the original dataset.  Replaces the original X1..X9
# copy-paste boilerplate; column order is identical.
pairs = [(4, 5), (4, 6), (4, 7), (5, 6), (5, 7), (6, 7), (4, 4), (5, 5), (6, 6)]
X = np.hstack([X] + [(X[:, i] * X[:, j]).reshape(-1, 1) for i, j in pairs])

# +
# Load the trained ML network.  `with` guarantees the file handle is closed
# (the original `pickle.load(open(...))` leaked it).
with open('DNN_best.sav', 'rb') as model_file:
    loaded_model = pickle.load(model_file)
loaded_model.get_params()

# +
# Predict ks/Ra and report the relative error against the measured values.
yn = loaded_model.predict(X)
print("PREDICTED k_s/R_a= ")
print(yn)
print("Max err: %f" % max(100. * abs(yn - y) / (y)))
print("mean err: %f" % np.mean(100. * abs(yn - y) / (y)))

Error = pd.DataFrame()
Error["$k_s/R_a$"] = y
Error["$k_{sp}/R_a$"] = yn
Error["$error(\%)$"] = (100. * (yn - y) / (y))
Error["Label"] = Geometry1["Label"]

# +
# Plot the results: predicted vs measured, with a y=x reference line.
plt.rc('text', usetex=True)
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif',
        font_scale=2, color_codes=True, rc=None)
# NOTE: dropped diag_kws kernel='gau' — the `kernel` kwarg was removed in
# seaborn 0.11+ ('gau' was the default anyway).
g = sns.pairplot(Error, diag_kind="kde",
                 # palette="seismic",
                 hue='Label', aspect=1.,
                 plot_kws=dict(s=50, facecolor="w", edgecolor="w", linewidth=1.),
                 diag_kws=dict(linewidth=1.5))
g.map_upper(sns.kdeplot)
g.map_lower(sns.scatterplot, s=50, legend='full')
g.axes[-2, 0].plot(range(18), range(18), 'k--', linewidth=1.7)
for i in range(0, 3):
    for ax in g.axes[:, i]:
        ax.spines['top'].set_visible(True)
        ax.spines['right'].set_visible(True)
plt.savefig('DNN_result.pdf', dpi=None, facecolor='w', edgecolor='w',
            orientation='portrait', format=None, transparent=False,
            bbox_inches=None, pad_inches=0.1, metadata=None)
# -
1/loadDNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Table and Graph Extraction # # This notebook demonstrates how to extract tables and graphs from literature accurately and successfully. # # ### Tables # # This notebook focuses on extraction of tables and graphs, in order to prove the idea, test articles will be used as example # # In the ChemDataExtractor, the test_table.py only shows how to parse the designed tables. Our collected literature vary by formats, they include html, xml, and pdf files. # # Paragraph function splits table into unreadable content. We need to extract tables out first then parse their content. # # ### Graphs # # Graphs in html and xml are hard to extract due to how they are stored. Graphs in pdf can be mined by using the python library pdfminer.six. import logging import re import pandas as pd import urllib import time import pdfminer import chemdataextractor as cde from chemdataextractor import Document import chemdataextractor.model as model from chemdataextractor.model import Compound, UvvisSpectrum, UvvisPeak, BaseModel, StringType, ListType, ModelType from chemdataextractor.parse.common import hyphen from chemdataextractor.parse.base import BaseParser from chemdataextractor.utils import first from chemdataextractor.parse.actions import strip_stop from chemdataextractor.parse.elements import W, I, T, R, Optional, ZeroOrMore, OneOrMore from chemdataextractor.parse.cem import chemical_name from chemdataextractor.doc import Paragraph, Sentence, Caption, Figure,Table from chemdataextractor.doc.table import Table, Cell from chemdataextractor.reader import PdfReader, HtmlReader, XmlReader, PlainTextReader # + # open and read files f = open('test_articles/paper0.pdf', 'rb') doc = Document.from_file(f) abstract = [11] f1 = open('test_articles/paper1.pdf', 'rb') doc1 = 
Document.from_file(f1) abstract1 = [7,8] f2 = open('test_articles/paper2.pdf', 'rb') doc2 = Document.from_file(f2) abstract2 = [7,8] f3 = open('test_articles/paper3.pdf', 'rb') doc3 = Document.from_file(f3) abstract3 = [10] f4 = open('test_articles/paper4.pdf', 'rb') doc4 = Document.from_file(f4) abstract4 = [12] f5 = open('test_articles/paper5.pdf', 'rb') doc5 = Document.from_file(f5) abstract5 = [3,4] f6 = open('test_articles/paper6.pdf', 'rb') doc6 = Document.from_file(f6) abstract6 = [5,6,7,8] f7 = open('test_articles/paper7.pdf', 'rb') doc7 = Document.from_file(f7) abstract7 = [11] # - # split the paragraph into elements paras = doc.elements cems = doc.cems doc.records.serialize() paras # ## Built-in Test Examples in CDE t = Table( caption=Caption('Selected photophysical properties of biarylsubstituted pyrazoles 5–8 and 1-methyl-3,5-diphenylpyrazole (9) at room temperature'), headings=[ [ Cell('Compound'), Cell('Absorption maxima λmax,abs (ε) [nm] (L cm−1 mol−1)'), Cell('Emission maxima λmax,em (Φf) [nm] (a.u.)'), Cell('Stokes-shift Δṽ [cm−1]') ] ], rows=[ [Cell(' 5a '), Cell('273.5 (40 100)'), Cell('357.0 (0.77)'), Cell('9400')], [Cell(' 5b '), Cell('268.5 (36 700)'), Cell('359.0 (0.77)'), Cell('8600')], [Cell('Coumarin 343'), Cell('263.0 (38 400)'), Cell('344.5 (0.67)'), Cell('9000')], [Cell(' 5d '), Cell('281.0 (34 200)'), Cell('351.5 (0.97)'), Cell('7100')], [Cell(' 5e '), Cell('285.0 (44 000)'), Cell('382.0 (0.35)'), Cell('8900')], [Cell(' 5f '), Cell('289.0 (43 300)'), Cell('363.0 (0.80)'), Cell('7100')], [Cell(' 5g '), Cell('285.0 (42 000)'), Cell('343.5 (0.86)'), Cell('6000')], [Cell(' 6a '), Cell('283.5 (35 600)'), Cell('344.5 (0.49)'), Cell('6300')], [Cell(' 6b '), Cell('267.5 (35 800)'), Cell('338.5 (0.83)'), Cell('7800')], [Cell(' 6c '), Cell('286.0 (33 000)'), Cell('347.0 (0.27)'), Cell('6200')], [Cell(' 6d '), Cell('306.5 (36 600)'), Cell('384.0 (0.10)'), Cell('6600')], [Cell(' 7 '), Cell('288.5 (62 500)'), Cell('367.0 (0.07)'), Cell('7400')], 
[Cell('Compound 8a '), Cell('257.0 (36 300), 293.0 sh (25 000)'), Cell('385.0 (0.41)'), Cell('8200')], [Cell(' 8b '), Cell('257.0 (32 000), 296.0 sh (23000)'), Cell('388.0 (0.33)'), Cell('8000')], [Cell(' 8c '), Cell('257.0 (27 400), 307.5 (18900)'), Cell('387.0 (0.12)'), Cell('6700')], [Cell(' 8d '), Cell('268.5 (29 500)'), Cell('385.0 (0.29)'), Cell('11 300')], [Cell('Dye 8e '), Cell('261.5 (39 900), 288.0 sh (29 600), 311.0 sh (20 500)'), Cell('386.5 (0.37)'), Cell('6300')], [Cell(' 8f '), Cell('256.5 (27 260), 296.0 (28404)'), Cell('388.5 (0.35)'), Cell('8000')], [Cell(' 8g '), Cell('272.5 (39 600)'), Cell('394.0 (0.30)'), Cell('11 300')], [Cell(' 8h '), Cell('286.0 (22 900)'), Cell('382.5 (0.33)'), Cell('8800')], [Cell(' 9 '), Cell('254.0 (28 800)'), Cell('338.5 (0.40)'), Cell('9800')]] ) t [record.serialize() for record in t.records] # + # in paper0, the table is shown below t2 = Table( caption = Caption("Table 1. Physicochemical Properties of the Study Polymers"), headings=[ [ Cell('Polymer'), Cell('Mn (kg/mol)'), Cell('PDI'), Cell('Tg (C)'), Cell('Td (C)'), Cell('soluton/λmax [nm]'), Cell('film/λmax [nm]'), Cell('HOMO [eV]'), Cell('LUMO [eV]'), Cell('Egec[eV]'), Cell('Egopt[eV]'), ] ], rows=[ [Cell(' PBDT-IIG '), Cell('21'), Cell('2.6'), Cell('56'),Cell('380'),Cell('359, 446, 625'),Cell('367, 456, 631, 678'),Cell('-5.38'),Cell('-5.35'),Cell('2.03'),Cell('1.59')], [Cell(' PBDT-TIIG '), Cell('34'), Cell('2.8'), Cell('76'),Cell('411'),Cell('463, 826,854'),Cell('466, 833,856'),Cell('-4.96'),Cell('-3.29'),Cell('1.67'),Cell('1.05')] ] ) # - t2 # illustration from paper0.pdf # output result to json format [record.serialize() for record in t2.records] t3 = Table(caption = Caption("Table 3. 
Photovoltaic Parameters of Optimized Solar Cells"), headings = [ [ Cell('polymer'), Cell('polymer:PC71BM'), Cell('Voc [V]'), Cell('Jsc [mA cm-2]'), Cell('FF [%]'), Cell('PCE [%]'), ] ], rows = [ [Cell(' PBDT-IIG '), Cell('21'), Cell('2.6'), Cell('56'),Cell('380'),Cell('359, 446, 625'),Cell('367, 456, 631, 678'),Cell('-5.38'),Cell('-5.35'),Cell('2.03'),Cell('1.59')], [Cell(' PBDT-TIIG '), Cell('21'), Cell('2.6'), Cell('56'),Cell('380'),Cell('359, 446, 625'),Cell('367, 456, 631, 678'),Cell('-5.38'),Cell('-5.35'),Cell('2.03'),Cell('1.59')], ] ) [record.serialize() for record in t3.records] # Based on the test we can examine that the result is valid, all numbers are accurately extracted. Tabulated data is much easier to extract compare to graphs and texts. # # # ## Extracting Tables # # ### Tools # 1. pyPDF2 # 2. pymupdf # 3. pdfCandy (not python supported) -- good image extraction # 4. tabula -- **poor accuracy** # 5. pdftables def recog_tables(paras): """ This function extract table from paragraphs and then parse it :paras paragraphs """ for i in range(len(paras)): if str(paras[i]).startswith("Table") len(paras) # 196 elements paras[0] # only shows to 195 def find_table(paras): """ Find tables from paragraphs """ ls = [] length = len(paras) for i in range(length): if str(paras[i]).startswith("Table"): ls.append(i) return ls find_table(paras) paras[35] paras[88] paras[120] # how to read tables from pdf p = open('test_articles/paper0.pdf', 'rb') doc_0 = Document.from_file(p,readers=[PdfReader()]) doc_0 # ## Extracting Graphs # # Extracting chemical graphs and convert them to SMILES using OSRA or other software (usually without API) to process them # # ### Tools # 1. pyPDF2 # 2. pyMuPDF # # I decided to use pyMuPDF to extract images, but we found any graph with chemical graphs embedded cannot be detected and extracted. The rest of graphs can be accuratley extracted. # # Chemical graphs need different treatment. Refer to images from example papers # + # # ! 
pip install tabula # # ! pip install git+https://github.com/pdftables/python-pdftables-api.git # # ! pip install pypdf # # ! pip install tabula-py # # ! pip install PyMuPDF # # ! pip install pysimplegui # - # ### Test and Selection # # Among all packages for image extraction, we found that pyMuPDF is the best one with customizable package. In this case the code uses pysimpleGUI to select file and extract images # + from __future__ import print_function import os, sys, time import fitz import PySimpleGUI as sg """ PyMuPDF utility ---------------- For a given entry in a page's getImagleList() list, function "recoverpix" returns either the raw image data, or a modified pixmap if an /SMask entry exists. The item's first two entries are PDF xref numbers. The first one is the image in question, the second one may be 0 or the object id of a soft-image mask. In this case, we assume it being a sequence of alpha bytes belonging to our image. We then create a new Pixmap giving it these alpha values, and return it. If the result pixmap is CMYK, it will be converted to RGB first. 
""" print(fitz.__doc__) if not tuple(map(int, fitz.version[0].split("."))) >= (1, 13, 17): raise SystemExit("require PyMuPDF v1.13.17+") dimlimit = 100 # each image side must be greater than this relsize = 0.05 # image : pixmap size ratio must be larger than this (5%) abssize = 2048 # absolute image size limit 2 KB: ignore if smaller imgdir = "images" # found images are stored in this subfolder if not os.path.exists(imgdir): os.mkdir(imgdir) def recoverpix(doc, item): x = item[0] # xref of PDF image s = item[1] # xref of its /SMask if s == 0: # no smask: use direct image output return doc.extractImage(x) def getimage(pix): if pix.colorspace.n != 4: return pix tpix = fitz.Pixmap(fitz.csRGB, pix) return tpix # we need to reconstruct the alpha channel with the smask pix1 = fitz.Pixmap(doc, x) pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry # sanity check if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1): pix2 = None return getimage(pix1) pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value pix1 = pix2 = None # free temp pixmaps # we may need to adjust something for CMYK pixmaps here: return getimage(pix) fname = sys.argv[1] if len(sys.argv) == 2 else None if not fname: fname = sg.PopupGetFile("Select file:", title="PyMuPDF PDF Image Extraction") if not fname: raise SystemExit() t0 = time.time() doc = fitz.open(fname) page_count = len(doc) # number of pages xreflist = [] imglist = [] for pno in range(page_count): sg.QuickMeter( "Extract Images", # show our progress pno + 1, page_count, "*** Scanning Pages ***", ) il = doc.getPageImageList(pno) imglist.extend([x[0] for x in il]) for img in il: xref = img[0] if xref in xreflist: continue width = img[2] height = img[3] if min(width, height) <= dimlimit: continue pix = recoverpix(doc, img) if type(pix) is dict: # we got a raw image ext = pix["ext"] imgdata = pix["image"] n = pix["colorspace"] imgfile = 
os.path.join(imgdir, "img-%i.%s" % (xref, ext)) else: # we got a pixmap imgfile = os.path.join(imgdir, "img-%i.png" % xref) n = pix.n imgdata = pix.getPNGData() if len(imgdata) <= abssize: continue if len(imgdata) / (width * height * n) <= relsize: continue fout = open(imgfile, "wb") fout.write(imgdata) fout.close() xreflist.append(xref) t1 = time.time() imglist = list(set(imglist)) print(len(set(imglist)), "images in total") print(len(xreflist), "images extracted") print("total time %g sec" % (t1 - t0)) # - # Automate the above scripts to make things easier, we only need to pre-define the folder for pdfs and images. We would also like to see image extraction from xml and html files.
DataExtractor/notebook/Demos/.ipynb_checkpoints/Test on Table and Graphs-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="4WFmm66VMMlO"
# Encode a Google service-account JSON file so it can be stored in an
# environment variable (Heroku / GitHub secrets) and decoded back at runtime.
import base64
import datetime
import json
import os

nome = "XXXXXX.json"  # replace with the credentials file name
spreadsheet_id = "xxxxxxxxxxx"  # replace with the spreadsheet ID

# + [markdown] id="rVWMD-kKOgNh"
# ### On Heroku, set `GOOGLE_SHEET_ID` to spreadsheet_id and
# ### `GOOGLE_SHEETS_CREDENTIALS` to the quoted content printed by
# ### `print(conteudo_codificado)` below.

# + id="H9PR-EDSQR7q"
# Read the JSON file and base64-encode its raw bytes.  `with` guarantees the
# handle is closed (the original `open(...)` was never closed).
with open(nome, mode="rb") as arquivo:
    conteudo = arquivo.read()
conteudo_codificado = base64.b64encode(conteudo)
print(conteudo_codificado)  # copy the value between the quotes into Heroku

# + id="9z315qaNOGxj"
# ON HEROKU
# GOOGLE_SHEET_ID = spreadsheet_id
# GOOGLE_SHEETS_CREDENTIALS = the quoted content printed above

# + [markdown] id="S8Otnwm2PfIH"
# ### On GitHub, set `GOOGLE_SHEETS_CREDENTIALS` to the decoded content.

# + id="36xVHH-9Kvzl"
# Decode the credentials back from the environment (used in CI on GitHub).
spreadsheet_id = os.environ["GOOGLE_SHEET_ID"]
conteudo_codificado = os.environ["GOOGLE_SHEETS_CREDENTIALS"]
conteudo = base64.b64decode(conteudo_codificado)
credentials = json.loads(conteudo)
tutoriais/codificacao_decodificacao_das_credenciais.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Merge renaming file and routing file
#
# Merge a table of per-question routing information into the larger data
# description table; each survey wave gets its own routing column.

import pandas as pd

# Load tables
renaming = pd.read_csv("tables/xyx-corona-questionnaire_renaming.csv", sep=";")
routing = pd.read_csv("tables/old/routing_table.csv", sep=";")

wave_columns = ["L_CoronavirusImpact_wave4_4p.dta", "L_CoronavirusImpact_wave3_4p.dta", "L_CoronavirusImpact_4.0p.dta","L_CoronavirusImpact_3p.dta"]

for wave_number, wave_col in enumerate(wave_columns, start=1):
    # Slice this wave's routing rows and give its routing column a
    # wave-specific name before joining.
    wave_routing = routing.loc[routing["wave"] == wave_number]
    routed_col = f"routing_wave_{wave_number}"
    wave_routing = wave_routing.rename(columns={"routing": routed_col})
    renaming = renaming.merge(
        wave_routing[["original_name", routed_col]],
        how="left",
        left_on=wave_col,
        right_on="original_name",
    )
    # The join key column is redundant once merged.
    renaming.drop("original_name", axis=1, inplace=True)

renaming.to_csv("tables/xyx-corona-questionnaire_renaming_including_routing.csv", sep=";",index=False)

renaming
python/merge_routing_and_renaming.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Rossmann sales regression with the fastai dev tabular API.

# %reload_ext autoreload
# %autoreload 2

from fastai import *
from fastai.tabular import *
from fastai.models.tabular import TabularModel

PATH = Path('../data/rossmann/')
# FIX: the original read train_clean twice and discarded the first load.
train_df = pd.read_feather(PATH/'train_clean')
test_df = pd.read_feather(PATH/'test_clean')

# +
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday',
            'CompetitionMonthsOpen', 'Promo2Weeks', 'StoreType', 'Assortment',
            'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
            'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw',
            'StateHoliday_fw', 'StateHoliday_bw', 'SchoolHoliday_fw',
            'SchoolHoliday_bw']

cont_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC',
             'Min_TemperatureC', 'Max_Humidity', 'Mean_Humidity',
             'Min_Humidity', 'Max_Wind_SpeedKm_h', 'Mean_Wind_SpeedKm_h',
             'CloudCover', 'trend', 'trend_DE', 'AfterStateHoliday',
             'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
# -

dep_var = 'Sales'
train_df = train_df[cat_vars + cont_vars + [dep_var, 'Date']].copy()

test_df['Date'].min(), test_df['Date'].max(), len(test_df)

# Validation split: hold out the most recent len(test_df) rows by date,
# mirroring the test period.
cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max()
cut

train_df = train_df.set_index('Date')
train_df, valid_df = train_df[cut:], train_df[:cut]
len(train_df), len(valid_df)

tfms = [FillMissing, Categorify]
data = tabular_data_from_df(PATH, train_df, valid_df, dep_var,
                            tfms=[FillMissing, Categorify],
                            cat_names=cat_vars, cont_names=cont_vars,
                            log_output=True, num_workers=0)

# Embedding size per categorical: (cardinality+1, min(50, (cardinality+2)//2)).
cat_szs = [len(train_df[n].cat.categories) + 1 for n in cat_vars]
emb_szs = [(c, min(50, (c + 1) // 2)) for c in cat_szs]
emb_szs

# Clamp predictions slightly above the max observed log-sales.
max_log_y = np.log(np.max(train_df['Sales']))
y_range = torch.tensor([0, max_log_y * 1.2], device=default_device)

model = TabularModel(emb_szs, len(cont_vars), 1, [1000, 500], [0.001, 0.01],
                     emb_drop=0.04, y_range=y_range, is_reg=True)

learn = Learner(data, model)
learn.loss_fn = F.mse_loss
learn.metrics = [exp_rmspe]
learn.fit_one_cycle(1, 1e-3, wd=0.2, pct_start=0.2)
dev_nb/examples/tabular.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 2015 DATA USED
#
# Correlates 2015 World Bank GDP with life expectancy per country.

import warnings
warnings.simplefilter('ignore', FutureWarning)

import pandas as pd
from scipy.stats import spearmanr

YEAR = 2015
GDP_INDICATOR = 'NY.GDP.MKTP.CD'
LIFE_INDICATOR = 'SP.DYN.LE00.IN'
COUNTRY = 'Country Name'
GDP = 'GDP (£m)'
LIFE = 'Life expectancy (years)'
# 2015 USD->GBP exchange rate; was a magic number buried inside usdToGBP.
USD_PER_GBP = 1.564768


def roundToMillions(value):
    """Round a currency amount to whole millions (Python banker's rounding)."""
    return round(value / 1000000)


def usdToGBP(usd, rate=USD_PER_GBP):
    """Convert US dollars to GB pounds at `rate` dollars per pound."""
    return usd / rate


def main():
    """Run the full 2015 GDP-vs-life-expectancy analysis."""
    gdpReset = pd.read_csv('gdp.csv')
    lifeReset = pd.read_csv('life.csv')
    lifeReset.head()

    # Rows 0-33 are regional/income-group aggregates, not countries.
    gdpCountries = gdpReset[34:].dropna()
    lifeCountries = lifeReset[34:].dropna()

    gdpCountries[GDP] = gdpCountries['2015'].apply(usdToGBP).apply(roundToMillions)
    gdpCountries.head()
    gdpClean = gdpCountries[[COUNTRY, GDP]]
    gdpClean.head()

    lifeCountries[LIFE] = lifeCountries['2015'].apply(round)
    lifeClean = lifeCountries[[COUNTRY, LIFE]]
    lifeClean.head()

    gdpVsLife = gdpClean.merge(lifeClean, on=COUNTRY, how='inner')
    gdpVsLife.head()

    # Spearman rank correlation: monotonic association, robust to the
    # heavy-tailed GDP distribution.
    (correlation, pValue) = spearmanr(gdpVsLife[GDP], gdpVsLife[LIFE])
    print('The correlation is', correlation)
    if pValue < 0.05:
        print('It is statistically significant.')
    else:
        print('It is not statistically significant.')

    # %matplotlib inline
    gdpVsLife.plot(x=GDP, y=LIFE, kind='scatter', grid=True,
                   logx=True, figsize=(10, 4))

    # the 10 countries with lowest GDP
    gdpVsLife.sort_values(GDP).head(10)
    # the 10 countries with lowest life expectancy
    gdpVsLife.sort_values(LIFE).head(10)


if __name__ == '__main__':
    main()
Hamdiya Adams WT-21-099/GDP AND LIFE EXPECTANCY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/tanaymukherjee/HackerRank-Challenges/blob/main/Sales_by_Match.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="hlH3XjGlMhrA" # # Sales by Match # + [markdown] id="bgb7mOylMqcV" # ## Problem # + [markdown] id="IiV2DNZZMjA-" # ``` # There is a large pile of socks that must be paired by color. Given an array of integers representing the color of each sock, determine how many pairs of socks with matching colors there are. # # Example # # # There is one pair of color and one of color . There are three odd socks left, one of each color. The number of pairs is . # # Function Description # # Complete the sockMerchant function in the editor below. # # sockMerchant has the following parameter(s): # # int n: the number of socks in the pile # int ar[n]: the colors of each sock # Returns # # int: the number of pairs # Input Format # # The first line contains an integer , the number of socks represented in . # The second line contains space-separated integers, , the colors of the socks in the pile. # # Constraints # # where # Sample Input # # STDIN Function # ----- -------- # 9 n = 9 # 10 20 20 10 10 30 50 10 20 ar = [10, 20, 20, 10, 10, 30, 50, 10, 20] # Sample Output # # 3 # ``` # + id="2bauY7FkNBFe" # #!/bin/python import math import os import random import re import sys # Complete the sockMerchant function below. 
def sockMerchant(n, ar):
    """Return how many color-matched pairs exist in a pile of socks.

    n  -- number of socks in the pile (per the HackerRank contract; the
          count is derived from `ar` itself, so `n` is informational)
    ar -- iterable of ints, the color of each sock
    """
    from collections import Counter
    # Counter gives one O(n) pass; the original ar.count(i) per distinct
    # color was O(n * k) and broke on Python 3 map iterators.
    return sum(count // 2 for count in Counter(ar).values())


if __name__ == '__main__':
    # FIX: raw_input() is Python 2 only — it raises NameError under the
    # Python 3 kernel this notebook declares; use input() instead.
    n = int(input())
    ar = list(map(int, input().rstrip().split()))
    result = sockMerchant(n, ar)
    # `with` closes the output file even if the write fails.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        fptr.write(str(result) + '\n')

## Code might not run here because it was written based on the compiler provided by the HackerRank team
Sales_by_Match.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import ascii as a
import random

# Numeric choice -> ASCII-art rendering (art lives in the local ascii module).
art = {0: a.rock, 1: a.paper, 2: a.scissors}

# Numeric choice -> display name; built once instead of inside and outside
# game_rules as in the original.
KEYS = {0: 'Rock', 1: 'Paper', 2: 'Scissors'}

# +
def game_rules(user, computer):
    """Return 'user', 'computer', or 'tie' for one rock-paper-scissors round.

    Both arguments are ints: 0 = Rock, 1 = Paper, 2 = Scissors.
    Replaces the original eight-branch if/elif chain: with this encoding,
    each choice beats exactly the one below it mod 3 (Paper>Rock,
    Scissors>Paper, Rock>Scissors), so (user - computer) % 3 == 1 means
    the user wins.
    """
    if user == computer:
        return 'tie'
    return 'user' if (user - computer) % 3 == 1 else 'computer'


users_choice = int(input("What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\n\n"))

if (users_choice > 2) or (users_choice < 0):
    print('You made the wrong choice, you lose')
else:
    print('You chose \n')
    print(art[users_choice])

    computers_choice = random.randint(0, 2)
    print('\nComputer chose \n')
    print(art[computers_choice])

    user = KEYS[users_choice]
    computer = KEYS[computers_choice]
    final = game_rules(users_choice, computers_choice)
    print(f'user: {user}, computer: {computer}, winner: {final} ')

    if final == 'user':
        print('User wins!')
    elif final == 'computer':
        print('Computer wins!')
    else:
        print('Game is a Tie, nobody wins!')
Day 4/Day 4 Final - Rock Paper Scissors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Impact v. effort scatter plot # # Document information # <table align="left"> # <tr> # <th class="text-align:left">Title</th> # <td class="text-align:left">Impact v. effort scatter plot</td> # </tr> # <tr> # <th class="text-align:left">Last modified</th> # <td class="text-align:left">2020-11-01</td> # </tr> # <tr> # <th class="text-align:left">Author</th> # <td class="text-align:left"><NAME> <<EMAIL>></td> # </tr> # <tr> # <th class="text-align:left">Status</th> # <td class="text-align:left">Active</td> # </tr> # <tr> # <th class="text-align:left">Type</th> # <td class="text-align:left">Jupyter notebook</td> # </tr> # <tr> # <th class="text-align:left">Created</th> # <td class="text-align:left">2017-07-31</td> # </tr> # <tr> # <th class="text-align:left">File name</th> # <td class="text-align:left">impact_effort_scatter_plot.ipynb</td> # </tr> # <tr> # <th class="text-align:left">Other files required</th> # <td class="text-align:left">impact_effort.csv</td> # </tr> # </table> # ## In brevi # # It is a grid or matrix to help in deciding which things to work on. It focuses on the impact of doing something v. the effort required. # ## Data # # Download the data file: # # - [Impact v. Effort](https://drive.google.com/open?id=0BzrdQfHR2I5DMFZVVG1TMnhWOFU) # ## Methodology # # An impact v. effort grid is drawn using a scatter plot with pandas.DataFrame.plot.scatter. Points are annotated with matplotlib.axes.Axes.annotate. The 'grid' is created with matplotlib.axes.Axes.axvline and matplotlib.axes.Axes.avhline. 
import matplotlib.pyplot as plt
import datasense as ds


def main():
    """Read the impact/effort data and draw the annotated scatter 'grid'."""
    # %matplotlib inline
    # %config InlineBackend.figure_format = 'svg'
    colour1 = '#0077bb'
    colour2 = '#33bbee'
    title = 'Impact versus effort'
    subtitle = 'Potential controls'
    # BUG FIX: the original assigned x_axis_label='Impact' and
    # y_axis_label='Effort', but plot_scatter_annotate draws effort on
    # the x axis and impact on the y axis, so the axis labels were
    # swapped on the rendered chart.
    x_axis_label = 'Effort'
    y_axis_label = 'Impact'
    file_name = 'impact_effort.csv'
    figsize = (8, 6)
    impact_effort = ds.read_file(file_name=file_name)
    plot_scatter_annotate(
        data=impact_effort,
        figsize=figsize,
        title=title,
        subtitle=subtitle,
        x_axis_label=x_axis_label,
        y_axis_label=y_axis_label,
        colour1=colour1,
        colour2=colour2
    )


def plot_scatter_annotate(
    data,
    figsize,
    title,
    subtitle,
    x_axis_label,
    y_axis_label,
    colour1,
    colour2,
):
    """Scatter-plot impact (y) v. effort (x) and divide into quadrants.

    Each point is annotated with its process name; a cross at (50, 50)
    creates the four-quadrant decision grid.  The figure is saved as
    SVG, PDF and PNG.

    :param data: DataFrame with 'effort', 'impact' and 'process' columns
    :param figsize: figure size in inches (width, height)
    :param title: chart title
    :param subtitle: chart subtitle
    :param x_axis_label: label for the x axis (effort)
    :param y_axis_label: label for the y axis (impact)
    :param colour1: marker colour
    :param colour2: quadrant-divider colour
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    ax.plot(data['effort'], data['impact'],
            marker='o', color=colour1, linestyle='None')
    ax.set_title(title + '\n' + subtitle, fontweight="bold")
    ax.set_ylabel(y_axis_label, fontweight="bold")
    ax.set_xlabel(x_axis_label, fontweight="bold")
    # Offset each label by (+1, +1) so it does not sit on its marker.
    for row, text in enumerate(data['process']):
        ax.annotate(text, (data['effort'][row] + 1,
                           data['impact'][row] + 1))
    ax.set_ylim(0, 100)
    ax.set_xlim(0, 100)
    # The 50/50 cross creates the impact-v.-effort quadrants.
    ax.axhline(y=50, color=colour2)
    ax.axvline(x=50, color=colour2)
    ds.despine(ax)
    fig.savefig(fname='impact_effort.svg', format='svg')
    fig.savefig(fname='impact_effort.pdf', format='pdf')
    fig.savefig(fname='impact_effort.png', format='png')


if __name__ == '__main__':
    main()

# ## References
#
# American Society for Quality. "Impact Effort Matrix."
# [(http://asq.org/healthcare-use/why-quality/impact-effort.html)](http://asq.org/healthcare-use/why-quality/impact-effort.html).
# Accessed 2017-08-08.
#
# Health Quality Ontario. "Impact/Effort Decision Making Grid."
# [(http://www.hqontario.ca/Portals/0/documents/qi/learningcommunity/pc-impact-effort-decision-making-criteria-chronic-disease-roadmap-resource-en.pdf)](http://www.hqontario.ca/Portals/0/documents/qi/learningcommunity/pc-impact-effort-decision-making-criteria-chronic-disease-roadmap-resource-en.pdf).
# Accessed 2017-08-08.
impact_effort_scatter_plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.13 64-bit (''BCIMusical'': conda)'
#     name: python3
# ---

import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd

# Load the 64-channel, 3-class feature matrix; the last column holds
# the class label.
df = np.genfromtxt(
    'D:/Github/eeg.fem/public/data/Musical/6080072/data_for_train/ALL_3C_64.csv',
    delimiter=',')
x = df[:, :-1]
y = df[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
    x, y, random_state=3, test_size=0.3)


def _rbf_search(param_grid, n_iter, report_columns):
    """Fit a RandomizedSearchCV over a fresh RBF-kernel SVC and print
    the requested columns of the cross-validation results."""
    estimator = SVC(kernel='rbf', max_iter=-1, decision_function_shape='ovr')
    search = RandomizedSearchCV(estimator, param_grid, cv=8,
                                scoring='accuracy', n_iter=n_iter)
    search.fit(X_train, y_train)
    print(pd.DataFrame(search.cv_results_)[report_columns])


# %%time
# Coarse sweep over C and gamma at a fixed tolerance.
_rbf_search({'C': [1e-5, 1, 1e5], 'gamma': [1e-5, 1, 1e5, 'scale'],
             'tol': [1e-3]},
            12, ['param_C', 'param_gamma', 'mean_test_score'])

# %%time
# Sweep the stopping tolerance with gamma fixed to 'scale'.
_rbf_search({'C': [1e-5, 1, 1e5], 'gamma': ['scale'],
             'tol': [1e-5, 1e-3, 1]},
            9, ['param_C', 'param_tol', 'mean_test_score'])

# %%time
# Push C towards much larger values.
_rbf_search({'C': [1, 1e5, 1e10], 'gamma': ['scale'],
             'tol': [1e-5, 1e-3, 1]},
            9, ['param_C', 'param_tol', 'mean_test_score'])
public/ipynb/Model_64_RBF_6080072.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Forecast private-equity return volatility with a GARCH model.

# +
# Initial imports
import numpy as np
import pandas as pd
from pathlib import Path
import arch

# %matplotlib inline
# -

# Other Imports
from collections import Counter
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
from sklearn.preprocessing import StandardScaler, MinMaxScaler

# Load the quarterly private-equity return series, indexed by date.
file_path = Path('PrivateEquityReturnsFinal.csv')
pe_df = pd.read_csv(file_path, parse_dates=True, index_col='Date',
                    infer_datetime_format=True)
pe_df

# Returns are recorded in percent; convert them to decimal fractions.
pe_df['Private Equity Returns'] = pe_df['Private Equity Returns'] / 100
pe_df['Private Equity Returns']

# NaN-free copy kept for ad-hoc checks (the model below uses pe_df).
pe_test = pe_df.dropna()

# Short, attribute-friendly alias column for the return series.
pe_df['pe_returns'] = pe_df['Private Equity Returns']
# BUG FIX: the original referenced an undefined `scaled_returns`
# variable twice here (left over from a removed MinMaxScaler
# experiment); both statements raised NameError and have been dropped.
pe_df.pe_returns.hist()

pe_df['Private Equity Returns'].plot()
pe_df['Cumulative'].plot()

# BUG FIX: the original read `pe_df = df.asfreq('Q-DEC')` where `df`
# was never defined; the quarterly frequency conversion belongs to
# pe_df itself.
pe_df = pe_df.asfreq('Q-DEC')
pd.infer_freq(pe_df.index)

from arch import arch_model

# 'p' and 'q' are akin to the 'p' and 'q' of an ARMA model.
# 'vol="GARCH"' means that we're using a GARCH model.
# The 'mean="Zero"' means that we're estimating a pure GARCH
# (no mean equation).
model = arch_model(pe_df.pe_returns, mean="Zero", vol="Garch", p=3, q=3)

# Fit the GARCH Model
res = model.fit()

import statsmodels.api as sm

# Split the series into cyclical noise and trend for visual inspection.
ts_noise, ts_trend = sm.tsa.filters.hpfilter(pe_df.pe_returns)
ts_noise.plot()
ts_trend.plot()
# BUG FIX: a stray, incomplete `fit.` statement (a SyntaxError cell in
# the original notebook) was removed here.
pe_df.pe_returns.plot()

# Summarize the model results
res.summary()

# Plot the model estimate of annualized volatility
fig = res.plot()

# Construct volatility forecasts for the next 3 periods.
forecast_horizon = 3
# Take the last day of the data we used above.
# If forecast horizon is 3, then the resulting 'h.1', 'h.2', and 'h.3'
# are the forecasts for the following 3 periods.
forecasts = res.forecast(start='2019-06-30', horizon=forecast_horizon)
forecasts.mean

pe_df.tail()

forecasts = res.forecast(start='2019-12-01', horizon=3)
forecasts.variance

# Annualize the forecast.
# NOTE(review): the data is quarterly (Q-DEC), so the conventional
# annualization factor would be 4, not 12; the factor of 12 from the
# original is kept to preserve the reported numbers — confirm intent.
intermediate = np.sqrt(forecasts.variance.dropna() * 12)
intermediate

# The name of the column here is the date of the forecast.
# Each row represents the forecast of volatility for the following days.
# Transposing makes the forecast easier to plot
final = intermediate.dropna().T
final
final.plot()
Python-Scripts/PEReturnsForecast.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # News Headlines Sentiment
#
# Use the news api to pull the latest news articles for bitcoin and
# ethereum and create a DataFrame of sentiment scores for each coin.
#
# Use descriptive statistics to answer the following questions:
# 1. Which coin had the highest mean positive score?
# 2. Which coin had the highest negative score?
# 3. Which coin had the highest positive score?

# +
# Initial imports
import os
import pandas as pd
from dotenv import load_dotenv
from newsapi import NewsApiClient
from nltk.sentiment.vader import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()

# %matplotlib inline
# -

# Read your api key environment variable
load_dotenv()
api_key = os.getenv("NEWS_API_KEY")

# Create a newsapi client
newsapi = NewsApiClient(api_key=api_key)

# Fetch the Bitcoin news articles
btc_headlines = newsapi.get_everything(
    q="Bitcoin", language="en", sort_by="relevancy"
)

# Fetch the Ethereum news articles
eth_headlines = newsapi.get_everything(
    q="Ethereum", language="en", sort_by="relevancy"
)


def _sentiment_frame(headlines):
    """Score every article's content with VADER and return a DataFrame.

    Articles whose 'content' is None are skipped: polarity_scores
    raises AttributeError on them, exactly as in the original per-coin
    loops this helper replaces.
    """
    rows = []
    for article in headlines["articles"]:
        try:
            scores = analyzer.polarity_scores(article["content"])
        except AttributeError:
            continue
        rows.append({
            "Text": article["content"],
            "Compound": scores["compound"],
            "Positive": scores["pos"],
            "Negative": scores["neg"],
            "Neutral": scores["neu"],
        })
    frame = pd.DataFrame(rows)
    # Fixed column order for display.
    return frame[["Compound", "Negative", "Neutral", "Positive", "Text"]]


# Create the Bitcoin sentiment scores DataFrame
btc_df = _sentiment_frame(btc_headlines)
btc_df.head()

# Create the ethereum sentiment scores DataFrame
eth_df = _sentiment_frame(eth_headlines)
eth_df.head()

# Describe the Bitcoin Sentiment
btc_df.describe()

# Describe the Ethereum Sentiment
eth_df.describe()

# ### Questions:
#
# Q: Which coin had the highest mean positive score?
#
# A: Ethereum has the highest mean at 0.060950
#
# Q: Which coin had the highest compound score?
#
# A: Ethereum has the highest compound score at 0.877900
#
# Q. Which coin had the highest positive score?
#
# A: Ethereum had the highest positive score at 0.318000

# ---
# # Tokenizer
#
# In this section, you will use NLTK and Python to tokenize the text
# for each coin. Be sure to:
# 1. Lowercase each word
# 2. Remove Punctuation
# 3.
# Remove Stopwords

from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from string import punctuation
import re
import nltk

nltk.download('stopwords')
nltk.download('reuters')
nltk.download('punkt')
# BUG FIX: WordNetLemmatizer requires the 'wordnet' corpus; without
# this download tokenizer() raises a LookupError on first use.
nltk.download('wordnet')

# Expand the default stopwords list if necessary.
# ' ' and '' also drop the empty tokens left by the punctuation pass.
sw_addons = ['bitcoin', 'ethereum', 'said', 'sent', 'found', 'including',
             'today', 'char', 'reuters', 'seen', 'ha', 'announced', 'week',
             'basically', 'also', 'of', 'the', ' ', '']


# Complete the tokenizer function
def tokenizer(text):
    """Tokenize, lowercase, de-punctuate, lemmatize and de-stopword text.

    :param text: raw article text (str)
    :return: list of cleaned word tokens
    """
    # Create a list of the words
    text = word_tokenize(text)
    # Convert the words to lowercase
    text = [word.lower() for word in text]
    # Remove the punctuation (keep letters only)
    regex = re.compile('[^a-zA-Z]')
    text = [regex.sub('', word) for word in text]
    # Build the stop-word set once per call
    sw = set(stopwords.words('english') + sw_addons)
    # Lemmatize words into root words before filtering, so lemmatized
    # forms of stop words are caught too
    lemmatizer = WordNetLemmatizer()
    text = [lemmatizer.lemmatize(word) for word in text]
    text = [word for word in text if word not in sw]
    return text


# Create a new tokens column for bitcoin
# (direct comprehension replaces the original side-effecting
# `[list.append(...) for ...]` anti-pattern; same resulting column)
btc_df['Tokens'] = [tokenizer(text) for text in btc_df.Text]
btc_df.head()

# Create a new tokens column for ethereum
eth_df['Tokens'] = [tokenizer(text) for text in eth_df.Text]
eth_df.head()

# ---
# # NGrams and Frequency Analysis
#
# In this section you will look at the ngrams and word frequency for
# each coin.
#
# 1. Use NLTK to produce the n-grams for N = 2.
# 2. List the top 10 words for each coin.
from collections import Counter
from nltk import ngrams

# Generate the Bitcoin N-grams where N=2
btc_bigram_counts = [Counter(ngrams(tokens, n=2))
                     for tokens in btc_df.Tokens]
dict(btc_bigram_counts[0].most_common(10))

# Generate the Ethereum N-grams where N=2
eth_bigram_counts = [Counter(ngrams(tokens, n=2))
                     for tokens in eth_df.Tokens]
dict(eth_bigram_counts[0].most_common(10))


def token_count(tokens, N=10):
    """Returns the top N tokens from the frequency count"""
    return Counter(tokens).most_common(N)


# Get the top 10 words for Bitcoin
# (flatten the per-article token lists with a nested comprehension)
all_words_bitcoin = [word for tokens in btc_df.Tokens for word in tokens]
token_count(all_words_bitcoin, 10)

# Get the top 10 words for Ethereum
all_words_ethereum = [word for tokens in eth_df.Tokens for word in tokens]
token_count(all_words_ethereum, 10)

# # Word Clouds
#
# In this section, you will generate word clouds for each coin to
# summarize the news for each coin

from wordcloud import WordCloud
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [20.0, 10.0]

# Generate the Bitcoin word cloud
all_words_bitcoin_str = ' '.join(all_words_bitcoin)
wc = WordCloud(width=1200, height=800,
               max_words=50).generate(all_words_bitcoin_str)
plt.imshow(wc)

# Generate the Ethereum word cloud
all_words_ethereum_str = ' '.join(all_words_ethereum)
wc = WordCloud(width=1200, height=800,
               max_words=50).generate(all_words_ethereum_str)
plt.imshow(wc)

# # Named Entity Recognition
#
# In this section, you will build a named entity recognition model for
# both coins and visualize the tags using SpaCy.

import spacy
from spacy import displacy

# Optional - download a language model for SpaCy
# !python -m spacy download en_core_web_sm

# Load the spaCy model
nlp = spacy.load('en_core_web_sm')

# ## Bitcoin NER

# Concatenate all of the bitcoin text together
btc_corpus = ' '.join(btc_df['Text'].values)

# +
# Run the NER processor on all of the text
btc_ner = nlp(btc_corpus)

# Add a title to the document
btc_ner.user_data['title'] = 'Bitcoin Articles'
# -

# Render the visualization
displacy.render(btc_ner, style='ent')

# List all Entities (set comprehension de-duplicates the entity texts)
btc_entities = {ent.text for ent in btc_ner.ents}
btc_entities

# ---
# ## Ethereum NER

# Concatenate all of the ethereum text together
eth_corpus = ' '.join(eth_df['Text'].values)

# +
# Run the NER processor on all of the text
eth_ner = nlp(eth_corpus)

# Add a title to the document
eth_ner.user_data['title'] = 'Ethereum Articles'
# -

# Render the visualization
displacy.render(eth_ner, style='ent')

# List all Entities
eth_entities = {ent.text for ent in eth_ner.ents}
eth_entities
Starter_Code/crypto_sentiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Description of the project # In this project the task was to label individual pixels of images and output an entire image instead of just a classification. This was conducted with the help of a fully connected neural network or FCN for semantic segmentation. Semantic segmentation indentifies free space on the road at pixel-level granularity. The ultimate goal of this more detailed approach is to alow better decision making processes for driverless cars. In this project only two labels existed in which the pixels were categorized - road and no road. With more training data for various labels such as pedestrians, sidewalks, traffic lights, etc. this approach could be used to analyze pictures in great detail and help the diverless car to better understand complex environments. The following picture illustrates how this could look like. # ![Example for more labels](./images/semseg_example.png) # Due to the high complexity and need for a lot of computational power, the project only focuses on the pixels being part of the road or not. Following a picture from the *kitti road* training set with the corresponding ground truth image and an overlay to see how it comes together. # ![training_sample](./images/training_sample.png) # ![training_sample_gt](./images/training_sample_gt.png) # ![training_sample_overlay](./images/training_sample_overlay.png) # The pictures above show three labels in the ground truth image: # * red - no road # * pink - road the vehicle is driving on (both directions) # * black - other roads # But we are only focusing on two labels for now (road and no road). Therefore, the data was manipulated in the function *get_batches_fn* in *helper.py* before it was read into a numpy array. 
The red background was labeled as "no road" and everything else (the inverted part, that contains the pink and black areas) was labeled as "road" pixels. # # In order to achieve results faster a pretrained and frozen VGG16 model was used as the base for the FCN. The goal of this project was to encode a picture, learn from it on a pixle level, and then decode the information into a new picture again. This was achieved with the help of a pretrained model and a 1x1 convolution as the encoder and transposed convolutions as the decoder to upsample the image data back to the original format. # ![General structure of the FCN](./images/FCN_structure.png) # The goal of this project was to take the frozen VGG16 and add skip connections, 1x1 convolutions, and transposed convolutions. I therefore added skip connections to the VGG layers 3, 4, and 7 and added a 1x1 convolutional layer to each skipped connection. After that I added upsampling (transposed convolutions) to the output of the highest layer (convolution on layer 7) and added it (element-wise) to the output of the convolution on the next highest layer (layer 4). Then I took the result and did the same with the output from layer 3. A complete structure of the FCN can be seen in the picture below, which is a graph I visualized with the help of TensorBoard. The green box shows the added layers to the pretrained VGG16. # ![Graph of overall network structure.](./images/complete_network_graph.png) # The following section shows the program code of *main.py* divided up into different sections with additional explanation. The results are shown and explained below the program code to show the improvements that were made while developing the code. # ### Import all necessary dependencies and checking tensorflow and GPU support # Here I imported all necessary dependencies and made sure that the TensorFlow version was coorect and the GPU was found. 
# +
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
import numpy as np
import random

# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), \
    'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# -

# ### Load pretrained VGG model into TensorFlow
# Next, the function for loading the pretrained VGG16 model was defined.


def load_vgg(sess, vgg_path):
    """
    Load Pretrained VGG Model into TensorFlow.
    :param sess: TensorFlow Session
    :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
    :return: Tuple of Tensors from VGG model
             (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
    """
    # Restore the frozen graph into the session under the 'vgg16' tag.
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
    graph = tf.get_default_graph()
    # Fetch the input, dropout and skip-connection tensors by name, in
    # the exact order callers expect them.
    tensor_names = (
        'image_input:0',
        'keep_prob:0',
        'layer3_out:0',
        'layer4_out:0',
        'layer7_out:0',
    )
    return tuple(graph.get_tensor_by_name(name) for name in tensor_names)

# The function was separately tested with the already implemented test
# function in *project_tests.py*.
tests.test_load_vgg(load_vgg, tf) # ### Create the layers for a fully convolutional network # Following I had to implement the function *layers()* where the 1x1 convolution and the upsampling of the different layers was implemented as described before. I used a kernel regularizer on every layer as recommended in the course. def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes): """ Create the layers for a fully convolutional network. Build skip-layers using the vgg layers. :param vgg_layer3_out: TF Tensor for VGG Layer 3 output :param vgg_layer4_out: TF Tensor for VGG Layer 4 output :param vgg_layer7_out: TF Tensor for VGG Layer 7 output :param num_classes: Number of classes to classify :return: The Tensor for the last layer of output """ # TODO: Implement function # 1x1 convolution of vgg layer 7 layer7a_out = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # upsample layer4a_in1 = tf.layers.conv2d_transpose(layer7a_out, num_classes, 4, strides= (2, 2), padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # make sure the shapes are the same! 
# 1x1 convolution of vgg layer 4 layer4a_in2 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # element-wise addition layer4a_out = tf.add(layer4a_in1, layer4a_in2) # upsample layer3a_in1 = tf.layers.conv2d_transpose(layer4a_out, num_classes, 4, strides= (2, 2), padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # 1x1 convolution of vgg layer 3 layer3a_in2 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # element-wise addition layer3a_out = tf.add(layer3a_in1, layer3a_in2) # upsample nn_last_layer = tf.layers.conv2d_transpose(layer3a_out, num_classes, 16, strides= (8, 8), padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) return nn_last_layer # The function was seperately tested with the already implemented test function in project_tests.py. tests.test_layers(layers) # ### Build the TensorFlow loss and optimizer operations # Then I had to define the function *optimize()*, where the result from *layers()* was taken in order to calculate the loss, the accuracy, and the tyraining operation. I decided to take the cross entropy loss and use an Adam optimizer as this was recommended in the lessons. I also added the loss and accuracy to a summary that I used later for visualizing the performance of the FCN in TensorBoard. def optimize(nn_last_layer, correct_label, learning_rate, num_classes): """ Build the TensorFlow loss and optimizer operations. 
:param nn_last_layer: TF Tensor of the last layer in the neural network :param correct_label: TF Placeholder for the correct label image :param learning_rate: TF Placeholder for the learning rate :param num_classes: Number of classes to classify :return: Tuple of (logits, train_op, cross_entropy_loss) """ # TODO: Implement function # make logits a 2D tensor where each row represents a pixel and each column a class logits = tf.reshape(nn_last_layer, (-1, num_classes)) correct_label = tf.reshape(correct_label, (-1,num_classes)) # define loss function with tf.name_scope('cross_entropy'): cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logits, labels= correct_label)) # calculate accuracy with tf.name_scope('accuracy'): correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(correct_label, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # define training operation with tf.name_scope('train'): optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate) train_op = optimizer.minimize(cross_entropy_loss) tf.summary.scalar("cost", cross_entropy_loss) tf.summary.scalar("accuracy", accuracy) return logits, train_op, cross_entropy_loss # The function was seperately tested with the already implemented test function in project_tests.py. tests.test_optimize(optimize) # + # add preprocessing steps here # - # ### Train neural network and print out the loss during training # After that I implemented the training process where the FCN was trained on the Kitti road dataset in batches. All summaries were merged and written to a file for later evaluation with TensorBoard. def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate): """ Train neural network and print out the loss during training. 
:param sess: TF Session :param epochs: Number of epochs :param batch_size: Batch size :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size) :param train_op: TF Operation to train the neural network :param cross_entropy_loss: TF Tensor for the amount of loss :param input_image: TF Placeholder for input images :param correct_label: TF Placeholder for label images :param keep_prob: TF Placeholder for dropout keep probability :param learning_rate: TF Placeholder for learning rate """ # TODO: Implement function sess.run(tf.global_variables_initializer()) summary_op = tf.summary.merge_all() writer = tf.summary.FileWriter("./writer/TensorBoard", graph=tf.get_default_graph()) print("Training...") step = 0 for i in range(epochs): print("EPOCH {} ...".format(i+1)) for image, label in get_batches_fn(batch_size): _, loss = sess.run([train_op, summary_op], feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.0009}) writer.add_summary(loss, step) #tf.summary.image("input", image, 1) #tf.summary.image("label", label, 1) #print("Batch: " + str(step+1)) step = step + 1 #print("Loss: = {:.3f}".format(loss)) # The test function was commented out due to the fact that the modifications for TensorBoard would cause an error. # + #tests.test_train_nn(train_nn) # - # Because the training data was very limited (only 289 pictures) I decided to augment the pictures randomly to increase the robustness of the algorithm. I added the function *modify_picture()* to *helper.py* for that purpose, that was called in the function *get_batches_fn()* when training the neural network in batches and randomly flipped and rotated the pictures and the corresponding ground truth image before passing the data to the FCN. Following an example picture that was flipped horizontally and then rotated. I limited the rotation to +/- 5 degrees to keep it in a more realistic range. 
# Original picture: # ![Normal picture](./images/training_picture_normal.png) # # Horizontally flipped: # ![Flipped picture](./images/training_picture_flipped.png) # # And rotated: # ![Normal picture](./images/training_picture_rotated.png) # ### Train neural network # Finally the function *run()* was implemented to put everything together - from loading the pretrained VGG16 and the dataset to training the modified FCN. # + def run(): # reset graph tf.reset_default_graph() num_classes = 2 # road and no-road image_shape = (160, 576) data_dir = './data' runs_dir = './runs' tests.test_for_kitti_dataset(data_dir) # Download pretrained vgg model helper.maybe_download_pretrained_vgg(data_dir) # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset. # You'll need a GPU with at least 10 teraFLOPS to train on. # https://www.cityscapes-dataset.com/ epochs = 2 batch_size = 10 training_image_path = os.path.join(data_dir, 'data_road/training/image_2') training_image_no = len(os.listdir(training_image_path)) # tf placeholders learning_rate = tf.placeholder(tf.float32, name='learning_rate') correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label') with tf.Session() as sess: # Path to vgg model vgg_path = os.path.join(data_dir, 'vgg') # Create function to get batches get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape) # load vgg vgg_tag = 'vgg16' tf.saved_model.loader.load(sess, [vgg_tag], vgg_path) # OPTIONAL: Augment Images for better results # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network # TODO: Build NN using load_vgg, layers, and optimize function input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path) nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes) logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, 
learning_rate, num_classes) # TODO: Train NN using the train_nn function train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate) # TODO: Save inference data using helper.save_inference_samples helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image) # OPTIONAL: Apply the trained model to a video print('All finished.') if __name__ == '__main__': run() # - # ### Results # I trained the FCN in steps to see the progress of the learning along the way. I started with only 1 Epoch, then increased it steadily to up to 25 Epochs. The pictures below show the overlayed prediction of the road pixels on the original image after the different epochs. A lot of change happened during the first few epochs and therefore I chose to show epochs 1,2,3,5 and 25. # # After 1 Epoch: # ![Picture 1 Epoch](./images/1_epoch.png) # # After 2 Epochs: # ![Picture 2 Epochs](./images/2_epochs.png) # # After 3 Epochs: # ![Picture 3 Epochs](./images/3_epochs.png) # # After 5 Epochs: # ![Picture 5 Epochs](./images/5_epochs.png) # # After 25 Epochs: # ![Picture 25 Epochs](./images/25_epochs.png) # It can be seen that after the first epoch the prediction was almost a random guess. The red pixels seem to be almost evenly distributed. After the second epoch, most of the pixels were gone and only a few appeared in the center where the algorithm was certain that they belonged to the road. After the third epoch, a lot more pixels show up, but they are not going all the way to sides of the road and also not into the distance. You can clearly see larger steps forming the diagonal limits. It also recognized the pull-off area on the right falsy as a part of the road. After 5 epochs the steps were smoother and it detected into further distance. After 25 epochs it was way smoother, the pull-off area was removed and some parts of the road on the other side were detected. 
# # To show the progress over the 25 epochs, I recorded the accuracy and cost for each batch that was used for training. I trained for 25 Epochs with a batch size of 10. For 289 images that meant 29 batches per epoch or 725 batches in total. The images of the final test with the overlayed detected road pixels were saved as well as the summary of the accuracy and cost for TensorBoard. Following the two graphs: # # Accuracy: # ![TensorBoard Accuracy](./images/tensorboard_accuracy.png) # # Cost: # ![TensorBoard Cost](./images/tensorboard_cost.png) # Following a couple of examples where the algorithm did a good job detecting the road pixels: # # It detected multiple lanes in both directions and was not distracted by the shadows of the trees: # ![Good example 1](./images/good_example_1.png) # # It omitted the pixels of the cars that were on the street with good accuracy (there are still some red pixels in the car): # ![Good example 2](./images/good_example_2.png) # # It recognized that the sidewalk to the right does not belong to the road even though it has a very similar color and shape: # ![Good example 3](./images/good_example_3.png) # And here are some examples were the algorithm failed to detect a significant amount of the correct road pixels: # # It recognized the sidewalk as part of the road and did not recognized the parts of the road in the shadows: # ![Bad example 1](./images/bad_example_1.png) # # Again, the shadows were throwing off the road recognition: # ![Bad example 2](./images/bad_example_2.png) # # The different light conditions of the underpass caused some problems, too: # ![Bad example 3](./images/bad_example_3.png) # ### Further Optimization # The biggest problem seemed to be hard light and shadows on the street caused by buildings, trees, cars etc. This is understandable as only a few pictures in the training data showed shadows and the algorithm was not able to learn properly from that. 
In order to achieve better performance, the solution could be further tweaked in multiple ways. Additional steps could be added to the image preprocessing, such as random brightness adjustments of the whole picture or of parts of the picture (to create artificial shadows). The pictures could also be normalized and converted into a different color space. But even more important is to train on a larger data set, as only 289 pictures are not enough to cover all the different traffic situations.
Semantic_Segmentation_Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf # - # # Intro to Deep Learning # ## Live Demos from tensorflow.python.client import device_lib device_lib.list_local_devices() tf.__version__ first_param = tf.constant(2) second_param = tf.constant(3) # + # Old versions # tf.enable_eager_execution() # - first_param first_param.numpy() tf.multiply(first_param,second_param) # + # def compute(a,b): # return tf.pow(tf.add(tf.multiply(2, a), tf.multiply(3, b)),2) # Operators are overloaded def compute(a,b): return (2 * a + 3 * b) ** 2 # - compute(first_param,second_param).numpy() compute(2, 3) compute(2.0, 3.0) compute(np.array([2]), np.array([3])) compute(np.array([2, 42, 18]), np.array([3, -5, 12])) compute(tf.constant(2), tf.constant(30)) compute(tf.constant([2, 42, 18]), tf.constant([3, -5, 12])) # Now all is accepted as tensorflow tensors @tf.function def compute_dec(a,b): return (2 * a + 3 * b) ** 2 compute_dec(first_param,second_param).numpy() compute_dec(2, 3) compute_dec(2.0, 3.0) compute_dec(np.array([2]), np.array([3])) compute_dec(np.array([2, 42, 18]), np.array([3, -5, 12])) compute_dec(tf.constant(2), tf.constant(30)) a1, b1 = tf.constant([2, 3, 4]), tf.constant([3, 4, 5]) tf.summary.trace_on(graph=True, profiler=True) result = compute_dec(a1, b1) print(result.numpy()) writer = tf.summary.create_file_writer(logdir = "logs") with writer.as_default(): tf.summary.trace_export(name = "compute_func", step = 0, profiler_outdir = "logs")
1_Introduction_To_Deep_Learning_Basic_Models/01_IntroductionToDeepLearning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # !pip3 install rotary-embedding-tensorflow --no-deps # # !pip3 install einops==0.3.0 # + # # !pip3 install tensorflow==1.15.5 # + import tensorflow as tf import numpy as np from rotary_embedding_tensorflow import apply_rotary_emb, RotaryEmbedding from fast_transformer import FastTransformer # tf.compat.v1.enable_eager_execution() # + # pos_emb = RotaryEmbedding(dim = 32) # + # x = tf.placeholder(tf.int32, [None]) # x # + # freqs = pos_emb(x, cache_key = 1024) # freqs # + # q = tf.random.normal((1, 1024, 64)) # k = tf.random.normal((1, 1024, 64)) # + # freqs = freqs[None, ...] # freqs # + # q = apply_rotary_emb(freqs, q) # k = apply_rotary_emb(freqs, k) # + # sess = tf.Session() # + # sess.run(k, feed_dict = {x: np.arange(1024)}) # - x = tf.placeholder(tf.int32, [None, None]) mask = tf.math.not_equal(x, 0) mask = tf.cast(mask, tf.bool) mask # + # mask = tf.ones((1, 4096)) # x = tf.convert_to_tensor(np.random.randint(0, 20000, (1, 4096), dtype = np.int32)) # mask = tf.cast(mask, tf.bool) # - model = FastTransformer( num_tokens = 20000, dim = 512, depth = 2, max_seq_len = 4096, absolute_pos_emb = True, mask = mask ) logits = model(x) logits sess = tf.Session() sess.run(tf.global_variables_initializer()) # + # %%time o = sess.run(logits, feed_dict = {x: np.random.randint(0, 20000, (1, 128), dtype = np.int32)}) # - o[0].shape, o[1].shape # + # sess.run(logits, feed_dict = {x: np.random.randint(0, 20000, (1, 1000), dtype = np.int32)}).shape # + # model.token_emb._trainable_weights[0] # + # tf.matmul(logits[0], model.token_emb._trainable_weights[0],transpose_b=True) # -
pretrained-model/fastformer/test-fastformer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os, glob
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import geopandas as gpd
from rioxarray.merge import merge_arrays
from rasterio.plot import plotting_extent
import rioxarray as rxr
import xarray as xr
import rasterio
from rasterio.plot import show
from osgeo import gdal
import earthpy as et
from shapely.geometry import box
from tifffile import imsave
import earthpy.plot as ep
# -

# # data paths

# +
# raw data paths
nitrogen_path = os.path.join(et.io.HOME, 'desktop', 'earth-analytics', 'python',
                             'watersheds-biodiversity', 'raw-data', 'nitrogen')

nitrogen_to_mosaic = glob.glob(os.path.join(str(nitrogen_path), "*.tif"))

# +
# crop path
clip_path = os.path.join(et.io.HOME, 'desktop', 'earth-analytics', 'python',
                         'watersheds-biodiversity', 'clip-data', 'RR_HU8-polygon.shp')

# read file with geopandas
clip_boundary = gpd.read_file(clip_path)
# -

# # Functions Section

# +
# function that utilizes GDAL library and creates numpy array without losing metadata from geotif file
# https://www.neonscience.org/resources/learning-hub/tutorials/merge-lidar-geotiff-py
# https://www.neonscience.org/resources/learning-hub/tutorials/plot-neon-rgb-py
def raster2array(geotif_file):
    """Convert a GeoTIFF raster to a numpy array while keeping its metadata.

    Parameters
    ----------
    geotif_file : str
        Path to a GeoTIFF file.

    Returns
    -------
    array : numpy.ndarray
        Pixel data (2-D for a single-band raster, 3-D band stack otherwise).
    metadata : dict
        Size, driver, projection, geotransform, pixel size, extent,
        no-data value, and scale factor read from the file.
    """
    metadata = {}
    dataset = gdal.Open(geotif_file)
    metadata['array_rows'] = dataset.RasterYSize
    metadata['array_cols'] = dataset.RasterXSize
    metadata['bands'] = dataset.RasterCount
    metadata['driver'] = dataset.GetDriver().LongName
    metadata['projection'] = dataset.GetProjection()
    metadata['geotransform'] = dataset.GetGeoTransform()

    mapinfo = dataset.GetGeoTransform()
    metadata['pixelWidth'] = mapinfo[1]
    metadata['pixelHeight'] = mapinfo[5]

    # NOTE(review): dividing the raster size by the pixel size only yields the
    # true map extent when the pixel size is 1 map unit; the usual formula is
    # xMin + cols * pixelWidth. Kept as-is (matches the source tutorial) —
    # confirm against a known raster before relying on 'extent' downstream.
    xMin = mapinfo[0]
    xMax = mapinfo[0] + dataset.RasterXSize/mapinfo[1]
    yMin = mapinfo[3] + dataset.RasterYSize/mapinfo[5]
    yMax = mapinfo[3]
    metadata['extent'] = (xMin, xMax, yMin, yMax)

    raster = dataset.GetRasterBand(1)
    array_shape = raster.ReadAsArray(0, 0, metadata['array_cols'], metadata['array_rows']).astype(float).shape
    metadata['noDataValue'] = raster.GetNoDataValue()
    metadata['scaleFactor'] = raster.GetScale()

    # pre-allocate stackedArray matrix
    # NOTE(review): a 'uint8' array cannot represent the np.nan written for
    # no-data pixels in the multi-band branch below — verify the band dtypes
    # if multi-band rasters are ever fed through this function.
    array = np.zeros((array_shape[0], array_shape[1], dataset.RasterCount), 'uint8')

    if metadata['bands'] == 1:
        raster = dataset.GetRasterBand(1)
        metadata['noDataValue'] = raster.GetNoDataValue()
        metadata['scaleFactor'] = raster.GetScale()
        # single band: replace the overallocated stack with a 2-D float array
        array = dataset.GetRasterBand(1).ReadAsArray(0, 0, metadata['array_cols'], metadata['array_rows']).astype(float)
        array[np.where(array == metadata['noDataValue'])] = np.nan
        array = array / metadata['scaleFactor']
    elif metadata['bands'] > 1:
        for i in range(1, dataset.RasterCount + 1):
            band = dataset.GetRasterBand(i).ReadAsArray(0, 0, metadata['array_cols'], metadata['array_rows']).astype(float)
            band[np.where(band == metadata['noDataValue'])] = np.nan
            #band = band/metadata['scaleFactor']
            array[..., i-1] = band

    return array, metadata


# +
def plot_array(array, spatial_extent, colorlimit, ax=None, title='', cmap_title='', colormap=''):
    """Plot a raster array on the current pyplot axes with a labelled colorbar.

    BUG FIX: the original default was ``ax=plt.gca()``, which is evaluated once
    at import time and captures whatever axes happened to be current then.
    The body re-fetches the current axes anyway, so ``None`` is a safe,
    backward-compatible default.
    """
    plot = plt.imshow(array, extent=spatial_extent, clim=colorlimit)
    cbar = plt.colorbar(plot, aspect=40)
    plt.set_cmap(colormap)
    cbar.set_label(cmap_title, rotation=90, labelpad=20)
    plt.title(title)
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False, style='plain')
    rotatexlabels = plt.setp(ax.get_xticklabels(), rotation=90)
# -

# # clipping section

# +
# create a folder for the clipped rasters
clipped_nitrogen_path = os.path.join(et.io.HOME, 'desktop', 'earth-analytics', 'python',
                                     'watersheds-biodiversity', 'clipped-rasters', 'nitrogen')

nitrogen_clipped_to_mosaic = glob.glob(os.path.join(str(clipped_nitrogen_path), "*.tif"))

# +
# This command only needs to run once in order to write raster files so that's why it is commented out
# produces a clipped raster file for each original raster file and saves it according to path
#for image, i in zip(nitrogen_to_mosaic, range(0,10)):
#    nitrogen_clip_gdal = gdal.Warp(clipped_nitrogen_path+'/nitrogen-rr-hu8-clip{}.tif'.format(i),
#                                   image,
#                                   cutlineDSName=clip_path,
#                                   cropToCutline=True,
#                                   dstNodata = 0)
# -

# # mosaic section

# +
# reduce overlapping data by reading in rasters in a new order
order3 = [5, 6, 3, 4, 8, 9, 0, 7, 2, 1]

# list of paths to clipped rasters
nitrogen_clip_order3 = [nitrogen_clipped_to_mosaic[i] for i in order3]

# convert list of paths to a string because gdal merge takes in a string argument
string_nitrogen_clipped_to_mosaic = " ".join(nitrogen_clip_order3)

# +
# path to mosaiced file to be written in the following command
mosaic_gdal_order3 = os.path.join(et.io.HOME, 'desktop', 'earth-analytics', 'python',
                                  'watersheds-biodiversity', 'mosaics', 'nitrogen',
                                  'nitrogen-mosaic-order3.tif')

# +
# # This command only needs to run once in order to write the raster file so that's why it is commented out
# merge clipped rasters and output a mosaiced raster
#command_nitrogen = "gdal_merge.py -o /Users/merielle/desktop/earth-analytics/python/watersheds-biodiversity/mosaics/nitrogen/nitrogen-mosaic-order3.tif -of gtiff " + string_nitrogen_clipped_to_mosaic
#print(os.popen(command_nitrogen).read())
# -

# # rasters to arrays

# +
# convert rasters to arrays
nitrogen_order3, nitrogen_order3_metadata = raster2array(
    mosaic_gdal_order3)
# -

# BUG FIX: the original referenced the undefined name `nitrogen_order`
# (NameError — the variable is `nitrogen_order3`) and wrote `plt.show`
# without calling it, so the figure was never displayed.
plt.hist(nitrogen_order3[:, :, 0], bins=50)
plt.show()

# +
# create a mask for values between flight paths

# generate a list of all unique elements in the array
unique_elements, counts_elements = np.unique(nitrogen_order3, return_counts=True)

# pixel counts are suspiciously high for one value
np.where(counts_elements == 4635083)

# convert array elements to floats in order to set outlier value to nan
nitrogen_order3_floats = nitrogen_order3.astype(float)

# set value 113 to nan
nitrogen_order3_floats[np.where(nitrogen_order3_floats == 113)] = np.nan
# -

# # map plotting section

# +
fig, ax = plt.subplots(figsize=(10, 10))

plot_array(nitrogen_order3_floats[:, :, 0],
           nitrogen_order3_metadata['extent'],
           (0, 113),
           title='Nitrogen ordered',
           cmap_title='Nitrogen Level',
           colormap='jet')
# -
gdal-nitrogen.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JayellWolfe/DS-Unit-2-Kaggle-Challenge/blob/master/Unit2_W2_D1_assignment_kaggle_challenge_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="7IXUfiQ2UKj6" colab_type="text" # Lambda School Data Science, Unit 2: Predictive Modeling # # # Kaggle Challenge, Module 1 # # ## Assignment # - [x] Do train/validate/test split with the Tanzania Waterpumps data. # - [x] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features. (For example, [what other columns have zeros and shouldn't?](https://github.com/Quartz/bad-data-guide#zeros-replace-missing-values) What other columns are duplicates, or nearly duplicates? Can you extract the year from date_recorded? Can you engineer new features, such as the number of years from waterpump construction to waterpump inspection?) # - [x] Select features. Use a scikit-learn pipeline to encode categoricals, impute missing values, and fit a decision tree classifier. # - [x] Get your validation accuracy score. # - [x] Get and plot your feature importances. # - [] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) # - [ ] Commit your notebook to your fork of the GitHub repo. 
# # # ## Stretch Goals # # ### Reading # # - A Visual Introduction to Machine Learning # - [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) # - [Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/) # - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2) # - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/) # - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html) # - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) — _Don’t worry about understanding the code, just get introduced to the concepts. This 10 minute video has excellent diagrams and explanations._ # - [Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/) # # # ### Doing # - [ ] Add your own stretch goal(s) ! # - [ ] Try other [scikit-learn imputers](https://scikit-learn.org/stable/modules/impute.html). # - [ ] Try other [scikit-learn scalers](https://scikit-learn.org/stable/modules/preprocessing.html). # - [ ] Make exploratory visualizations and share on Slack. # # # #### Exploratory visualizations # # Visualize the relationships between feature(s) and target. I recommend you do this with your training set, after splitting your data. # # For this problem, you may want to create a new column to represent the target as a number, 0 or 1. For example: # # ```python # train['functional'] = (train['status_group']=='functional').astype(int) # ``` # # # # You can try [Seaborn "Categorical estimate" plots](https://seaborn.pydata.org/tutorial/categorical.html) for features with reasonably few unique values. 
# (With too many unique values, the plot is unreadable.)
#
# - Categorical features. (If there are too many unique values, you can replace less frequent values with "OTHER.")
# - Numeric features. (If there are too many unique values, you can [bin with pandas cut / qcut functions](https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=qcut#discretization-and-quantiling).)
#
# You can try [Seaborn linear model plots](https://seaborn.pydata.org/tutorial/regression.html) with numeric features. For this problem, you may want to use the parameter `logistic=True`
#
# You do _not_ need to use Seaborn, but it's nice because it includes confidence intervals to visualize uncertainty.
#
# #### High-cardinality categoricals
#
# This code from a previous assignment demonstrates how to replace less frequent values with 'OTHER'
#
# ```python
# # Reduce cardinality for NEIGHBORHOOD feature ...
#
# # Get a list of the top 10 neighborhoods
# top10 = train['NEIGHBORHOOD'].value_counts()[:10].index
#
# # At locations where the neighborhood is NOT in the top 10,
# # replace the neighborhood with 'OTHER'
# train.loc[~train['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# test.loc[~test['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# ```

# + id="o9eSnDYhUGD7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="622b8b51-7f7e-44a2-8338-cb82e7181422"
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
    # Install required python packages:
    # category_encoders, version >= 2.0
    # pandas-profiling, version >= 2.0
    # plotly, version >= 4.0
    # !pip install --upgrade category_encoders pandas-profiling plotly

    # Pull files from Github repo
    os.chdir('/content')
    # !git init .
    # !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
    # !git pull origin master

    # Change into directory for module
    os.chdir('module1')

# + id="QJBD4ruICm1m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b21b2be3-58b0-4fa0-b21a-d96c1feedb2c"
import pandas as pd
from sklearn.model_selection import train_test_split

# Merge features with labels for the training set; test set has no labels.
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
                 pd.read_csv('../data/tanzania/train_labels.csv'))
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')

train.shape, test.shape

# + id="2Amxyx3xphbb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2b49ab3f-cfc9-4364-e667-05a4ab1edd08"
#train/val split, stratified on the target so class proportions are preserved
train, val = train_test_split(train, train_size = 0.8, test_size = 0.2,
                              stratify = train['status_group'], random_state = 42)
train.shape, val.shape, test.shape

# + id="TdZUxA98Fddb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="46b80adb-0c3d-4ea1-f70a-1ef114332e26"
train.head()

# + id="vVxPvMGWOoPZ" colab_type="code" colab={}
import numpy as np

def wrangle(X):
    """Clean a Tanzania waterpumps dataframe.

    Applied identically to train, validation, and test so all three get the
    same features. Replaces sentinel zeros with NaN (imputed later in the
    pipeline) and drops redundant / high-cardinality columns.

    Parameters
    ----------
    X : pandas.DataFrame
        Raw features (optionally including the target column, which is
        left untouched).

    Returns
    -------
    pandas.DataFrame
        A cleaned copy; the input is not modified.
    """
    # Prevent copy warning
    X = X.copy()

    # zero out lat values- they are near zero anyway
    X['latitude'] = X['latitude'].replace(-2e-08, 0)

    # replace the zeros with nulls, and impute missing values later.
    zero_cols = ['longitude', 'latitude']
    for col in zero_cols:
        X[col] = X[col].replace(0, np.nan)

    # focus clean - drop high-cardinality features
    dropfeats = ['funder', 'installer', 'scheme_name', 'subvillage', 'ward', 'wpt_name']

    # drop - unneeded / duplicate columns
    drop_cols = ['id', 'recorded_by', 'quality_group', 'quantity_group', 'date_recorded',
                 'extraction_type_class', 'extraction_type_group', 'num_private',
                 'payment_type', 'source_class', 'source_type',
                 'waterpoint_type_group'] + dropfeats
    X = X.drop(columns=drop_cols, axis='columns')

    # combine 'other' and 'unknown' for source feature
    # BUG FIX: the original passed the misspelling 'unkown', which matches no
    # category in the dataset (the value is spelled 'unknown'), so the
    # replacement was a silent no-op.
    X['source'] = X['source'].replace(to_replace='unknown', value='other')

    # convert sentinel zeros to nan for imputing
    convert_zeros = ['amount_tsh', 'construction_year', 'district_code', 'population']
    for feature in convert_zeros:
        X[feature] = X[feature].replace(to_replace=0, value=np.nan)

    # impute zero and sub-zero gps_heights (prob nan anyway)
    X.loc[X['gps_height'] <= 0, 'gps_height'] = np.nan

    # return the wrangled dataframe
    return X

train = wrangle(train)
val = wrangle(val)
test = wrangle(test)

# + id="Hms_E3HdIhM6" colab_type="code" colab={}
# arrange in X/y
target = 'status_group'
X_train = train.drop(target, axis='columns')
y_train = train[target]
X_val = val.drop(target, axis='columns')
y_val = val[target]
X_test = test

# + id="rNP-zi3zIrg4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="81adce21-2675-41ba-b968-f60cc36823c6"
train.columns

# + id="_SHnCK37Kic-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="cf02749c-3645-40c4-c7e8-076da96b76f9"
# %%time
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline

encode_features = ['basin', 'region', 'region_code', 'district_code', 'lga',
                   'scheme_management', 'extraction_type', 'management',
                   'management_group', 'payment', 'water_quality', 'quantity',
                   'source', 'waterpoint_type']

# Ordinal-encode categoricals, mean-impute the NaNs created in wrangle(),
# then fit a depth-limited decision tree.
pipeline = make_pipeline(
    ce.OrdinalEncoder(cols=encode_features, handle_missing='return_nan',
                      handle_unknown='return_nan'),
    SimpleImputer(),
    DecisionTreeClassifier(min_samples_leaf=250, max_depth=10, random_state=42)
)

# Fit on train
pipeline.fit(X_train, y_train)

# Score on val
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))

# Predict on test
y_pred = pipeline.predict(X_test)

# + id="39eqj5MGKmFg" colab_type="code" colab={}
X_train_copy = X_train.copy()
X_val_copy = X_val.copy()

# + id="Wp0emjAQK7h9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 435} outputId="85bffc29-0360-412f-be5c-433bc59be399"
X_train_copy.isnull().sum()

# + id="PmGlZm2BVYE3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 435} outputId="750b163e-b6e9-4ed7-abbd-eec2076b1e28"
X_train.isnull().sum()

# + id="eSs67EhDLBjw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="6b5c5893-1b6e-47c9-d553-664e5022ecbc"
pipeline.steps[1]
Unit2_W2_D1_assignment_kaggle_challenge_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Problem Statement Focus:** To check for NLP and Python capability to extract data elements from the financial statement documents. # # **Data to be provided –** # • Training & Development Set - 500 Text files (OCR output of images) # # Sample Data to be Extracted # Highlighted text on the image is required to be extracted # # # ![image.png](attachment:image.png) # # # # **Fields to be extracted are:** # - Current Assets : 51,700 # - Creditors: amounts falling due within one year : 55,505 # - Net current liabilities : (3,805) # - Total assets less current liabilities : (3,805) # - Accruals and deferred income : (500) # - Net liabilities : (4,305) # - Capital and reserves : (4,305) # # # ![image.png](attachment:image.png) # # # **Fields to be extracted are:** # - Fixed asset :  15837 # - Intangible assets : 9423 # - Tangible assets : 6414 # - Current Assets : 236,183 # - Debtors: amounts falling due within one year : 113,831 # - Cash at bank and in hand : 122,352 # - Creditors: amounts falling due within one year : (110,924) # - Net current assets : 125,259 # - Net assets : 141,096 # - Capital and reserves: - # - Called up share capital : 70,587 # - Profit and loss accounts 70,509 # - Shareholders funds : 141,096 # # # Please note, data only for year 2019 is to be extracted. 
# # Import important library
import os
#from pathlib import Path
import json
import numpy as np
import re
import pandas as pd

# Directory with the OCR'd financial-statement text files.
fileContainingDir = './HCL ML Challenge Dataset/'
fileNames = os.listdir(fileContainingDir)
#print(fileNames)


# +
# function to get unique values
def unique(list1):
    # Returns the unique elements of list1 (currently unused below).
    x = np.array(list1)
    return np.unique(x)

def extractValue(value):
    """Normalize an OCR'd monetary value string.

    Removes thousands separators between digits ("54,659" -> "54659") and
    rewrites accounting-style parentheses as a leading minus
    ("(54659)" -> "-54659").
    """
    #value = re.sub(',', '', value)
    value = re.sub(r'(\d),(\d)', r'\1\2', value)
    value = re.sub(r'\((\d*)\)', r'-\1', value)
    return value
# -

extractValue(' Net current assets (liabilities) (54,659) (56,001) ')

# NOTE(review): this overrides the directory listing above and processes a
# single file only — presumably left over from debugging.
fileNames = ['X8Y4VQ55.txt']

outputData = []
for fileName in fileNames:
    # Using readlines()
    firstFilePath = fileContainingDir + fileName
    # NOTE(review): file1 is never closed; a with-statement would be safer.
    file1 = open(firstFilePath, 'r')
    Lines = file1.readlines()
    requiredLines = []
    flag = True
    resultDic = {}
    resultDicWithFileName = {}
    #print(Lines)

    # Collect the balance-sheet lines: skip the first line, stop at any of the
    # marker phrases that signal the notes section, squeeze repeated spaces,
    # and HTML-encode the pound sign. (The misspellings 'arc'/'unkown'-style
    # variants in the markers match OCR output and must not be "fixed".)
    for line in Lines:
        if(flag):
            flag = False
            continue
        line = line.strip()
        lineLower = line.lower()
        if('notes to the accounts' in lineLower or 'for the year ending ' in lineLower or 'for the period ending ' in lineLower or 'the company is a private' in lineLower or 'the director is satisfied' in lineLower or 'the director arc satisfied' in lineLower or 'notes to the financial statements' in lineLower or 'principal activity' in lineLower or 'the director are satisfied' in lineLower or 'notes' == lineLower or 'statements' == lineLower or lineLower.startswith('number')):
            break
        else:
            if(line != ''):
                line = re.sub('£', '&#163;', line)
                requiredLines.append(re.sub(' +', ' ', line))

    # Nothing usable in this file: emit an empty record and move on.
    if(len(requiredLines)==0):
        resultDicWithFileName['Filename'] = fileName[0:-4]
        resultDicWithFileName['Extracted Values'] = resultDic
        outputData.append(resultDicWithFileName)
        continue

    # Strip leading header lines ("as at ...", optional registered number).
    if(requiredLines[0].lower().startswith('as at') or requiredLines[0].lower().startswith('31/03/2019') or requiredLines[0].lower().startswith('30 june 2019')):
        if(requiredLines[1].lower().startswith('registered number')):
            requiredLines = requiredLines[2:]
        else:
            requiredLines = requiredLines[1:]
    #print(requiredLines)
    if(requiredLines[0].lower().startswith('31 august 2019') or requiredLines[0].lower().startswith('31 may 2019')):
        requiredLines[0] = '2019'
    # Drop a currency/notes column-header line if present.
    if(requiredLines[1].startswith('&#163;') or requiredLines[1].startswith('Notes') or requiredLines[1].startswith('€')):
        del requiredLines[1]

    # The first remaining line lists the year column headers; the number of
    # tokens tells us which value column belongs to 2019.
    years2019 = requiredLines[0].split(' ')
    del requiredLines[0]

    if(len(years2019)==1):
        # Single year column: take the only value when that year is 2019.
        if(years2019[0]=='2019' or '2019' in years2019[0]):
            for requiredLine in requiredLines:
                # Lines with no letters are value-only rows; give them a
                # placeholder label so the split below still yields a key.
                if re.search('[a-zA-Z]', requiredLine) is None:
                    requiredLine = 'nan' +' '+requiredLine
                values = requiredLine.split(' ')
                if(len(values)==2):
                    resultDic[values[0]] = extractValue(values[1])
                else:
                    resultDic[values[0]] = 'nan'
        else:
            for requiredLine in requiredLines:
                values = requiredLine.split(' ')
                resultDic[values[0]] = 'nan'
    elif(len(years2019)==2):
        # Two year columns: pick whichever column header contains 2019.
        index = 0
        if(years2019[0] == '2019' or '2019' in years2019[0]):
            index = 1
        elif(years2019[1] == '2019' or '2019' in years2019[1]):
            index = 2
        else:
            index = 0
        if(index != 0):
            for requiredLine in requiredLines:
                if re.search('[a-zA-Z]', requiredLine) is None:
                    requiredLine = 'nan' +' '+requiredLine
                values = requiredLine.split(' ')
                if(len(values)==3):
                    resultDic[values[0]] = extractValue(values[index])
                elif(len(values)==2):
                    # Only one value present: assume it is the 2019 figure.
                    resultDic[values[0]] = extractValue(values[1])
                else:
                    resultDic[values[0]] = 'nan'
        else:
            for requiredLine in requiredLines:
                values = requiredLine.split(' ')
                resultDic[values[0]] = 'nan'
    else:
        # Three or more header tokens (e.g. a 'Notes' column precedes the
        # years): search tokens 1 and 2 for 2019 — TODO confirm this layout
        # assumption against more sample files.
        index = 0
        if(years2019[1] == '2019' or '2019' in years2019[1]):
            index = 1
        elif(years2019[2] == '2019' or '2019' in years2019[2]):
            index = 2
        else:
            index = 0
        if(index != 0):
            for requiredLine in requiredLines:
                if re.search('[a-zA-Z]', requiredLine) is None:
                    requiredLine = 'nan' +' '+requiredLine
                values = requiredLine.split(' ')
                if(len(values)==4):
                    resultDic[values[0]] = extractValue(values[index+1])
                elif(len(values)==3):
                    resultDic[values[0]] = extractValue(values[index])
                elif(len(values)==2):
                    resultDic[values[0]] = extractValue(values[1])
                else:
                    resultDic[values[0]] = 'nan'
        else:
            for requiredLine in requiredLines:
                values = requiredLine.split(' ')
                resultDic[values[0]] = 'nan'

    resultDicWithFileName['Filename'] = fileName[0:-4]
    resultDicWithFileName['Extracted Values'] = json.dumps(resultDic, ensure_ascii=False)
    print(fileName +"\n"+json.dumps(resultDic, ensure_ascii=False))
    outputData.append(resultDicWithFileName)

#print(outputData)

df = pd.DataFrame(outputData)
df.head(80)

df[['Filename','Extracted Values']].to_csv('submissions.csv', index=False)
.ipynb_checkpoints/hcl-ml-challenge-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import panel as pn pn.extension() # The ``Card`` layout allows arranging multiple Panel objects in a collapsible, vertical container with a header bar. It has a list-like API with methods for interactively updating and modifying the layout, including ``append``, ``extend``, ``clear``, ``insert``, ``pop``, ``remove`` and ``__setitem__`` (for replacing the card's contents). # # `Card` components are very helpful for laying out components in a grid in a complex dashboard to make clear visual separations between different sections. The ability to collapse them can also be very useful to save space on a page with a lot of components. # # #### Parameters: # # * **``collapsed``** (bool): Whether the `Card` is collapsed. # * **``collapsible``** (bool): Whether the `Card` can be expanded and collapsed. # * **``header``** (Viewable): A Panel component to display in the header bar of the Card. # * **``objects``** (list): The list of objects to display in the Card, which will be formatted like a `Column`. Should not generally be modified directly except when replaced in its entirety. # * **``title``** (str): The title to display in the header bar if no explicit `header` is defined. # # Styling related parameters: # # * **``active_header_background``** (str): The background color of the header when the `Card` is expanded. # * **``background``** (str): The background color of the content area. # * **``header_color``** (str): The color of the header text. # * **``button_css_classes``** (list[str]): The list of CSS classes to apply to the collapse button. # * **``css_classes``** (list[str]): The list of CSS classes to apply to the main area. # * **``header_background``** (str): The background color of the header. 
# * **``header_css_classes``** (list[str]): The list of CSS classes to apply to the header. # # For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). # # ___ # A ``Card`` layout can either be instantiated as empty and populated after the fact, or by using a list of objects provided on instantiation as positional arguments. If the objects are not already Panel components they will each be converted to one using the ``pn.panel`` conversion method. Unlike the `Row` and `Column` layouts, a `Card` has an explicit `title` that will be shown in the header bar alongside the collapse button (if the `collapsible` parameter is enabled): # + w1 = pn.widgets.TextInput(name='Text:') w2 = pn.widgets.FloatSlider(name='Slider') card = pn.Card(w1, w2, title='Card', background='WhiteSmoke') card # - # The contents of the ``Card.objects`` list should never be modified individually, because Panel cannot detect when items in that list have changed internally, and will thus fail to update any already-rendered views of those objects. Instead, use the provided methods for adding and removing items from the list. The only change that is safe to make directly to ``Card.objects`` is to replace the list of ``objects`` entirely. As a simple example of using the methods, we might add an additional widget to the card using the append method: w3 = pn.widgets.Select(options=['A', 'B', 'C']) card.append(w3) # On a live server or in a notebook the `card` displayed after the previous code cell (above) will dynamically expand in size to accomodate all three widgets and the title. To see the effect in a statically rendered page, we will display the column a second time: card # Whether the `Card` is collapsed or not can be controlled from Python and Javascript: print(card.collapsed) card.collapsed = True # ### Header # # Instead of using a `title`, a `Card` may also be given an explicit `header` that can contain any component, e.g. 
in this case the Panel logo: # + logo = 'https://panel.holoviz.org/_static/logo_horizontal.png' red = pn.Spacer(background='red', height=50) green = pn.Spacer(background='green', height=50) blue = pn.Spacer(background='blue', height=50) pn.Card( red, green, blue, header_background='#2f2f2f', header_color='white', header=pn.panel(logo, height=40), width=300, ) # - # ### Layout # # In general a ``Card`` does not have to be given an explicit ``width``, ``height``, or ``sizing_mode``, allowing it to adapt to the size of its contents. However in certain cases it can be useful to declare a fixed-size layout, which its responsively sized contents will then fill, making it possible to achieve equal spacing between multiple objects: # + red = pn.Spacer(background='red', sizing_mode='stretch_both') green = pn.Spacer(background='green', sizing_mode='stretch_both') blue = pn.Spacer(background='blue', sizing_mode='stretch_both') pn.Card(red, green, blue, height=300, width=200, title='Fixed size') # - # When no fixed size is specified the column will expand to accommodate the sizing behavior of its contents: # + from bokeh.plotting import figure p1 = figure(height=250, sizing_mode='stretch_width', margin=5) p2 = figure(height=250, sizing_mode='stretch_width', margin=5) p1.line([1, 2, 3], [1, 2, 3]) p2.circle([1, 2, 3], [1, 2, 3]) pn.Card(p1, pn.layout.Divider(), p2, title="Responsive", sizing_mode='stretch_width')
examples/reference/layouts/Card.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2.7.17 64-bit # language: python # name: python271764bit2489414508ad42f5986518ae5ecfaf3d # --- ## TEST SPLINE FITTING # Given a set of 2D points, fits spline then plots import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import interp1d import dubins # + # Vehicle Constants (same as common.py) x_lim = [-10, 10] y_lim = [-10, 10] theta_lim = [-np.pi, np.pi] num_waypoints = 10 waypoint_tol = 0.2 wheelbase = 0.335 max_acc = 3 max_steering_angle = 0.5 # - # Generate random waypoints (same as waypoint_publisher.py) waypoints = np.random.rand(num_waypoints, 3) waypoints[:, 0] = (x_lim[1] - x_lim[0]) * waypoints[:, 0] + x_lim[0] waypoints[:, 1] = (y_lim[1] - y_lim[0]) * waypoints[:, 1] + y_lim[0] waypoints[:, 2] = (theta_lim[1] - theta_lim[0]) * waypoints[:, 2] + theta_lim[0] # + turning_radius = 1 step_size = 0.5 path_list = np.empty((0,3)) for i in range(waypoints.shape[0] - 1): q0 = (waypoints[i,0], waypoints[i,1], waypoints[i,2]) q1 = (waypoints[i+1,0], waypoints[i+1,1], waypoints[i+1,2]) path = dubins.shortest_path(q0, q1, turning_radius) configurations, _ = path.sample_many(step_size) configurations = np.array(configurations) # print(configurations.shape) path_list = np.vstack((path_list, configurations)) print(path_list.shape) # - path_list # path_list = np.array(path_list) # path_list.shape plt.figure(figsize=(10,10)) plt.plot(path_list[:,0], path_list[:,1], label="Planned Path") # Plot waypoints and associated index plt.plot(waypoints[:,0], waypoints[:,1],'.', label="Waypoints") for i in range(num_waypoints): plt.text(waypoints[i,0]+0.05, waypoints[i,1], str(i)) plt.arrow(waypoints[i,0], waypoints[i,1], 0.2 * np.cos(waypoints[i,2]), 0.2* np.sin(waypoints[i,2]), head_width=0.2) plt.legend() plt.show()
ocrl/playground/test_dubins.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # Import import numpy import torch import sys sys.path.append('..') from harmonica.window import Window import matplotlib.pyplot as plt torch.set_printoptions(precision=12, sci_mode=True) print(torch.cuda.is_available()) print(torch.get_num_threads()) # + # Set data type and device dtype = torch.float64 device = 'cpu' # + [markdown] heading_collapsed=true tags=[] # # Example-01: Window initialization # + # Window is used to fix signal length in computations # Each signal in TbT should have fixed length defined by selected window # Window is also used to set default data type and device for all further computations # Initialize window instance (default parameters) w = Window(length=1024, name=None, order=None, dtype=dtype, device=device) # String representation print(w) # Window length print(w.length) # Window name # Should be one of the defined static methods ('cosine_window' or 'kaiser_window') or None for empty initialization print(w.name) # Window order # Float, not used if name is None print(w.order) # Window data (all ones by default if window name is None) print(w.window.detach().cpu().numpy()) # Window data type print(w.dtype) # Window device print(w.device) # Window total (sum of window data) print(w.total) # Set custom window (copy tensor with matching length to data) w.window.copy_(torch.zeros_like(w.window)) print(w.window.detach().cpu().numpy()) # Set custom window (use set_data method to invoke defined static method for given window name) w.set_data(name='cosine_window', order=0.0) print(w.window.detach().cpu().numpy()) # Set custom window (use set_data method with given input tensor of matching length) w.set_data(data=torch.zeros_like(w.window)) print(w.window.detach().cpu().numpy()) # Set custom window (invoke
set_data) w(name='cosine_window', order=0.0) print(w.window.detach().cpu().numpy()) # Set custom window (invoke set_data) w(data=torch.zeros_like(w.window)) print(w.window.detach().cpu().numpy()) # Print window attributes for key, value in w.__dict__.items(): print(f'{key}: {value}') # + [markdown] heading_collapsed=true tags=[] # # Example-02: Free CUDA memory # + # Delete data attribute or whole instance to free CUDA memory # Call torch.cuda.empty_cache() # Set window instance w = Window(1024, 'cosine_window', 1.0, dtype=dtype, device=device) print(w) # Print allocated memory info print(torch.cuda.memory_allocated()) # Delete window instance del w # Print allocated memory info print(torch.cuda.memory_allocated()) # + [markdown] heading_collapsed=true tags=[] # # Example-03: Cosine window generation (normalized sum) # + # Generate and plot cosine window (window total is one) # Set plot size plt.figure(figsize=(15, 3)) # Set time t = torch.linspace(0, 1024, 1024, dtype=dtype, device=device) # Set cosine window of order = 1.0 w = Window(1024, 'cosine_window', 1.0, dtype=dtype, device=device) print(w) plt.scatter(t.cpu().numpy(), w.window.cpu().numpy(), color='red', marker='o') # Reset cosine window w(name='cosine_window', order=2.0) print(w) plt.scatter(t.cpu().numpy(), w.window.cpu().numpy(), c='blue', marker='o') # Show plots plt.show() # + [markdown] heading_collapsed=true tags=[] # # Example-04: Kaiser window (normalized max) # + # Generate and plot kaiser window (max value is one) # Set plot size plt.figure(figsize=(15, 3)) # Set time t = torch.linspace(0, 1024, 1024, dtype=dtype, device=device) # Set kaiser window with order = 5.0 w = Window(1024, 'kaiser_window', 5.0, dtype=dtype, device=device) print(w) plt.scatter(t.cpu().numpy(), w.window.cpu().numpy(), color='red', marker='o') # Reset kaiser window w(name='kaiser_window', order=10.0) print(w) plt.scatter(t.cpu().numpy(), w.window.cpu().numpy(), color='blue', marker='o') # Show plots plt.show() # +
[markdown] heading_collapsed=true tags=[] # # Example-05: Generate from staticmethods # + # Empty initialization w = Window(1024, dtype=dtype, device=device) print(w) print(w.window.cpu().numpy()) # Generate cosine window data with staticmethod print(Window.cosine_window(1024, 0.0).cpu().numpy()) # Generate kaiser window data with staticmethod print(Window.kaiser_window(1024, 0.0).cpu().numpy()) # + [markdown] heading_collapsed=true tags=[] # # Example-06: Generate from classmethods # + # Initialize using cosine window w = Window.from_cosine(1024, 0.0, dtype=dtype, device=device) print(w) print(w.name) print(w.order) print(w.window.detach().cpu().numpy()) # Initialize using kaiser window w = Window.from_kaiser(1024, 0.0, dtype=dtype, device=device) print(w) print(w.name) print(w.order) print(w.window.cpu().numpy()) # + [markdown] heading_collapsed=true tags=[] # # Example-07: Weighted mean # + # Compare estimation of signal mean without and with window weighting # Set test signal with 1.E-1 mean value length = 4096 signal = 1.E-1 + torch.sin(2.0*numpy.pi*0.12345*torch.linspace(0, length, length, dtype=dtype, device=device)) # Compute mean print(torch.mean(signal)) # Compute weighted mean using cosine window w = Window.from_cosine(length, 5.0, dtype=dtype, device=device) print(w) print(torch.dot(signal, w.window)/w.total) # Compute weighted mean using kaiser window w = Window.from_kaiser(length, 10.0, dtype=dtype, device=device) print(w) print(torch.dot(signal, w.window)/w.total) # + [markdown] heading_collapsed=true tags=[] # # Example-08: Apply window # + # Apply window to signal and plot result in time domain # Set test signal length = 4096 signal = torch.sin(2.0*numpy.pi*0.12345*torch.linspace(0, length, length, dtype=dtype, device=device)) # Set time t = torch.linspace(0, length, length, dtype=dtype, device=device) # Set cosine window w = Window.from_cosine(length, 1.0, dtype=dtype, device=device) print(w) # Plot plt.figure(figsize=(15, 3)) plt.ylim(-2.5, 2.5) 
plt.scatter(t.detach().cpu(), signal.detach().cpu(), color='red', marker='o') plt.scatter(t.detach().cpu(), (signal*w.window).detach().cpu(), color='blue', marker='o') plt.show() # Set kaiser window w = Window.from_kaiser(length, 5.0, dtype=dtype, device=device) print(w) # Plot plt.figure(figsize=(15, 3)) plt.ylim(-2.5, 2.5) plt.scatter(t.cpu().numpy(), signal.cpu().numpy(), color='red', marker='o') plt.scatter(t.cpu().numpy(), (signal*w.window).cpu().numpy(), color='blue', marker='o') plt.show() # + [markdown] heading_collapsed=true tags=[] # # Example-09: Apply window (spectrum) # + # Apply window to signal and plot DFT amplitude spectrum # Set test signal length = 4096 # Set test signal (two components) signal = torch.zeros(length, dtype=dtype, device=device) signal += 1.0*torch.sin(2.0*numpy.pi*0.120*torch.linspace(0, length, length, dtype=dtype, device=device)) signal += 0.1*torch.sin(2.0*numpy.pi*0.125*torch.linspace(0, length, length, dtype=dtype, device=device)) # Set frequency grid grid = torch.fft.rfftfreq(length, dtype=dtype, device=device) # Set window w = Window.from_cosine(length, 2.0, dtype=dtype, device=device) print(w) # Plot plt.figure(figsize=(15, 3)) plt.xlim(0.10, 0.15) plt.ylim(-4, +4) plt.scatter(grid.cpu().numpy(), torch.log10(torch.abs(torch.fft.rfft(signal))).cpu().numpy(), color='red', marker='o') plt.scatter(grid.cpu().numpy(), torch.log10(torch.abs(torch.fft.rfft(signal*w.window))).cpu().numpy(), color='blue', marker='o'), plt.show() # Compute parameters with known frequency using convolution without and with window # Set window w = Window.from_cosine(length, 4.0, dtype=dtype, device=device) print(w) # Set time t = torch.linspace(0, length, length, dtype=dtype, device=device) # Compute s-amplitude without and with window print(2.0*torch.dot(signal, torch.sin(2.0*numpy.pi*0.120*t))/len(w)) print(2.0*torch.dot(signal*w.window, torch.sin(2.0*numpy.pi*0.120*t))/w.total) # -
notebook/nb_window.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Stability # language: python # name: stability # --- from protera_stability import EmbeddingExtractor1D # + from pathlib import Path import pandas as pd data_path = Path("../data") # !ls $data_path # - # > * Los `.csv` deben contener las columnas `labels` y `sequences`. # # > * `data_path` debe ser Path y un directorio que contenga tus `.csv` pd.read_csv(data_path / "stability_train.csv") # + args_dict = { "model_name": "esm1b_t33_650M_UR50S", "base_path": data_path, "gpu": True, } emb_stabilty = EmbeddingExtractor1D(**args_dict) # - embeddings = emb_stabilty.generate_embeddings( ["stability_train.csv"], # esta en data_path path_out="stability_embeddings_train", # guardarlo en data_path/stability_train.pkl bs=256, ) dset = emb_stabilty.generate_datasets( ["stability.csv"], h5_stem="stability", # data_path / "stability".h5 bs=256, embedding_file="stability_embeddings", # cargar los embeddings desde donde se espera que esten target_name="stability_scores" ) dset["labels"][:10], dset["sequences"][:10] dset.close() # hay que cerrarlo pq es un h5py
project/parallel_synthesis/examples/embedding-extractor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python basics (tutorial) # This notebook will provide a short tutorial on basic Python concepts and syntax. We will go over the Python package system and imports, data types, functions, conditionals, and loops. # # ## Contents # 1. The Python ecosystem, imports, and whitespace # 2. Basic data types # 3. Functions and methods # 4. Conditionals # 5. Loops # # **Estimated time to complete**: 2-4 hours # ## The Python ecosystem # Python is a multipurpose programming language, meaning it can be used for almost anything. While the "standard library" of Python (i.e., the functionality that is shipped with any Python installation) contains the bare minimum for any programming language, Python's versatility comes from a massive community of developers that created many different "third-party" packages for almost any purpose you can think of (e.g., visualization, machine learning, game programming, etc.). # # For example: # * the [scipy](https://www.scipy.org/) package provides functionality for scientific computing (e.g. statistics, signal processing); # * the [numpy](http://www.numpy.org/) package provides data structures and functionality for (very fast) numeric computing (e.g. multidimensional numeric array computations, some linear algebra); # * the [pandas](https://pandas.pydata.org/) package provides functionality to work with "tables"; # * the [matplotlib](http://matplotlib.org/) package provides plotting functions; # * and various specialized neuroimaging packages provide functionality to work and analyze (f)MRI (e.g. [nibabel](http://nipy.org/nibabel/) and [nipype](http://nipy.org/nipype)) and MEG/EEG (e.g. [MNE](http://www.martinos.org/mne/stable/index.html)). # # Basically, there are packages for everything you can think of!
In this course, we will mostly use basic Python in combination with the scientific computing packages (*pandas*, *numpy*, and *matplotlib*). # ## Import statements # As explained above, Python ships with some default functionality. This means that it's already available upon starting a notebook (or any other Python environment) and doesn't need to be imported. An example is the function `len()`. my_list = [1, 2, 3] print(len(my_list)) # However, non-built-in packages &mdash; such as `numpy` &mdash; need to be explicitly imported to access their functionality. After importing, their functions are accessible as: `{package}.{function}`. # # For example: # + import numpy # Now you can access the numpy function `add()` as numpy.add() print(numpy.add(5, 3)) # - # However, writing `numpy` in front of every function you access from it becomes annoying very quickly. Therefore, we usually abbreviate the package name by two or three characters, which can be achieved through: # # ``` # import {package} as {abbreviation} # ``` # # For example, people usually abbreviate the numpy import as follows: # + import numpy as np # Now you can access numpy functions such as 'add()' as: print(np.add(5, 3)) # - # Often (but not always), Python packages consist of subpackages. These subpackages are often used to group similar functions/code together. For example, the `numpy` package has the subpackage (also called "module") `random`, which contains functions that allow you to generate random data from different distributions. # # In the previous cell, we imported the *entire* `numpy` package by running `import numpy as np`. However, sometimes you might only need a particular subpackage ("module"), like the subpackage `random` from `numpy`. To import *only* the `random` subpackage, you can do the following: import numpy.random # Now, you can use functions from the `numpy.random` class! Technically, even subpackages may contain their own subpackages. 
Importing subpackages from subpackages works the same way: # # ```python # import mainpackage.somesubpackage.anothersubpackage.yetanothersubpackage # ``` # Throughout the tutorials, you'll see different packages (e.g. `nibabel` and `scipy`) being imported using abbreviations (e.g., `import nibabel as nib`). # # Also, you don't need to import an *entire* package, but you can also import a specific function or class. This is done as follows: # # ```python # from {package} import {function1}, {function2}, {etc} # ``` # # An example: # + from numpy import add, subtract # Now I can simply call add() and subtract() print(add(5, 3)) # - # Note that some packages have a hierarchical structure with subpackages (also called modules). For example, scipy has a subpackage `ndimage` (with functions for n-dimensional arrays). To import *only* this subpackage, do the following: # + from scipy import ndimage # Now you can call functions from the ndimage subpackage, # e.g. gaussian_filter print(ndimage.gaussian_filter([10, 5, 4], 2)) # - # Note that you can mix and match all of these operations to customize the import to your own liking (see cell below for such a fancy import). In this course, we'll usually just import entire packages (e.g. `import numpy as np`) or specific functions/subpackages (e.g. `from scipy import stats`). # # Another thing you can do with imports is renaming the function/module you're importing. This follows the following syntax: # # ```python # from {package} import {some function/module} as {new name} # ``` # # See the cell below for an example: # + # a fancy import from scipy.stats import binom_test as omg_binomial_testing_so_cool print(omg_binomial_testing_so_cool(0.5, 10)) # - # <div class="alert alert-warning"> # <b>ToDo</b>: Import the function <tt>randn</tt> (which generates random numbers from a standard normal distribution) from the numpy subpackage <tt>random</tt> and rename it <tt>random_normal_generator</tt>. 
# </div> # + nbgrader={"grade": false, "grade_id": "cell-87af56a740628389", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ ### BEGIN SOLUTION from numpy.random import randn as random_normal_generator ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-f7c6fda0670851b5", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo. """ try: assert('random_normal_generator' in dir()) except AssertionError as e: print("I couldn't find the function 'random_normal_generator'; did you spell it correctly?") raise(e) else: print("Great! Well done!") # + nbgrader={"grade": true, "grade_id": "cell-2ec6efb629701eda", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Another test for the above ToDo. """ try: assert(random_normal_generator.__name__ == 'randn') except AssertionError as e: print("Your 'random_normal_generator' function does not point to the 'randn' numpy.random subpackage!") raise(e) else: print("Correct!") # - # <div class="alert alert-success"> # <b>Wildcard imports.</b> Python allows also "wildcard" imports, like: <tt>from numpy import *</tt>, which says: import <em>everything</em> from the <tt>numpy</tt> package. This is often discouraged, because the beauty of having explicit imports (unlike MATLAB) is that you known where your functions come from (e.g., is it a base Python function or a numpy function?). # </div> # ## Whitespace for indentation # In most programming languages, code blocks (e.g., if-else blocks, or for-loops) are delineated by dedicated symbols (often curly brackets, `{}`). For example, an if-else block in R may be written like this: # # ```R # if (x > 0) { # y = x + 5 # } else { # y = x - 5 # } # ``` # # While in languages like R and MATLAB whitespace/indentation is used for readability, it is not necessary! 
The above if-else statement in R can also be written as: # # ```R # if (x > 0) { y = x + 5 } else { y = x - 5 } # ``` # # However, in Python, whitespace and indentation are important! In Python, indentation &mdash; instead of curly braces &mdash; delineates code blocks, and if code is incorrectly indented, Python will give an error! Indentation can be done using spaces or tabs; both are fine ([but programmers often have a very strong opinion on using one or the other](https://thenewstack.io/spaces-vs-tabs-a-20-year-debate-and-now-this-what-the-hell-is-wrong-with-go)), as long as it is consistent. Most style guides recommend either four spaces or a single tab. # # Importantly, if a code block (e.g., an if-else statement) in Python is indented incorrectly, Python will throw an `IndentationError`, as shown below: # + tags=["raises-exception"] x = 0 if x < 0: y = x + 5 else: y = x - 5 # - # <div class="alert alert-warning"> # <b>ToDo</b>: Fix the code block above by indenting it correctly. (No test cell.) # </div> # ## Basic data types # Base (i.e., built-in) Python has mostly the same data types as you might know from MATLAB or R, such as numbers (integers/floats), strings, and lists (cells in MATLAB; lists in R). Also, Python has two data types that might be unknown to MATLAB/R users, such as "dictionaries" and "tuples", which are explained later. # ### Numbers # Numbers are represented either as integers ("whole" numbers) or floats (numbers with decimals, basically). # + x = 3 print('x is of type', type(x)) # use type(variable) to find out of what data-type something is! y = 3.1415 print('y is of type', type(y)) # - # Let's try to apply arithmetic to x as defined above with some basic operations: print(x + 1) # Addition; print(x - 1) # Subtraction; print(x / 2) # Division; print(x * 2) # Multiplication; print(x ** 2) # Exponentiation; # The above commands apply operations to x, but do not *change* x itself.
To permanently change x, you have to store the results of the operation (e.g. `x + 1`) into a variable (e.g. `x2 = x + 1`), as shown in the cell below: # + x = 3 x_new = x + 2 # If you simply want to update an existing variable, you can do this in two ways: x = x + 1 # ... or: x += 1 print(x) x *= 2 # This is the same as: x = x * 2 print(x) # - # <div class="alert alert-warning"> # <b>ToDo</b>: In the cell below, make a new variable, <tt>y</tt>, which should contain <tt>x</tt> minus 5, of which the result is subsequently raised to the 4th power. # </div> # + nbgrader={"grade": false, "grade_id": "cell-b1f3eff2d86022f9", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ x = 8 ### BEGIN SOLUTION y = (x - 5) ** 4 ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-86a18c4fb5804a78", "locked": true, "points": 0, "schema_version": 3, "solution": false} ''' Tests the above ToDo.''' # Check if there exists a variable 'y' try: assert('y' in dir()) except AssertionError as e: print("The variable 'y' doesn't seem to exist! Did you name it correctly?") raise(e) else: print("Well done! 1 out of tests 2 passed") # Check if it has the correct number try: assert(y == 81) except AssertionError as e: print("The variable y does not seem to equal x minus 5, raised to the power 4.") raise(e) else: print("Well done! 2 out of tests 2 passed") # - # <div class='alert alert-success'> # <b>Tip!</b> # When you're working on your ToDo, it's often informative to print (intermediate) output/variables of your solution (in a new code cell for example). This might give insight into (potentially) failing tests! # </div> # ### Booleans # Python implements all of the usual operators for comparisons. Similar to what you might know from other languages, `==` tests equivalence, `!=` for not equivalent, and `<` and `>` for larger/smaller than. 
The result of those comparisons are a datatype called a "boolean", representing truth values. Booleans can take on the value `True` or `False`. # # Check out some examples below: # + a = 3 b = 5 is_a_equal_to_b = (a == b) print(is_a_equal_to_b) print('the ouput is of type', type(is_a_equal_to_b)) # - # Some more examples of Boolean operators: bool_1 = 3 > 5 # False, because 3 is not greater than 5 bool_2 = (5 == 5) # True, because, well, 5 is 5 print(bool_1) print(bool_2) # However, for some Boolean logic, python doesn't use operators (such as && for "and" and | for "or") but uses special (regular English) *words*: # note: bool_1 is False, bool_2 is True print(bool_1 and bool_2) # Logical AND, both have to be True print(bool_1 or bool_2) # Logical OR, either one of them has to be True print(not bool_1) # Logical NOT, the inverse of bool_1 print(bool_1 != bool_2) # Logical XOR, yields True when bool_1 and bool_2 are not equal # (Although, technically, the keyword `and` and `&`, and `or` and `|` can be used interchangeably.) # <div class='alert alert-warning'> # <b>ToDo</b>: Mess around with booleans in the cell below. Try some more complex things, like: <tt>not ((3 > 5) and not (5 > 2))</tt>. Do you understand why the result is the way it is? Try to follow the logic in the sequence of statements (no test cell). # </div> # Do your ToDo here: # ### Strings # Strings in Python are largely the same as in other languages. # + h = 'hello' # String literals can use single quotes w = "world" # or double quotes; it does not matter. 
print(h) print(len(h)) # see how many characters in this string # - # In addition to designating strings with single or double quotes, you can also create (multi-line) strings using triple quotes (again, either using single or double quotes): # + long_string = """This quite a long string, which may span multiple lines but otherwise works just like a normal string, which can be printed and stuff and now I'm out of inspiration to keep writing so I'll stop here.""" print(long_string) # - # A very nice feature of Python strings is that they are easy to concatenate: just use '+'! hw = h + ', ' + w + '!' # String concatenation print(hw) # You can also create and combine strings with what is called 'string formatting'. This is accomplished by inserting a placeholder in a string, that you can fill with variables. Confusingly, there are many approaches to string formatting. Arguably, the most used one is the "old style" string formatting, of which an example is given below: # + # Here, we have a string with a placeholder '%s' (the 's' refers to 'string' placeholder) my_string = 'My favorite programming language is: %s' print('Before formatting:') print(my_string) # Now, to 'fill' the placeholder, do the following: my_fav_language = 'Python' my_string = 'My favorite programming language is: %s' % my_fav_language print('\nAfter formatting:') print(my_string) # - # You can also use specific placeholders for different data types: # + week_no = 1 # integer string1 = 'This is week %i of the Python mini course' % week_no # the %i expects an integer! print(string1) score = 99.50 # float string2 = 'I will get a %f on my quiz this week!' % score print(string2) # You can also combine different types in a string: string3 = 'In week %i of the course, %s will get a %f (or higher) on my quiz!' 
% (week_no, "I", 95.00) print(string3) # - # <div class='alert alert-warning'> # <b>ToDo</b>: Modify the variable <tt>to_print</tt> defined below, such that printing it (i.e., running <tt>print(to_print)</tt>) will print: "I love Python 4ever". So you'll have to "fill" the "%" placeholders using string formatting. That is, you have to put a <tt>%</tt> sign after the <tt>to_print</tt> variable and "fill" it with the correct inputs. # </div> # + nbgrader={"grade": false, "grade_id": "cell-b08c34ab23707a68", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ to_print = "I love %s %iever" ### BEGIN SOLUTION to_print = "I love %s %iever" % ('Python', 4) print(to_print) ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-eb152527160ad636", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo. """ try: assert(to_print == 'I love Python 4ever') except AssertionError as e: print("This string is not formatted correctly!") raise(e) else: print("Well done!") # - # As mentioned, there are several different approaches to string formatting. We particularly like the "string interpolation" (F-string) approach, in which you can directly "insert" variables into strings: # # ```python # year = 2020 # string = f"At the time of writing, we're living in the year {year}" # ``` # # To use this method, you have to preprend your string with the letter <tt>f</tt>! # <div class='alert alert-warning'> # <b>ToDo</b>: Using the variables below and the F-string method, create the following string (and print it): "There are five notebooks this week; OMG, way too many." # </div> # + nbgrader={"grade": false, "grade_id": "cell-53cb84860dbe2355", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"] """ Implement the ToDo here. 
(no test cell)""" n_notebooks = 5 oh_my_god = "OMG" many_or_few = "many" ### BEGIN SOLUTION print(f"There are {n_notebooks} notebooks this week; {oh_my_god}, way too {many_or_few}.") ### END SOLUTION # - # ### Lists # A list is the Python equivalent of an "array", but can be resized and can contain elements of different types. It is similar to a list in R and a cell in MATLAB. Note that indices in python start with 0! This means that the 3rd element of the list below is accessed through `[2]`. # # Let's check out some lists and how to index them! # + # Note that list may contain numbers ... list1 = [3, 1, 2] # ... or strings list2 = ['hello', 'world'] # ... or, actually, anything at all! List lists themselves list3 = ['hello', [3, 1, 2], 'world', 5.3, -999] # - # Whatever the contents of a list, they are indexed the same way: using square brackets with an integer, e.g. `[0]`: print('The first element of list1 is: %i' % list1[0]) print('The second element of list2 is: %s' % list2[1]) print('The last element of list3 is: %i' % list3[-1]) print('The second-to-last element of list3 is: %f' % list3[-2]) # Note that you can also use negative indices! Negative indices start indexing from the end of the list, so `[-1]` indexes the last element, `[-2]` indexes the second-to-last element, etc. # # We cannot only 'extract' element from lists using indexing, but we can also replace them! This works as follows: # + some_list = [1, 2, 3, ['A', 'B', 'C']] # Let's set the first element of some_list to 100: some_list[0] = 100 print(some_list) # Note that indexing a list within a list is done with sequential square brackets, # so if we want to index the element 'A' in some_list, we do: some_list[-1][0] = 'ANOTHER STRING' print(some_list) # - # <div class='alert alert-warning'> # <b>ToDo</b>: In the cell below, replace the element 'TO_REPLACE_1' with 'REPLACED' and the element 'TO_REPLACE_2' also with 'REPLACED' in the list <tt>todo_list</tt>. 
# </div> # + nbgrader={"grade": false, "grade_id": "cell-fcaca4cbf6963307", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement your ToDo here. """ todo_list = [1, 'B', 'TO_REPLACE_1', [5, 3, 1038, 'C'], [1, 3, 5, [9, 3, 1, 'TO_REPLACE_2']]] ### BEGIN SOLUTION todo_list[2] = 'REPLACED' todo_list[-1][-1][-1] = 'REPLACED' ### END SOLUTION # - # *Note*: the code-cell below as usual tests your ToDo, but we haven't written out the tests in the cell itself. Instead, we wrote the tests in a separate Python module, which we import here. (We do this, because writing out the tests here would give you the answer rightaway!) # + nbgrader={"grade": true, "grade_id": "cell-6cc25540df324d7d", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo with a custom function. """ from tests import test_list_indexing test_list_indexing(todo_list) # - # In addition to accessing list elements one at a time, Python provides concise syntax to access specific parts of a list (sublists); this is known as *slicing*. # # Let's look at some slice operations: nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] print(nums) # Our original list # Get a slice form index 2 to 4 (exclusive); prints "[2, 3]" print(nums[2:4]) # Get a slice from index 2 to the end; prints "[2, 3, 4, 5, 6, 7, 8, 9]" print(nums[2:]) # Get a slice from the start to index 3 (exclusive); prints "[0, 1, 2]" print(nums[:3]) # Slice indices can be negative; prints ["0, 1, 2, 3, 4, 5, 6, 7, 8]", # so everything up to (but not including) the last element print(nums[:-1]) # Importantly, slices are "end exclusive", which means that if you slice from `0` to `5`, you get the indices `0, 1, 2, 3, 4`! While this may seem confusing at first, you'll get used to it. To appreciate the use of "end exclusive indexing", do the next ToDo. 
# <div class='alert alert-warning'> # <b>ToDo</b>: Slice the list below, <tt>to_be_split</tt>, into two separate lists: one called <tt>first_half</tt> with the first half of the list values, and one called <tt>second_half</tt>, with the second half of the list values. # </div> # + nbgrader={"grade": false, "grade_id": "cell-f5cbfc80f5ab048f", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement your ToDo here. """ to_be_split = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] ### BEGIN SOLUTION mid = len(to_be_split) // 2 first_half = to_be_split[:mid] second_half = to_be_split[mid:] # or just: # to_be_split[:8] # to_be_split[8:] ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-c58380642ef1d9f6", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo. """ assert(first_half == [10, 11, 12, 13, 14, 15, 16, 17]) assert(second_half == [18, 19, 20, 21, 22, 23, 24, 25]) print("Well done!") # - # Apart from the syntax `[from:to]`, you can also specify a "stride" (sort of step-size) of your slice using the syntax `[from:to:stride]`: # + # Return values in steps of 2 print(nums[::2]) # Returns values in steps of 3, but starting from the second element print(nums[1::3]) # - # With 'normal' indexing of lists, you can only index a subsequently set/replace one element at the time. With slices, however, you can set multiple elements at the same time: nums[2:4] = [100, 200] # Assign a new sublist to a slice print(nums) # Prints "[0, 1, 100, 200, 4, 5, 6, 7, 8, 9]" # **Pro-tip**: instead of creating sequential lists like this: # # ```python # num = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # ``` # # ... 
we can also create a list using the syntax: # # ```python # num = list(range(starting_point, exclusive_end_point)) # ``` # # For example, to create a list from 5 to 15, you can do the following: # # ```python # num = list(range(5, 16)) # ``` # # We'll use this construction (`list(range(x, y))`, or without the `list`) quite often in this course! # <div class='alert alert-warning'> # <b>ToDo</b>: From the list (<tt>my_list</tt>) below, extract the numbers 2, 3, 4, 5, and 6 using a slice and store it in a new variable named <tt>my_new_list</tt>! # </div> # + nbgrader={"grade": false, "grade_id": "cell-daeb7ca767d21986", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ### BEGIN SOLUTION my_new_list = my_list[1:6] ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-326bc8ff7960e645", "locked": true, "points": 0, "schema_version": 3, "solution": false} from tests import test_slicing_1 available_vars = dir() if 'my_new_list' not in available_vars: raise ValueError("You did not store the results in a new variable called 'my_new_list'!") test_slicing_1(my_new_list) # - # <div class='alert alert-warning'> # <b>ToDo</b>: From the list below (<tt>my_list_2</tt>), extract the values <tt>[5, 7, 9, 11]</tt> using a slice (i.e., in a single operation!) and store it in a new variable named <tt>my_new_list_2</tt>. # </div> # + nbgrader={"grade": false, "grade_id": "cell-cfe59b0d7902a00a", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. 
""" my_list_2 = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21] ### BEGIN SOLUTION my_new_list_2 = my_list_2[2:6] ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-fac7511a9ff08758", "locked": true, "points": 0, "schema_version": 3, "solution": false} ''' Tests the above ToDo ''' from tests import test_slicing_2 available_vars = dir() if 'my_new_list_2' not in available_vars: raise ValueError("You didn't define the variable 'my_new_list_2'!") test_slicing_2(my_new_list_2) # - # *Note*: you can index *strings* the same way as you index lists! Try to see it this way: a string is, quite literally, a *string* ("list") of characters. So, to get the first letter of some string s (e.g, 'this is a string'), you simply write: `s[0]`. To get first 5 characters, you write `s[:5]`, etc etc. Remember this! # Check out string slicing/indexing below s = 'python programming' print(s[0:9:2]) # ### Dictionaries # Dictionaries might be new for those who are used to MATLAB or R. Basically, a dictionary is an *unordered* list in which list entries have a name (which is also referred to as a "key"). To get a value from a dictionary, you have to use the "key" as index instead of using an integer (although, strictly speaking, keys can also be integers ... but that's not important for now). # # Let's check out such a dictionary and how to index it. We build a dictionary using the following syntax: # # ```python # {some_key: value, another_key: another_value, etc: etc} # ``` # # The keys can be anything! Strings, integers, lists ... doesn't matter! Mostly, though, strings are used as keys. So, let's look at an example: my_dict = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data # To index a dictionary, we'll use square brackets `[]` again, just like with lists. But now, we can index using the key! indexed_value = my_dict['cat'] print(indexed_value) # Adding new key-value pairs to dictionaries is easy! 
Just index it with a new key, and assign the value to it: my_dict['fish'] = 'wet' # Set an entry in a dictionary print(my_dict['fish']) # Prints "wet" # Like a list, an entry in a dictionary can be of any data type: my_dict['rabbit'] = ['omg', 'so', 'cute'] print(my_dict['rabbit']) # If you try to 'index' a dictionary with a key that doesn't exist, it raises a "KeyError", which means you're trying to index something that doesn't exist: # + tags=["raises-exception"] print(my_dict['monkey']) # - # <div class='alert alert-warning'> # <b>ToDo</b>: In the code cell below, add a new key to the dictionary <tt>my_dict</tt> named <tt>"rat"</tt> and with the value <tt>"nasty"</tt>. # </div> # + nbgrader={"grade": false, "grade_id": "cell-121f9f1b0a5263be", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ ### BEGIN SOLUTION my_dict['rat'] = 'nasty' ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-0db58b3f530e7219", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo. """ try: assert('rat' in my_dict) except AssertionError as e: print("There exists no key 'rat' in my_dict!") raise(e) try: assert(my_dict['rat'] == 'nasty') except AssertionError as e: print("The value of key 'rat' is '%s' and NOT 'nasty'" % my_dict['rat']) print('Well done!') # - # <div class='alert alert-warning'> # <b>ToDo</b>: Values of dictionaries can be any type of object, even dictionaries themselves! So, add a new key to the dictionary <tt>my_dict</tt> named <tt>"another_dict"</tt> with the value of <em>another</em> dictionary with the keys <tt>"a"</tt> and <tt>"b"</tt> and the corresponding values <tt>1</tt> and <tt>2</tt>. Also, try to figure out how to index the value <tt>1</tt> from the 'nested' dictionary (this is not graded, but try it nonetheless!). 
# </div> # + nbgrader={"grade": false, "grade_id": "cell-8c0d0b8e890bfb78", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ ### BEGIN SOLUTION my_dict['another_dict'] = {'a': 1, 'b': 2} ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-83ed96f5b23bddbe", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo. """ try: assert('another_dict' in my_dict) except AssertionError as e: print("There exists no key 'another_dict' in my_dict!") raise(e) try: assert(my_dict['another_dict']['a'] == 1) assert(my_dict['another_dict']['b'] == 2) except AssertionError as e: print("The key 'another_dictionary' should contain a dictionary with keys 'a' and 'b', corresponding" "to values 1 and 2, respectively.") raise(e) print('Well done!') # - # <div class='alert alert-success'> # <b>Tip</b>: Instead of using curly braces, you can also initialize dictionaries explicitly using the <tt>dict</tt> class:<br> # <tt>some_dict = dict(key1='value1', key2='value2')</tt><br> # Note, though, you have to use string-like keys in this approach (unlike the curly brace approach). # </div> # ### Tuples # Tuples are very much like lists, but the main difference is that they are immutable. In other words, after creating them, they cannot be modified (their values cannot be replaced/altered): # A list can be modified ... my_list = [1, 2, 3] my_list[0] = 0 print(my_list) # + tags=["raises-exception"] # ... but a tuple cannot (and will give an error!) my_tuple = (1, 2, 3) print(my_tuple[0]) # you can print parts of tuple ... my_tuple[0] = 0 # but you cannot modify it! # - # You probably won't use tuples a lot, but you might come across them when using and writing functions, as multiple outputs from functions are stored in tuples (see below; but more about that in the next section!). 
# + def my_epic_function(integer): """ Returns the input and the input times 2.""" return integer, integer * 2 outputs = my_epic_function(10) print(outputs) print(type(outputs)) # - # ## Functions and methods # # ### Functions # Like any programming language, Python allows you to create your own custom functions. Writing your own functions is useful when, for example, you want to do a particular computation/task many times. Then, if you need to change the computation or task, you only have to change the function instead of manually editing your code every time you do the computation/task. If you're familiar with other programming languages, you'll see that the syntax of Python functions is quite similar to what you're used to. # # A function definition in Python starts with the keyword `def`, followed by the function name and round brackets with the arguments to the function, and finally the contents of the function, like so (note the indentation with four spaces/tab!): # # ```python # def my_awesome_function(arg_1, arg_2): # print("Argument 1: %s" % arg_1) # print("Argument 2: %s" % arg_2) # ``` # # This dummy-function above prints some stuff, but does not *return* something. Similar to R (but unlike MATLAB), you have to explicitly state what you want to *return* from the function by the `return` statement. # # So, suppose you have a function that adds 2 to any number. Let's define it as follows (you have to run the cell to let Python know you've defined this function): def add_2_to_a_number(some_number): new_number = some_number + 2 # Here, we omitted a `return` statement to return the value of `new_number`. This is a problem, because in Python (like most languages) you cannot 'peek' inside the function after using it! You can only access whatever is returned. # # So, in the function defined above, we cannot access the value of `new_number`, because we didn't return it (so it will give an error): # + tags=["raises-exception"] # This will give an error! 
add_2_to_a_number(5) print(new_number) # - # So, to access the *value* of `new_number` (that is, *not* `new_number` itself, but its associated value), we need to return it: def add_2_to_a_number_fixed(some_number): new_number = some_number + 2 return new_number value_contained_in_new_number = add_2_to_a_number_fixed(5) print("Results of function 'add_2_to_a_number' with argument '5': %i" % value_contained_in_new_number) # Importantly, you can name the variable to which you assign the return value *anyway you like*. This doesn't have to be `new_number`! Like above, we named it `value_contained_in_new_number`, but it really doesn't matter. # <div class='alert alert-warning'> # <b>ToDo</b>: In the code cell below, we've started writing a function named <tt>extract_last_element</tt> that takes one input-argument &mdash; a list &mdash; and returns the last element of the list. Some parts of the function are missing, though, which you need to write! When you're done, run the test-cell below it to check if it's right! # </div> # + nbgrader={"grade": false, "grade_id": "cell-844cc0bf2fe08f21", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ def extract_last_element(input_list): ### BEGIN SOLUTION last_element = input_list[-1] return last_element ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-cce2e863aae1f1d8", "locked": true, "points": 0, "schema_version": 3, "solution": false} try: assert(extract_last_element(input_list=[0, 1, 2]) == 2) except AssertionError as e: print("Your function fails for input [0, 1, 2]") raise(e) try: assert(extract_last_element(input_list=[0]) == 0) except AssertionError as e: print("Your function fails for input [0]") raise(e) try: assert(extract_last_element(input_list=['string1', 'string2', 'string3']) == 'string3') except AssertionError as e: print("Your function fails for input ['string1', 'string2', 'string3']") raise(e) print("GREAT! 
All seems to be correct :-)") # - # Alright, that was probably relatively easy. Let's do a slightly harder one. # <div class='alert alert-warning'> # <b>ToDo</b>: Write a completely new function named <tt>get_values_from_odd_indices</tt> (so you have to write the <tt>def ...</tt> part!) that takes one input-argument &mdash; a list &mdash; and returns all values from the odd indices of that list. So, suppose you have the following list: [2, 100, 25, 48, 92, -5, 12]. Your function should return: [100, 48, -5]. That is, the values from odd indices (here: 1, 3, 5; we exclude index zero!) Hint: slices might be useful here! # </div> # + nbgrader={"grade": false, "grade_id": "cell-b53fdc7b50d5fcf3", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement your function here. """ ### BEGIN SOLUTION def get_values_from_odd_indices(in_list): return in_list[1::2] ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-f0aba01f8698280a", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the ToDo above. """ try: assert('get_values_from_odd_indices' in dir()) assert(callable(get_values_from_odd_indices)) except AssertionError as e: print("Your function 'get_values_from_odd_indices' does not seem to exist!") try: out = get_values_from_odd_indices([0, 1, 2]) if out is None: msg = "ERROR: did you forget the Return statement?" raise ValueError(msg) except ValueError as e: raise(e) print("Well done (also run the next cell with tests)!") # + nbgrader={"grade": true, "grade_id": "cell-3dbc2e60c3240847", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Some other tests for the ToDo above. 
""" inp = [0, 1, 2] outp = get_values_from_odd_indices(inp) ans = [1] try: assert(outp == ans) except AssertionError as e: print("Your function returned '%r' but I expected '%r'" % (outp, ans)) raise(e) inp = [5, 7, 9, 11, 13, 15, 18, 20, 21] outp = get_values_from_odd_indices(inp) ans = [7, 11, 15, 20] try: assert(outp == ans) except AssertionError as e: print("Your function returned '%r' but I expected '%r'" % (outp, ans)) raise(e) print("Well done!") # - # **Important**: it is possible to return *multiple things* from a function. The function, then, returns these things as a tuple, which can subsequently be "unpacked". Let's check out an example using a custom function called `minmax_of_list` which returns both the minimum and maximum of a list: def minmax_of_list(some_list): ''' Returns both the minimum and maximum of a list. Parameters ---------- some_list : list A list with numbers (int/float) only Returns ------- min_value : a float or int The minimum of a list max_value : a float or int The maximum of a list ''' min_value = min(some_list) max_value = max(some_list) return min_value, max_value # As you can see, returning multiple things is a simple as adding more variables after the `return` statement, separated by commas. If we now call the function with a particular list, it gives us back a tuple of size 2 (one value for the minimum, one value for the maximum): output_from_function = minmax_of_list([0, 1, 2, 3]) print(output_from_function) print(type(output_from_function)) # We can now "unpack" the tuple (i.e., extract the separate values) in several ways. 
One way is to simply index the values: # + output_from_function = minmax_of_list([0, 1, 2, 3]) minimum = output_from_function[0] print("Minimum: %i" % minimum) maximum = output_from_function[1] print("Maximum: %i" % maximum) # - # Alternatively, we can already "extract" one value, let's say the maximum (index 1 of the tuple) right after calling the function, so we can skip dealing with the tuple altogether: maximum = minmax_of_list([0, 1, 2, 3])[1] # The [1] extracts the maximum from the output of the function immediately! print("Maximum: %i" % maximum) # Keep this feature of returning multiple things and tuple unpacking in mind for the rest of the course (you'll definitely encounter it more often!). # <div class='alert alert-warning'> # <b>ToDo</b>: Write a function called <tt>get_length_first_and_last_value</tt> which takes a list as single input argument, and returns the length of the list (the first output), the first value of the list (the second output), and the last value of the list (the third output). So, e.g., for the list <tt>[0, 1, 2, 3, 4]</tt>, the function should return <tt>(5, 0, 4)</tt> (a tuple of length 3, with the three outputs). Note that it should work for lists of arbitrary lengths and value types! # </div> # + nbgrader={"grade": false, "grade_id": "cell-1d7353d763e789a1", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the function here. """ ### BEGIN SOLUTION def get_length_first_and_last_value(lst): return len(lst), lst[0], lst[-1] ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-2df42fc82e0bc822", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the above ToDo. 
""" try: assert('get_length_first_and_last_value' in dir()) assert(callable(get_length_first_and_last_value)) except AssertionError as e: print("Your function 'get_length_first_and_last_value' does not seem to exist!") out = get_length_first_and_last_value([0, 1, 2]) if out is None: msg = "ERROR: did you forget the Return statement?" raise ValueError(msg) if len(out) != 3: msg = "ERROR: you returned %i things; this should be 3!" % len(out) raise ValueError(msg) assert(out == (3, 0, 2)) assert(get_length_first_and_last_value([2, 3, 4, 5, 6, 7]) == (6, 2, 7)) assert(get_length_first_and_last_value([0]) == (1, 0, 0)) assert(get_length_first_and_last_value(['a', 'b']) == (2, 'a', 'b')) print("Well done!") # - # <div class='alert alert-warning'> # <b>ToDo</b> (optional!): For this optional ToDo, write a function named <tt>round_number</tt> which takes a single argument, a float, and returns its nearest integer. For example, for the input <tt>5.3820</tt>, the function should return the integer <tt>5</tt> and for the input <tt>395.89271</tt> the function should return the integer <tt>396</tt>. Floats exactly in between two integers (e.g., <tt>3.5</tt>, <tt>38502.5</tt>, etc.) should be rounded down. # # Here's the catch: try to do this without a conditional (if-else statement) and, of course, without the built-in <tt>round</tt> function (or Numpy equivalent)! Hint: try to see what happens when you convert a float to an integer using the <tt>int</tt> function. # </div> # + nbgrader={"grade": false, "grade_id": "cell-8c29c2aad33bf842", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"] """ Implement the ToDo below. 
""" ### BEGIN SOLUTION def round_number(num): # Add 0.5 to number and then "round down" by converting # the float to an integer # For example, 0.6 + 0.5 = 1.1 -> to int -> 1 # Also, 0.4 + 0.5 = 0.9 -> to int -> 0 return int(num + 0.5) ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-72233ed2721d59b7", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} """ Tests the above ToDo. """ for num in [3.2385, 3.5, 3.9285, 28583.2195]: rounded = round_number(num) if not isinstance(rounded, int): raise ValueError("The rounded number is not an integer!") if rounded != round(num - 1e-100): raise ValueError("The rounding is not correct ...") print("Well done!") # - # ### Methods # In Python, functions are not the only things that allow you to 'do' things with data. As you've seen in the previous notebook, there are also methods! Different types of objects in Python, such as stings and lists, have their own set of methods. For example, the function you defined above (`extract_last_element()`) also exists as a method each list has, called `pop()`! (This is a builtin, standard, method that each list in Python has.) See for yourself in the block below. # + my_list = [0, 5, 10, 15] print(my_list.pop()) # You can also just do the following (i.e. no need to define a variable first!): print([0, 5, 10, 15].pop()) # ... which is the same as: print(extract_last_element([0, 5, 10, 15])) # - # Not only lists, but also other data-types (such as strings, dictionaries, and, as we'll see later, numpy arrays) have their own methods. We'll show you a couple of (often-used) examples of methods in built-in data types. # # First, one often used method of lists is `append`, which takes in an object adds it to itself. x = [0, 10, 15] x.append(20) # Add a new element to the end of the list using the append() method! 
print(x)

# <div class='alert alert-warning'>
# <b>ToDo</b>: Suppose we have a list of grades (the variable <tt>grades</tt> below) and we want to extend this list with two other grades (the variable <tt>new</tt> below). Use the <tt>append</tt> function to add the new grades to the <tt>grades</tt> variable such that it has six elements afterwards.
# </div>

# + nbgrader={"grade": false, "grade_id": "cell-f76db66bbbb54248", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"]
""" Implement the ToDo here. """
grades = [8.5, 7.2, 9.8, 8.1]
new = [10.0, 6.8]

### BEGIN SOLUTION
grades.append(new[0])
grades.append(new[1])
### END SOLUTION

# + nbgrader={"grade": true, "grade_id": "cell-bb93393ec583cd5c", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false}
""" Tests the ToDo above. """
if grades[-1] == [10.0, 6.8]:
    raise ValueError("You added all new grades at once ... Do you understand what is going wrong?")

# BUGFIX: the original condition read `not A or B`, which (because `not`
# binds tighter than `or`) parsed as `(not A) or B` and therefore raised
# "Something else went wrong ..." for the reversed-order answer
# [6.8, 10.0] that the `or` branch clearly meant to accept.
# Parenthesizing restores the intended "neither order matches" check.
if not (grades[-2:] == [10.0, 6.8] or grades[-2:] == [6.8, 10.0]):
    raise ValueError("Something else went wrong ...")
else:
    print("YAY!")
# -

# Some often-used methods for dictionaries are `values` and `keys`, which return the dictionary's values and keys, respectively. This may be useful in loops (as will be discussed later).

# +
my_dict = {'a': 0, 'b': 1, 'c': 2}

# The .values() method returns all the values of the dictionary
print(list(my_dict.values()))

# And the .keys() method returns all the keys of the dictionary
print(list(my_dict.keys()))
# -

# Strings actually have a ton of useful methods that come in handy when manipulating and "cleaning up" text:

# +
my_string = 'Python is fun!'

# The .upper() method returns the string in uppercase!
print(my_string.upper()) # The .count(substring) method returns the number of times a substring occurs in a string print(my_string.count('n')) # The .replace(old, new) method replaces substrings print(my_string.replace('fun', 'awesome')) # The .split(separator) splits a string into subparts (returned as a list) print(my_string.split(' ')) # split by whitespace # - # <div class='alert alert-warning'> # <b>ToDo</b>: Below, fix the string with typos (<tt>lots_of_typos</tt>) using (a combination of) string methods and store the result in a new variable named <tt>fixed_string</tt>. Note that you can "chain" different methods together on a single line (but this is not necessarily better/more readable)! # </div> # + nbgrader={"grade": false, "grade_id": "cell-e4ae7215115b1e2f", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"] """ Implement the ToDo here. """ lots_of_typos = "My.name.is.Lukas.and.i.like.Pyton." ### BEGIN SOLUTION fixed_string = ' '.join(lots_of_typos.split('.')).replace('Pyton ', 'Python.').replace(' i ', ' I ') # Or: # fixed_string = lots_of_typos.replace('.', ' ').replace('Pyton ', 'Python.').replace(' i ', ' I ') ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-cdc04b148398e822", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} """ Tests the above ToDo. """ if fixed_string != "My name is Lukas and I like Python.": raise ValueError("Not completely correct yet ...") print("Damn, you're a Python wizard!") # - # ### Default arguments in functions/methods # Importantly, and unlike most (scientific) programming languages, Python supports the use of 'default' arguments in functions. Basically, if you don't specify an optional argument, it uses the default: # + def exponentiate_number(number, power=2): return number ** power print(exponentiate_number(2)) # now it uses the default! 
print(exponentiate_number(2, 10)) # now it "overwrites" the default and uses power=10 print(exponentiate_number(number=2, power=10)) # also note that you can 'name' arguments # - # ## Conditionals (if-statements) # Conditionals, or "if-statements", are quite straightforward. There are used in combination with booleans (`True` and `False` values) to run code conditionally. An example: # An example: # + x = 5 if x > 0: print('x is larger than 0') elif x < 0: print('x is smaller than 0') else: print('x must be exactly 0!') # - # If-statements contain at least an `if` keyword, but optionally also one or more `elif` ("else if") statements and an optional `else` statement. We'll practice this (in a `ToDo`) after the section on Loops. # <div class='alert alert-warning'> # <b>ToDo</b> (optional): Write a function named <tt>juf</tt> that takes a single argument named <tt>number</tt> (an integer with a default of 1) and prints out the number <em>unless</em> the number is divisible by 7 (e.g., 14 or 21) <em>or</em> contains a 7 (e.g., 7, 27, or 77); in that case, it prints| "JUF!". (Apologies to the non-Dutchies for this reference.) # # <br>Hint: you may want to use the <a href="https://realpython.com/python-modulo-operator/">modulo operator (%)</a> to check the divisibility of a number by another number. # </div> # + nbgrader={"grade": false, "grade_id": "cell-3eead06787af2ed8", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"] """ Implement the ToDo here. """ ### BEGIN SOLUTION def juf(number=1): if '7' in str(number) or number % 7 == 0: print("JUF!") else: print(number) ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-3b25bfa3b7e42ef1", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} """ Tests the ToDo above. 
""" import inspect spec = inspect.getfullargspec(juf) if spec.args[0] != 'number': raise ValueError("The argument to juf is not called `number`!") if spec.defaults[0] != 1: raise ValueError("The default of `number` should be 1!") # The following should print out: 1, JUF!, JUF!, 22, JUF!, JUF!, 80 for nb in [1, 7, 17, 22, 63, 71, 80]: juf(nb) # - # ## Loops # Loops in Python (for- and while-loops) are largely similar to MATLAB and R loops, with some minor differences in their syntax. Again, just like in conditionals, indentation is important: each line within your loop needs to be consistently indented. Also, loops do not contain a keyword (like `end` in Matlab) or bracket (like a curly bracket in *R*) that determines the end of the loop; this is just determined by the indentation. # ### For loops # For loops are constructs that allow to loop over a predetermined number of elements within an object. Below, you can see an example of a for loop over a list of strings: animals = ['cat', 'dog', 'monkey'] for animal in animals: print(animal) # Basically, each data type that is also an "iterable" (something that you can iterate over) can be used in loops, including lists, dictionaries, and tuples. # An example of looping over a list my_list = [1, 2, 3] for x in my_list: print(x) # MATLAB users might be used to looping over indices instead of the actual list values, like the following: # # ```Matlab # for i=1:100 # disp(some_list(i)); # end # ``` # # In Python, however, you loop (by default) over the contents of a list: # # ```Python # for entry in some_list: # print(entry) # ``` # # If you want to access for the value *and* the index, you can use the built-in `enumerate` function: # + my_list = ['a', 'b', 'c'] for index, value in enumerate(my_list): print('Loop iteration number (index) = %i, value = %s' % (index, value)) # Don't forget that Python indexing starts at zero! 
# - # Apart from lists, you can also loop over tuples: # Looping over a tuple (exactly the same as looping over a list) my_tuple = (1, 2, 3) for x in my_tuple: print(x) # ... and dictionaries: # + # Iterating over a dictionary can be done in a couple of ways! my_dict = {'a': 1, 'b': 2, 'c': 3} # Looping over the keys ONLY for key in my_dict: print(key) # - # Looping over both the keys and the entries for key, entry in my_dict.items(): print(key, entry) # <div class='alert alert-warning'> # <b>ToDo</b>: Complete the function below &mdash; named <tt>extract_values_smaller_than_0</tt> &mdash; that takes a single list with numbers as input and returns a new list with <em>only the values smaller than 0</em> from the input-list. For example, suppose our input-list is: [2, -5.3, 1.8, 0.0, -205.1, 6029]. Then, the function should return: [-5.3, -205.1].<br><br>Hint: use an if-statement in combination with the <tt>.append()</tt> method of the empty list we initialized below (<tt>list_to_return</tt>) to fill the <tt>list_to_return</tt> variable in a for-loop. In other words, the function should contain an if-statement in a for-loop (in which you need to use the <tt>.append()</tt> method). # </div> # + nbgrader={"grade": false, "grade_id": "cell-12b8cc33efcf2857", "locked": false, "schema_version": 3, "solution": true} tags=["hide-cell"] """ Implement the ToDo here. """ # Complete the function below (make sure to remove raise NotImplementedError!) def extract_values_smaller_than_0(input_list): # We initialize an empty list here (which you need to fill using a for-loop) list_to_return = [] ### BEGIN SOLUTION for value in input_list: if value < 0: list_to_return.append(value) ### END SOLUTION return list_to_return # + nbgrader={"grade": true, "grade_id": "cell-edbdf3424641ffb9", "locked": true, "points": 0, "schema_version": 3, "solution": false} """ Tests the ToDo above. 
""" inp = [-5, 2, 3, -8] outp = extract_values_smaller_than_0(inp) ans = [-5, -8] try: assert(outp == ans) except AssertionError as e: print("Your function with input '%r' returned '%r', but I expected '%r'" % (inp, outp, ans)) raise(e) inp = [0, 2, -3] outp = extract_values_smaller_than_0(inp) ans = [-3] try: assert(outp == ans) except AssertionError as e: print("Your function with input '%r' returned '%r', but I expected '%r'" % (inp, outp, ans)) raise(e) inp = [0, 0, 0] outp = extract_values_smaller_than_0(inp) ans = [] try: assert(outp == ans) except AssertionError as e: print("Your function with input '%r' returned '%r', but I expected '%r'" % (inp, outp, ans)) raise(e) print("EPIC! Well done!") # - # ### While loops # While loops are constructs that allow you to do something until a specific condition is met and are often used when you do not know the number of elements you want to loop over beforehand. For example, suppose we simulate a six-sided die roll using the `randint` ("random integer") function. We can use a while loop to keep "rolling" the die until we roll a six (i.e., the condition): # + from random import randint # keep track of nr of rolls # (not necessary for the loop) i_rolls = 0 # Conditional right after the `while` statement! while randint(1, 6) != 6: print("Still haven't rolled a 6 ...") # (Not strictly necessary, but nice to print out later) i_rolls = i_rolls + 1 print(f"It took {i_rolls} to roll a six!") # - # Instead of looping until the condition right after the while statement is met, you can also manually "break out" of the loop using the `break` keyword. For example, suppose you want to stop rolling the die after 3 rolls because it would otherwise take too long. You could implement this as follows: i_rolls = 0 while randint(1, 6) != 6: print("Still haven't rolled a 6 ...") i_rolls = i_rolls + 1 if i_rolls >= 3: print("Takes too long! Breaking out ...") break # break out of loop! 
# Sometimes, you might see the `break` construct used in combination with a while loop that is always true (e.g., `while True: ...`; an "infinite loop"). For example, the first die roll example can also be implemented as follows: # + i_rolls = 0 while True: roll = randint(1, 6) if roll == 6: break print("Still haven't rolled a 6 ...") i_rolls = i_rolls + 1 print(f"It took {i_rolls} to roll a six!") # - # Both implementations are fine; it's up to you what you prefer. # <div class='alert alert-success'> # <b>Tip</b>: the <tt>break</tt> keyword also works in for loops! You can use this if you want to terminate the loop earlier than expected. # </div> # <div class='alert alert-warning'> # <b>ToDo</b>: Below, we created a function, <tt>random_string</tt>, which creates a random five-letter string. Create a while-loop that loops until a random string has been created that starts with "A" and ends with "Z". (No test cell.) # </div> # + nbgrader={"grade": false, "grade_id": "cell-011168f7bb356c50", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"] """ Implement the ToDo here. """ import string from random import choice def random_string(): """ Generates a 5 letter random string. """ rs = ''.join(choice(string.ascii_uppercase) for _ in range(5)) return rs ### BEGIN SOLUTION while True: rs = random_string() if rs[0] == 'A' and rs[-1] == 'Z': break print(rs) ### END SOLUTION # - # <div class='alert alert-danger'> # <b>Warning</b>: Sometimes, you might run an infinite loop without a way to "break out" of it. When doing so in a (non-interactive) Python script, you can simply press control + C on your keyboard to interrupt the script, but in Jupyter notebooks, this doesn't work. Instead, you need to click on the "stop" button (next to the "run" button) to interrupt the cell! If even that doesn't work, you need to stop the entire kernel: <em>Kernel</em> &rarr; <em>Restart Kernel</em> (or <em>Shut Down Kernel</em>). 
# </div> # ### Advanced for loops: list comprehensions (optional) # Sometimes, writing (and reading!) for-loops can be confusing and lead to "ugly" code. Wouldn't it be nice to represent (small) for-loops on a single line? Python has a way to do this: using what is called `list comprehensions`. It does exactly the same thing as a for-loop: it takes a list, iterates over its entries (and does something with each entry), and (optionally) returns a (modified) list. # # Let's look at an arbitrary example of a for-loop over a list: # + nums = [0, 1, 2, 3, 4] # Also, check out the way 'enumerate' is used here! for index, x in enumerate(nums): nums[index] = x ** 2 print(nums) # - # You can make this code simpler using a list comprehension: nums = [0, 1, 2, 3, 4] squares = [x ** 2 for x in nums] # importantly, a list comprehension always returns a (modified) list! print(squares) # Also, list comprehensions may contain if-statements! string_nums = ['one', 'two', 'three'] starts_with_t = ['yes' if s[0] == 't' else 'no' for s in string_nums] print(starts_with_t) # <div class='alert alert-warning'> # <b>ToDo</b> (optional): Write a list comprehension that adds the string <tt>'_check'</tt> to each value in the list <tt>my_list</tt> below, except if the value is 'B'. Store the result in a new variable named <tt>my_list_checked</tt>. Note that (in this particular use of list-comprehensions) you always need <em>both</em> a "if .." part <em>and</em> an "else ..." part! So, can you think of a way to add nothing to a string (i.e., the "else ...", when the element is not 'B', part of this list comprehension)? # </div> # + nbgrader={"grade": false, "grade_id": "cell-67702630da9410f5", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"] """ Implement the ToDo below (no test cell). 
""" my_list = ['A', 'B', 'C', 'D'] ### BEGIN SOLUTION my_list_checked = [elem + '_check' if elem != 'B' else elem for elem in my_list] ### END SOLUTION # + nbgrader={"grade": true, "grade_id": "cell-ce566d10a2d2fa8d", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} """ Tests the above ToDo. """ for orig, new in zip(my_list, my_list_checked): if orig == 'B': assert(orig == new) else: assert(f'{orig}_check' == new) print("Alright! Well done!") # - # In addition to *list* comprehensions, Python also features *dictionary* comprehensions, *set* comprehensions, and *generator* comprehensions. As we haven't discussed sets and generators (as these are somewhat more advanced and/or niche concepts), let's check out an example of a dictionary comprehension. For example, suppose I want to initialize a dictionary with names of students as keys and an empty list as values (which will, at some point, be populated by grades for different assignments). I can do this using a dictionary comprehension: names = ['billy', 'sara', 'amir'] grades = {name: [] for name in names} print(grades) # This is the same as below: # grades = {} # for name in names: # grades[name] = [] # List and dictionary comprehensions are more advanced Python concepts, so if you don't feel comfortable using them (correctly) in your future assignments, use regular for-loops by all means! # ## A challenging exercise (optional) # For those that a challenge, try the following (very) difficult ToDo! # <div class='alert alert-warning'> # <b>ToDo</b>: When developing a new experiment, you may want to control the order and frequency of trials from different conditions such that the experiment does not become too predictable and does not feature more trials from one condition than another. Write a function that generates a list with a random sequence of trials from two conditions (either condition <tt>0</tt> or condition <tt>1</tt>). 
# The number of trials in total is an argument to the function (<tt>n_trials</tt>) and may vary, but you may assume that it is an even number (and thus divisible by the number of conditions). The output of the function is a random list of numbers (either 0 or 1), for example: <tt>[0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, ..., 1, 1, 0]</tt>. There are, however, two conditions:
#
# 1. Each condition should have the same amount of trials;
# 2. Each condition should have the same probability of being followed by any condition (often called "counterbalancing").
#
# Condition 2 means that for a given trial of, for example, condition <tt>1</tt>, there is a 50% probability that this trial is followed by a trial of condition <tt>0</tt> and a 50% probability that this trial is followed by a trial of condition <tt>1</tt>. Note that it is impossible to <em>completely</em> counterbalance this proportion, because the last trial does not have trial after itself. Therefore, the counterbalancing may be off by 1 trial.
# </div>

# + nbgrader={"grade": false, "grade_id": "cell-c49350389f6072cc", "locked": false, "schema_version": 3, "solution": true, "task": false} tags=["hide-cell"]
""" Complete the function below. """
import random


def generate_experiment_sequence(n_trials=40):
    """ Generates a random, approximately counterbalanced sequence of
    trials across two conditions (0 and 1).

    Parameters
    ----------
    n_trials : int
        Total number of trials; assumed to be even, so both conditions
        can occur equally often

    Returns
    -------
    exp_seq : list
        List of `n_trials` integers (0 or 1) with equal counts per
        condition, reshuffled until each condition is followed by either
        condition (almost) equally often

    Raises
    ------
    KeyError
        If `n_trials` is odd (the trailing odd element is not a valid
        dict key in the counting step)
    """
    ### BEGIN SOLUTION
    # NOTE: this is not necessarily the best/most efficient
    # solution!

    # Create sequence with exactly n_trials / 2 trials per condition
    n_per_cond = int(n_trials / 2)
    exp_seq = list(range(2)) * n_per_cond

    # Rejection sampling: reshuffle until the transition counts are
    # balanced to within one trial for both conditions.
    while True:
        # Shuffle sequence
        random.shuffle(exp_seq)

        # counts[c] = [how often c is followed by 0, how often by 1]
        counts = {0: [0, 0], 1: [0, 0]}

        # Count all n+1 trial conditions for each condition
        # (the last trial has no successor, hence the -1)
        for i in range(len(exp_seq) - 1):
            counts[exp_seq[i]][exp_seq[i + 1]] += 1

        # Check if proportions are correct!
        correct = []
        for cond, count in counts.items():
            if abs(count[0] - count[1]) < 2:
                correct.append(True)
            else:
                correct.append(False)

        if all(correct):
            return exp_seq
    ### END SOLUTION

# + nbgrader={"grade": true, "grade_id": "cell-adb6f09735d3990d", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false}
""" Tests the above ToDo. """
import numpy as np

# Check for multiple amounts of trials
for n_trials in [20, 40, 60, 80]:
    # I'm using numpy because I'm lazy
    seq = np.asarray(generate_experiment_sequence(n_trials))

    # Check equal frequencies
    assert(seq[seq == 0].size == seq[seq == 1].size)

    # Check counterbalancing
    # Compute proportion of 0 trials
    prop0 = seq[np.where(seq[:-1] == 0)[0] + 1].mean()
    # eps = "epsilon" (tolerance)
    eps = 1 / (n_trials / 2)
    # Check if worse than tolerance
    if abs(prop0 - 0.5) > eps:
        raise ValueError("Not correctly counterbalanced")

print("CORRECT! THIS IS AMAZING!")
# -

# Alright! That was it for this tutorial. We discussed the most important syntax, data types, and constructs in Python. Sure, there are things that we haven't discussed (e.g., [sets](https://realpython.com/python-sets/), [generators](https://realpython.com/introduction-to-python-generators/), [inheritance](https://realpython.com/inheritance-composition-python/), [decorators](https://realpython.com/primer-on-python-decorators/) and so on), but the material from this tutorial should be enough to get you started with Python (and the next tutorials)!
#
# You can continue with the next one about *Matplotlib* if you're ready!
intropy/solutions/week_1/1_python_basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] colab_type="text" id="sSknT9fOwo3z" # # SQL (ii) (sesión 2) # + [markdown] colab_type="text" id="HnLp0SNrIWqW" # ![Mysql](https://labs.mysql.com/common/logos/mysql-logo.svg) # # + [markdown] colab_type="text" id="yULxkMKuIWqZ" # # Inicialización y carga de paquetes # # # + [markdown] colab_type="text" id="NVBsiBj1IWqc" # Esta hoja muestra cómo acceder a bases de datos SQL y también a conectar la salida con Jupyter/Colab. Las partes en SQL también se pueden realizar directamente en MySQL ejecutando el programa `mysql` del entorno de ejecución. # # # - # !apt-get update -qq # + colab={} colab_type="code" id="Xcy7h029JBPQ" # !apt-get install -y mysql-server mysql-client # + colab={} colab_type="code" id="kNuEvfooJPQ8" # !service mysql start # + colab={} colab_type="code" id="7HUlENglJYSH" # !mysql -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'root'" # + colab={} colab_type="code" id="FW-KsTLSIWqg" # !pip install PyMySQL # + colab={} colab_type="code" id="TQ0a3uknIWqt" # %load_ext sql # #%config SqlMagic.feedback = False # Evitar que muestre el número de filas # %config SqlMagic.autopandas=True # + colab={} colab_type="code" id="F3UMoNLIIWq1" import pandas as pd import matplotlib.pyplot as plt import matplotlib # %matplotlib inline matplotlib.style.use('ggplot') # + [markdown] colab_type="text" id="hdeQswAYwo4_" # - Format: 7zipped # - Files: # - **badges**.xml # - UserId, e.g.: "420" # - Name, e.g.: "Teacher" # - Date, e.g.: "2008-09-15T08:55:03.923" # - **comments**.xml # - Id # - PostId # - Score # - Text, e.g.: "@Stu Thompson: Seems possible to me - why not try it?" 
# - CreationDate, e.g.:"2008-09-06T08:07:10.730" # - UserId # - **posts**.xml # - Id # - PostTypeId # - 1: Question # - 2: Answer # - ParentID (only present if PostTypeId is 2) # - AcceptedAnswerId (only present if PostTypeId is 1) # - CreationDate # - Score # - ViewCount # - Body # - OwnerUserId # - LastEditorUserId # - LastEditorDisplayName="<NAME>" # - LastEditDate="2009-03-05T22:28:34.823" # - LastActivityDate="2009-03-11T12:51:01.480" # - CommunityOwnedDate="2009-03-11T12:51:01.480" # - ClosedDate="2009-03-11T12:51:01.480" # - Title= # - Tags= # - AnswerCount # - CommentCount # - FavoriteCount # - **posthistory**.xml # - Id # - PostHistoryTypeId # - 1: Initial Title - The first title a question is asked with. # - 2: Initial Body - The first raw body text a post is submitted with. # - 3: Initial Tags - The first tags a question is asked with. # - 4: Edit Title - A question's title has been changed. # - 5: Edit Body - A post's body has been changed, the raw text is stored here as markdown. # - 6: Edit Tags - A question's tags have been changed. # - 7: Rollback Title - A question's title has reverted to a previous version. # - 8: Rollback Body - A post's body has reverted to a previous version - the raw text is stored here. # - 9: Rollback Tags - A question's tags have reverted to a previous version. # - 10: Post Closed - A post was voted to be closed. # - 11: Post Reopened - A post was voted to be reopened. # - 12: Post Deleted - A post was voted to be removed. # - 13: Post Undeleted - A post was voted to be restored. # - 14: Post Locked - A post was locked by a moderator. # - 15: Post Unlocked - A post was unlocked by a moderator. # - 16: Community Owned - A post has become community owned. # - 17: Post Migrated - A post was migrated. # - 18: Question Merged - A question has had another, deleted question merged into itself. 
# - 19: Question Protected - A question was protected by a moderator # - 20: Question Unprotected - A question was unprotected by a moderator # - 21: Post Disassociated - An admin removes the OwnerUserId from a post. # - 22: Question Unmerged - A previously merged question has had its answers and votes restored. # - PostId # - RevisionGUID: At times more than one type of history record can be recorded by a single action. All of these will be grouped using the same RevisionGUID # - CreationDate: "2009-03-05T22:28:34.823" # - UserId # - UserDisplayName: populated if a user has been removed and no longer referenced by user Id # - Comment: This field will contain the comment made by the user who edited a post # - Text: A raw version of the new value for a given revision # - If PostHistoryTypeId = 10, 11, 12, 13, 14, or 15 this column will contain a JSON encoded string with all users who have voted for the PostHistoryTypeId # - If PostHistoryTypeId = 17 this column will contain migration details of either "from <url>" or "to <url>" # - CloseReasonId # - 1: Exact Duplicate - This question covers exactly the same ground as earlier questions on this topic; its answers may be merged with another identical question. 
# - 2: off-topic # - 3: subjective # - 4: not a real question # - 7: too localized # - **postlinks**.xml # - Id # - CreationDate # - PostId # - RelatedPostId # - PostLinkTypeId # - 1: Linked # - 3: Duplicate # - **users**.xml # - Id # - Reputation # - CreationDate # - DisplayName # - EmailHash # - LastAccessDate # - WebsiteUrl # - Location # - Age # - AboutMe # - Views # - UpVotes # - DownVotes # - **votes**.xml # - Id # - PostId # - VoteTypeId # - ` 1`: AcceptedByOriginator # - ` 2`: UpMod # - ` 3`: DownMod # - ` 4`: Offensive # - ` 5`: Favorite - if VoteTypeId = 5 UserId will be populated # - ` 6`: Close # - ` 7`: Reopen # - ` 8`: BountyStart # - ` 9`: BountyClose # - `10`: Deletion # - `11`: Undeletion # - `12`: Spam # - `13`: InformModerator # - CreationDate # - UserId (only for VoteTypeId 5) # - BountyAmount (only for VoteTypeId 9) # + colab={} colab_type="code" id="H0XhGi4-UYND" import gzip from urllib.request import Request,urlopen import io import os import os.path as path def download_csv(baseurl, filename): file = path.abspath(path.join(os.getcwd(),filename)) request = Request(baseurl + filename + '.gz') response = urlopen(request) buf = io.BytesIO(response.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() with open (filename, 'wb') as ff: ff.write(data) baseurl = 'https://raw.githubusercontent.com/dsevilla/bdge-data/master/es.stackoverflow/' download_csv(baseurl, 'Posts.csv') download_csv(baseurl, 'Users.csv') download_csv(baseurl, 'Tags.csv') download_csv(baseurl, 'Comments.csv') download_csv(baseurl, 'Votes.csv') # + colab={} colab_type="code" id="JeLNiqCCIWq7" language="sql" # mysql+pymysql://root:root@localhost/?charset=utf8mb4&local_infile=1 # + colab={} colab_type="code" id="sueYe40Swo5P" language="sql" # DROP SCHEMA IF EXISTS stackoverflow; # CREATE SCHEMA stackoverflow CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; # + colab={} colab_type="code" id="n-PAF2Fgwo5b" language="sql" # USE stackoverflow; # + colab={} colab_type="code" 
id="wAOYqQaFwo5k" language="sql" # SET GLOBAL local_infile = true; # + colab={} colab_type="code" id="vxTPv9xswo5u" language="sql" # DROP TABLE IF EXISTS PostsPart; # CREATE TABLE PostsPart ( # Id INT, # AcceptedAnswerId INT NULL DEFAULT NULL, # AnswerCount INT DEFAULT 0, # Body TEXT, # ClosedDate DATETIME(6) NULL DEFAULT NULL, # CommentCount INT DEFAULT 0, # CommunityOwnedDate DATETIME(6) NULL DEFAULT NULL, # CreationDate DATETIME(6) NULL DEFAULT NULL, # FavoriteCount INT DEFAULT 0, # LastActivityDate DATETIME(6) NULL DEFAULT NULL, # LastEditDate DATETIME(6) NULL DEFAULT NULL, # LastEditorDisplayName TEXT, # LastEditorUserId INT NULL DEFAULT NULL, # OwnerDisplayName TEXT, # OwnerUserId INT NULL DEFAULT NULL, # ParentId INT NULL DEFAULT NULL, # PostTypeId INT, -- 1 = Question, 2 = Answer # Score INT DEFAULT 0, # Tags TEXT, # Title TEXT, # ViewCount INT DEFAULT 0, # PRIMARY KEY(Id) # ) # CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci # PARTITION BY KEY(Id) # PARTITIONS 6; # + colab={} colab_type="code" id="KQORIoiZwo56" language="sql" # LOAD DATA LOCAL INFILE "Posts.csv" INTO TABLE PostsPart # CHARACTER SET utf8mb4 # COLUMNS TERMINATED BY ',' # OPTIONALLY ENCLOSED BY '"' # ESCAPED BY '"' # LINES TERMINATED BY '\r\n' # IGNORE 1 LINES # (Id, # @AcceptedAnswerId, # @AnswerCount, # Body, # @ClosedDate, # @CommentCount, # @CommunityOwnedDate, # CreationDate, # @FavoriteCount, # @LastActivityDate, # @LastEditDate, # LastEditorDisplayName, # @LastEditorUserId, # OwnerDisplayName, # @OwnerUserId, # @ParentId, # PostTypeId, # Score, # Tags, # Title, # @ViewCount) # SET ParentId = nullif (@ParentId, ''), # ClosedDate = nullif(@ClosedDate, ''), # LastEditorUserId = nullif(@OLastEditorUserId, ''), # LastActivityDate = nullif(@LastActivityDate, ''), # LastEditDate = nullif(@LastEditDate, ''), # AcceptedAnswerId = nullif (@AcceptedAnswerId, ''), # OwnerUserId = nullif(@OwnerUserId, ''), # LastEditorUserId = nullif(@LastEditorUserId, ''), # CommunityOwnedDate = 
nullif(@CommunityOwnedDate, ''), # FavoriteCount = if(@FavoriteCount = '',0,@FavoriteCount), # CommentCount = if(@CommentCount = '',0,@CommentCount), # ViewCount = if(@ViewCount = '',0,@ViewCount), # AnswerCount = if(@AnswerCount = '',0,@AnswerCount) # ; # + colab={} colab_type="code" id="QgtCSkWUwo6F" language="sql" # DROP TABLE IF EXISTS Users; # CREATE TABLE Users ( # Id INT, # AboutMe TEXT, # AccountId INT, # Age INT NULL DEFAULT NULL, # CreationDate DATETIME(6) NULL DEFAULT NULL, # DisplayName TEXT, # DownVotes INT DEFAULT 0, # LastAccessDate DATETIME(6) NULL DEFAULT NULL, # Location TEXT, # ProfileImageUrl TEXT, # Reputation INT DEFAULT 0, # UpVotes INT DEFAULT 0, # Views INT DEFAULT 0, # WebsiteUrl TEXT, # PRIMARY KEY(Id) # ) # CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; # + colab={} colab_type="code" id="kkIoX7fywo6S" language="sql" # LOAD DATA LOCAL INFILE "Users.csv" INTO TABLE Users # CHARACTER SET utf8mb4 # COLUMNS TERMINATED BY ',' # OPTIONALLY ENCLOSED BY '"' # ESCAPED BY '"' # LINES TERMINATED BY '\r\n' # IGNORE 1 LINES # (Id,AboutMe,@AccountId,@Age,@CreationDate,DisplayName,DownVotes,LastAccessDate,Location,ProfileImageUrl, # Reputation,UpVotes,Views,WebsiteUrl) # SET LastAccessDate = nullif(@LastAccessDate,''), # Age = nullif(@Age, ''), # CreationDate = nullif(@CreationDate,''), # AccountId = nullif(@AccountId, '') # ; # + colab={} colab_type="code" id="LpSsjTtHwo6e" language="sql" # DROP TABLE IF EXISTS Tags; # CREATE TABLE Tags ( # Id INT, # Count INT DEFAULT 0, # ExcerptPostId INT NULL DEFAULT NULL, # TagName TEXT, # WikiPostId INT NULL DEFAULT NULL, # PRIMARY KEY(Id) # ) # CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; # + colab={} colab_type="code" id="VVUj_JLnwo6t" language="sql" # LOAD DATA LOCAL INFILE "Tags.csv" INTO TABLE Tags # CHARACTER SET utf8mb4 # COLUMNS TERMINATED BY ',' # OPTIONALLY ENCLOSED BY '"' # ESCAPED BY '"' # LINES TERMINATED BY '\r\n' # IGNORE 1 LINES # (Id,Count,@ExcerptPostId,TagName,@WikiPostId) # SET 
WikiPostId = nullif(@WikiPostId, ''), # ExcerptPostId = nullif(@ExcerptPostId, '') # ; # + colab={} colab_type="code" id="LHfgGeWgwo65" language="sql" # DROP TABLE IF EXISTS Comments; # CREATE TABLE Comments ( # Id INT, # CreationDate DATETIME(6) NULL DEFAULT NULL, # PostId INT NULL DEFAULT NULL, # Score INT DEFAULT 0, # Text TEXT, # UserDisplayName TEXT, # UserId INT NULL DEFAULT NULL, # PRIMARY KEY(Id) # ) # CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; # + colab={} colab_type="code" id="62HNOOEqwo7E" language="sql" # LOAD DATA LOCAL INFILE "Comments.csv" INTO TABLE Comments # CHARACTER SET utf8mb4 # COLUMNS TERMINATED BY ',' # OPTIONALLY ENCLOSED BY '"' # ESCAPED BY '"' # LINES TERMINATED BY '\r\n' # IGNORE 1 LINES # (Id,@CreationDate,@PostId,Score,Text,@UserDisplayName,@UserId) # SET UserId = nullif(@UserId, ''), # PostId = nullif(@PostId, ''), # CreationDate = nullif(@CreationDate,''), # UserDisplayName = nullif(@UserDisplayName,'') # ; # + colab={} colab_type="code" id="vG_N4kQMwo7S" language="sql" # DROP TABLE IF EXISTS Votes; # CREATE TABLE Votes ( # Id INT, # BountyAmount INT DEFAULT 0, # CreationDate DATETIME(6) NULL DEFAULT NULL, # PostId INT NULL DEFAULT NULL, # UserId INT NULL DEFAULT NULL, # VoteTypeId INT, # PRIMARY KEY(Id) # ) # CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; # + colab={} colab_type="code" id="P7qfdxTxwo7b" language="sql" # LOAD DATA LOCAL INFILE "Votes.csv" INTO TABLE Votes # CHARACTER SET utf8mb4 # COLUMNS TERMINATED BY ',' # OPTIONALLY ENCLOSED BY '"' # ESCAPED BY '"' # LINES TERMINATED BY '\r\n' # IGNORE 1 LINES # (Id,@BountyAmount,@CreationDate,@PostId,@UserId,VoteTypeId) # SET UserId = nullif(@UserId, ''), # PostId = nullif(@PostId, ''), # BountyAmount = if(@BountyAmount = '',0,@BountyAmount), # CreationDate = nullif(@CreationDate, '') # ; # + colab={} colab_type="code" id="eO4mrhRuwo7k" # %sql use stackoverflow # + colab={} colab_type="code" id="yXgw0XUSwo7q" language="sql" # SHOW TABLES; # + colab={} colab_type="code" 
id="-G_Gub7Pwo70" language="sql" # EXPLAIN SELECT * FROM PostsPart; # + colab={} colab_type="code" id="tJF9D1uTwo7_" language="sql" # EXPLAIN SELECT * FROM PostsPart WHERE Id = 5; # + colab={} colab_type="code" id="LUMrooU-wo8F" language="sql" # SELECT * FROM PostsPart PARTITION(p0) LIMIT 10; # + colab={} colab_type="code" id="Ski3lcF3wo8N" language="sql" # DROP TABLE IF EXISTS Posts; # CREATE TABLE Posts AS SELECT * FROM PostsPart; # + colab={} colab_type="code" id="YaYqdd0Owo8U" language="sql" # DESCRIBE Posts; # + colab={} colab_type="code" id="Qi7xjsW4wo8c" language="sql" # ALTER TABLE Posts ADD PRIMARY KEY (Id); # ALTER TABLE Posts ADD FOREIGN KEY(OwnerUserId) REFERENCES Users(Id); # ALTER TABLE Posts ADD FOREIGN KEY(ParentId) REFERENCES Posts(Id); # + colab={} colab_type="code" id="hTuFYN-hwo8l" language="sql" # DESCRIBE Posts; # + [markdown] colab_type="text" id="moKPVJtQwo8p" # Creamos un índice full-text para Tags de Posts. # + colab={} colab_type="code" id="e1h1zCuuwo8r" language="sql" # CREATE FULLTEXT INDEX Post_Tag_idx ON Posts(Tags); # + colab={} colab_type="code" id="7EOuKx5Jwo8z" # %timeit %sql SELECT * FROM Posts WHERE Tags LIKE '%java%'; # + colab={} colab_type="code" id="Om8lYk26wo83" # %timeit %sql SELECT * FROM Posts WHERE MATCH(Tags) AGAINST ('java'); # + colab={} colab_type="code" id="pw48cIViwo8_" language="sql" # EXPLAIN SELECT * FROM Posts WHERE MATCH(Tags) AGAINST ('java'); # + colab={} colab_type="code" id="eoENt9Lowo9F" language="sql" # EXPLAIN SELECT COUNT(*) FROM Posts WHERE MATCH(Tags) AGAINST ('java'); # + colab={} colab_type="code" id="J7gVe9n9wo9f" language="sql" # EXPLAIN SELECT * FROM Posts WHERE Tags LIKE '%java%'; # + [markdown] colab_type="text" id="OaEiQBAjwo9l" # ## Decline de Stackoverflow # # En [esta dirección](https://hackernoon.com/the-decline-of-stack-overflow-7cb69faa575d#.pbf8nmlwx) se habla del decline de Stackoverflow. 
# Por ejemplo, se habla de que el 77% de los usuarios sólo hacen una pregunta, que el 65% sólo responde a una pregunta y que sólo el 8% de los usuarios responden a más de una pregunta.
#
# Los siguientes ejercicios están orientados a ver si esto también se cumple en stackoverflow en español.
#
# En el artículo, se habla de una referencia, que se muestra a continuación. En una página web se listan un conjunto de trabajos de investigación que utilizan el conjunto de datos de stackoverflow para hacer estudios. La idea es reproducir esos resultados y ver cómo se comparan con los de español. Se muestran dos artículos.

# + [markdown] colab_type="text" id="eZuzFzZhwo9m"
# ## Estudio empírico de Stackoverflow
#
# Artículo que se puede encontrar [aquí](http://ink.library.smu.edu.sg/cgi/viewcontent.cgi?article=2810&context=sis_research). La fuente original que lista varios artículos con fuente en los datos de Stackoverflow se puede ver [aquí](http://www.stackprinter.com/export?service=meta.stackoverflow&question=134495&printer=false&linktohome=true).
# + colab={} colab_type="code" id="ibOG-eCowo9o"
# http://stackoverflow.com/questions/19470099/view-pdf-image-in-an-ipython-notebook
class PDF:
    """Rich-display wrapper that embeds a (remote) PDF in a notebook.

    Jupyter automatically picks the best available representation:
    `_repr_html_` is used in the browser front-end, `_repr_latex_`
    when the notebook is exported through LaTeX.
    """

    def __init__(self, pdf, size=(200, 200)):
        # pdf: URL or path of the document; size: (width, height) in px.
        self.pdf = pdf
        self.size = size

    def _repr_html_(self):
        # NOTE(review): the src attribute is deliberately left unquoted
        # to keep the emitted HTML identical to the original notebook;
        # URLs containing spaces would need quoting.
        return f'<iframe src={self.pdf} width={self.size[0]} height={self.size[1]}></iframe>'

    def _repr_latex_(self):
        # Doubled braces emit the literal { } around the file name.
        return rf'\includegraphics[width=\textwidth]{{{self.pdf}}}'


# + colab={} colab_type="code" id="zRlDoOWqwo9w"
PDF('http://ink.library.smu.edu.sg/cgi/viewcontent.cgi?article=2810&context=sis_research',size=(800,600))

# + [markdown] colab_type="text" id="KLNNZCzswo96"
# # EJERCICIO: Mostrar cómo conseguir RQ1, RQ2, RQ3 y RQ4 (tablas y gráficas) del artículo anterior, y ver si la tendencia también se repite en Stackoverflow en español

# + colab={} colab_type="code" id="kc2NWc_gwo98" language="sql"
# SELECT C.c, COUNT(C.c) FROM
# (SELECT COUNT(*) as C FROM Posts GROUP BY OwnerUserId) AS C
# GROUP BY C.c
# ORDER BY C.c;

# + [markdown] colab_type="text" id="M4JqtKjCwo-D"
# # EJERCICIO (E1): Idear las tablas e índices necesarios para que la consulta de, dados un usuario y un tag, se pueda saber de la forma más rápida qué posts tiene ese usuario de ese tag, y también, dado un tag, saber qué usuarios han preguntado sobre ese tag. Rellenar las tablas. Mostrar con un EXPLAIN que realmente no se usa una búsqueda secuencial sobre la tabla

# + colab={} colab_type="code" id="lHImFwj6wo-D"


# + [markdown] colab_type="text" id="NSHsn8vlwo-I"
# # EJERCICIO (E2): Crear una tabla de hechos `StackOverflowFacts` que incluya y ordene, por el campo `CreationDate`, todos los eventos de los datos obtenidos, Posts (diferenciad entre la creación de una pregunta y una respuesta), Users, Votes, Comments. Téngase en cuenta como se vió que puede haber información (p. ej. en la tabla Votes) que no mantenga la integridad referencial. Defina las claves ajenas y los índices que considere necesarios.
# + colab={} colab_type="code" id="DpZ4Agukwo-J" # + [markdown] colab_type="text" id="rebPemPWwo-N" # # EJERCICIO (E3): Con la información de la tabla `StackOverflowFacts`, escriba una consulta SQL que calcule el tiempo medio en segundos, para todos los usuarios, entre creación de un usuario y su primera pregunta # + colab={} colab_type="code" id="B_4bZ-N2wo-O" # + [markdown] colab_type="text" id="TLpkVSiRwo-T" # # EJERCICIO (E4): Muestre un histograma de tiempo hasta la primera pregunta que clasifique a los usuarios en grupos de número de días desde que se crea el usuario hasta la primera pregunta, considerando el último periodo como "una semana o más"
sql/sesion2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import json
import os
import matplotlib.pyplot as plt

from steves_utils.summary_utils import (
    get_experiments_from_path
)

from steves_utils.utils_v2 import (
    get_experiments_base_path
)

# +
experiments_to_get = [
    "oracle.run2",
    "oracle.run1",
    "cores",
    "metehan",
    "wisig",
    "oracle.run1.framed",
    "oracle.run2.framed",
]

# Map experiment name -> list of trial dicts loaded from disk
experiments = {}
for experiment in experiments_to_get:
    experiments[experiment] = get_experiments_from_path(
        os.path.join(get_experiments_base_path(), "baseline_ptn", experiment)
    )
# -

# Sanity check: every experiment is expected to contain exactly 3 trials
for experiment, trials in experiments.items():
    assert len(trials) == 3

# +
all_trials = pd.DataFrame(columns=[
    "experiment_name",
    "source_val_label_accuracy",
    "source_val_label_loss",
    "target_val_label_accuracy",
    "target_val_label_loss",
    "total_epochs_trained",
    "total_experiment_time_secs",
])

# Collect one row per trial and concatenate once at the end.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat on a list of rows is the supported replacement and also
# avoids quadratic re-copying of the accumulator.
rows = []
for experiment in experiments_to_get:
    for trial in experiments[experiment]:
        f = pd.DataFrame(trial["results"])
        f["experiment_name"] = experiment
        f = f[all_trials.columns]
        f = f.iloc[0] # Unknown why, but pandas is repeating trials for each domain in the trial!
        rows.append(f)

all_trials = pd.concat([all_trials, pd.DataFrame(rows)])
all_trials = all_trials.reset_index(drop=True)
all_trials
# -

# Show that all the trial accuracies are the same, but we have unique times for each
g = all_trials.groupby("experiment_name")[["source_val_label_accuracy", "target_val_label_accuracy", "total_experiment_time_secs"]].nunique()
g["Trial Count"] = all_trials.groupby("experiment_name").size()
g

# +
# %matplotlib inline
plt.rcParams['figure.figsize'] = [20, 6]

g = all_trials.groupby("experiment_name")[["source_val_label_accuracy", "target_val_label_accuracy"]].mean()
g.plot(kind="bar")
# -

g
analysis/baseline_ptn/summary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from __future__ import print_function # if our large test file is available, use it. Otherwise, use file generated from toy_mistis_1_setup_run.ipynb import os test_file = "../toy_mistis_1k_OPS1.nc" filename = test_file if os.path.isfile(test_file) else "mistis.nc" print(filename) # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import openpathsampling as paths # %%time storage = paths.AnalysisStorage(filename) mistis = storage.networks.load(0) mistis.hist_args['max_lambda'] = { 'bin_width' : 0.01, 'bin_range' : (-0.35, 0.5) } mistis.hist_args['pathlength'] = { 'bin_width' : 5, 'bin_range' : (0, 150) } # %%time scheme = storage.schemes[0] scheme.move_summary(storage.steps) scheme.move_summary(storage.steps, 'shooting') scheme.move_summary(storage.steps, 'minus') scheme.move_summary(storage.steps, 'repex') # we need to load the states and the innermost interface for each transition stateA = storage.volumes['A'] stateB = storage.volumes['B'] stateC = storage.volumes['C'] inner_AB = mistis.transitions[(stateA, stateB)].interfaces[0] inner_AC = mistis.transitions[(stateA, stateC)].interfaces[0] inner_BA = mistis.transitions[(stateB, stateA)].interfaces[0] # got these from mistis_flux.ipynb fluxes = {(stateA, inner_AB): 0.0916199741819, (stateA, inner_AC): 0.0915271110694, (stateB, inner_BA): 0.0916882528979} mistis.set_fluxes(fluxes) # %%time rate = mistis.rate_matrix(storage.steps, force=True) rate import pandas as pd pd.options.display.float_format = '{:.3e}'.format rate # this can be copy-pasted into an article print(rate.to_latex(float_format='{:.3e}'.format)) trans = list(mistis.transitions.values())[2] trans_hists = trans.histograms['max_lambda'] print(trans) for hist in trans_hists: cross_prob = 
trans_hists[hist].reverse_cumulative() plt.plot(cross_prob.x, np.log(cross_prob)) plt.plot(trans.tcp.x, np.log(trans.tcp), '-k', lw=2) len(storage.steps) # + #import logging.config #logging.config.fileConfig("../resources/debug_logging.conf", disable_existing_loggers=False) # - n_blocks = 1 # for testing code # NBVAL_SKIP n_blocks = 5 # for real examples resampling = paths.numerics.BlockResampling(storage.steps, n_blocks=n_blocks) rate_df_func = lambda steps: mistis.rate_matrix(steps, force=True) # %%time rates = paths.numerics.ResamplingStatistics(function=rate_df_func, inputs=resampling.blocks) rates.mean rates.std rates.percentile(0) rates.percentile(25) rates.percentile(50) rates.percentile(75) rates.percentile(100)
examples/toy_model_mistis/toy_mistis_3_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: geo-env
#     language: python
#     name: geo-env
# ---

# ### BC report on Indigenous Deaths (2003)
#
# https://nccabc.ca/wp-content/uploads/2015/02/nccabc_aboriginaldeathsincustody.pdf
#
# This pdf had to be manually transcribed- I've only added BC thus far

# +
# FIX(review): np.nan and pd.DataFrame are used below, but numpy/pandas were
# never imported in this notebook, so the first cell raised NameError.
import numpy as np
import pandas as pd

# +
# Manually transcribed records (27 rows, parallel lists, one entry per victim).
# Values are kept verbatim from the transcription, including apparent typos in
# the source data ('Baten', 'Unpspecified', 'Hypotherimia', 'Deparment',
# 'Victorial', 'indegenous', 'chldren') -- they are data, not code, and fixing
# them here would silently change the emitted CSV.
name = ['<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>',
        '<NAME>', '<NAME>', '<NAME>', ]
# '?'-suffixed genders are the transcriber's guesses.
gender = ['m?', 'm?', 'm?',
          'm?', 'm?', 'm?',
          'f?', 'm?', 'm?',
          'f?', 'm?', 'm?',
          'm?', 'f?', 'm?',
          'm?', 'm?', 'm?',
          'm?', 'm?', 'm?',
          'f?', 'm?', 'm?',
          'm?', 'Female', 'Male', ]
age = [np.nan, np.nan, np.nan,
       np.nan, np.nan, np.nan,
       np.nan, 21, np.nan,
       np.nan, np.nan, np.nan,
       np.nan, np.nan, np.nan,
       67, np.nan, np.nan,
       np.nan, np.nan, 33,
       np.nan, np.nan, np.nan,
       np.nan, 37, 9, ]
# m/d/yyyy; 1/1 dates are year-only placeholders (see the summary column).
date = ['11/26/1971', '7/5/1973', '1/1/1977',
        '8/1/1982', '10/28/1983', '11/1/1983',
        '4/4/1991', '3/1/1992', '12/26/1994',
        '1/1/1993', '5/26/1997', '12/6/1998',
        '8/13/1999', '1/1/1999', '1/1/1999',
        '4/18/2000', '7/4/2001', '10/26/2001',
        '11/09/2001', '9/1/2002', '7/22/2003',
        '3/1/2003', '1/1/2003', '1/1/2003',
        '1/1/2003', '3/22/1998', '3/22/1998', ]
city_town = ['Williams Lake', 'Anahim Lake', 'Ft St. James',
             'Ladysmith', 'Nanaimo', 'Vancouver',
             'Port Alberni', 'Vancouver', 'Bella Bella',
             'Duncan', 'Victoria', 'Vancouver',
             'Victoria', 'Vancouver', 'Vancouver',
             'Williams Lake', 'Terrace', 'Port Alberni',
             'Prince George', 'Prince George', 'Prince George',
             'Alert Bay', 'Campbell River', 'Comox',
             'Prince Rupert', 'Tsuu T’ina Nation', 'Tsuu T’ina Nation', ]
cause_death = ['Beating', 'assault', 'In Custody',
               'In Custody', 'Use of Force/In Custody', 'Baten',
               'Unspecified', 'Gunshot', 'Unpspecified',
               'Unpspecified', 'Unspecified', 'Hypotherimia',
               'Use of Force', 'Use of Force', 'Use of Force',
               'In Custody', 'Suicide', 'Unspecified',
               'In Custody', 'Unspecified', 'In Custody',
               'Unspecified', 'Unspecified', 'Unspecified',
               'Unspecified', 'Gunshot', 'Gunshot']
department = ['RCMP', 'RCMP', 'RCMP',
              'RCMP', 'RCMP', 'Vancouver Police Deparment',
              'RCMP', 'Vancouver Police Department', 'RCMP',
              'RCMP', 'Victorial Police Department', 'Vancouver Police Department',
              'Victorial Police Department', 'Vancouver Police Department', 'Vancouver Police Department',
              'RCMP', 'RCMP', 'RCMP',
              'RCMP', 'RCMP', 'RCMP',
              'RCMP', 'RCMP', 'RCMP',
              'RCMP', 'RCMP', 'RCMP', ]
summary = ['', 'Officer chared and acquitted', '*date only year',
           '*date only m/y', '*date only m/y "broken neck/no attn"', 'Mackenzie Inquiry',
           '', '*date only m/y', '',
           '*date only year', '', '*Left to die in alley',
           '', '*date only year', '*date only year',
           '', '', '',
           '', '*date only m/y', '',
           '*date only m/y', '*no date', '*no date',
           '*no date', 'Officers were trying to take her children', 'Officers were trying to take chldren']
prov = ['BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'BC', 'BC',
        'BC', 'AB', 'AB', ]
## Not specified in original record
ethnic_ancestry = ['', '', '',
                   '', '', '',
                   '', '', '',
                   '', "Mi'kmaq", '',
                   'Musgamagw Tsawataineuk', '', '',
                   '', '', '',
                   '', '', '',
                   '', '', '',
                   '', 'Tsuu T’ina', 'Tsuu T’ina', ]
# NOTE(review): `gender` and `age` are transcribed but never added to `Add`,
# so they do not appear in the output CSV -- presumably intentional (most
# values are '?' guesses / NaN), but worth confirming.
Add = {'name': name,
       'date': date,
       'city_town': city_town,
       'cause_death': cause_death,
       'department': department,
       'prov': prov,
       'race': ['indegenous' for x in name],
       'ethnic_ancestry': ethnic_ancestry,
       'summary': summary}
NCCABC = pd.DataFrame(data=Add)
# Synthesise a stable per-row id plus provenance columns, then persist.
NCCABC['id_victim'] = NCCABC.index.astype(str) + '_nccabc'
NCCABC['data_source'] = 'NCCABC'
NCCABC['link'] = 'https://nccabc.ca/wp-content/uploads/2015/02/nccabc_aboriginaldeathsincustody.pdf'
# print(NCCABC)
NCCABC.to_csv('manual_entries/NCCAB.csv')
# for i in C

# +
# Load the manually maintained misc-sources sheet and normalise its columns
# to the shared schema used by the other data sources.
Misc = pd.read_csv('manual_entries/misc_sources.csv', index_col=['id_victim'])
# print(Misc['DEATH CATEGORY'])
Misc = Misc.drop('alias_nickname', axis=1)
Misc = Misc.rename(columns={'DEATH CATEGORY': 'circumstances_of_death', 'Link': 'other_source_link'})
Misc.columns

# +
# Manually corrected locations, keyed by the same id_victim index.
UD = pd.read_csv('manual_entries/UpdatedLocations.csv', index_col=['id_victim'])
print(UD.head())
BC report on Indigenous Deaths (2003).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.6 64-bit
#     language: python
#     name: python3
# ---

# Small linear-programming demos using Google OR-Tools' GLOP solver,
# followed by the classic Stigler diet problem.
from ortools.linear_solver import pywraplp

# NOTE(review): this calls the SWIG flat-module helper `Solver_CreateSolver`;
# the documented spelling is `pywraplp.Solver.CreateSolver('GLOP')` (used in
# the commented-out line further down) -- confirm this helper exists in the
# pinned ortools version.
solver_obj = pywraplp.Solver_CreateSolver('GLOP')

# +
#Define variables
# x, y >= 0, unbounded above.
x = solver_obj.NumVar(0, solver_obj.infinity(), 'x')
y = solver_obj.NumVar(0, solver_obj.infinity(), 'y')

print('Number of variables =', solver_obj.NumVariables())

# +
# Define Constraints

# Constraint 0: x + 2y <= 14.
solver_obj.Add(x + 2 * y <= 14.0)

# Constraint 1: 3x - y >= 0.
solver_obj.Add(3 * x - y >= 0.0)

# Constraint 2: x - y <= 2.
solver_obj.Add(x - y <= 2.0)

print('Number of constraints =', solver_obj.NumConstraints())
# -

# Define Objective Function
# Objective function: 3x + 4y.
solver_obj.Maximize(3 * x + 4 * y)

status = solver_obj.Solve()

if status == pywraplp.Solver.OPTIMAL:
    print('Optimal Solution Found at :')
    print('Objective Value =', solver_obj.Objective().Value())
    print('x = ', x.solution_value())
    print('y = ', y.solution_value())
else:
    print('The problem does not have an optimal solution.')

# +
#Solving for the following LP :
#0 <= a <= 1
#0 <= b <= 2
# a + b <= 2
# Maximize : 3a + b

# solver_obj2 = pywraplp.Solver.CreateSolver('GLOP')
# Legacy constructor form: explicit name + problem type.
solver_obj2 = pywraplp.Solver('GLOPSolver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)

a = solver_obj2.NumVar(0, 1, 'a')
b = solver_obj2.NumVar(0, 2, 'b')

solver_obj2.Add(a + b <= 2, name = 'constraint0')

solver_obj2.Maximize(3 * a + b)

status2 = solver_obj2.Solve()

print('Solution : ')
if status2 == pywraplp.Solver.OPTIMAL:
    print('Objective value =', solver_obj2.Objective().Value())
    print('a =', a.solution_value())
    print('b =', b.solution_value())
else:
    print('Problem does not have an optimal solution')


# +
#STIGLER DIET PROBLEM
def Stigler_Problem():
    """Solve the Stigler diet problem with GLOP.

    Minimises total daily food cost subject to 9 minimum-nutrient
    constraints over 77 commodities (1939 prices), then prints the optimal
    annual purchase amount per selected food and the optimal annual price.
    Decision variable j is the daily dollar amount spent on food j; each
    data row's columns 3..11 give nutrients per dollar of that food.
    """
    #Prep the data:
    # [nutrient name, daily minimum intake]
    nutrients = [
        ['Calories (kcal)', 3],
        ['Protein (g)', 70],
        ['Calcium (g)', 0.8],
        ['Iron (mg)', 12],
        ['Vitamin A (KIU)', 5],
        ['Thiamine - Vitamin B1 (mg)', 1.8],
        ['Riboflavin - Vitamin B2 (mg)', 2.7],
        ['Niacin (mg)', 18],
        ['Ascorbic Acid - Vitamin C (mg)', 75]
    ]

    #PREP DATA IN THE FORMAT :
    # Commodity, Unit, 1939 price (cents), Calories (kcal), Protein (g),
    # Calcium (g), Iron (mg), Vitamin A (KIU), Vitamin B1 (mg), Vitamin B2 (mg),
    # Niacin (mg), Vitamin C (mg)
    data = [
        [
            'Wheat Flour (Enriched)', '10 lb.', 36, 44.7, 1411, 2, 365, 0, 55.4,
            33.3, 441, 0
        ],
        ['Macaroni', '1 lb.', 14.1, 11.6, 418, 0.7, 54, 0, 3.2, 1.9, 68, 0],
        [
            'Wheat Cereal (Enriched)', '28 oz.', 24.2, 11.8, 377, 14.4, 175, 0,
            14.4, 8.8, 114, 0
        ],
        ['Corn Flakes', '8 oz.', 7.1, 11.4, 252, 0.1, 56, 0, 13.5, 2.3, 68, 0],
        [
            'Corn Meal', '1 lb.', 4.6, 36.0, 897, 1.7, 99, 30.9, 17.4, 7.9, 106,
            0
        ],
        [
            'Hominy Grits', '24 oz.', 8.5, 28.6, 680, 0.8, 80, 0, 10.6, 1.6, 110,
            0
        ],
        ['Rice', '1 lb.', 7.5, 21.2, 460, 0.6, 41, 0, 2, 4.8, 60, 0],
        ['Rolled Oats', '1 lb.', 7.1, 25.3, 907, 5.1, 341, 0, 37.1, 8.9, 64, 0],
        [
            'White Bread (Enriched)', '1 lb.', 7.9, 15.0, 488, 2.5, 115, 0, 13.8,
            8.5, 126, 0
        ],
        [
            'Whole Wheat Bread', '1 lb.', 9.1, 12.2, 484, 2.7, 125, 0, 13.9, 6.4,
            160, 0
        ],
        ['Rye Bread', '1 lb.', 9.1, 12.4, 439, 1.1, 82, 0, 9.9, 3, 66, 0],
        ['Pound Cake', '1 lb.', 24.8, 8.0, 130, 0.4, 31, 18.9, 2.8, 3, 17, 0],
        ['Soda Crackers', '1 lb.', 15.1, 12.5, 288, 0.5, 50, 0, 0, 0, 0, 0],
        ['Milk', '1 qt.', 11, 6.1, 310, 10.5, 18, 16.8, 4, 16, 7, 177],
        [
            'Evaporated Milk (can)', '14.5 oz.', 6.7, 8.4, 422, 15.1, 9, 26, 3,
            23.5, 11, 60
        ],
        ['Butter', '1 lb.', 30.8, 10.8, 9, 0.2, 3, 44.2, 0, 0.2, 2, 0],
        ['Oleomargarine', '1 lb.', 16.1, 20.6, 17, 0.6, 6, 55.8, 0.2, 0, 0, 0],
        ['Eggs', '1 doz.', 32.6, 2.9, 238, 1.0, 52, 18.6, 2.8, 6.5, 1, 0],
        [
            'Cheese (Cheddar)', '1 lb.', 24.2, 7.4, 448, 16.4, 19, 28.1, 0.8,
            10.3, 4, 0
        ],
        ['Cream', '1/2 pt.', 14.1, 3.5, 49, 1.7, 3, 16.9, 0.6, 2.5, 0, 17],
        [
            'Peanut Butter', '1 lb.', 17.9, 15.7, 661, 1.0, 48, 0, 9.6, 8.1, 471,
            0
        ],
        ['Mayonnaise', '1/2 pt.', 16.7, 8.6, 18, 0.2, 8, 2.7, 0.4, 0.5, 0, 0],
        ['Crisco', '1 lb.', 20.3, 20.1, 0, 0, 0, 0, 0, 0, 0, 0],
        ['Lard', '1 lb.', 9.8, 41.7, 0, 0, 0, 0.2, 0, 0.5, 5, 0],
        [
            'S<NAME>', '1 lb.', 39.6, 2.9, 166, 0.1, 34, 0.2, 2.1, 2.9, 69, 0
        ],
        ['Round Steak', '1 lb.', 36.4, 2.2, 214, 0.1, 32, 0.4, 2.5, 2.4, 87, 0],
        ['Rib Roast', '1 lb.', 29.2, 3.4, 213, 0.1, 33, 0, 0, 2, 0, 0],
        ['Chuck Roast', '1 lb.', 22.6, 3.6, 309, 0.2, 46, 0.4, 1, 4, 120, 0],
        ['Plate', '1 lb.', 14.6, 8.5, 404, 0.2, 62, 0, 0.9, 0, 0, 0],
        [
            'Liver (Beef)', '1 lb.', 26.8, 2.2, 333, 0.2, 139, 169.2, 6.4, 50.8,
            316, 525
        ],
        ['Leg of Lamb', '1 lb.', 27.6, 3.1, 245, 0.1, 20, 0, 2.8, 3.9, 86, 0],
        [
            'Lamb Chops (Rib)', '1 lb.', 36.6, 3.3, 140, 0.1, 15, 0, 1.7, 2.7,
            54, 0
        ],
        ['Pork Chops', '1 lb.', 30.7, 3.5, 196, 0.2, 30, 0, 17.4, 2.7, 60, 0],
        [
            'Pork Loin Roast', '1 lb.', 24.2, 4.4, 249, 0.3, 37, 0, 18.2, 3.6,
            79, 0
        ],
        ['Bacon', '1 lb.', 25.6, 10.4, 152, 0.2, 23, 0, 1.8, 1.8, 71, 0],
        ['Ham, smoked', '1 lb.', 27.4, 6.7, 212, 0.2, 31, 0, 9.9, 3.3, 50, 0],
        ['Salt Pork', '1 lb.', 16, 18.8, 164, 0.1, 26, 0, 1.4, 1.8, 0, 0],
        [
            'Roasting Chicken', '1 lb.', 30.3, 1.8, 184, 0.1, 30, 0.1, 0.9, 1.8,
            68, 46
        ],
        ['Veal Cutlets', '1 lb.', 42.3, 1.7, 156, 0.1, 24, 0, 1.4, 2.4, 57, 0],
        [
            'Salmon, Pink (can)', '16 oz.', 13, 5.8, 705, 6.8, 45, 3.5, 1, 4.9,
            209, 0
        ],
        ['Apples', '1 lb.', 4.4, 5.8, 27, 0.5, 36, 7.3, 3.6, 2.7, 5, 544],
        ['Bananas', '1 lb.', 6.1, 4.9, 60, 0.4, 30, 17.4, 2.5, 3.5, 28, 498],
        ['Lemons', '1 doz.', 26, 1.0, 21, 0.5, 14, 0, 0.5, 0, 4, 952],
        ['Oranges', '1 doz.', 30.9, 2.2, 40, 1.1, 18, 11.1, 3.6, 1.3, 10, 1998],
        ['Green Beans', '1 lb.', 7.1, 2.4, 138, 3.7, 80, 69, 4.3, 5.8, 37, 862],
        ['Cabbage', '1 lb.', 3.7, 2.6, 125, 4.0, 36, 7.2, 9, 4.5, 26, 5369],
        ['Carrots', '1 bunch', 4.7, 2.7, 73, 2.8, 43, 188.5, 6.1, 4.3, 89, 608],
        ['Celery', '1 stalk', 7.3, 0.9, 51, 3.0, 23, 0.9, 1.4, 1.4, 9, 313],
        ['Lettuce', '1 head', 8.2, 0.4, 27, 1.1, 22, 112.4, 1.8, 3.4, 11, 449],
        ['Onions', '1 lb.', 3.6, 5.8, 166, 3.8, 59, 16.6, 4.7, 5.9, 21, 1184],
        [
            'Potatoes', '15 lb.', 34, 14.3, 336, 1.8, 118, 6.7, 29.4, 7.1, 198,
            2522
        ],
        ['Spinach', '1 lb.', 8.1, 1.1, 106, 0, 138, 918.4, 5.7, 13.8, 33, 2755],
        [
            'Sweet Potatoes', '1 lb.', 5.1, 9.6, 138, 2.7, 54, 290.7, 8.4, 5.4,
            83, 1912
        ],
        [
            'Peaches (can)', 'No. 2 1/2', 16.8, 3.7, 20, 0.4, 10, 21.5, 0.5, 1,
            31, 196
        ],
        [
            'Pears (can)', 'No. 2 1/2', 20.4, 3.0, 8, 0.3, 8, 0.8, 0.8, 0.8, 5,
            81
        ],
        [
            'Pineapple (can)', 'No. 2 1/2', 21.3, 2.4, 16, 0.4, 8, 2, 2.8, 0.8,
            7, 399
        ],
        [
            'Asparagus (can)', 'No. 2', 27.7, 0.4, 33, 0.3, 12, 16.3, 1.4, 2.1,
            17, 272
        ],
        [
            'Green Beans (can)', 'No. 2', 10, 1.0, 54, 2, 65, 53.9, 1.6, 4.3,
            32, 431
        ],
        [
            'Pork and Beans (can)', '16 oz.', 7.1, 7.5, 364, 4, 134, 3.5, 8.3,
            7.7, 56, 0
        ],
        ['Corn (can)', 'No. 2', 10.4, 5.2, 136, 0.2, 16, 12, 1.6, 2.7, 42, 218],
        [
            'Peas (can)', 'No. 2', 13.8, 2.3, 136, 0.6, 45, 34.9, 4.9, 2.5, 37,
            370
        ],
        [
            'Tomatoes (can)', 'No. 2', 8.6, 1.3, 63, 0.7, 38, 53.2, 3.4, 2.5,
            36, 1253
        ],
        [
            'Tomato Soup (can)', '10 1/2 oz.', 7.6, 1.6, 71, 0.6, 43, 57.9, 3.5,
            2.4, 67, 862
        ],
        [
            '<NAME>', '1 lb.', 15.7, 8.5, 87, 1.7, 173, 86.8, 1.2, 4.3, 55, 57
        ],
        [
            '<NAME>', '1 lb.', 9, 12.8, 99, 2.5, 154, 85.7, 3.9, 4.3, 65, 257
        ],
        [
            '<NAME>', '15 oz.', 9.4, 13.5, 104, 2.5, 136, 4.5, 6.3, 1.4, 24, 136
        ],
        [
            '<NAME>', '1 lb.', 7.9, 20.0, 1367, 4.2, 345, 2.9, 28.7, 18.4, 162, 0
        ],
        [
            '<NAME>', '1 lb.', 8.9, 17.4, 1055, 3.7, 459, 5.1, 26.9, 38.2, 93, 0
        ],
        [
            '<NAME>', '1 lb.', 5.9, 26.9, 1691, 11.4, 792, 0, 38.4, 24.6, 217, 0
        ],
        ['Coffee', '1 lb.', 22.4, 0, 0, 0, 0, 0, 4, 5.1, 50, 0],
        ['Tea', '1/4 lb.', 17.4, 0, 0, 0, 0, 0, 0, 2.3, 42, 0],
        ['Cocoa', '8 oz.', 8.6, 8.7, 237, 3, 72, 0, 2, 11.9, 40, 0],
        ['Chocolate', '8 oz.', 16.2, 8.0, 77, 1.3, 39, 0, 0.9, 3.4, 14, 0],
        ['Sugar', '10 lb.', 51.7, 34.9, 0, 0, 0, 0, 0, 0, 0, 0],
        ['Corn Syrup', '24 oz.', 13.7, 14.7, 0, 0.5, 74, 0, 0, 0, 5, 0],
        ['Molasses', '18 oz.', 13.6, 9.0, 0, 10.3, 244, 0, 1.9, 7.5, 146, 0],
        [
            'Strawberry Preserves', '1 lb.', 20.5, 6.4, 11, 0.4, 7, 0.2, 0.2,
            0.4, 3, 0
        ],
    ]

    solver = pywraplp.Solver('StiglerSolver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)

    #Array of food LP variables, which indicate the amount to spend for each food X.
    foods = [solver.NumVar(0.0, solver.infinity(), item[0]) for item in data]
    print('Number of variables =', solver.NumVariables())

    #Creating constraints, one per nutrient to specify the minimum intake.
    constraints = []
    for i, nutrient in enumerate(nutrients):
        constraints.append(solver.Constraint(nutrient[1], solver.infinity()))  #Range from min specification to infinity
        for j, item in enumerate(data):
            constraints[i].SetCoefficient(foods[j], item[i + 3])
            #Add coefficient for each food variable j,
            #add the corresponding coefficient i+3 (nutrients start at column 3)

    #Defining objective function: minimise total daily cost (sum of dollar amounts).
    objective = solver.Objective()
    for food in foods:
        objective.SetCoefficient(food, 1)  #coeff = 1 for each food item
    objective.SetMinimization()

    status = solver.Solve()

    # Per-nutrient totals delivered by the optimal diet (computed but not printed).
    nutrients_result = [0] * len(nutrients)
    if status == pywraplp.Solver.OPTIMAL:
        print('OPTIMAL ANNUAL FOOD PURCHASE AMOUNT (IN DOLLARS) :')
        # print('ANNUAL FOODS : ')
        for i, food in enumerate(foods):
            if food.solution_value() > 0:
                # Scale the daily spend to an annual figure.
                print('{}: ${}'.format(data[i][0], 365.0 * food.solution_value()))
                for j, _ in enumerate(nutrients):
                    nutrients_result[j] += data[i][j+3] * food.solution_value()
        print('Optimal annual price: ${:.4f}'.format(365.0 * objective.Value()))
# -

Stigler_Problem()
LP_solvers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import seaborn as sns import gspread import matplotlib.pyplot as plt from oauth2client.service_account import ServiceAccountCredentials import pandas as pd # %matplotlib inline # + scopes = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/spreadsheets"] goog_creds = ServiceAccountCredentials.from_json_keyfile_name( 'airy.json', scopes=scopes) gc = gspread.authorize(goog_creds) sheet = gc.open('Air_Quality') work_sheet = sheet.worksheet('sheet') # - values = work_sheet.get_all_values() dat = pd.DataFrame.from_records(data=values[2:], columns=['Time Stamp', 'Dust Density']) dat['Time Stamp'] = dat['Time Stamp'].apply(lambda x: pd.to_datetime(x, unit='ms')) dat.head() sns.set_style("darkgrid") fig, ax = plt.subplots(figsize=(30, 8)) ax.plot(dat['Time Stamp'], dat['Dust Density'], )
.ipynb_checkpoints/dust_analysis-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Stationary PDE
#
# written by <NAME>()

# ## Introduction
# The differential equation dependant on more than one independant variable is known as partial differential equation. Now we consider the Stationary heat Equation as an example
#
# <p style="text-align: center;">$T_{xx}+T_{yy} = -2\pi^2sin(\pi x)sin(\pi y)$</p>
#
# on a unit square domain ]0:1[$^2$ with the temperature T(x,y) and homogeneous Dirichlet Boundary Conditions.
#
# <p style="text-align: center;">$T(x,y) = 0$ for all (x,y) in $\partial$]0:1[$^2$</p>

# The boundary value problem has an analytical solution
#
# <p style="text-align: center;">$T(x,y) = sin(\pi x) sin (\pi y)$</p>
#
# (NOTE(review): the original text stated $2\pi sin(\pi x)sin(\pi y)$, but
# substituting $A\,sin(\pi x)sin(\pi y)$ into the PDE gives $-2\pi^2 A\,sin\,sin$,
# so $A = 1$ for the right-hand side above.)
#
# Implement a finite second order discretisation and solve the discretisation through a gauss seidel solver. Let the matrix be of dimension $N+2*N+2$ where N is the number of cells

#import the required modules
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse as sp
from mpl_toolkits.mplot3d import Axes3D

# We will now write 2 functions and compare the evaluation of a full matrix and sparse matrix for the same discretisation.Nx and Ny will be the no of cell division on the x axis and y axis respectively


# +
def fullmatrix(Nx, Ny):
    """Assemble the dense 5-point finite-difference Laplacian on ]0,1[^2.

    The unknowns are the Nx*Ny interior points of an (Nx+2) x (Ny+2) grid,
    numbered row by row (x varies fastest). Returns the (Nx*Ny, Nx*Ny)
    matrix A such that A @ t approximates T_xx + T_yy at the interior
    points; homogeneous Dirichlet boundary values drop out of the stencil.

    FIX(review): the original Matlab-style translation
      * used '^' (bitwise XOR in Python, a TypeError on floats) instead of '**',
      * kept 1-based boundary tests (i <= Nx, i >= (Ny-1)*Nx+1, mod == 1 / 0)
        that mis-placed the edge rows/columns in 0-based indexing (and indexed
        out of the row block, or wrapped to index -1 for Nx == 1),
      * never returned A.
    """
    A = np.zeros((Nx * Ny, Nx * Ny))
    hx = 1.0 / (Nx + 1)   # grid spacing in x
    hy = 1.0 / (Ny + 1)   # grid spacing in y
    cx = 1.0 / hx**2      # coupling to horizontal (x) neighbours
    cy = 1.0 / hy**2      # coupling to vertical (y) neighbours
    for i in range(Nx * Ny):
        A[i, i] = -2.0 * (cx + cy)
        col = i % Nx      # x-position of unknown i within its grid row
        row = i // Nx     # y-position (grid row) of unknown i
        if col > 0:
            A[i, i - 1] = cx      # left neighbour
        if col < Nx - 1:
            A[i, i + 1] = cx      # right neighbour
        if row > 0:
            A[i, i - Nx] = cy     # neighbour below
        if row < Ny - 1:
            A[i, i + Nx] = cy     # neighbour above
    return A


# +
# TO DO find an equivalency of Sparse(A) in Matlab
# NOTE(review): Matlab's sparse(A) corresponds to sp.csr_matrix(A) (or
# sp.csc_matrix(A)); scipy.sparse is already imported above as sp.

# +
# gauss seidel solver.
# we use a stencil method here instead of the matrix form
def GaussSeidel(b, Nx, Ny):
    """Solve the discretised Poisson problem T_xx + T_yy = b by Gauss-Seidel.

    b is an (Ny+2, Nx+2) right-hand-side grid; only its interior entries are
    read. Returns the (Ny+2, Nx+2) solution grid with homogeneous Dirichlet
    boundary (boundary entries stay 0). Sweeps lexicographically until the
    scaled residual 2-norm falls below 1e-5 or 1e5 sweeps have run.

    FIX(review): the original translation
      * used '^' (bitwise XOR) instead of '**' for the 1/h^2 factors,
      * zeroed the boundary with 1-based indices (x[Ny+2, :] and x[:, Nx+2]
        are out of bounds; row/column 0 were never touched),
      * swapped hx and hy between the x- and y-neighbour terms (only harmless
        when Nx == Ny),
      * accumulated an unsquared residual before taking sqrt,
      * returned nothing.
    """
    # The array is created zero-filled, so the Dirichlet boundary needs no
    # explicit assignment.
    x = np.zeros((Ny + 2, Nx + 2))
    hx = 1.0 / (Nx + 1)
    hy = 1.0 / (Ny + 1)
    cx = 1.0 / hx**2          # horizontal (x-direction, index j) coupling
    cy = 1.0 / hy**2          # vertical (y-direction, index i) coupling
    diag = -2.0 * (cx + cy)   # diagonal of the 5-point stencil
    res = 1.0
    n = 0
    while res > 1e-5 and n < 1e5:
        # One lexicographic Gauss-Seidel sweep over the interior points.
        for i in range(1, Ny + 1):
            for j in range(1, Nx + 1):
                x[i, j] = (b[i, j]
                           - cx * (x[i, j - 1] + x[i, j + 1])
                           - cy * (x[i - 1, j] + x[i + 1, j])) / diag
        # Scaled 2-norm of the residual r = b - A x.
        res = 0.0
        for i in range(1, Ny + 1):
            for j in range(1, Nx + 1):
                r = (b[i, j]
                     - cx * (x[i, j - 1] + x[i, j + 1])
                     - cy * (x[i - 1, j] + x[i + 1, j])
                     - diag * x[i, j])
                res += r * r
        res = np.sqrt(res / (Nx * Ny))
        n += 1
    return x


# +
#TO DO Plot a surface plot in 3d
Notebooks/5.Stationary PDEs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Author(s): <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# -

# <a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>

# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/figures//chapter4_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# # Cloning the pyprobml repo

# !git clone https://github.com/probml/pyprobml
# %cd pyprobml/scripts

# # Installing required software (This may take few minutes)

# !apt-get install octave -qq > /dev/null
# !apt-get install liboctave-dev -qq > /dev/null

# +
# %%capture
# %load_ext autoreload
# %autoreload 2
DISCLAIMER = 'WARNING : Editing in VM - changes lost after reboot!!'
from google.colab import files


def interactive_script(script, i=True):
    """Run a pyprobml figure script, optionally opening it for editing.

    When i is True, prepend a one-line disclaimer banner to the script file
    (if not already present), open it in the Colab file viewer, and run it;
    when False, just run it.

    NOTE(review): this notebook only works inside Colab/IPython -- the
    `# %run $script` lines are jupytext-encoded magics, so as plain Python
    the `else:` branch has an empty body and the file will not import
    outside a notebook environment.
    """
    if i:
        s = open(script).read()
        # Only prepend the banner once; the first line is compared against it.
        if not s.split('\n', 1)[0]=="## "+DISCLAIMER:
            open(script, 'w').write(
                f'## {DISCLAIMER}\n' + '#' * (len(DISCLAIMER) + 3) + '\n\n' + s)
        files.view(script)
        # %run $script
    else:
        # %run $script
# -

# ## Figure 4.1:
# (a) Covariance matrix for the features in the iris dataset from \cref sec:iris . (b) Correlation matrix. We only show the lower triangle, since the matrix is symmetric and has a unit diagonal. Compare this to \cref fig:irisPairs .
# Figure(s) generated by [iris_cov_mat.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_cov_mat.py)

interactive_script("iris_cov_mat.py")

# ## Figure 4.2:
# Illustration of various loss functions for binary classification. The horizontal axis is the margin $z=\cc@accent "707E y \eta $, the vertical axis is the loss. The log loss uses log base 2.
# Figure(s) generated by [hinge_loss_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/hinge_loss_plot.py)

interactive_script("hinge_loss_plot.py")

# ## Figure 4.3:
# Estimating a covariance matrix in $D=50$ dimensions using $N \in \ 100, 50, 25\ $ samples. We plot the eigenvalues in descending order for the true covariance matrix (solid black), the MLE (dotted blue) and the MAP estimate (dashed red), using \cref eqn:covShrinkLedoit with $\lambda =0.9$. We also list the condition number of each matrix in the legend. We see that the MLE is often poorly conditioned, but the MAP estimate is numerically well behaved. Adapted from Figure 1 of <a href='#Schafer05'>[SS05]</a> .
# Figure(s) generated by [shrinkcov_plots.py](https://github.com/probml/pyprobml/blob/master/scripts/shrinkcov_plots.py)

interactive_script("shrinkcov_plots.py")

# ## Figure 4.4:
# (a-c) Ridge regression applied to a degree 14 polynomial fit to 21 datapoints. (d) MSE vs strength of regularizer. The degree of regularization increases from left to right, so model complexity decreases from left to right.
# Figure(s) generated by [linreg_poly_ridge.py](https://github.com/probml/pyprobml/blob/master/scripts/linreg_poly_ridge.py)

interactive_script("linreg_poly_ridge.py")

# ## Figure 4.5:
# Schematic of 5-fold cross validation.

# ## Figure 4.6:
# Ridge regression is applied to a degree 14 polynomial fit to 21 datapoints shown in \cref fig:polyfitRidge for different values of the regularizer $\lambda $. The degree of regularization increases from left to right, so model complexity decreases from left to right. (a) MSE on train (blue) and test (red) vs $\qopname o log (\lambda )$. (b) 5-fold cross-validation estimate of test MSE in red; error bars are standard error of the mean. In black we plot the negative log evidence $-\qopname o log p( \mathcal D |\lambda )$. Both curves are scaled to lie in $[0,1]$.
# Figure(s) generated by [polyfitRidgeModelSel.m](https://github.com/probml/pmtk3/blob/master/demos/polyfitRidgeModelSel.m)

# This figure is produced by an Octave (Matlab) demo, not a Python script.
# !octave -W polyfitRidgeModelSel.m >> _

# ## Figure 4.7:
# Performance of a text classifier (a neural network applied to a bag of word embeddings using average pooling) vs number of training epochs on the IMDB movie sentiment dataset. Blue = train, red = validation. (a) Cross entropy loss. Early stopping is triggered at about epoch 25. (b) Classification accuracy.
# Figure(s) generated by [imdb_mlp_bow_tf.py](https://github.com/probml/pyprobml/blob/master/scripts/imdb_mlp_bow_tf.py)

interactive_script("imdb_mlp_bow_tf.py")

# ## Figure 4.8:
# MSE on training and test sets vs size of training set, for data generated from a degree 2 polynomial with Gaussian noise of variance $\sigma ^2=4$. We fit polynomial models of varying degree to this data.
# Figure(s) generated by [linreg_poly_vs_n.py](https://github.com/probml/pyprobml/blob/master/scripts/linreg_poly_vs_n.py)

interactive_script("linreg_poly_vs_n.py")

# ## Figure 4.9:
# Illustration of exponentially-weighted moving average with and without bias correction. (a) Short memory: $\beta =0.9$. (a) Long memory: $\beta =0.99$.
# Figure(s) generated by [ema_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/ema_demo.py)

interactive_script("ema_demo.py")

# ## References:
#  <a name='Schafer05'>[SS05]</a> <NAME> and <NAME>. "A shrinkage approach to large-scale covariance matrix estimation and implications for functional genomics". In: Statist. Appl. Genet. Mol. Biol (2005).
#
#
notebooks/figures/chapter4_figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/shannonpowelson/global-meat-consumption-analysis/blob/main/2021_04_12_meat_consumption_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="QV6VaN-knKNT"
# # Global Meat Consumption Analysis

# + [markdown] id="pZpiz54OnAGk"
# # Import Libraries

# + [markdown] id="-rUNARp4nToJ"
# First we will import the libraries needed to run our code in later sections

# + id="u9yjZUd5Unlv"
#import libraries
# NOTE(review): this notebook depends on the Colab runtime (google.colab) and
# remote CSVs -- it will not run as a plain script outside Colab.

#data visualization
import matplotlib.pyplot as plt
import plotly.express as px

#data analysis
import pandas as pd

#import files to our computer
from google.colab import files

# + [markdown] id="cmG63WGSnh6A"
# # Beef Consumption Over Time

# + [markdown] id="fOeRJlaVagD_"
# First we will import data from this [github repository](https://github.com/shannonpowelson/global-meat-consumption-analysis/blob/main/meat_consumption_worldwide.csv)

# + id="-i6hFnyJbtiC"
#import csv of global meat consumption data from our github repository (this data is originally from Kaggle)
#save the csv link
meat = "https://raw.githubusercontent.com/shannonpowelson/global-meat-consumption-analysis/main/meat_consumption_worldwide.csv"

# + [markdown] id="LBtyPQroo3FL"
# Create the initial data frame

# + id="5PMdwy4Xb0it"
#Define the data frame
df_meat = pd.read_csv(meat)

# + [markdown] id="GIw5P8dao7hs"
# Create other data frames needed for the analysis

# + id="6kpHaPEucu2i"
#Make a new data frame containing meat consumption measured only in kilograms per capita
df_meat = df_meat[df_meat['MEASURE']=='KG_CAP']

# + id="5nXx7wczngEk"
#Make a new data frame containing only beef consumption measured in kilograms per capita
df_meat_beef = df_meat[df_meat['SUBJECT']=='BEEF']

# + id="pIO2dLS5pnQw"
#Make a new data frame for beef consumption in USA measured in kilograms per capita
df_beef_USA = df_meat_beef[df_meat_beef['LOCATION']=='USA']

# + id="d-G16qRdytRI"
#Make a new data frame for beef consumption in Australia in kilograms per capita
df_beef_AUS = df_meat_beef[df_meat_beef['LOCATION']=='AUS']

# + id="Z6IE0swVy6Pz"
#Make a new data frame for beef consumption in India in kilograms per capita
df_beef_IND = df_meat_beef[df_meat_beef['LOCATION']=='IND']

# + id="0blaiicezJiG"
#Make a new data frame for beef consumption in Bangladesh in kilograms per capita
df_beef_BGD = df_meat_beef[df_meat_beef['LOCATION']=='BGD']

# + id="QlbUoWj63WGq"
#Make a new data frame for beef consumption in Israel in kilograms per capita
df_beef_ISR = df_meat_beef[df_meat_beef['LOCATION']=='ISR']

# + id="yDkjeQdY3b1J"
#Make a new data frame for beef consumption in Mozambique in kilograms per capita
df_beef_MOZ = df_meat_beef[df_meat_beef['LOCATION']=='MOZ']

# + [markdown] id="7tIGqe1_pt1g"
# Graph the beef consumption from 1991 to 2026 for these six countries

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="6UkUek42zRZb" outputId="45c7fd7d-5aab-4fc7-b6fa-4b15a6316ac5"
least_and_most_consumption = plt.figure() #This will be used later to save the image of the graph
# NOTE(review): passing kwargs such as title= to plt.gca() was deprecated and
# removed in matplotlib >= 3.6 -- verify the pinned matplotlib version, or
# switch to ax.set_title(...). Same pattern recurs in the other plot cells.
ax = plt.gca(title = "Beef Consumption by Country from 1991 to 2026") #This defines the axis in order to plot multiple lines
df_beef_USA.plot(x = 'TIME', y = 'Value', kind='line', label ='United States', ax=ax) #USA beef consumption
df_beef_AUS.plot(x = 'TIME', y='Value', kind='line', label = 'Australia', ax=ax) #AUS beef consumption
df_beef_IND.plot(x='TIME', y='Value', kind='line', label = 'India', ax=ax) #IND beef consumption
df_beef_BGD.plot(x='TIME', y='Value', kind='line', label = 'Bangladesh', ax=ax) #BGD beef consumption
df_beef_ISR.plot(x='TIME', y='Value', kind='line', label='Israel', ax=ax) #ISR beef consumption
df_beef_MOZ.plot(x='TIME', y='Value', kind='line', label='Mozambique', ax=ax) #MOZ beef consumption
plt.xlabel('Year') #This labels the x axis
plt.ylabel('Beef Consumption (kilogram/capita)') #this labels the y axis

# + [markdown] id="0V_ADVQZqUSj"
# Save the graph as an image

# + id="94BPW-BL111K"
#saves the figure
least_and_most_consumption.savefig('least_and_most_consumption.png')

# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="RGGrQx_063Mi" outputId="c9bbc67d-d817-4404-f810-6c7510d4643a"
#downloads the figure as an image onto the computer
files.download('least_and_most_consumption.png')

# + [markdown] id="zxCXDdsoqkFE"
# Graph the beef consumption from 1991 to 2026 for the three countries that consume the least amount of meat

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="7qUtMZ822Uv1" outputId="38498c63-e8e8-44a5-8b8e-80d36cbc9d72"
least_consumption = plt.figure() #This will be used later to save the image of the graph
ax = plt.gca(title = "Beef Consumption by Country from 1991 to 2026") #This defines the axis in order to plot multiple lines
df_beef_IND.plot(x='TIME', y='Value', kind='line', label = 'India', ax=ax) #IND beef consumption
df_beef_BGD.plot(x='TIME', y='Value', kind='line', label = 'Bangladesh', ax=ax) #BGD beef consumption
df_beef_MOZ.plot(x='TIME', y='Value', kind='line', label = 'Mozambique', ax=ax) #MOZ beef consumption
plt.xlabel('Year') #This labels the x axis
plt.ylabel('Beef Consumption (kilogram/capita)') #This labels the y axis

# + [markdown] id="3n9uZ7XvrL4Y"
# Save the graph as an image

# + id="vBsGzgdu2Xcx"
#saves the figure
least_consumption.savefig('least_consumption.png')

# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="B-Ld-i-J7ZpW" outputId="c8990288-8f30-4384-8ec3-632b94c8240a"
#downloads the figure as an image onto the computer
files.download('least_consumption.png')

# + [markdown] id="aadkqEuVrWdf"
# Graph the beef consumption from 1991 to 2026 for the three countries that consume the most amount of meat

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="oVuHVhlV2hcd" outputId="4bc7803c-7176-4c66-88eb-7fab79649664"
most_consumption = plt.figure() #This will be used later to save the image of the graph
ax = plt.gca(title = "Beef Consumption by Country from 1991 to 2026") #This defines the axis in order to plot multiple lines
df_beef_USA.plot(x = 'TIME', y = 'Value', kind='line', label ='United States', ax=ax) #USA beef consumption
df_beef_AUS.plot(x = 'TIME', y='Value', kind='line', label = 'Australia', ax=ax) #AUS beef consumption
df_beef_ISR.plot(x='TIME', y='Value', kind='line', label='Israel', ax=ax) #ISR beef consumption
plt.xlabel('Year') #This labels the x axis
plt.ylabel('Beef Consumption (kilogram/capita)') #This labels the y axis

# + [markdown] id="a25xX5L1r2o4"
# Save the graph as an image

# + id="fDo42F3i2kkW"
#saves the figure
most_consumption.savefig('most_consumption.png')

# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="leA5CYgh4K91" outputId="0a38ecb1-4685-44ad-a829-77d90cc12f30"
#downloads the figure as an image onto the computer
files.download('most_consumption.png')

# + [markdown] id="z_YMSZxSsG-q"
# # Cardiovascular Disease Deaths Over Time

# + [markdown] id="x0XLypEusZE6"
# First we will import the data from this [github repository](https://github.com/shannonpowelson/global-meat-consumption-analysis/blob/main/cardiovascular-disease-death-rates.csv)

# + id="HcGzqWEvSANj"
#import csv of cardiovascular disease death rates data from our github repository (this data is originally from Our World in Data)
#save the csv link
heart = 'https://raw.githubusercontent.com/shannonpowelson/global-meat-consumption-analysis/main/cardiovascular-disease-death-rates.csv'

# + [markdown] id="D2ImM3PMs6UM"
# Create the initial data frame

# + id="J8VN3DVKU5ce"
#Define the data frame
df_heart = pd.read_csv(heart)

# + [markdown] id="rU-sfosBtNkw"
# Create other data frames needed for the analysis

# + id="rzf5-_KXVECg"
#Make a new data frame containing the cardiovascular disease deaths for the USA
df_heart_USA = df_heart[df_heart['Code']=='USA']

# + id="j28V_t3qWtGF"
#Make a new data frame containing the cardiovascular disease deaths for Australia
df_heart_AUS = df_heart[df_heart['Code']=='AUS']

# + id="CyOzOXQ3WvtM"
#Make a new data frame containing the cardiovascular disease deaths for Israel
df_heart_ISR = df_heart[df_heart['Code']=='ISR']

# + id="QlpZ-JpiWzpG"
#Make a new data frame containing the cardiovascular disease deaths for India
df_heart_IND = df_heart[df_heart['Code']=='IND']

# + id="TaEoAXRNW2Nd"
#Make a new data frame containing the cardiovascular disease deaths for Mozambique
df_heart_MOZ = df_heart[df_heart['Code']=='MOZ']

# + id="ExyHhYuiW5iI"
#Make a new data frame containing the cardiovascular disease deaths for Bangladesh
df_heart_BGD = df_heart[df_heart['Code']=='BGD']

# + [markdown] id="lgNhs5kMt5d_"
# Graph the cardiovascular disease deaths from 1990 to 2017 for these six countries

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="Z0-2UgQwXF_m" outputId="71337bce-b3c0-4348-ede1-79667db5679c"
heart_death = plt.figure() #This will be used later to save the image of the graph
ax = plt.gca(title ='Deaths from Cardiovascular Diseases from 1990 to 2017') #This defines the axis in order to plot multiple lines
df_heart_USA.plot(x='Year', y='Deaths', kind='line', label='United States', ax=ax) #cardiovascular disease deaths in the USA
df_heart_AUS.plot(x='Year', y='Deaths', kind='line', label='Australia', ax=ax) #cardiovascular disease deaths in Australia
df_heart_ISR.plot(x='Year', y='Deaths', kind='line', label='Israel', ax=ax) #cardiovascular disease deaths in Israel
df_heart_IND.plot(x='Year', y='Deaths', kind='line', label='India', ax=ax) #cardiovascular disease deaths in India
df_heart_BGD.plot(x='Year', y='Deaths', kind='line', label='Bangladesh', ax=ax) #cardiovascular disease deaths in Bangladesh
df_heart_MOZ.plot(x='Year', y='Deaths', kind='line', label='Mozambique', ax=ax) #cardiovascular disease deaths in Mozambique
plt.xlabel('Year') #This labels the x axis
plt.ylabel('Deaths per 100,000 People') #This labels the y axis

# + [markdown] id="UYbaoaYAunHo"
# Save the graph as an image

# + id="Ya0Hdm3pXquB"
#saves the figure
heart_death.savefig('heart_death.png')

# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="JN-D2_ujALbM" outputId="94e47d6d-bb1f-4677-d5a3-ba71cde86819"
#downloads the figure as an image onto the computer
files.download('heart_death.png')

# + [markdown] id="QB2Pn1jFu0W6"
# Graph the cardiovascular disease deaths from 1990 to 2017 for the three countries that consume the most amount of meat

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="N6nTtV15ZLXX" outputId="f937ed28-e6ba-4111-9b2f-11bb0c43f4d1"
heart_death_high = plt.figure() #This will be used later to save the image of the graph
ax = plt.gca(title ='Deaths from Cardiovascular Diseases from 1990 to 2017') #This defines the axis in order to plot multiple lines
df_heart_USA.plot(x='Year', y='Deaths', kind='line', label='United States', ax=ax) #cardiovascular disease deaths in the USA
df_heart_AUS.plot(x='Year', y='Deaths', kind='line', label='Australia', ax=ax) #cardiovascular disease deaths in Australia
df_heart_ISR.plot(x='Year', y='Deaths', kind='line', label='Israel', ax=ax) #cardiovascular disease deaths in Israel
plt.xlabel('Year') #This labels the x axis
plt.ylabel('Deaths per 100,000 People') #This labels the y axis

# + [markdown] id="fCKUq4ZTvY1n"
# Save the graph as an image

# + id="evU9jJZSZOUd"
#saves the figure
heart_death_high.savefig('heart_death_high.png')

# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="__iERqYYAj17" outputId="884ca4ad-eae0-4372-8165-08de42a54161"
#downloads the figure as an image onto the computer
files.download('heart_death_high.png')

# + [markdown] id="0uJ2YAzdvjiF"
# Graph the cardiovascular disease deaths from 1990 to 2017 for the three countries that consume the least amount of meat

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="xQEqwa62ZUhs" outputId="4c9f3554-0496-47ae-bce2-386f607e7f5c"
heart_death_low = plt.figure() #This will be used later to save the image of the graph
ax = plt.gca(title ='Deaths from Cardiovascular Diseases from 1990 to 2017') #This defines the axis in order to plot multiple lines
df_heart_IND.plot(x='Year', y='Deaths', kind='line', label='India', ax=ax) #cardiovascular disease deaths in India
df_heart_BGD.plot(x='Year', y='Deaths', kind='line', label='Bangladesh', ax=ax) #cardiovascular disease deaths in Bangladesh
df_heart_MOZ.plot(x='Year', y='Deaths', kind='line', label='Mozambique', ax=ax) #cardiovascular disease deaths in Mozambique
plt.xlabel('Year') #This labels the x axis
plt.ylabel('Deaths per 100,000 People') #This labels the y axis

# + [markdown] id="Bcrp64_dwKEW"
# Save the graph as an image

# + id="0kCxUzSwA0dk"
#saves the figure
heart_death_low.savefig('heart_death_low.png')

# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="VC-K-oZcBEbx" outputId="40d1ab41-b4e5-4848-fbfd-5d7d21f5ab2b"
#downloads the figure as an image onto the computer
files.download('heart_death_low.png')

# + [markdown] id="Lgt45ZnnwXQD"
# # Meat Consumption Over Time

# + [markdown] id="DIrXNfPSwlA9"
# Import data from this [github repository](https://github.com/shannonpowelson/global-meat-consumption-analysis/blob/main/Meat_Consumption__India.csv)

# + id="e7U344XodGir"
#import csv of meat consumption in India data from our github repository (this data is originally from Kaggle)
#save the csv link
IND_meat = 'https://raw.githubusercontent.com/shannonpowelson/global-meat-consumption-analysis/main/Meat_Consumption__India.csv'

# + [markdown] id="g_GZr1IGxJp1"
# Create the initial data frame

# + id="0yKeepoGdHEu"
df_IND_meat = pd.read_csv(IND_meat)

# + [markdown] id="yMl3F3m0xOla"
# Melt the data frame for plotly express

# + id="-YHKjk-WdHdr"
#melt the data frame (wide -> long: one row per Year/meat-type pair)
df_IND_melt = pd.melt(df_IND_meat, id_vars=['Year'])

# + [markdown] id="TXDNk-Otxi86"
# Graph the meat consumption in India from 1990 to 2026

# + id="jjhIpS4nhppd"
#Create a trendline using plotly express
IND_trend_line = px.line(df_IND_melt, x='Year', y='value', color = 'variable',
                         title = 'Meat Consumption in India from 1990 to 2026',
                         labels={'Year':'Year', 'value':'Meat Consumption (kilogram/capita)', 'variable':'Meat Type'})

# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="rIeosctheYwq" outputId="879a43b7-3ff9-4d3f-d59e-968c42c4ebca"
#Plot the trendline
IND_trend_line

# + [markdown] id="UfqzOySkxyNg"
# Import data from this [github repository](https://github.com/shannonpowelson/global-meat-consumption-analysis/blob/main/USA_Meat_Consumption.csv)

# + id="2btCQACzBnsw"
#import csv of meat consumption in the USA data from our github repository (this data is originally from Kaggle)
#save the csv link
USA_meat = "https://raw.githubusercontent.com/shannonpowelson/global-meat-consumption-analysis/main/USA_Meat_Consumption.csv"

# + [markdown] id="iQ4UqdwWyH-Z"
# Create the initial data frame

# + id="Pq2yayt7ZYlq"
#Define the data frame
df_USA_meat = pd.read_csv(USA_meat)

# + [markdown] id="L5v4mUc8yNEI"
# Melt the data frame for plotly express

# + id="XZWPtAtHZli8"
#melt the data frame (wide -> long: one row per Year/meat-type pair)
df_USA_melt = pd.melt(df_USA_meat, id_vars=['Year'])

# + [markdown] id="vyUv8aiZyXMj"
# Graph the meat consumption in the USA from 1990 to 2026

# + id="iWr_SzhFbLob"
#Create a trendline using plotly express
USA_trend_line = px.line(df_USA_melt, x='Year', y='value', color = 'variable',
                         title = 'Meat Consumption in the USA from 1990 to 2026',
                         labels={'Year':'Year', 'value':'Meat Consumption (kilogram/capita)', 'variable':'Meat Type'})

# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="JV0Rn-lqbyjb" outputId="17c852ff-00a5-45d1-bae8-38d56a8e05a4"
#Plot the trendline
USA_trend_line
2021_04_12_meat_consumption_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Run ADAM
#
# The model ADAM (Annual Danish Aggregate Model) is a model of the Danish
# economy maintained and developed by Danmarks Statistik. A link to the model:
# https://www.dst.dk/pubomtale/18836. It has 4624 equations.
#
# Danmarks Statistik has kindly provided the model version JUN17X and an
# associated databank for this demo.
#
# The model and databank is not the current model and forecast, but serves
# as an example.
#
# This is a basic demo. However you can use all the features of ModelFlow
# to manipulate the model.
#
# The notebook runs when loaded. To run a cell press Shift-enter.

# + [markdown] slideshow={"slide_type": "slide"}
# # Import ModelFlow and Pandas
# -

# %load_ext autoreload
# %autoreload 2

# + slideshow={"slide_type": "-"}
import pandas as pd
from ipywidgets import interact, Dropdown, Checkbox
from IPython.display import display  # , clear_output

from modelclass import model
import modeljupyter as mj

model.modelflow_auto()

# + [markdown] slideshow={"slide_type": "slide"}
# # Load model and databank
# The model and baseline has been created in
# [ADAM - create the model and run.ipynb](ADAM%20-%20create%20the%20model%20and%20run.ipynb)
# -

# ljit=1 would just-in-time compile the model; 0 keeps plain interpretation.
turbo = 0
madam, basedf0 = model.modelload('adam/jul17x.pcim', run=1, ljit=turbo, stringjit=0)

# + [markdown] slideshow={"slide_type": "slide"}
# # Define an interface to experiments
#
# The next cell defines an interactive widget.
#
# You can add additional variables by adding extra entries to the slidedef
# dictionary

# +
# ADAM variable descriptions (used to label the widget output)
vtrans = {'ENL': 'Balance of Payment, surplus',
          'UL': 'Unemployment',
          'FY': 'Real GDP',
          'TFN_O': 'Public surplus',
          'TFN_OY': 'Public surplus, Percent of GDP'}

# Define some input fields.
# NOTE(review): for 'Extra labour supply' the value 10 lies outside
# [min, max] = [-8, 8] -- confirm the widget clamps it as intended.
slidedef = {'Value added tax rate': {'var': 'TG', 'value': -0.05, 'min': -0.1,
                                     'max': 0.1, 'op': '+'},
            'Extra labour supply, 1000 ': {'var': 'UQ', 'value': 10, 'min': -8,
                                           'max': 8, 'op': '+', 'step': 1},
            }

# Now call the input widget to display the input.
# Renamed from ``input`` so the Python builtin input() is not shadowed.
input_widget = madam.inputwidget(2018, 2030, slidedef=slidedef,
                                 showout=True,
                                 varpat='FY ENL UL TFN_O TFN_OY',
                                 trans=vtrans)
# -

# # Attributions to the results from changed variables

display(madam.get_att_gui(desdic=vtrans))

# + [markdown] slideshow={"slide_type": "slide"}
# # Attributions to an equation from its input.
# You can decide how many levels back in the dependency tree you want to
# calculate the attributions
# -

@interact(Threshold=(0.0, 10.0, 1.))
def explain(Variable=Dropdown(options=sorted(madam.endogene), value='FY'),
            Pct=Checkbox(description='Percent growth', value=False),
            Periode=Dropdown(options=madam.current_per, value=2022),
            Threshold=0.0):
    """Show the formula for *Variable* and plot its decomposition."""
    mj.get_frml_latex(madam, Variable)
    madam.dekomp_plot(Variable, pct=Pct, threshold=Threshold)
    madam.dekomp_plot_per(Variable, pct=Pct, threshold=Threshold,
                          per=Periode, sort=True)

# # Attribution going back in the dependency tree
# You might have to zoom in order to look at the actual numbers.
# Also the result can be inspected in the graph/ folder.
#
# Pointing at a variable name will display the variable label.

madam.modeldash('UL', jupyter=1, port=5005, all=1)

# Endogenous variables that also have a 'W' counterpart in the model.
wnames = sorted([name for name in madam.endogene if name + 'W' in madam.endogene])
wnames

for n in wnames[-4:]:
    frml = madam.allvar[n]['frml']
    wfrml = madam.allvar[n + 'W']['frml']
    print(f'\n{n}\n{frml}\n{wfrml}')

madam.draw('fm', filter=30, up=1, down=0, attshow=1, browser=1)

madam.att_dic.keys()

madam.CPUXH.frml

madam.CPUXHw.frml

madam['am*'].frml
Examples/ADAM/test ADAM -Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # CrabAgePrediction # ___ # # <p align="left"> # <span class="image left"> # <img src="https://img.shields.io/badge/Kaggle-035a7d?style=for-the-badge&logo=kaggle&logoColor=white" alt=""/> # <img src="https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54" alt="" /> # <img src="https://img.shields.io/badge/jupyter-%23FA0F00.svg?style=for-the-badge&logo=jupyter&logoColor=white" alt="" /> # <img src="https://img.shields.io/badge/pycharm-143?style=for-the-badge&logo=pycharm&logoColor=black&color=black&labelColor=green" alt="" /> # </span> # </p> # # # <div align="center" class="inner"> # <span> # <img src="https://i.pinimg.com/originals/09/c6/29/09c62903beeba336dc9da76eb5c9a107.gif" height=100px alt="html" style="vertical-align:top;margin:3px" /> # <img src="https://i.pinimg.com/originals/94/24/c4/9424c4c89a3a37536d05df7cf7d48e25.gif" height=100px alt="html" style="vertical-align:top;margin:3px" /> # <img src="https://i.pinimg.com/originals/08/07/18/080718c3c5f3a56249a2ee3a825221bd.gif" height=100px alt="html" style="vertical-align:top;margin:3px" /> # <img src="https://i.pinimg.com/originals/32/44/01/324401aa18cc80c55f338dcd4674cb80.gif" height=100px alt="html" style="vertical-align:top;margin:3px" /> # <!-- <img src="https://i.pinimg.com/originals/84/da/da/84dada0a5dcfd790700df3dd87897aef.gif" height=100px alt="html" style="vertical-align:top;margin:3px"> --> # <img src="https://i.pinimg.com/originals/35/f5/91/35f5911b6761ecd292e0ee9b8596b69f.gif" height=100px alt="html" style="vertical-align:top;margin:3px" /> # <img src="https://i.pinimg.com/originals/53/ad/92/53ad929213f59c7392bf92605868021f.gif" height=100px alt="html" style="vertical-align:top;margin:3px" /> # </span> # </div> # # # # ## Project 
Information: # ___ # # # **CLASS:** `CPSC-483 Machine Learning Section-02` # # **LAST UPDATE:** `May 5, 2022` # # **PROJECT NAME:** `Crab Age Prediction` # # **PROJECT GROUP:** # # | Name | Email | Student | # | ------------ | ------------------------------ | ------------- | # | <NAME> | <EMAIL> | Undergraduate | # | <NAME> | <EMAIL> | Graduate | # # **PROJECT PAPER:** [Here](https://github.com/13rianlucero/CrabAgePrediction/blob/main/FirstDraft/Crab%20Age%20Prediction%20Paper.pdf) # # **PROJECT GITHUB REPOSITORY:** [Here](https://github.com/13rianlucero/CrabAgePrediction) # # # # # Overview # # ___ # > # > ## __**1. Abstract**__ # > # > # > #### __**Paper Summary [✔️]**__ # > # > Machine learning can be used to predict the age of crabs. It can be more accurate than simply weighing a crab to estimate its age. Several different models can be used, though support vector regression was found to be the most accurate in this experiment. # > # > <br /> # > # > # >> ___ # >> # >> ## __**2. Introduction**__ # >> # >> # >> # >> | __**The Problem [✔️]**__ | __**Why it's important? [✔️]**__ | __**Our Solution Strategy [✔️]**__ | # >> | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | # >> | <br /><br />*It is quite difficult to determine a crab's age due to their molting cycles which happen throughout their whole life. 
Essentially, the failure to harvest at an ideal age, increases cost and crab lives go to waste.* | <br /><br />*Beyond a certain age, there is negligible growth in crab's physical characteristics and hence, it is important to time the harvesting to reduce cost and increase profit.* | <br /><br />Prepare crab data and use it to train several machine learning models. Thus, given certain physcial chraracteristics and the corresponding values, the ML models will accurately determine the age of the crabs. | # >> <br /><br /> # >> # >>> ___ # >>> # >>> ## __**3. Background**__ # >>> # >>> # >>> #### __**Process Activities [✔️]**__ # >>> # >>> - Feature Selection & Representation # >>> - Evaluation on variety of methods # >>> - Method Selection # >>> - Parameter Tuning # >>> - Classifier Evaluation # >>> - Train-Test Split # >>> - Cross Validation # >>> - Eliminating Data # >>> - Handle Categorical Data # >>> - One-hot encoding # >>> - Data Partitioning # >>> - Feature Scaling # >>> - Feature Selection # >>> - Choose ML Models # >>> # >>> #### __**Models [✔️]**__ # >>> # >>> - K-Nearest Neighbours (KNN) # >>> - Multiple Linear Regression (MLR) # >>> - Support Vector Machine (SVM) # >>> # >>> #### __**Analysis [✔️]**__ # >>> # >>> - Evaluate Results # >>> - Performance Metrics # >>> - Compare ML Models using Metrics # >>> # >>> <br /><br /> # >>> # >>>> ___ # >>>> # >>>> ## **4. Methods** # >>>> # >>>> # >>>> # >>>> #### __**Approach [✔️]**__ # >>>> # >>>> - Prediction System using 3 main ML Models # >>>> # >>>> #### __**Key Contributions [✔️]**__ # >>>> # >>>> - Justin # >>>> - `KNN` # >>>> - `SVM` # >>>> - Brian # >>>> - `MLR` # >>>> # >>>> <br /> # >>>> # >>>>> ___ # >>>>> # >>>>> ## **5. 
Experiments** # >>>>> # >>>>> # >>>>> #### __**Prediction System Development Workflow [✔️]**__ # >>>>> # >>>>> <span class="image"><img src="https://raw.githubusercontent.com/13rianlucero/CrabAgePrediction/main/image/README/1651798210121.png" alt="" /></span> # >>>>> # >>>>> #### __**Predicition Model Workflow [✔️]**__ # >>>>> # >>>>> | KNN | MLR | SVM | # >>>>> | --------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | # >>>>> | Import Libraries | Import Libraries | Import Libraries | # >>>>> | Import Dataset, create dataframe | Import Dataset, create dataframe | Import Dataset, create dataframe | # >>>>> | Data Preprocessing | Data Preprocessing | Data Preprocessing | # >>>>> | Check for Missing data, Bad Data, Outliers, Data Types, Choose Classifier, Data Organization, Data Scaling, etc | Check for Missing data, Bad Data, Outliers, Data Types, Choose Classifier, Data Organization, Data Scaling, etc | Check for Missing data, Bad Data, Outliers, Data Types, Choose Classifier, Data Organization, Data Scaling, etc | # >>>>> | Feature Selection | Feature Selection | Feature Selection | # >>>>> | Train-Test Split | Train-Test Split | Train-Test Split | # >>>>> | Build Algorithm | Build Algorithm | Build Algorithm | # >>>>> | Train Algorithm | Train Algorithm | Train Algorithm | # >>>>> | Test Algorithm | Test Algorithm | Test Algorithm | # >>>>> | Produce Performance Metrics from Tests | Produce Performance Metrics from Tests | Produce Performance Metrics from Tests | # >>>>> | Evaluate Results | Evaluate Results | Evaluate Results | # >>>>> | Tune Algorithm | Tune Algorithm | Tune Algorithm | # >>>>> | Retest & Re-Analayze | Retest & Re-Analayze | Retest & Re-Analayze | # >>>>> | 
Predicition Model defined from new train-test-analyze cycle | Predicition Model defined from new train-test-analyze cycle | Predicition Model defined from new train-test-analyze cycle | # >>>>> | Use model to refine the results | Use model to refine the results | Use model to refine the results | # >>>>> | Draw Conclusions | Draw Conclusions | Draw Conclusions | # >>>>> # >>>>> ### __**Code [✔️]**__ # >>>>> ***SHOWN DOWN BELOW IN CODE CELL*** # >>>>> # >>>>> <br /> # >>>>> # >>>>>> ___ # >>>>>> # >>>>>> ## __**6. Conclusion**__ # >>>>>> # >>>>>> #### __***Summary of Results [✔️]***__ # >>>>>> # >>>>>> - Overall, the models were able to predict the age of crabs reasonably well. On average, the predictions were off by about 1.5 months. Although support vector regression performed slightly better than the other two models, it was still close enough that any of the models could be used with satisfactory results. # >>>>>> # >>>>>> - Multiple linear regression was found to be slightly better at predicting older crabs while support vector regression was better at predicting younger crabs. K-nearest neighbor was average overall. What is important to note is that the predictions for all three models were more accurate when the age of the crab was less than 12 months. This makes sense because after a crab reaches full maturity around 12 months, its growth comes to a halt and it is harder to predict its age since its features stay roughly the same. # >>>>>> # >>>>>> - Therefore, predicting the age of a crab becomes less accurate the longer a crab has matured. To circumvent this, the dataset could be further preprocessed so that any crab over the age of 12 months will be set to 12 months. # >>>>>> # >>>>>> - This would greatly increase the accuracy of the machine learning models though the models would no longer be able to predict any ages over 12 months. Since the purpose is to find which crabs are harvestable, this may be a good compromise. 
# >>>>>> # >>>>>> | **Model** | **Type** | **Error (months)** | # >>>>>> | :-------------------------------- | :------- | :----------------- | # >>>>>> | Linear Regression (Weight vs Age) | Baseline | 1.939 | # >>>>>> | K-nearest Neighbor | ML | 1.610 | # >>>>>> | Multiple Linear Regression | ML | 1.560 | # >>>>>> # >>>>>> #### __**Future work**__ ✔️ # >>>>>> # >>>>>> Predicting the age of a crab becomes less accurate the longer a crab has matured. To circumvent this, the dataset could be further preprocessed so that any crab over the age of 12 months will be set to 12 months. # >>>>>> # >>>>>> This would greatly increase the accuracy of the machine learning models though the models would no longer be able to predict any ages over 12 months. Since the purpose is to find which crabs are harvestable, this may be a good compromise. # >>>>>> # >>>>>> <br /> # >>>>>> # >>>>>>> ___ # >>>>>>> # >>>>>>> ## __**7. References**__ # >>>>>>> # >>>>>>> <p align="center"> # >>>>>>> <img src="https://img.shields.io/badge/Kaggle-035a7d?style=for-the-badge&logo=kaggle&logoColor=white" alt=""/> # >>>>>>> <img src="https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54" alt="" /> # >>>>>>> <img src="https://img.shields.io/badge/jupyter-%23FA0F00.svg?style=for-the-badge&logo=jupyter&logoColor=white" alt="" /> # >>>>>>> <img src="https://img.shields.io/badge/pycharm-143?style=for-the-badge&logo=pycharm&logoColor=black&color=black&labelColor=green" alt="" /> # >>>>>>> </p> # >>>>>>> # >>>>>>> #### __**Links**__ ✔️ # >>>>>>> # >>>>>>> **[1]** [https://www.kaggle.com/datasets/sidhus/crab-age-prediction](https://www.kaggle.com/datasets/sidhus/crab-age-prediction) # >>>>>>> # >>>>>>> **[2]** [https://scikit-learn.org/stable/modules/svm.html](https://scikit-learn.org/stable/modules/svm.html) # >>>>>>> # >>>>>>> **[3]** 
[https://repository.library.noaa.gov/view/noaa/16273/noaa_16273_DS4.pdf](https://repository.library.noaa.gov/view/noaa/16273/noaa_16273_DS4.pdf) # >>>>>>> # >>>>>>> **[4]** [https://faculty.math.illinois.edu/~hildebr/tex/latex-start.html](https://faculty.math.illinois.edu/~hildebr/tex/latex-start.html) # >>>>>>> # >>>>>>> **[5]** [https://github.com/krishnaik06/Multiple-Linear-Regression](https://github.com/krishnaik06/Multiple-Linear-Regression) # >>>>>>> # >>>>>>> **[6]** [https://github.com/13rianlucero/CrabAgePrediction](https://github.com/13rianlucero/CrabAgePrediction) # >>>>>>> # >>>>>>> ___ # >>>>>>> # >>>>>> # >>>>>> ___ # >>>>>> # >>>>> # >>>>> ___ # >>>>> # >>>> # >>>> ___ # >>>> # >>> # >>> ___ # >>> # >> # >> ___ # >> # > # > ___ # # ___ # # # ***CODE*** # --- # > **[CONTENTS:]()** # > **[1-INITIAL_SETUP]()** # > - Import Libraries # > - Import Dataset # > - **[2-DATA-PREPROCESSING]()** # > - Convert Sex Categorical value into Numerical Value # > - Pearson correlation for every feature # > - **[3-TRAIN_TEST_SPLIT]()** # > - Split the data into* Test set & Train set # > - Determine value for K in KNN Model # > - **[4-BUILD_AND_TRAIN_MODELS]()** # > - K-Nearest Neighbors -- KNN: # > - Multiple Linear Regression -- MLR: # > - Support Vector Regression -- SVR: # > - **[5-PLOT_TEST_RESULTS]** # > - Plot the [Predicted Age] against the [Actual Age] for the Test Set # > - Plot the Aggreagate Summary (3 model plot) # > - Plot the Aggreagate Summary (3 model plot) # > - **[6-PLOT_EACH_INDIVIDUAL_MODEL]()**: # > - `1. KNN` # > - `2. MLR` # > - `3. 
SVR` # > - **[MAIN.PY]()** # # # # # # ## **1-InititalSetup** # ___ # ### *Import the Libraries* import pandas import numpy from scipy import stats from matplotlib import pyplot as plt from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn import svm from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score # ### *Import Dataset into Dataframe variable* # # + data = pandas.read_csv(r"CrabAgePrediction.csv").dropna(axis=0) print(data.columns) data["SexValue"] = 0 #create a new column print(data) # - # ## 2-DataPreprocessing # ___ # # ### *Convert Sex Categorical value into Numerical Value* # + # Putting all our data together and dropping Sex for SexValue data = data[["SexValue", "Length", "Diameter", "Height", "Weight", "Shucked Weight", "Viscera Weight", "Shell Weight", "Age"]] X = data[["Length", "Diameter", "Height", "Weight", "Shucked Weight", "Viscera Weight", "Shell Weight"]] y = data[["Age"]] # Visualize the data data.describe() X.info() y.info() # - # ### *Pearson correlation for every feature* # Pearson correlation for every feature col_cor = stats.pearsonr(data["SexValue"], y) col1_cor = stats.pearsonr(data["Length"], y) col2_cor = stats.pearsonr(data["Diameter"], y) col3_cor = stats.pearsonr(data["Height"], y) col4_cor = stats.pearsonr(data["Weight"], y) col5_cor = stats.pearsonr(data["Shucked Weight"], y) col6_cor = stats.pearsonr(data["Viscera Weight"], y) col7_cor = stats.pearsonr(data["Shell Weight"], y) print(col_cor) print(col1_cor) print(col2_cor) print(col3_cor) print(col4_cor) print(col5_cor) print(col6_cor) print(col7_cor) # ## TestTrainReAnalyze # ___ # # ### *Split the data into* ***Test set*** *&* ***Train set*** # # Split the data into test and train set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=132) # ### K-Nearest Neighbours # `Determining the appropriate value for K` # + #n_neighbors plot error_rate = [] 
y_test2 = numpy.ravel(y_test) for k in range(1, 31): neigh = KNeighborsClassifier(n_neighbors=k) neigh.fit(X_train, numpy.ravel(y_train)) knn_predict = neigh.predict(X_test) error_knn = 0 for x in range(0, 1168): error_knn += abs(knn_predict[x] - y_test2[x]) error_rate.append(error_knn/1169) plt.plot(range(1, 31), error_rate) plt.xlabel("n_neighbors") plt.ylabel("error_rate") plt.title("Average error vs n_neighbors") plt.show() # - # ## **5-BuildTrainModels** # ___ # # ### ***K-Nearest Neighbors -- KNN:*** # # #KNN neigh = KNeighborsClassifier(n_neighbors=20) neigh.fit(X_train, numpy.ravel(y_train)) knn_predict = neigh.predict(X_test) # ### ***Multiple Linear Regression -- MLR:*** # #Multiple Linear Regression regressor = LinearRegression() regressor.fit(X_train, y_train) y_pred = regressor.predict(X_test) score = r2_score(y_test,y_pred) # ### ***Support Vector Regression -- SVR:*** # # ## **6-PlotTestResults** # ___ # # # ### Capture the Live Results # + # Plot the predicted age against the actual age for the test set plt.plot(range(1, 1169), knn_predict) plt.plot(range(1, 1169), y_pred) plt.plot(range(1, 1169), regr_predict) plt.plot(range(1, 1169), numpy.ravel(y_test)) plt.xlim([0, 50]) # - # ### Show Performance Measure # `Evaluate the models in comparison:` # + error_knn = 0 error_mlr = 0 error_svr = 0 y_test2 = numpy.ravel(y_test) for x in range(0, 1168): error_knn += abs(knn_predict[x] - y_test2[x]) error_mlr += abs(y_pred[x] - y_test2[x]) error_svr += abs(regr_predict[x] - y_test2[x]) print (error_knn/1169) print (error_mlr/1169) print (error_svr/1169) # - # End of Main ML System Software # + ################# COMPLETE CODE (MAIN.PY) FILE ################### ################# COMPLETE CODE (MAIN.PY) FILE ################### ################# COMPLETE CODE (MAIN.PY) FILE ################### ################# COMPLETE CODE (MAIN.PY) FILE ################### import pandas import numpy from scipy import stats from matplotlib import pyplot as plt from 
sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn import svm from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score data = pandas.read_csv(r"CrabAgePrediction.csv").dropna(axis=0) print(data.columns) data["SexValue"] = 0 #create a new column for index, row in data.iterrows(): #convert male or female to a numerical value Male=1, Female=2, Indeterminate=1.5 if row["Sex"] == "M": data.iloc[index, 9] = 1 elif row["Sex"] == "F": data.iloc[index, 9] = 2 else: data.iloc[index, 9] = 1.5 #putting all our data together and dropping Sex for SexValue data = data[["SexValue", "Length", "Diameter", "Height", "Weight", "Shucked Weight", "Viscera Weight", "Shell Weight", "Age"]] X = data[["Length", "Diameter", "Height", "Weight", "Shucked Weight", "Viscera Weight", "Shell Weight"]] y = data[["Age"]] #Pearson correlation for every feature col_cor = stats.pearsonr(data["SexValue"], y) col1_cor = stats.pearsonr(data["Length"], y) col2_cor = stats.pearsonr(data["Diameter"], y) col3_cor = stats.pearsonr(data["Height"], y) col4_cor = stats.pearsonr(data["Weight"], y) col5_cor = stats.pearsonr(data["Shucked Weight"], y) col6_cor = stats.pearsonr(data["Viscera Weight"], y) col7_cor = stats.pearsonr(data["Shell Weight"], y) print(col_cor) print(col1_cor) print(col2_cor) print(col3_cor) print(col4_cor) print(col5_cor) print(col6_cor) print(col7_cor) #split the data into test and train set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=132) #n_neighbors plot error_rate = [] y_test2 = numpy.ravel(y_test) for k in range(1, 31): neigh = KNeighborsClassifier(n_neighbors=k) neigh.fit(X_train, numpy.ravel(y_train)) knn_predict = neigh.predict(X_test) error_knn = 0 for x in range(0, 1168): error_knn += abs(knn_predict[x] - y_test2[x]) error_rate.append(error_knn/1169) plt.plot(range(1, 31), error_rate) plt.xlabel("n_neighbors") plt.ylabel("error_rate") 
plt.title("Average error vs n_neighbors") plt.show() # KNN neigh = KNeighborsClassifier(n_neighbors=20) neigh.fit(X_train, numpy.ravel(y_train)) knn_predict = neigh.predict(X_test) # Multiple Linear Regression regressor = LinearRegression() regressor.fit(X_train, y_train) y_pred = regressor.predict(X_test) score = r2_score(y_test,y_pred) # SVR regr = svm.SVR() regr.fit(X_train, numpy.ravel(y_train)) regr_predict = regr.predict(X_test) # Plot the predicted age against the actual age for the test set plt.plot(range(1, 1169), knn_predict) plt.plot(range(1, 1169), y_pred) plt.plot(range(1, 1169), regr_predict) plt.plot(range(1, 1169), numpy.ravel(y_test)) plt.xlim([0, 50]) #plt.xlim([60, 90]) plt.legend(["KNN Predicted Age", "LR Predicted Age", "SVR Predicted Age", "Actual Age"]) plt.ylabel("Age in months") plt.title("Predicted vs Actual Crab Age") plt.show() error_knn = 0 error_mlr = 0 error_svr = 0 y_test2 = numpy.ravel(y_test) for x in range(0, 1168): error_knn += abs(knn_predict[x] - y_test2[x]) error_mlr += abs(y_pred[x] - y_test2[x]) error_svr += abs(regr_predict[x] - y_test2[x]) print (error_knn/1169) print (error_mlr/1169) print (error_svr/1169)
notebook/.ipynb_checkpoints/crab-age-prediction-v2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import policynet
import qvaluenet
import gym
gym.logger.set_level(40)

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np

device = "cuda" if torch.cuda.is_available() else "cpu"

env = gym.make('CartPole-v0')

print(env.action_space, env.observation_space)
# print("List of available actions: ", env.unwrapped)

LR = 5e-4
# FIX: gamma was defined only at the bottom of the notebook but is used in
# the training loop below (NameError when running top to bottom).
gamma = 0.99

# CartPole: 4-dim observation, 2 discrete actions.
value_net = qvaluenet.network(4, 1).to(device)
policy_net = policynet.network(4, 2).to(device)
optimizer_value = optim.Adam(value_net.parameters(), lr=LR)
optimizer_policy = optim.Adam(policy_net.parameters(), lr=LR)

state = env.reset()
g = torch.from_numpy(state).float().to(device)

state = env.reset()
state = torch.from_numpy(state).float().to(device)
while True:
    # Greedy action from the policy logits.  The original called
    # int(policy_net(state)), which fails on a 2-element tensor; use
    # argmax as collect_trajectory() below already does.
    action = int(policy_net(state).argmax())
    # BUG FIX: the environment must be stepped with the action, not with
    # the state tensor.
    next_state, reward, done, _ = env.step(action)
    next_state = torch.from_numpy(next_state).float().to(device)
    temp = float(value_net(next_state))
    v_value = reward + temp  # one-step TD target
    value_net_loss = F.mse_loss(value_net(state), v_value)
    optimizer_value.zero_grad()
    value_net_loss.backward()
    optimizer_value.step()
    advantage_value = reward + gamma * float(value_net(next_state)) - float(value_net(state))
    ## advantage function can be used as a baseline
    # BUG FIX: advance to the next state and stop at the episode end; the
    # original looped forever on the very first transition.
    state = next_state
    if done:
        break

from collections import namedtuple
exp = namedtuple('experience', ['state', 'reward', 'next_state', 'action', 'done'])


def collect_trajectory():
    """Roll out one episode with the current greedy policy.

    Returns a list of ``experience`` namedtuples
    (state, reward, next_state, action, done).
    """
    global exp
    experiences = []
    state = env.reset()
    while True:
        action = int(policy_net(torch.from_numpy(state).float().to(device)).argmax())
        next_state, reward, done, _ = env.step(action)
        experiences.append(exp(state, reward, next_state, action, done))
        state = next_state
        if done:
            break
    return experiences


states, rewards, next_states, actions, dones = zip(*collect_trajectory())

states = torch.from_numpy(np.array(states)).float().to(device)
next_states = torch.from_numpy(np.array(next_states)).float().to(device)
actions = torch.from_numpy(np.array(actions))
# rewards = torch.from_numpy(np.array(rewards))

gamma = 0.99
# Discounted reward-to-go for each step of the trajectory.
discounts = np.array([gamma**i for i in range(1, len(rewards)+1)])
discounted_rewards = np.multiply(np.array(rewards), discounts)
discounted_rewards = discounted_rewards[::-1].cumsum()[::-1]
A2C/run.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Sigmoid Example # Use backpropagation to compute the gradients for x and w of the function. # $$f(w,x) = \frac{1}{1+e^{-(w_0x_0 + w_1x_1 + w_2)}}$$ # ### Sigmoid Function \\(\sigma(x)\\) # The following are the sigmoid function and its derivative # # $$ # \sigma(x) = \frac{1}{1+e^{-x}} \\\\ # \rightarrow \hspace{0.2in} \frac{d\sigma(x)}{dx} = \frac{e^{-x}}{(1+e^{-x})^2} = \left( \frac{1 + e^{-x} - 1}{1 + e^{-x}} \right) \left( \frac{1}{1+e^{-x}} \right) # = \left( 1 - \sigma(x) \right) \sigma(x) # $$ # ### Compute Gradients for \\(f(w,x)\\) # Let \\(dot(w,x) = w_{0} x_{0} + w_{1} x_{1} + w_{2}\\), So \\(f(w,x) = \sigma(dot(w,x))\\). We can now compute the gradients using chain rule. # + import numpy as np w = [2, -3, -3] x = [-1, -2] dot = w[0]*x[0] + w[1]*x[1] + w[2] sigma = 1.0 / (1 + np.exp(-dot)) dsigma = (1 - sigma) * sigma dfdx = [w[0] * dsigma, w[1] * dsigma] dfdw = [x[0] * dsigma, x[1] * dsigma, 1.0 * dsigma] dfdx, dfdw # - # ## Staged Computation # # Suppose that we have a function of the form: # # $$f(x,y) = \frac{x + \sigma(y)}{\sigma(x) + (x+y)^2}$$ # # To be clear, this function is completely useless and it's not clear why you would ever want to compute its gradient, except for the fact that it is a good example of backpropagation in practice. # # It is very important to stress that if you were to launch into performing the differentiation with respect to either x or y, you would end up with very large and complex expressions. However, it turns out that doing so is completely unnecessary because we don’t need to have an explicit function written down that evaluates the gradient. We only have to know how to compute it. 
# + import numpy as np x = 3 y = -4 # **forward pass** # We have structured the code in such way that it contains multiple intermediate # variables, each of which are only simple expressions for which we already know # the local gradients. By the end of the expression we have computed the forward # pass. sigy = 1.0 / (1 + np.exp(-y)) # sigmoid in numerator num = x + sigy # numerator sigx = 1.0 / (1 + np.exp(-x)) # signoid in denominator xpy = x + y # plus xpysqr = xpy**2 den = sigx + xpysqr # denominator invden = 1.0 / den # inversion f = num * invden # **backward pass** # We’ll go backwards and for every variable along the way in the forward pass. # we will have the same variable, but one that begins with a **d**, which will # hold the gradient of the output of the circuit with respect to that variable. # Additionally, note that every single piece in our backprop will involve computing # the local gradient of that expression, and chaining it with the gradient on that # expression with a multiplication. dnum = invden # f = num * invden dinvden = num dden = (-1.0 / (den**2)) * dinvden # invden = 1.0 / den dsigx = (1) * dden # den = sigx + xpysqr dxpysqr = (1) * dden dxpy = (2 * xpy) * dxpysqr # xpysqr = xpy**2 dx = (1) * dxpy dy = (1) * dxpy dx += ((1-sigx) * sigx) *dsigx # sigx = 1.0 / (1 + np.exp(-x)) dx += (1) * dnum dsigy = (1) * dnum dy += ((1 - sigy) * sigy) * dsigy #sigy = 1.0 / (1 + np.exp(-y)) dx, dy # - # ### Gradients add up at forks # # The forward expression involves the variables x,y multiple times, so when we perform backpropagation we must be careful to use **+=** instead of **=** to accumulate the gradient on these variables (otherwise we would overwrite it). This follows the multivariable chain rule in Calculus, which states that if a variable branches out to different parts of the circuit, then the gradients that flow back to it will add. 
# ## Patterns in backward flow
#
# It is interesting to note that in many cases the backward-flowing gradient can be
# interpreted on an intuitive level. For example, the three most commonly used gates
# in neural networks (add, mul, max) all have very simple interpretations in terms of
# how they act during backpropagation.
#
# * add gate: always takes the gradient on its output and distributes it equally to
#   all of its inputs, regardless of what their values were during the forward pass.
# * multiply gate: its local gradients are the input values (except switched), and
#   this is multiplied by the gradient on its output during the chain rule. It will
#   assign a relatively huge gradient to the small input and a tiny gradient to the
#   large input.
# * max gate: routes the gradient, distributes the gradient (unchanged) to exactly
#   one of its inputs
#
# ## Gradients for vectorized operations
#
# The above sections were concerned with single variables, but all concepts extend in
# a straight-forward manner to matrix and vector operations. However, one must pay
# closer attention to dimensions and transpose operations.

# +
import numpy as np

# forward pass: a random linear layer D = W X
W = np.random.randn(5, 10)
X = np.random.randn(10, 3)
D = W @ X

# suppose we had the gradient on D from further up in the circuit
dD = np.random.randn(*D.shape)  # same shape as D

# backward pass: each gradient has the same shape as the tensor it belongs to
dW = dD @ X.T   # transpose lines the dimensions up
dX = W.T @ dD
cs231n/backprop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # H3 Python API

# +
import os
import sys

# Make the package importable when running from inside the repo.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

from h3 import h3
import folium


def visualize_hexagons(hexagons, color="red", folium_map=None):
    """Draw a set of H3 cells on a folium map.

    hexagons is a list of hexcluster. Each hexcluster is a list of hexagons.
    eg. [[hex1, hex2], [hex3, hex4]]

    If folium_map is None a new map centred on the mean vertex is created;
    otherwise the polylines are added to the given map. Returns the map.
    """
    polylines = []
    lat = []
    lng = []
    for hex_addr in hexagons:  # renamed from `hex` to avoid shadowing the builtin
        polygons = h3.h3_set_to_multi_polygon([hex_addr], geo_json=False)
        # flatten polygons into loops.
        outlines = [loop for polygon in polygons for loop in polygon]
        # close the first loop by repeating its first vertex
        polyline = [outline + [outline[0]] for outline in outlines][0]
        lat.extend(map(lambda v: v[0], polyline))
        lng.extend(map(lambda v: v[1], polyline))
        polylines.append(polyline)
    if folium_map is None:
        m = folium.Map(location=[sum(lat) / len(lat), sum(lng) / len(lng)],
                       zoom_start=13, tiles='cartodbpositron')
    else:
        m = folium_map
    for polyline in polylines:
        my_PolyLine = folium.PolyLine(locations=polyline, weight=8, color=color)
        m.add_child(my_PolyLine)
    return m


def visualize_polygon(polyline, color):
    """Draw one closed polygon on a fresh folium map and return the map.

    Fix: the original appended the first vertex to the caller's list in place,
    mutating the argument; we now work on a closed copy instead.
    """
    closed = list(polyline) + [polyline[0]]
    lat = [p[0] for p in closed]
    lng = [p[1] for p in closed]
    m = folium.Map(location=[sum(lat) / len(lat), sum(lng) / len(lng)],
                   zoom_start=13, tiles='cartodbpositron')
    my_PolyLine = folium.PolyLine(locations=closed, weight=8, color=color)
    m.add_child(my_PolyLine)
    return m


# +
lat = 35.2
long = 10.1

# Index the same point at four resolutions and inspect the raw bit patterns.
h3_address = h3.geo_to_h3(lat, long, 1)   # lat, lng, hex resolution
h3_address2 = h3.geo_to_h3(lat, long, 2)  # lat, lng, hex resolution
h3_address3 = h3.geo_to_h3(lat, long, 3)  # lat, lng, hex resolution
h3_address4 = h3.geo_to_h3(lat, long, 4)  # lat, lng, hex resolution
print(bin(int(h3_address, 16)))
print(bin(int(h3_address2, 16)))
print(bin(int(h3_address3, 16)))
print(bin(int(h3_address4, 16)))
print(h3.h3_get_base_cell(h3_address4))
print(h3.h3_to_string(int(h3_address4, 16)))
J = h3.h3_to_parent(h3_address4, 2)
print(J)
print(h3.h3_get_resolution(h3_address4))
print(h3.h3_get_resolution(J))
# Expand the resolution-2 parent back into all of its resolution-4 children.
dg = h3.uncompact(J, 4)
c = 0
for child in dg:
    print(child)
    c += 1
print(c)
print(int('1018000000000f', 16))
#m = visualize_hexagons([h3_address,h3_address2,h3_address3,h3_address4])
#display(m)
# -

# Concentric k-rings around a fixed cell, coloured by distance.
h3_address = h3.geo_to_h3(37.3615593, -122.0553238, 9)  # lat, lng, hex resolution
hex_center_coordinates = h3.h3_to_geo(h3_address)       # array of [lat, lng]
hex_boundary = h3.h3_to_geo_boundary(h3_address)        # array of arrays of [lat, lng]
m = visualize_hexagons(list(h3.k_ring_distances(h3_address, 4)[3]), color="purple")
m = visualize_hexagons(list(h3.k_ring_distances(h3_address, 4)[2]), color="blue", folium_map=m)
m = visualize_hexagons(list(h3.k_ring_distances(h3_address, 4)[1]), color="green", folium_map=m)
m = visualize_hexagons(list(h3.k_ring_distances(h3_address, 4)[0]), color="red", folium_map=m)
display(m)

# +
geoJson = {'type': 'Polygon',
           'coordinates': [[[37.813318999983238, -122.4089866999972145],
                            [37.7866302000007224, -122.3805436999997056],
                            [37.7198061999978478, -122.3544736999993603],
                            [37.7076131999975672, -122.5123436999983966],
                            [37.7835871999971715, -122.5247187000021967],
                            [37.8151571999998453, -122.4798767000009008]]]}

polyline = geoJson['coordinates'][0]
polyline.append(polyline[0])
lat = [p[0] for p in polyline]
lng = [p[1] for p in polyline]
m = folium.Map(location=[sum(lat) / len(lat), sum(lng) / len(lng)],
               zoom_start=13, tiles='cartodbpositron')
my_PolyLine = folium.PolyLine(locations=polyline, weight=8, color="green")
m.add_child(my_PolyLine)

# Fill the polygon with resolution-8 cells and draw their outlines.
hexagons = list(h3.polyfill(geoJson, 8))
polylines = []
lat = []
lng = []
for hex_addr in hexagons:  # renamed from `hex` to avoid shadowing the builtin
    polygons = h3.h3_set_to_multi_polygon([hex_addr], geo_json=False)
    # flatten polygons into loops.
    outlines = [loop for polygon in polygons for loop in polygon]
    polyline = [outline + [outline[0]] for outline in outlines][0]
    lat.extend(map(lambda v: v[0], polyline))
    lng.extend(map(lambda v: v[1], polyline))
    polylines.append(polyline)
for polyline in polylines:
    my_PolyLine = folium.PolyLine(locations=polyline, weight=8, color='red')
    m.add_child(my_PolyLine)
display(m)
# -

# +
# Hex string -> binary string, nibble by nibble (little-endian trick).
binary = lambda x: "".join(reversed( [i+j for i,j in zip( *[ ["{0:04b}".format(int(c,16)) for c in reversed("0"+x)][n::2] for n in [1,0] ] ) ] ))
print(binary("823847fffffffff"))

h = "823847fffffffff"
"{0:064b}".format(int(h, 16))
# -

import binascii
h = "4016400000000f"
binascii.unhexlify(h)

# +
lat = 35.2
long = 10.1
res = 10
h3_address = h3.geo_to_h3(lat, long, res)  # lat, lng, hex resolution

# Decode the index fields straight out of the 64-bit representation.
h3_str = "{0:064b}".format(int(h3_address, 16))
h3_index = h3_str[1:5]
h3_res = int(h3_str[8:13], 2)
h3_base = int(h3_str[12:12+7], 2)
print(h3_str)
print(h3_index, h3_res, h3_base)
print(h3.h3_get_base_cell(h3_address))
for i in range(1, res+1):
    h3_cell = h3_str[19+(i-1)*3:19+i*3]
    print(h3_cell)
    print(i, int(h3_cell, 2))
# -

# Fixed: the paragraph below was a bare (uncommented) line, which is a syntax
# error in the .py representation of this notebook.
#
# 1 bit reserved and set to 0, 4 bits to indicate the index mode, 3 bits reserved
# and set to 0, 4 bits to indicate the cell resolution 0-15, 7 bits to indicate
# the base cell 0-121, and 3 bits to indicate each subsequent digit 0-6 from
# resolution 1 up to the resolution of the cell (45 bits total are reserved for
# resolutions 1-15)
Usage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     argv:
#     - C:\Users\<NAME>\Anaconda3\envs\py35\python.exe
#     - -m
#     - ipykernel_launcher
#     - -f
#     - '{connection_file}'
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nteract={"transient": {"deleting": false}}
# # Variance inflation Factor (VIF)

# + [markdown] nteract={"transient": {"deleting": false}}
# In statistics, the variance inflation factor (VIF) is the quotient of the variance
# in a model with multiple terms by the variance of a model with one term alone. It
# quantifies the severity of multicollinearity in an ordinary least squares
# regression analysis. It provides an index that measures how much the variance (the
# square of the estimate's standard deviation) of an estimated regression
# coefficient is increased because of collinearity.
# https://en.wikipedia.org/wiki/Variance_inflation_factor

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

import warnings
warnings.filterwarnings("ignore")

# fetch yahoo data
import yfinance as yf
yf.pdr_override()

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# input
symbol = 'AMD'
start = '2014-01-01'
end = '2018-08-27'

# Read data
dataset = yf.download(symbol, start, end)

# Only keep close columns
dataset.head()

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Derive a few binary signals plus daily returns, then drop the rows made
# incomplete by the shifts.
next_vol = dataset['Volume'].shift(-1)
next_open = dataset['Open'].shift(-1)
next_close = dataset['Adj Close'].shift(-1)
dataset['Increase_Decrease'] = np.where(next_vol > dataset['Volume'], 1, 0)
dataset['Buy_Sell_on_Open'] = np.where(next_open > dataset['Open'], 1, 0)
dataset['Buy_Sell'] = np.where(next_close > dataset['Adj Close'], 1, 0)
dataset['Returns'] = dataset['Adj Close'].pct_change()
dataset = dataset.dropna()

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
from statsmodels.stats.outliers_influence import variance_inflation_factor

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
X = dataset

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# One VIF score per column of the design matrix.
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, col)
                     for col in range(X.shape[1])]
vif["features"] = X.columns

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
vif
Variance_Inflation_Factor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.4.0 # language: julia # name: julia-0.4 # --- # + [markdown] internals={"slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"} # <h1>The Roots package</h1> # - # <p>The <code>Roots</code> package contains simple routines for finding roots of continuous scalar functions of a single real variable. The basic interface is through the function <code>fzero</code>, which through multiple dispatch can handle many different cases.</p> # <p>We will use these pacakges</p> using Plots backend(:gadfly) using Roots # + [markdown] internals={"slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"} # <h2>Bracketing</h2> # - # <p>For a function &#36;f: R \rightarrow R&#36; a bracket is a pair &#36;a&lt;b&#36; for which &#36;f&#40;a&#41;\cdot f&#40;b&#41; &lt; 0&#36;. That is they have different signs. If &#36;f&#36; is continuous this forces there to be a zero on the interval &#36;&#91;a,b&#93;&#36;, otherwise, if &#36;f&#36; is only piecewise continuous, there must be a point &#36;c&#36; in &#36;&#91;a,b&#93;&#36; with the left limit and right limit at &#36;c&#36; having different signs. These values can be found, up to floating point roundoff.</p> # <p>That is, a value &#36;a &lt; c &lt; b&#36; can be found with either <code>f&#40;c&#41; &#61;&#61; 0.0</code> or <code>f&#40;prevfloat&#40;c&#41;&#41; * f&#40;nextfloat&#40;c&#41;&#41; &lt;&#61; 0</code>.</p> # <p>To illustrate, consider the function &#36;f&#40;x&#41; &#61; \cos&#40;x&#41; - x&#36;. 
From the graph we see readily that &#36;&#91;0,1&#93;&#36; is a bracket:</p> f(x) = cos(x) - x plot(f, -2,2) # <p>The basic function call specifies a bracket using vector notation:</p> x = fzero(f, [0, 1]) x, f(x) # <p>For that function <code>f&#40;x&#41; &#61;&#61; 0.0</code>. Next consider &#36;f&#40;x&#41; &#61; \sin&#40;x&#41;&#36;. A known root is &#36;\pi&#36;. Basic trignometry tells us that &#36;&#91;\pi/2, 3\pi2&#93;&#36; will be a bracket:</p> f(x) = sin(x) x = fzero(f, [pi/2, 3pi/2]) x, f(x) # <p>This value of <code>x</code> does not produce <code>f&#40;x&#41; &#61;&#61; 0.0</code>, however, it is as close as can be:</p> f(prevfloat(x)) * f(x) < 0.0 || f(x) * f(nextfloat(x)) < 0.0 # <p>That is at <code>x</code> the function is changing sign.</p> # <p>The basic algorithm used for bracketing when the values are simple floating point values is the bisection method. Though there are algorithms that mathematically should converge faster &#40;and one is used for the case where <code>BigFloat</code> numbers are used&#41; by exploiting floating point computations this algorithm uses fewer function calls and runs faster.</p> # + [markdown] internals={"slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"} # <h2>Using an initial guess</h2> # - # <p>If a bracket is not known, but a good initial guess is, the <code>fzero</code> function provides an interface to some different algorithms. The basic algorithm is modeled after an algorithm used for <a href="http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf">HP-34 calculators</a>. This algorithm is much more robust to the quality of the initial guess and does not rely on tolerances for a stopping rule. 
In many cases it satisfies the criteria for a bracketing solution.</p> # <p>For example, we have:</p> f(x) = cos(x) - x x = fzero(f , 1) x, f(x) # <p>And </p> f(x) = x^3 - 2x - 5 x = fzero(f, 2) x, f(x), f(prevfloat(x)) * f(nextfloat(x)) # <p>For even more precision, <code>BigFloat</code> numbers can be used</p> x = fzero(sin, big(3)) x, f(x), x - pi # + [markdown] internals={"slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "subslide"} # <h3>Higher order methods</h3> # - # <p>The default call to <code>fzero</code> uses a second order method at best and then bracketing, which involves potentially many more function calls. For some functions, a higher-order method might be better suited. There are algorithms of order 1 &#40;secant method&#41;, 2 &#40;<a href="http://en.wikipedia.org/wiki/Steffensen&#39;s_method">Steffensen</a>&#41;, 5, 8, and 16. The order 2 method is generally more efficient, but is more sensitive to the initial guess than, say, the order 8 method. These algorithms are accessed by specifying a value for the <code>order</code> argument:</p> f(x) = 2x - exp(-x) x = fzero(f, 1, order=2) x, f(x) f(x) = (x + 3) * (x - 1)^2 x = fzero(f, -2, order=5) x, f(x) x = fzero(f, 2, order=8) x, f(x) # <p>The latter shows that zeros need not be simple zeros &#40;i.e. &#36;f&#39;&#40;x&#41; &#61; 0&#36;, if defined&#41; to be found. For the higher-order methods, there is a tolerance that can be specified so that a value is returned as a zero if <code>abs&#40;f&#40;x&#41;&#41; &lt; tol</code>. The default method for <code>fzero</code> uses a very strict tolerance for this, otherwise defaulting to an error that at times might be very close to the actual zero. For this problem it finds the exact value:</p> x = fzero(f, 2) x, f(x) # <p>But not for a similar problem:</p> fzero(x -> x^6, 1) # <p>&#40;Though the answer is basically on track, the algorithm takes too long to improve itself to the very stringent range set. 
For problems where a bracket is found, this dithering won&#39;t happen.&#41;</p> # <p>The higher-order methods are basically various derivative-free versions of Newtons method which has update step &#36;x - f&#40;x&#41;/f&#39;&#40;x&#41;&#36;. For example, Steffensen&#39;s method is essentially replacing &#36;f&#39;&#40;x&#41;&#36; with &#36;&#40;f&#40;x &#43; f&#40;x&#41;&#41; - f&#40;x&#41;&#41;/f&#40;x&#41;&#36;. This is just a forward-difference approximation to the derivative with &quot;&#36;h&#36;&quot; being &#36;f&#40;x&#41;&#36;, which presumably is close to &#36;0&#36; already. The methods with higher order combine this with different secant line approaches that minimize the number of function calls. The default method uses a combination of Steffensen&#39;s method with modifications, a quadratic fit, and, if possible, a bracketing approach. It may need many more function calls than the higher-order methods. These higher-order methods can be susceptible to some of the usual issues found with Newton&#39;s method: poor initial guess, small first derivative, or large second derivative near the zero.</p> # <p>For a classic example where basically the large second derivative is the issue, we have &#36;f&#40;x&#41; &#61; x^&#123;1/3&#125;&#36;:</p> f(x) = cbrt(x) x = fzero(f, 1, order=8) # all of 2, 5, 8, and 16 fail # <p>However, the default finds the root here</p> x = fzero(f, 1) x, f(x) # <p>Finally, we show another example illustrating that the default <code>fzero</code> call is more forgiving to an initial guess. The devilish function defined below comes from a <a href="http://people.sc.fsu.edu/~jburkardt/cpp_src/test_zero/test_zero.html">test suite</a> of difficult functions. The default method finds the zero:</p> f(x) = cos(100*x)-4*erf(30*x-10) plot(f, -2, 2) fzero(f, 1) # <p>Whereas, with <code>order&#61;n</code> methods fail. 
For example,</p> fzero(f, 1, order=8) # <p>Basically the high order oscillation can send the proxy tangent line off in nearly random directions.</p> # + [markdown] internals={"slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"} # <h2>Polynomials</h2> # - # <p>The <code>Polynomials</code> package provides a type for working with polynomial functions that allows many typical polynomial operations to be defined. In this context, the <code>roots</code> function is used to find the roots of a polynomial.</p> # <p>For example, </p> using Polynomials x = poly([0.0]) # (x - 0.0) roots((x-1)*(x-2)*(x-3)) # <p>As a convenience, this package adds a function interface to <code>roots</code>:</p> f(x) = (x-1)*(x-2)*(x^2 + x + 1) roots(f) # <p>The <code>fzeros</code> function will find the real roots of a univariate polynomial:</p> fzeros( (x-1)*(x-2)*(x^2 + x + 1)) # <p>As with roots, this function be called with a function:</p> f(x) = x*(x-1)*(x^2 + 1)^4 fzeros(f) # <p>The algorithm can have numeric issues when the polynomial degree gets too large, or the roots are too close together.</p> # <p>The <code>multroot</code> function will also find the roots. 
The algorithm does a better job when there are multiple roots, as it implements an algorithm that first identifies the multiplicity structure of the roots, and then tries to improve these values.</p> multroot((x-1)*(x-2)*(x-3)) # roots, multiplicity # <p>The <code>factor</code> function provides a more pleasant output.</p> # <p>The <code>roots</code> function degrades as there are multiplicities:</p> p = (x-1)^2*(x-2)^3*(x-3)^4 roots(p) # <p>Whereas, <code>multroot</code> gets it right.</p> factor(p) # <p>The difference gets dramatic when the multiplicities get quite large.</p> # + [markdown] internals={"slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"} # <h2>Classical methods</h2> # - # <p>The package provides some classical methods for root finding: <code>newton</code>, <code>halley</code>, and <code>secant_method</code>. We can see how each works on a problem studied by Newton himself. Newton&#39;s method uses the function and its derivative:</p> f(x) = x^3 - 2x - 5 fp(x) = 3x^2 - 2 x = newton(f, fp, 2) x, f(x), f(prevfloat(x)) * f(nextfloat(x)) # <p>To see the algorithm in progress, the argument <code>verbose&#61;true</code> may be specified. </p> # <p>The secant method needs two starting points, here we start with 2 and 3:</p> x = secant_method(f, 2,3) x, f(x), f(prevfloat(x)) * f(nextfloat(x)) # <p>Halley&#39;s method has cubic convergence, as compared to Newton&#39;s quadratic convergence. It uses the second derivative as well:</p> fpp(x) = 6x x = halley(f, fp, fpp, 2) x, f(x), f(prevfloat(x)) * f(nextfloat(x)) # <p>For many function, the derivatives can be computed automatically. The <code>ForwardDiff</code> package provides a means. 
This package wraps the process into an operator, <code>D</code> which returns the derivative of a function <code>f</code> &#40;for simple-enough functions&#41;:</p> newton(f, D(f), 2) # <p>Or for Halley&#39;s method</p> halley(f, D(f), D(f,2), 2) # <p>&#40;The operator <code>D2&#40;f&#41;</code> is a convenience for <code>D&#40;f,2&#41;</code>.&#41; Specifying the derivative&#40;s&#41; can be skipped, the functions will default to the above calls.</p> # + [markdown] internals={"slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"} # <h2>Finding critical points</h2> # - # <p>The <code>D</code> function makes it straightforward to find critical points &#40;where the derivative is &#36;0&#36; or undefined&#41;. For example, the critical point of the function &#36;f&#40;x&#41; &#61; 1/x^2 &#43; x^3, x &gt; 0&#36; can be found with:</p> f(x) = 1/x^2 + x^3 fzero(D(f), 1) # <p>For more complicated expressions, <code>D</code> will not work. In this example, we have a function &#36;f&#40;x, \theta&#41;&#36; that models the flight of an arrow on a windy day:</p> function flight(x, theta) k = 1/2 a = 200*cosd(theta) b = 32/k tand(theta)*x + (b/a)*x - b*log(a/(a-x)) end # <p>The total distance flown is when <code>flight&#40;x&#41; &#61;&#61; 0.0</code> for some <code>x &gt; 0</code>: This can be solved for different <code>theta</code> with <code>fzero</code>. In the following, we note that <code>log&#40;a/&#40;a-x&#41;&#41;</code> will have an asymptote at <code>a</code>, so we start our search at <code>a-1</code>:</p> function howfar(theta) a = 200*cosd(theta) fzero(x -> flight(x, theta), a-1) end # <p>To see the trajectory if shot at 30 degrees, we have:</p> theta = 30 plot(x -> flight(x, theta), 0, howfar(theta)) # <p>To maximize the range we solve for the lone critical point of <code>howfar</code> within the range. The derivative can not be taken automatically with <code>D</code>. 
So, here we use a central-difference approximation and start the search at 45 degrees, the angle which maximizes the trajectory on a non-windy day:</p> h = 1e-5 howfarp(theta) = (howfar(theta+h) - howfar(theta-h)) / (2h) fzero(howfarp, 45)
doc/roots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yukinaga/ai_programming/blob/main/lecture_08/02_image_generation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="Niaz8_W6OX34"
# # Image generation with an RNN
# By treating an image as sequential data, an RNN can be used to generate images.
# Here we train an RNN (LSTM layer) on image data and generate the bottom half of
# each image from its top half.

# + [markdown] id="LoISGl864sy9"
# ## Fashion-MNIST
# Load Fashion-MNIST via torchvision.datasets: 60,000 labelled fashion-item
# images. The code below loads the dataset and displays 25 random images.

# + id="sQ1S5UNy-rpY"
from torchvision.datasets import FashionMNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt

fmnist_data = FashionMNIST(root="./data",
                           train=True, download=True,
                           transform=transforms.ToTensor())
fmnist_classes = np.array(["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
                           "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"])
print("データの数:", len(fmnist_data))

n_image = 25  # number of images to display
fmnist_loader = DataLoader(fmnist_data, batch_size=n_image, shuffle=True)
dataiter = iter(fmnist_loader)
# Fix: `dataiter.next()` was removed from PyTorch's DataLoader iterator;
# use the builtin next() instead.
images, labels = next(dataiter)  # take the first batch

img_size = 28
plt.figure(figsize=(10, 10))
for i in range(n_image):
    plt.subplot(5, 5, i+1)
    plt.imshow(images[i].reshape(img_size, img_size), cmap="Greys_r")
    label = fmnist_classes[labels[i]]
    plt.title(label)
    # hide axis labels and ticks
    plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
plt.show()

# + [markdown] id="vvRgf3-j2k76"
# ## Data preprocessing
# Reshape the images into sequences suitable for the RNN: each input is a window
# of rows, and the target is the row that follows the window.

# + id="bUcJiVtDV7Ad"
import torch
from torch.utils.data import DataLoader

n_time = 14            # sequence length (rows per input window)
n_in = img_size        # input layer size
n_mid = 256            # hidden layer size
n_out = img_size       # output layer size
n_sample_in_img = img_size - n_time  # samples extracted from one image

dataloader = DataLoader(fmnist_data, batch_size=len(fmnist_data), shuffle=False)
dataiter = iter(dataloader)
train_imgs, labels = next(dataiter)  # fixed: was dataiter.next()
train_imgs = train_imgs.reshape(-1, img_size, img_size)

n_sample = len(train_imgs) * n_sample_in_img  # total number of samples

input_data = np.zeros((n_sample, n_time, n_in))  # inputs
correct_data = np.zeros((n_sample, n_out))       # targets
for i in range(len(train_imgs)):
    for j in range(n_sample_in_img):
        sample_id = i * n_sample_in_img + j
        input_data[sample_id] = train_imgs[i, j:j+n_time]
        correct_data[sample_id] = train_imgs[i, j+n_time]

input_data = torch.tensor(input_data, dtype=torch.float)
correct_data = torch.tensor(correct_data, dtype=torch.float)
dataset = torch.utils.data.TensorDataset(input_data, correct_data)
train_loader = DataLoader(dataset, batch_size=128, shuffle=True)

# + [markdown] id="d8pnaugFDnLN"
# ## Test data
# Used to check that the trained model works.

# + id="xISsKRTAp7QG"
n_disp = 10  # number of images to generate and display

disp_data = FashionMNIST(root="./data",
                         train=False, download=True,
                         transform=transforms.ToTensor())
disp_loader = DataLoader(disp_data, batch_size=n_disp, shuffle=False)
dataiter = iter(disp_loader)
disp_imgs, labels = next(dataiter)  # fixed: was dataiter.next()
disp_imgs = disp_imgs.reshape(-1, img_size, img_size)

# + [markdown] id="NyLBjYjr3RXu"
# ## Image-generation helper
# Shows the original images `disp_imgs` next to `gen_imgs`, whose bottom halves
# are generated from the top halves. The top half seeds the first new row; each
# newly generated row then joins the most recent window to predict the next row,
# until the bottom half is complete.

# + id="dFmq4Oy6apUC"
def generate_images():
    # original images
    print("Original:")
    plt.figure(figsize=(20, 2))
    for i in range(n_disp):
        ax = plt.subplot(1, n_disp, i+1)
        plt.imshow(disp_imgs[i], cmap="Greys_r", vmin=0.0, vmax=1.0)
        ax.get_xaxis().set_visible(False)  # hide the axes
        ax.get_yaxis().set_visible(False)
    plt.show()

    # images whose bottom half is generated by the RNN
    print("Generated:")
    gen_imgs = disp_imgs.clone()
    plt.figure(figsize=(20, 2))
    for i in range(n_disp):
        for j in range(n_sample_in_img):
            x = gen_imgs[i, j:j+n_time].reshape(1, n_time, img_size)
            x = x.cuda()  # GPU
            gen_imgs[i, j+n_time] = net(x)[0]
        ax = plt.subplot(1, n_disp, i+1)
        plt.imshow(gen_imgs[i].detach(), cmap="Greys_r", vmin=0.0, vmax=1.0)
        ax.get_xaxis().set_visible(False)  # hide the axes
        ax.get_yaxis().set_visible(False)
    plt.show()

# + [markdown] id="FalXNYaJPkoE"
# ## Building the model
# The model is a class inheriting from `nn.Module`; the LSTM layer is `nn.LSTM`.

# + id="SuqqZmsh_jNK"
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.rnn = nn.LSTM(          # LSTM layer
            input_size=n_in,         # input size
            hidden_size=n_mid,       # number of hidden units
            batch_first=True,        # input shape: (batch, time, features)
        )
        self.fc = nn.Linear(n_mid, n_out)  # fully connected output layer

    def forward(self, x):
        # h is the hidden state passed to the next step, c the cell state
        y_rnn, (h, c) = self.rnn(x, None)
        y = self.fc(y_rnn[:, -1, :])  # use the output of the last time step
        return y

net = Net()
net.cuda()  # GPU
print(net)

# + [markdown] id="qsW5zCKhQE9p"
# ## Training
# Train on minibatches from the DataLoader; every epoch, print the loss and
# generate images. Training is slow — select a GPU runtime.

# + id="u6zwN3nArbGC"
from torch import optim

# mean squared error loss
loss_fnc = nn.MSELoss()

# Adam optimizer
optimizer = optim.Adam(net.parameters())

# loss history
record_loss_train = []

# train for 25 epochs
for i in range(25):
    net.train()  # training mode
    loss_train = 0
    for j, (x, t) in enumerate(train_loader):  # minibatch (x, t)
        x, t = x.cuda(), t.cuda()  # GPU
        y = net(x)
        loss = loss_fnc(y, t)
        loss_train += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    loss_train /= j+1
    record_loss_train.append(loss_train)
    if i % 1 == 0:
        print("Epoch:", i, "Loss_Train:", loss_train)
        generate_images()

# + [markdown] id="rJwwrWTw43rx"
# ## Loss curve
# Plot how the training loss evolves.

# + id="OaJx4swE45XI"
import matplotlib.pyplot as plt

plt.plot(range(len(record_loss_train)), record_loss_train, label="Train")
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Error")
plt.show()

# + [markdown] id="iMrpac0m4Nct"
# The loss should decrease smoothly over the epochs.
lecture_08/02_image_generation.ipynb