code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Dependencies and Setup # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np # Hide warning messages in notebook import warnings warnings.filterwarnings('ignore') # File to Load (Remember to Change These) mouse_drug_data_to_load = "data/mouse_drug_data.csv" clinical_trial_data_to_load = "data/clinicaltrial_data.csv" # Read the Mouse and Drug Data and the Clinical Trial Data got_mouse_data = pd.read_csv(mouse_drug_data_to_load) # Preview Mouse and Drug Data got_mouse_data.head() # - # Preview Clinical Trial Data got_clinical_data = pd.read_csv(clinical_trial_data_to_load) got_clinical_data.head() # + # Combine the data into a single dataset combined_data = pd.merge(got_clinical_data, got_mouse_data, on="Mouse ID", how = "left") # Display the data table for preview combined_data.head() # - # ## Tumor Response to Treatment # + # Store the Mean Tumor Volume Data Grouped by Drug and Timepoint tumor_group = combined_data.groupby(["Drug","Timepoint"]) tumor_group_mean = tumor_group["Tumor Volume (mm3)"].mean() # Convert to DataFrame tumor_group_table = pd.DataFrame(tumor_group_mean).reset_index() # Preview DataFrame tumor_group_table # + # Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint tumor_group_sem = tumor_group["Tumor Volume (mm3)"].sem() # Convert to DataFrame tumor_group_sem_table = pd.DataFrame(tumor_group_sem).reset_index() # Preview DataFrame tumor_group_sem_table.head() # + # Minor Data Munging to Re-Format the Data Frames tumor_group_mean = tumor_group_mean.reset_index() tumor_vols_pivot_mean = tumor_group_mean.pivot(index="Timepoint", columns="Drug")["Tumor Volume (mm3)"] # Preview that Reformatting worked tumor_vols_pivot_mean.head() # + # Generate the Plot (with Error Bars) 
Capomulin_error = tumor_group_sem_table.loc[tumor_group_sem_table["Drug"] == "Capomulin", "Tumor Volume (mm3)"] Infubinol_error = tumor_group_sem_table.loc[tumor_group_sem_table["Drug"] == "Infubinol", "Tumor Volume (mm3)"] Ketapril_error = tumor_group_sem_table.loc[tumor_group_sem_table["Drug"] == "Ketapril", "Tumor Volume (mm3)"] Placebo_error = tumor_group_sem_table.loc[tumor_group_sem_table["Drug"] == "Placebo", "Tumor Volume (mm3)"] # Set time Time = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45] plt.errorbar(Time, tumor_vols_pivot_mean["Capomulin"] , yerr= Capomulin_error, label= "Capomulin", marker= "o", color="red", linestyle="--", linewidth=0.5) plt.errorbar(Time, tumor_vols_pivot_mean["Infubinol"] , yerr= Infubinol_error, label= "Infubinol", marker= "^", color="blue", linestyle="--", linewidth=0.5) plt.errorbar(Time, tumor_vols_pivot_mean["Ketapril"] , yerr= Ketapril_error, label= "Ketapril", marker= "d", color="green", linestyle="--", linewidth=0.5) plt.errorbar(Time, tumor_vols_pivot_mean["Placebo"] , yerr= Placebo_error , label= "Placebo", marker= "s", color="black", linestyle="--", linewidth=0.5) # Add labels, legend, title and grid plt.xlabel("Time (Days)") plt.ylabel("Tumor Volume (mm3)") plt.legend(loc="upper left") plt.title("Tumor Response to Treatment") plt.grid(axis='y') # Save the Figure plt.savefig("charts_images/treatment_graph.png") # Show the Figure plt.show() # - # ## Metastatic Response to Treatment # + # Store the Mean Met. Site Data Grouped by Drug and Timepoint meta_group_data = combined_data.groupby(["Drug", "Timepoint"]) meta_group_mean = meta_group_data["Metastatic Sites"].mean() meta_group_table = pd.DataFrame(meta_group_mean) # Preview DataFrame meta_group_table.head() # + # Store the Standard Error associated with Met. 
Sites Grouped by Drug and Timepoint meta_group_sem = meta_group_data["Metastatic Sites"].sem() # Convert to DataFrame meta_group_sem_table = pd.DataFrame(meta_group_sem) # Preview DataFrame meta_group_sem_table.head() # + # Minor Data Munging to Re-Format the Data Frames meta_group_mean = meta_group_mean.reset_index() meta_vols_pivot_mean = meta_group_mean.pivot(index="Timepoint", columns="Drug")["Metastatic Sites"] # Preview that Reformatting worked meta_vols_pivot_mean.head() # + # Generate the Plot (with Error Bars) plt.errorbar(Time, meta_vols_pivot_mean["Capomulin"] , yerr= Capomulin_error, label= "Capomulin", marker= "o", color="red", linestyle="--", linewidth=0.5) plt.errorbar(Time, meta_vols_pivot_mean["Infubinol"] , yerr= Infubinol_error, label= "Infubinol", marker= "^", color="blue", linestyle="--", linewidth=0.5) plt.errorbar(Time, meta_vols_pivot_mean["Ketapril"] , yerr= Ketapril_error, label= "Ketapril", marker= "d", color="green", linestyle="--", linewidth=0.5) plt.errorbar(Time, meta_vols_pivot_mean["Placebo"] , yerr= Placebo_error , label= "Placebo", marker= "s", color="black", linestyle="--", linewidth=0.5) # Add labels, legend, title and grid plt.xlabel("Treatment Duration (Days)") plt.ylabel("Met. 
Sites") plt.axis([-2, 47.5, -0.3, 3.75]) plt.legend(loc="upper left") plt.title("Metastatic Spread During Treatment") plt.grid(axis='y') # Save the Figure plt.savefig("charts_images/metastatic_spread_graph.png") # Show the Figure plt.show() # - # ## Survival Rates # + # Store the Count of Mice Grouped by Drug and Timepoint (W can pass any metric) survival_count = combined_data.groupby(["Drug", "Timepoint"]).count()["Tumor Volume (mm3)"] survival_count # Convert to DataFrame survival_count_table = pd.DataFrame(survival_count).reset_index() survival_count_table=survival_count_table.rename(columns = {"Tumor Volume (mm3)":"Mouse Count"}) # Preview DataFrame survival_count_table.head() # + # Minor Data Munging to Re-Format the Data Frames survival_count_table = survival_count_table.reset_index() survival_count_table_pivot = survival_count_table.pivot(index="Timepoint", columns="Drug")["Mouse Count"] # Preview the Data Frame survival_count_table_pivot.head() # - # + # Generate the Plot plt.errorbar(Time, survival_count_table_pivot["Capomulin"] , yerr= Capomulin_error, label= "Capomulin", marker= "o", color="red", linestyle="--", linewidth=0.5) plt.errorbar(Time, survival_count_table_pivot["Infubinol"] , yerr= Infubinol_error, label= "Infubinol", marker= "^", color="blue", linestyle="--", linewidth=0.5) plt.errorbar(Time, survival_count_table_pivot["Ketapril"] , yerr= Ketapril_error, label= "Ketapril", marker= "d", color="green", linestyle="--", linewidth=0.5) plt.errorbar(Time, survival_count_table_pivot["Placebo"] , yerr= Placebo_error , label= "Placebo", marker= "s", color="black", linestyle="--", linewidth=0.5) # Add labels, legend, title and grid plt.xlabel("Time (Days)") plt.ylabel("Survival Rate") plt.legend(loc="lower left") plt.title("Survival During Treatment") plt.grid() # Save the Figure plt.savefig("charts_images/survival_graph.png") # Show the Figure plt.show() # - # ## Summary Bar Graph # + # Calculate the percent changes for each drug 
tumor_vols_pivot_mean_data = 100*(tumor_vols_pivot_mean.iloc[-1]/tumor_vols_pivot_mean.iloc[0]-1) tumor_vols_pivot_mean_data = 100*(tumor_vols_pivot_mean.iloc[-1]/tumor_vols_pivot_mean.iloc[0]-1) # Display the data to confirm tumor_vols_pivot_mean_data # - # Store all Relevant Percent Changes into a Tuple tuple_percent_changes = tuple(zip(tumor_vols_pivot_mean_data.index, tumor_vols_pivot_mean_data)) tuple_percent_changes_list = list(tuple_percent_changes) # + # Splice the data between passing and failing drugs passing = tumor_vols_pivot_mean_data< 0 # Orient widths. Add labels, tick marks, etc. drug_list = ['Capomulin','Infubinol','Ketapril','Placebo'] change_list = [(tumor_vols_pivot_mean_data[drug])for drug in drug_list] change_plt = plt.bar(drug_list,change_list,width=-1,align='edge',color=passing.map({True:'g',False:'r'})) plt.ylim(-30,75) plt.ylabel('% Tumor Volume Change') plt.title('Tumor Change over 45 Day Treatment') plt.grid() # Use functions to label the percentages of changes def autolabel(rects): for rect in rects: height = rect.get_height() if height > 0: label_position = 2 else: label_position = -8 plt.text(rect.get_x() + rect.get_width()/2., label_position, '%d' % int(height)+'%',color='white', ha='center', va='bottom') # Call functions to implement the function calls autolabel(change_plt) # Save the Figure plt.savefig("charts_images/tumor_change.png") # -
AV_pymaceuticals_starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Obtaining information about your `backend` # # #### _Note: All the attributes of the backend are described in detail in the [Qiskit Backend Specifications](https://arxiv.org/pdf/1809.03452.pdf). This page reviews a subset of the spec._ # # Programming a quantum computer at the microwave pulse level requires more information about the device than is required at the circuit level. A quantum circuit is built for an abstract quantum computer -- it will yield the same quantum state on any quantum computer (except for varying performance levels). A pulse schedule, on the other hand, is so specific to the device, that running one program on two different backends is not expected to have the same result, even on perfectly noiseless systems. # # As a basic example, imagine a drive pulse `q0_X180` calibrated on qubit 0 to enact an $X180$ pulse, which flips the state of qubit 0. If we use the samples from that pulse on qubit 1 on the same device, or qubit 0 on another device, we do not know what the resulting state will be -- but we can be pretty sure it won't be an $X180$ operation. The qubits are each unique, with various drive coupling strengths. If we have specified a frequency for the drive pulse, it's very probable that pulse would have little effect on another qubit, which has its own resonant frequency. # # With that, we have motivated why information from the backend may be very useful at times for building Pulse schedules. 
The information included in a `backend` is broken into three main parts: # # - [**Configuration**](#Configuration): static backend features # - [**Properties**](#Properties): measured and reported backend characteristics # - [**Defaults**](#Defaults): default settings for the OpenPulse-enabled backend # # which are each covered in the following sections. While all three of these contain interesting data for Pulse users, the defaults are _only_ provided for backends enabled with OpenPulse. # # The first thing you'll need to do is grab a backend to inspect. Here we use a mocked backend that contains a snapshot of data from the real OpenPulse-enabled backend. # + from qiskit.test.mock import FakeAlmaden backend = FakeAlmaden() # - # ## Configuration # # The configuration is where you'll find data about the static setup of the device, such as its name, version, the number of qubits, and the types of features it supports. # # Let's build a description of our backend using information from the `backend`'s config. # + config = backend.configuration() # Basic Features print("This backend is called {0}, and is on version {1}. It has {2} qubit{3}. It " "{4} OpenPulse programs. The basis gates supported on this device are {5}." "".format(config.backend_name, config.backend_version, config.n_qubits, '' if config.n_qubits == 1 else 's', 'supports' if config.open_pulse else 'does not support', config.basis_gates)) # - # Neat! All of the above configuration is available for any backend, whether enabled with OpenPulse or not, although it is not an exhaustive list. There are additional attributes available on Pulse backends. Let's go into a bit more detail with those. # # The **timescale**, `dt`, is backend dependent. Think of this as the inverse sampling rate of the control rack's arbitrary waveform generators. Each sample point and duration in a Pulse `Schedule` is given in units of this timescale. 
config.dt # units of seconds # The configuration also provides information that is useful for building measurements. Pulse supports three measurement levels: `0: RAW`, `1: KERNELED`, and `2: DISCRIMINATED`. The `meas_levels` attribute tells us which of those are supported by this backend. To learn how to execute programs with these different levels, see this page -- COMING SOON. config.meas_levels # For backends which support measurement level 0, the sampling rate of the control rack's analog-to-digital converters (ADCs) also becomes relevant. The configuration also has this info, where `dtm` is the time per sample returned: config.dtm # The measurement map, explained in detail on [this page COMING SOON], is also found here. config.meas_map # The configuration also supplies convenient methods for getting channels for your schedule programs. For instance: config.drive(0) config.measure(0) config.acquire(0) # It is a matter of style and personal preference whether you use `config.drive(0)` or `DriveChannel(0)`. # # ## Properties # # The `backend` properties contain data that was measured and optionally reported by the provider. Let's see what kind of information is reported for qubit 0. props = backend.properties() # + def describe_qubit(qubit, properties): """Print a string describing some of reported properties of the given qubit.""" # Conversion factors from standard SI units us = 1e6 ns = 1e9 GHz = 1e-9 print("Qubit {0} has a \n" " - T1 time of {1} microseconds\n" " - T2 time of {2} microseconds\n" " - U2 gate error of {3}\n" " - U2 gate duration of {4} nanoseconds\n" " - resonant frequency of {5} GHz".format( qubit, properties.t1(qubit) * us, properties.t2(qubit) * us, properties.gate_error('u2', qubit), properties.gate_length('u2', qubit) * ns, properties.frequency(qubit) * GHz)) describe_qubit(0, props) # - # Properties are not guaranteed to be reported, but backends without Pulse access typically also provide this data. 
# # ## Defaults # # Unlike the other two sections, `PulseDefaults` are only available for Pulse-enabled backends. It contains the default program settings run on the device. defaults = backend.defaults() # ### Drive frequencies # # Defaults contains the default frequency settings for the drive and measurement signal channels: # + q0_freq = defaults.qubit_freq_est[0] # Hz q0_meas_freq = defaults.meas_freq_est[0] # Hz GHz = 1e-9 print("DriveChannel(0) defaults to a modulation frequency of {} GHz.".format(q0_freq * GHz)) print("MeasureChannel(0) defaults to a modulation frequency of {} GHz.".format(q0_meas_freq * GHz)) # - # ### Pulse Schedule definitions for QuantumCircuit instructions # # Finally, one of the most important aspects of the `backend` for `Schedule` building is the `InstructionScheduleMap`. This is a basic mapping from a circuit operation's name and qubit to the default pulse-level implementation of that instruction. calibrations = defaults.instruction_schedule_map print(calibrations) # Rather than build a measurement schedule from scratch, let's see what was calibrated by the backend to measure the qubits on this device: measure_schedule = calibrations.get('measure', range(config.n_qubits)) measure_schedule.draw(backend=backend) # This can easily be appended to your own Pulse `Schedule` (`sched += calibrations.get('measure', <qubits>) << sched.duration`)! # # Likewise, each qubit will have a `Schedule` defined for each basis gate, and they can be appended directly to any `Schedule` you build. # You can use `has` to see if an operation is defined. Ex: Does qubit 3 have an x gate defined? calibrations.has('x', 3) # Some circuit operations take parameters. U1 takes a rotation angle: calibrations.get('u1', 0, P0=3.1415) # While building your schedule, you can also use `calibrations.add(name, qubits, schedule)` to store useful `Schedule`s that you've made yourself. 
# # On this [page](07_pulse_scheduler.ipynb), we'll show how to schedule `QuantumCircuit`s into Pulse `Schedule`s. import qiskit.tools.jupyter # %qiskit_version_table # %qiskit_copyright
tutorials/circuits_advanced/08_gathering_system_information.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- direct= "/Users/djamillakhdar-hamina/Desktop/" def email_parser(): """ Assumptions: Given an email, read the email, then scan it for url. If url is found, download, rename, and then save to specified directory. Arguments: email_message and default directory= /Scopus Input: email_message with forwarded emails called pmt_content because that is the property name given by Jenkins when you call as variable to be executed in process Output: A group of zip files, renamed, and stored in specified directory called /Scopus :return: """ parser = ArgumentParser parser.add_argument('-p', '--pmt_content', required=True, help="""email message that will get parsed for url-link and zip-file""") parser.add_argument('-d','--directory', required=True, help="""specified directory for zip-file""")# args = parser.parse_args() ## Open email, fortunately the parse function will treat attachments, essentially, as part of (an instance) of the MIME or email data-structure. 
## with open(args.pmt_content, 'r') as email_msg: ## msg = email.parse(email_msg, policy=default) ## Scan emails for url and store the url(s) in a list msg= re.findall('https://\S*', msg) for url_link in msg.walk(): if url_link != re.search('nete.*CITEDBY.zip', url_link): ## Go through list of links, rename request = urllib.urlrequest(url) scopus_update_zip_file = zipfile.ZipFile(request) scopus_update_zip_file.filename = temp[0].split('/')[2] = re.search('nete.*ANI.*zip',links) ## Now store them in specified directory os.path.join(args.directory, scopus_update_zip_file) def email_parser(msg, directory): ##with open(pmt_content, 'r') as email_msg: ## msg = email.parse(email_msg, policy=default) ## Scan emails for url and store the url(s) in a list msg= re.findall('https://\S*', msg) for url_link in msg.walk(): if url_link != re.search('nete.*CITEDBY.zip', url_link): ## Go through list of links, rename request = urllib.urlrequest(url) scopus_update_zip_file = zipfile.ZipFile(request) scopus_update_zip_file.filename = temp[0].split('/')[2] = re.search('nete.*ANI.*zip',links) ## Now store them in specified directory os.path.join(directory, scopus_update_zip_file)
Scopus/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Adding Unit Tests to the NRPy+ Unit Testing Infrastructure # # ## Author: <NAME> # # # ## Introduction: # The goal of this module is to give the user an overview/understanding of NRPy+'s Unit Testing framework, which will give the user enough information to begin creating their own unit tests. We will begin by giving an overview of the important prerequisite knowledge to make the most out of unit tests. Next, we give an explanation for the user interaction within the unit testing framework; this will give the user the ability to create tests for themselves. Then we give the user some insight into interpreting the output from their unit tests. Finally, a full example using a test module will be run through in full, both with and without errors, to give the user a realistic sense of what unit testing entails. # # For in-depth explanations of all subfunctions (not user-interactable), see the [UnitTesting Function Reference](./UnitTesting/UnitTesting_Function_Reference.ipynb). This may not be essential to get unit tests up and running, but it will be invaluable information if the user ever wants to make modifications to the unit testing code. # # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows: # # 1. [Step 1](#motivation): Motivation and Prerequisite Knowledge # 1. [Step 1.a](#dicts): Dictionaries # 1. [Step 1.b](#logging): Logging # 1. [Step 2](#interaction): User Interaction # 1. [Step 2.a](#testfile): Test File # 1. 
[Step 2.b](#trustedvaluesdict): trusted_values_dict # 1. [Step 2.c](#bash): Bash Script # 1. [Step 3](#output): Interpreting output # 1. [Step 4](#checklist): Checklist # 1. [Step 4.a](#4a): Directory Creation # 1. [Step 4.b](#4b): Test File Creation # 1. [Step 4.c](#4c): Input Parameters # 1. [Step 4.d](#4d): Fill-in bash script and run # # 1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF # # # <a id='motivation'></a> # # # Step 1: Motivation and Prerequisite Knowledge \[Back to [top](#toc)\] # $$\label{motivation}$$ # # What is the purpose of unit testing, and why should you do it? To begin # thinking about that, consider what subtleties can occur within your code # that are almost unnoticeable to the eye, but wind up giving you an # incorrect result. You could make a small optimization, and not notice # any change in your result. However, maybe the optimization you made only # works on Python 3 and not Python 2, or it changes a value by some tiny # amount -- too small to be noticeable at a simple glance, but enough to # make a difference in succeeding calculations. # # This is where unit testing comes in. By initially calculating values for # the globals of your modules in a **trusted** version of your code and # storing those values in a dictionary, you can then easily check if # something stopped working correctly by comparing your newly calculated # values to the ones you've stored. On the frontend, there are four # concepts essential to understand to get your unit tests up and running: # `trusted_values_dict`, `create_test`, your testing module (which # will simply be referred to as `test_file`), and a bash script (which # will simply be referred to as `bash_script`). There is also some # important prerequisite knowledge that may be helpful to grasp before # beginning your testing. There are many functions at play in the backend # as well, all of which are described in the Function Reference. 
Mastery of these functions may not be # essential to get your tests up-and-running, but some basic understanding # of them with undoubtedly help with debugging. # # An important caveat is that the unit testing does not test the # **correctness** of your code or your variables. The unit tests act as a # protective measure to ensure that nothing was broken between versions of # your code; it gets its values by running _your_ code, so if something # starts out incorrect, it will be stored as incorrect in the system. # # <a id='dicts'></a> # # ## Step 1.a: Dictionaries \[Back to [top](#toc)\] # $$\label{dicts}$$ # # Dictionaries are used throughout the unit testing infrastructure. The user must create simple dictionaries to pass to our testing functions. If you know nothing about dictionaries, we recommend [this](https://www.w3schools.com/python/python_dictionaries.asp) article; it will get you up to speed for simple dictionary creation. # <a id='logging'></a> # # ## Step 1.b: Logging \[Back to [top](#toc)\] # $$\label{logging}$$ # # Logging is a python module that allows the user to specify their desired level of output by modifying a parameter, rather than having to use if-statements and print-statements. We allow the user to change the level of output through a parameter `logging_level`, in which we support the following levels: # # `ERROR`: Only print when an error occurs # # `INFO`: Print general information about test beginning, completion, major function calls, etc., as well as everything above. (Recommended) # # `DEBUG`: Print maximum amount of information -- every comparison, as well as everything above. # # A good way to think of these logging levels is that `INFO` is the default, `ERROR` is similar to a non-verbose mode, and `DEBUG` is similar to a verbose mode. 
# <a id='interaction'></a> # # # Step 2: User Interaction \[Back to [top](#toc)\] # $$\label{interaction}$$ # # Within the module the user is intending to test, a directory named `tests` should be created. This will house the test file for the given module and its associated `trusted_values_dict`. For example, if I intend to test `BSSN`, I will create a new directory `BSSN/tests`. Within the `tests` directory, the user should create a file called `test_(module).py` -- or `test_BSSN.py` for the given example. # # <a id='testfile'></a> # # ## Step 2.a: Test File \[Back to [top](#toc)\] # $$\label{testfile}$$ # # The test file is how the user inputs their module, functions, and globals information to the testing suite. For the purpose of consistency, we've created a skeleton for the test file (found [here](../edit/UnitTesting/test_skeleton.py)) that contains all information the user must specify. The user should change the name of the function to something relevant to their test. However, note that the name of the function must begin with `test_` in order for the bash script to successfully run the test -- this is the default naming scheme for most test suites/software. Inside the function, multiple fields are required to be filled out by the user; these fields are `module`, `module_name`, and `function_and_global_dict`. Below the function there is some code that begins with `if __name__ == '__main__':`. The user can ignore this code as it does backend work and makes sure to pass the proper information for the test. # # `module` is a string representing the module to be tested. 
# # `module_name` is a string representing the name of the module # # `function_and_global_dict` is a dictionary whose keys are string representations of functions that the user would like to be called on `module` and whose values are lists of string representations of globals that can be acquired by running their respective functions on `module` # # Example: # # ``` # def test_BrillLindquist(): # # module = 'BSSN.BrillLindquist' # # module_name = 'bl' # # function_and_global_dict = {'BrillLindquist(ComputeADMGlobalsOnly = True)': # ['alphaCart', 'betaCartU', 'BCartU', 'gammaCartDD', 'KCartDD']} # # create_test(module, module_name, function_and_global_dict) # ``` # # In most cases, this simple structure is enough to do exactly what the user wants. Sometimes, however, there is other information that needs to be passed into the test -- this is where optional arguments come in. # # The tests can take two optional arguments, `logging_level` and `initialization_string_dict` # # `logging_level` follows the same scheme as described [above](#logging). # # `initialization_string_dict` is a dictionary whose keys are functions that **must** also be in `function_and_global_dict` and whose values are strings containing well-formed Python code. The strings are executed as Python code before their respective function is called on the module. The purpose of this argument is to allow the user to do any necessary NRPy+ setup before the call their function. 
# # Example: # # ``` # def test_quantities(): # # module = 'BSSN.BSSN_quantities' # # module_name = 'BSSN_quantities' # # function_and_global_dict = {'BSSN_basic_tensors()': ['gammabarDD', 'AbarDD', 'LambdabarU', 'betaU', 'BU']} # # logging_level = 'DEBUG' # # initialization_string = ''' # import reference_metric as rfm # rfm.reference_metric() # rfm.ref_metric__hatted_quantities() # ''' # # initialization_string_dict = {'BSSN_basic_tensors()': initialization_string} # # create_test(module, module_name, function_and_global_dict, logging_level=logging_level, # initialization_string_dict=initialization_string_dict) # ``` # # An important thing to note is that even though `initialization_string` looks odd with its indentation, this is necessary for Python to interpret it correctly. If it was indented, Python would think you were trying to indent that code when it shouldn't be, and an error will occur. # # A question you may be wondering is why we need to create a new dictionary for the intialization string, insted of just passing it as its own argument. This is because the testing suite can accept multiple function calls, each with their own associated global list, in one function. It then naturally follows that we need `initailization_string_dict` to allow each function call to have its own code that runs before its function call. In the following example, the function `BSSN_basic_tensors()` has an initialization string, but the function `declare_BSSN_gridfunctions_if_not_declared_already()` doesn't. You can also clearly see they each have their own associated globals. 
# # Example: # # ``` # def test_quantities(): # # module = 'BSSN.BSSN_quantities' # # module_name = 'BSSN_quantities' # # function_and_global_dict = {'declare_BSSN_gridfunctions_if_not_declared_already()': # ['hDD', 'aDD', 'lambdaU', 'vetU', 'betU', 'trK', 'cf', 'alpha'], # # 'BSSN_basic_tensors()': ['gammabarDD', 'AbarDD', 'LambdabarU', 'betaU', 'BU']} # # logging_level = 'DEBUG' # # initialization_string = ''' # import reference_metric as rfm # rfm.reference_metric() # rfm.ref_metric__hatted_quantities() # ''' # # initialization_string_dict = {'BSSN_basic_tensors()': initialization_string} # # create_test(module, module_name, function_and_global_dict, logging_level=logging_level, # initialization_string_dict=initialization_string_dict) # ``` # # Lastly, within a single test file, you can define multiple test functions. It's as simple as defining a new function whose name starts with `test_` in the file and making sure to fill out the necessary fields. # <a id='trustedvaluesdict'></a> # # ## Step 2.b: trusted_values_dict \[Back to [top](#toc)\] # $$\label{trustedvaluesdict}$$ # # At this point, it's should be understood that our test suite will compare trusted values of your variables to newly calculated values to ensure that no variables were unknowingly modified. The `trusted_values_dict` acts as the means of storing the trusted value for each variable with the purpose of future comparison. A new `trusted_values_dict` is created by default when a test file is run for the first time -- it's visible in `tests/trusted_values_dict.py`. Note that if you run your code but can't see the file, refresh your IDE -- it's there, sometimes IDE's just get confused when you create a file within Python. 
The default structure of all `trusted_value_dict` files is as follows: # # ``` # from mpmath import mpf, mp, mpc # from UnitTesting.standard_constants import precision # # mp.dps = precision # trusted_values_dict = {} # # ``` # # The proper code to copy into this file will be printed to the console when a test is run. The test suite will also automatically write its calculated globals' values for a given function to this file in the proper format; make sure to check that things seem correct though! Remember that the `trusted_values_dict` stores **trusted**, not necessarily **correct**, values for each global. # <a id='bash'></a> # # ## Step 2.c: Bash Script \[Back to [top](#toc)\] # $$\label{bash}$$ # # In order to successfully run all the user's unit tests and properly integrate testing with TravisCI, we use a bash script as the 'hub' of all the tests to be run. This makes it easy for the user to comment out tests they don't want to run, add new tests to be automatically run with one line, etc. # # We offer a skeleton file, [`run_NRPy_UnitTests`](../edit/UnitTesting/run_NRPy_UnitTests.sh), which contains all the proper code to be easily run with minimum user interaction. All the user must do is call the `add_test` function on the test file they'd like to be run underneath the `TODO` comment. There are many examples in the file that show exactly how to create a new test. Then to add more tests, simply go to the next line and add another test. It's as simple as that! # # To run the bash script, open up a terminal, type in the path of the bash script, and then pick the Python interpreter to run the code -- for example, `./UnitTesting/run_NRPy_UnitTests.sh python` or `./UnitTesting/run_NRPy_UnitTests.sh python3`. # # # There's an additional field in the bash script called `rerun_if_fail`. It is a boolean that, if true, will automatically rerun the tests that failed with their `logging_level` set to `DEBUG`. 
This gives the user a plethora of debugging information that should make it much easier to figure out the issue. We'd recommend enabling it if there are only a couple modules that failed, as there is a very large amount of information printed for each failing module. However, it is an invaluable resource for figuring out a bug in your code, so keep it in mind when tests are failing. # # <a id='output'></a> # # # Step 3: Interpreting Output \[Back to [top](#toc)\] # $$\label{output}$$ # # Once a user's tests are fully set up, they need to be able to interpret the output of their tests; doing this allows the user to easily figure out what went wrong, why, and how to fix it. The amount of output for a given module is of course dependent on its logging level. For the purposes of this tutorial, we will assume that `logging_level` is set to `INFO`. # # While running a test, output is printed to the console that tells the user what is occurring at what point in time. # # Example successful test run console output: # # ``` # Testing test_u0_smallb_Poynting__Cartesian... # # INFO:root: Creating file /home/kevin/virtpypy/nrpyunittesting/u0_smallb_Poynting__Cartesian/tests/u0sbPoyn__compute_u0_smallb_Poynting__Cartesian__test.py... # INFO:root: ...Success: File created. # # INFO:root: Currently working on function compute_u0_smallb_Poynting__Cartesian() in module u0sbPoyn... # # INFO:root: Importing trusted_values_dict... # INFO:root: ...Success: Imported trusted_values_dict. # # INFO:root: Calling evaluate_globals... # INFO:root: ...Success: evaluate_globals ran without errors. # # INFO:root: Calling cse_simplify_and_evaluate_sympy_expressions... # INFO:root: ...Success: cse_simplify_and_evaluate_sympy_expressions ran without errors. # # INFO:root: Calling calc_error... # INFO:root: ...Success: calc_error ran without errors. # # . 
# ---------------------------------------------------------------------- # Ran 1 test in 3.550s # # OK # INFO:root: Test for function compute_u0_smallb_Poynting__Cartesian() in module u0sbPoyn passed! Deleting test file... # INFO:root: ...Deletion successful. Test complete. # # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # # All tests passed! # # ``` # # Step-by-step, how do we interpret this output? # # The first line tells the user what is being called -- this is the function that they define in their current test file being run; this is why a descriptive function name is important. # # Next, a file is created in the same directory as the current test file that actually runs the tests -- a new file is created to ensure that a clean Python environment is used for each test. # # Each function test within the test file is then called successively -- in this example, that is `compute_u0_smallb_Poynting__Cartesian()` in module `u0sbPoyn`. # # The function's associated `trusted_values_dict` is imported. # # Each global in the function's global list is then evaluated to a SymPy expression in `evaluate_globals`. # # Each global's SymPy expression is then evaluated into a numerical value in `cse_simplify_and_evaluate_sympy_expressions`. # # Each global is compared to its trusted value in `calc_error`. # # In this example, since no values differed, the test passed -- there is nothing the user has to do. # # The purpose of giving the user this output is to make it as easy as possible, when something fails, to figure out why and how to fix it. Say the user wasn't given the above output -- instead, after `INFO:root: Calling calc_error...` is printed, an error is printed and the program quits -- then the user knows the error occurred somewhere in `calc_error`, and it's easy to figure out what. If this output wasn't given, it would be extremely difficult to bugfix. 
# # Now let's consider example output when the trusted values for a couple globals differ from the newly calculated values: # # ``` # Testing function test_u0_smallb_Poynting__Cartesian... # # INFO:root: Creating file /home/kevin/virtpypy/nrpyunittesting/u0_smallb_Poynting__Cartesian/tests/u0sbPoyn__compute_u0_smallb_Poynting__Cartesian__test.py... # INFO:root: ...Success: File created. # # INFO:root: Currently working on function compute_u0_smallb_Poynting__Cartesian() in module u0sbPoyn... # # INFO:root: Importing trusted_values_dict... # INFO:root: ...Success: Imported trusted_values_dict. # # INFO:root: Calling evaluate_globals... # INFO:root: ...Success: evaluate_globals ran without errors. # # INFO:root: Calling cse_simplify_and_evaluate_sympy_expressions... # INFO:root: ...Success: cse_simplify_and_evaluate_sympy_expressions ran without errors. # # INFO:root: Calling calc_error... # ERROR:root: # # Variable(s) ['g4DD[0][0]', 'g4DD[0][1]'] in module u0sbPoyn failed. Please check values. # If you are confident that the newly calculated values are correct, comment out the old trusted values for # u0sbPoyn__compute_u0_smallb_Poynting__Cartesian__globals in your trusted_values_dict and copy the following code between the ##### into your trusted_values_dict. # Make sure to fill out the TODO comment describing why the values had to be changed. Then re-run test script. 
# # ##### # # # Generated on: 2019-08-14 # # Reason for changing values: TODO # trusted_values_dict['u0sbPoyn__compute_u0_smallb_Poynting__Cartesian__globals'] = {'g4DD[0][0]': mpf('1.42770464273047624140299713523'), 'g4DD[0][1]': mpf('0.813388473397507463814385913308'), 'g4DD[0][2]': mpf('0.652706348793296836714132090803'), 'g4DD[0][3]': mpf('1.22429414375154980405074869244'), 'g4DD[1][0]': mpf('0.813388473397507463814385913308'), 'g4DD[1][1]': mpf('0.657497767033916602485987823457'), 'g4DD[1][2]': mpf('0.057738705167452830657737194997'), 'g4DD[1][3]': mpf('0.391026617743468030141684721457'), 'g4DD[2][0]': mpf('0.652706348793296836714132090803'), 'g4DD[2][1]': mpf('0.057738705167452830657737194997'), 'g4DD[2][2]': mpf('0.142350778742078798444481435581'), 'g4DD[2][3]': mpf('0.723120760610660329170684690325'), 'g4DD[3][0]': mpf('1.22429414375154980405074869244'), 'g4DD[3][1]': mpf('0.391026617743468030141684721457'), 'g4DD[3][2]': mpf('0.723120760610660329170684690325'), 'g4DD[3][3]': mpf('0.919283767179900235255729512573'), 'g4UU[0][0]': mpf('-3.03008926847944211197781568781'), 'g4UU[0][1]': mpf('2.25487680174746330097911429618'), 'g4UU[0][2]': mpf('0.883964088219310673292773829627'), 'g4UU[0][3]': mpf('2.38097417962378184338842987037'), 'g4UU[1][0]': mpf('2.25487680174746330097911429618'), 'g4UU[1][1]': mpf('-0.109478866681308257858746913533'), 'g4UU[1][2]': mpf('-1.57673807708112257475456728614'), 'g4UU[1][3]': mpf('-1.71617440778374167644096697698'), 'g4UU[2][0]': mpf('0.883964088219310673292773829627'), 'g4UU[2][1]': mpf('-1.57673807708112257475456728614'), 'g4UU[2][2]': mpf('-2.06437348264308527564608946534'), 'g4UU[2][3]': mpf('1.11728919891515454886216209391'), 'g4UU[3][0]': mpf('2.38097417962378184338842987037'), 'g4UU[3][1]': mpf('-1.71617440778374167644096697698'), 'g4UU[3][2]': mpf('1.11728919891515454886216209391'), 'g4UU[3][3]': mpf('-2.23203972375107587678882970577'), 'PoynSU[0]': mpf('0.103073801363157111172177901697'), 'PoynSU[1]': 
mpf('0.11100316917740755485837448786'), 'PoynSU[2]': mpf('-0.00451075406485067218999293829888'), 'smallb2etk': mpf('0.164454779456120937541853683919'), 'smallb4D[0]': mpf('0.567950228622914592095169687713'), 'smallb4D[1]': mpf('0.286535540626704686153523625219'), 'smallb4D[2]': mpf('0.10714698030450909234705495631'), 'smallb4D[3]': mpf('0.455828728852934996540499932291'), 'smallb4U[0]': mpf('0.105192967134481035810628308481'), 'smallb4U[1]': mpf('0.298063886336868595407751154048'), 'smallb4U[2]': mpf('0.338357239072152954142217077157'), 'smallb4U[3]': mpf('-0.0371837983175520496394928051176'), 'u0': mpf('0.751914772923022001194226504595'), 'uBcontraction': mpf('0.214221928967111307128784385185'), 'uD[0]': mpf('0.216251888123560253383388291707'), 'uD[1]': mpf('0.167535113620266400428280039145'), 'uD[2]': mpf('0.232536332826570514618343904792'), 'uU[0]': mpf('-0.364066660324468905733189036042'), 'uU[1]': mpf('-0.0378849772494775056256865716175'), 'uU[2]': mpf('-0.476480636313712572229280970632')} # # ##### # # . # ---------------------------------------------------------------------- # Ran 1 test in 3.481s # # OK # ERROR:root: Test for function compute_u0_smallb_Poynting__Cartesian() in module u0sbPoyn failed! Please examine test file. # # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # # Tests failed! # # Failures: # # u0_smallb_Poynting__Cartesian/tests/test_u0_smallb_Poynting__Cartesian.py: ['test_u0_smallb_Poynting__Cartesian'] # # ---------------------------------------------------------------------- # # # ``` # # This seems like a lot to take-in, but it's not too difficult to understand once fairly well acquainted with the output. The beginning is identical to the output from the successful test run, up until `calc_error` is called. 
This gives the user some insight that there was an error during `calc_error`, which gives an indication that at least one global had a different calculated and trusted value. # # The next line confirms this suspicion: globals `g4DD[0][0]` and `g4DD[0][1]` had differing trusted and calculated values. This gives the user the **exact** information they're looking for. `g4DD` is a rank-2 tensor as per NRPy+ naming convention, and the user can very easily see the indices that failed. If the user expected this -- say they found a bug in their code that generated this global -- they can then copy the new `trusted_values_dict` entry into their `trusted_values_dict` and comment-out/delete the old entry. Then by re-running the test, there should no longer be an error -- the trusted and calculated values should be the same. # # The next output tells the user that the test failed, and to examine the test file. This is necessary if theres an unexpected failure -- it will help the user figure out why it occurred. # # Finally, additional output is given that tells the user all the test files and their respective functions that failed. This may seem repetitive, but there's a good reason for it. Say 20 modules were being tested, 10 of which had failures. Then to figure out what failed, the user would have to scroll through all the output and keep a mental note of what failed. By printing everything that failed at the very end, the user gets instant insight into what failed; this may help the user figure out why. # # <a id='checklist'></a> # # # Step 4: Checklist for adding a new test \[Back to [top](#toc)\] # $$\label{checklist}$$ # <a id='4a'></a> # # ## Step 4.a: Directory Creation \[Back to [top](#toc)\] # $$\label{4a}$$ # # Create a `tests` directory in the directory of the module being tested. 
# <a id='4b'></a> # # ## Step 4.b: Test File Creation \[Back to [top](#toc)\] # $$\label{4b}$$ # # Create a `test_file.py` in the `tests` directory based off [UnitTesting/test_skeleton.py](../edit/UnitTesting/test_skeleton.py) # <a id='4c'></a> # # ## Step 4c: Input Parameters \[Back to [top](#toc)\] # $$\label{4c}$$ # # Change the name of `test_your_module()` to whatever you're testing, making sure the name starts with `test_`. Fill in the following paremeters in `test_file.py`: `module`, `module_name`, `function_and_global_dict`, and if need be, `logging_level` and `initialization_string`. # <a id='4d'></a> # # ## Step 4d: Fill-in bash script and run \[Back to [top](#toc)\] # $$\label{4d}$$ # # Use the `add_test` function in the [run_NRPy_UnitTests.sh](../edit/UnitTesting/run_NRPy_UnitTests.sh) to create your test below the `TODO` line. Run the bash script to run your test! # <a id='latex_pdf_output'></a> # # # Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-UnitTesting.pdf](Tutorial-UnitTesting.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) # !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-UnitTesting.ipynb # !pdflatex -interaction=batchmode Tutorial-UnitTesting.tex # !pdflatex -interaction=batchmode Tutorial-UnitTesting.tex # !pdflatex -interaction=batchmode Tutorial-UnitTesting.tex # !rm -f Tut*.out Tut*.aux Tut*.log
Tutorial-UnitTesting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="a42nCSrUsB9Z" # # **The Effect of Reflected Inertia on Gain Tuning** # + [markdown] colab_type="text" id="VsbCH_XUJDCN" # ## Notebook Setup # The following cell will install Drake, checkout the manipulation repository, and set up the path (only if necessary). # - On Google's Colaboratory, this **will take approximately two minutes** on the first time it runs (to provision the machine), but should only need to reinstall once every 12 hours. # # More details are available [here](http://manipulation.mit.edu/drake.html). # + colab={} colab_type="code" id="Ev062HqpJDCO" import importlib import os, sys from urllib.request import urlretrieve if 'google.colab' in sys.modules and importlib.util.find_spec('manipulation') is None: urlretrieve(f"http://manipulation.csail.mit.edu/scripts/setup/setup_manipulation_colab.py", "setup_manipulation_colab.py") from setup_manipulation_colab import setup_manipulation setup_manipulation(manipulation_sha='c1bdae733682f8a390f848bc6cb0dbbf9ea98602', drake_version='0.25.0', drake_build='releases') # + colab={} colab_type="code" id="KUmZ8IkzJDCR" # python libraries import numpy as np import matplotlib.pyplot as plt, mpld3 from IPython.display import HTML, display # pydrake imports from pydrake.all import (DiagramBuilder, SignalLogger, Variable, PidController, Simulator, SymbolicVectorSystem, ConstantVectorSource, Multiplexer, GenerateHtml, SceneGraph, sin) from manipulation import running_as_notebook # enable mpld3 notebook if running_as_notebook: mpld3.enable_notebook() # + [markdown] colab_type="text" id="mcSz0hOVtBd5" # ## Problem Description # You will notice that in the first few weeks of the lecture, we don't think a lot about the full second-order dynamics of the arm (a more 
complicated version of $F=ma$), although we will come to revisit this later. This problem set helps to understand one of the reasons why we can abstract many manipulation problems kinematically (purely in terms of joint angles and end-effector positions), as opposed to worrying about dynamics (joint torques and inertias). Even before going to the multi-link case, let's think about how we should control the position of a single pendulum. # # In this problem you will investigate the role of reflected inertia in position control of a simple pendulum. At the end of the notebook, you will be able to check your work in a simulation environment. # # **These are the main steps of the exercise:** # 1. Write down the state-space dynamics function for the simple pendulum with motor and gearbox. # 2. Set up a PID Controller to do position control for the simple pendulum. # 3. Qualitatively answer the difference in behavior between direct-drive and gearbox cases. # # # **Your job for this problem set** # Will be to do step 1 and 3 of the main steps. # # # + [markdown] colab_type="text" id="zGGyX76oBXGN" # ## The Simple Pendulum # # <img src="https://raw.githubusercontent.com/RussTedrake/manipulation/master/figures/exercises/simple_pendulum.png" width="400"> # # The [simple pendulum](http://underactuated.csail.mit.edu/pend.html) is perhaps the simplest abstraction of a 'manipulator', with the following equation for dynamics: # # $$ ml^2 \ddot{q} = -mgl\sin(q) + \tau$$ # # Let's first write down the dynamics of a pendulum in a first-order form, # $$\dot{x}=f(x,u)$$ # # where $x=[q,\dot{q}]^T$ is now our state vector, and our input $u=\tau$. # # $$\frac{d}{dt}\begin{bmatrix} q \\ \dot{q} \end{bmatrix}=\begin{bmatrix} \dot{q} \\ (-mgl\sin(q) + \tau) / ml^2 \end{bmatrix}$$ # # The following function returns $\dot{x}$ given $x,u$, and the parameters. # # NOTE: For a sine implementation, we will be using `pydrake`'s `sin` for this problem set instead of the numpy implementation. 
# We've already imported it above, so it is available by simply calling `sin()`. This version will not only support numeric, but also symbolic computation.

# + colab={} colab_type="code" id="6IRgS5ftCc71"
def pendulum_dynamics(x, u, p):
    """Simple-pendulum dynamics in first-order form: return xdot = f(x, u).

    Args:
        x: state [q, qdot] — joint angle (rad) and angular velocity (rad/s).
        u: input [tau] — joint torque.
        p: parameter dict with keys "m" (mass), "g" (gravity), "l" (length).

    Returns:
        [qdot, qddot], where m l^2 qddot = -m g l sin(q) + tau.

    Note: `sin` here is pydrake's sine, so the expression works for both
    numeric and symbolic (Variable) inputs.
    """
    q, qdot = x[0], x[1]
    tau = u[0]
    m, g, l = p["m"], p["g"], p["l"]
    # Manipulator equation solved for the angular acceleration.
    qddot = (tau - m * g * l * sin(q)) / (m * l ** 2)
    return [qdot, qddot]


# + [markdown] colab_type="text" id="uOFeXrnUwtXA"
# ##System Dynamics
#
# From a systems viewpoint, how should this dynamics be abstracted into a system that we can define inputs and outputs on?
#
# As explained in the lecture notes, we can abstract the dynamics as a "plant" that will tell us what the current state $x$ is, given the torque $\tau$ into the system. If we were to draw a system diagram of this plant, it would look like:
#
# <img src="https://raw.githubusercontent.com/RussTedrake/manipulation/master/figures/exercises/simple_pendulum_diagram.png" width="500">
#
#
# To implement this system in [drake](https://drake.mit.edu), we will be using a [`SymbolicVectorSystem`](https://drake.mit.edu/pydrake/pydrake.systems.primitives.html#pydrake.systems.primitives.SymbolicVectorSystem). This is one of the convenient ways to define a system if you have access to a relatively simple, symbolic form of your dynamics. (But these equations will get [really complicated](https://authors.library.caltech.edu/96308/1/01087644.pdf) as we move to the multi-link case!)
#
# Note that the output of the system doesn't have to be the state $x$ - it can be a subset of the state, or simply some sensor attached to the pendulum! In this case we will assume full access to the state for feedback (i.e. doing full-state feedback), so we simply set `output=x` as well.
# + colab={} colab_type="code" id="fo_dj5WgxPQH" # Symbolic Variables from pydrake.symbolic x = [Variable("theta"), Variable("thetadot")] u = [Variable("tau")] # Example parameters of pendulum dynamics p = {"m": 1.0, # kg "g": 9.81, # m / s^2 "l": 0.5 # m } # Declaration of a symbolic vector system system = SymbolicVectorSystem(state = x, output = x, input = u, dynamics = pendulum_dynamics(x, u, p)) # + [markdown] colab_type="text" id="epwXiNR43OHt" # Note that since this system is deterministic, we can completely predict the future value of the system given the current time, state, and input of the system. Sometimes if our system did something unexpected, we may want to know what those states were so that we can replay the simulation and fix our systems. # # To make the simulation completely repeatable, Drake stores all the relevant states and parameters in the [context](https://drake.mit.edu/doxygen_cxx/classdrake_1_1systems_1_1_context.html) , which can act like a systematic log. # + colab={} colab_type="code" id="-ryw0KL_3Yzl" context = system.CreateDefaultContext() print(context) # + [markdown] colab_type="text" id="qO8u0nZ3DSu9" # ## The Simple Pendulum with Motor and Gearbox # # In class, we saw that the presence of a physical motor and a gearbox can affect the dynamics significantly. # # **Now it's your turn to code!** # # Below, you will implement a similar dynamics function for a simple pendulum, which now has a physical motor and a gearbox attached to it. # # <img src="https://raw.githubusercontent.com/RussTedrake/manipulation/master/figures/exercises/pendulum_with_motor.png" width="700"> # # In addition to the given parameters of mass (`p["m"]`), gravity (`p["g"]`), and length (`p["l"]`), you will need rotor inertia of the motor $I_m$, as well as the gear ratio $N$. You can assume these can be accessed by `p["I"]` and `p["N"]`. # # NOTE: You must implement joint-side dynamics as opposed to motor-side dynamics. # # NOTE: Again, do not use `np.sin`! 
# # HINT: Are the dynamics what you expect when `N=1` (i.e. direct-drive)? # + colab={} colab_type="code" id="CLd4u2ebE3bx" # Modify this function. you may not change the function name, inputs, or size of the output. def pendulum_with_motor_dynamics(x, u, p): q = x[0] qdot = x[1] tau = u[0] return [ 0., # modify here 0. # modify here ] # + [markdown] colab_type="text" id="V_lotS75Xb8C" # You can check if you got the dynamics correct by running the below autograder. # + colab={} colab_type="code" id="-mHMxD1oYBuj" from manipulation.exercises.robot.test_reflected_inertia import TestSimplePendulumWithGearbox from manipulation.exercises.grader import Grader Grader.grade_output([TestSimplePendulumWithGearbox], [locals()], 'results.json') Grader.print_test_results('results.json') # + [markdown] colab_type="text" id="-HYq6kSj3U_-" # ## Parameter Setting # # Now we have defined the dynamics, we can play around with the parameters. We will try to control a pendulum of endpoint mass `m=1kg`, at length `0.5m`. To make the case more fun, let's base the gear ratio `N` and motor inertia `I` from Kuka iiwa [(DLR LWR III)](https://www.dlr.de/rm/en/desktopdefault.aspx/tabid-12464/21732_read-49777 )'s 3rd joint. # # The iiwa uses a [harmonic drive](https://en.wikipedia.org/wiki/Strain_wave_gearing) geared 160:1, directly from the [HD Company](https://www.harmonicdrive.net/) , and high-performance frameless motors [RoboDrive ILM](https://www.tq-group.com/en/products/tq-robodrive/) from TQ (originally developed at DLR). We will make an educated guess for what exact models they are using: # # - RoboDrive ILM 85, with `I=0.61e-4 kg m^2` # - HD SHF-32-160-2UH-LW, with `I=2.85e-4 kg m^2` # # The "motor inertia" in the problem is in fact the armature inertia of the motor, as well as the part of the transmission attached to the motor shaft! 
# + colab={} colab_type="code" id="HmrSNzG6p8eb"
# Add motor and gearbox parameters
p = {"N": 160,
     "I": 3.46e-4,
     "m": 1.0, # kg
     "g": 9.81, # m / s^2
     "l": 0.5 # m
     }

# + [markdown] colab_type="text" id="0iKx8BDL_sj4"
# ## Building a PID Controlled System
#
# Now we have to do some plumbing to simulate the system in [drake](https://drake.mit.edu). We've defined the plant for our dynamics before, but we need to hook it up to a PID controller to do position control, so we will need another system to act as a controller.
#
# Drake offers a built-in [PID Controller](https://drake.mit.edu/pydrake/pydrake.systems.controllers.html#pydrake.systems.controllers.PidController). The role of the controller is to compute the error based on the current and desired states, and command the input, so it could be abstracted by a system with
# - Input ports: x_now ($x$), x_desired ($x_d$)
# - Output ports: torque command ($\tau_{cmd}$)
#
# Are there any other systems that we need to consider? Let's think about the requirements of what we need to implement. Since we need to command $x_d$, we might want a `Slider` or a `ConstantVectorSource` to do so. Also, we would like to plot the trajectory of the system, so we might need a `Logger` to do this for us.
#
# Before we code and hook everything up, it pays to think about what our diagram will actually look like. let's take a moment to think about our diagram.
#
# <img src="https://raw.githubusercontent.com/RussTedrake/manipulation/master/figures/exercises/system_diagram.png" width="1000">
#
#
#
# Did we get all the systems that we want? Are all the IO ports hooked up to the right places? Seems like we are ready to code!

# + colab={} colab_type="code" id="JjBkuba9qR13"
def BuildAndSimulate(q_d, pendulum_params, gains, visualize=False):
    """Build the plant + PID closed-loop diagram and return (simulator, logger).

    Args:
        q_d: desired joint angle (rad) fed to the controller as [q_d, 0].
        pendulum_params: parameter dict for `pendulum_with_motor_dynamics`
            ("m", "g", "l", "I", "N").
        gains: [kp, ki, kd] PID gains.
        visualize: if True, render the diagram graph inline with GoJS.

    Returns:
        (simulator, logger): a `Simulator` ready to advance, and the
        `SignalLogger` wired to the plant state output.
    """
    # This defines the plant using the pendulum with motor dynamics.
    system = SymbolicVectorSystem(state=x, output=x, input=u,
                                  dynamics=pendulum_with_motor_dynamics(x, u, pendulum_params))

    kp, ki, kd = gains

    builder = DiagramBuilder()

    # Add all the systems into the diagram
    pendulum = builder.AddSystem(system)
    logger = builder.AddSystem(SignalLogger(2))
    pid_controller = builder.AddSystem(PidController([kp], [ki], [kd]))
    desired_state = builder.AddSystem(ConstantVectorSource([q_d, 0.]))

    # Connect the IO ports of the systems. Use the handle returned by
    # AddSystem for the plant — AddSystem returns the system it was given,
    # so this is behaviorally identical but makes the ownership explicit
    # (previously `pendulum` was an unused local).
    builder.Connect(pid_controller.get_output_port(0), pendulum.get_input_port(0))
    builder.Connect(pendulum.get_output_port(0), logger.get_input_port(0))
    builder.Connect(pendulum.get_output_port(0), pid_controller.get_input_port(0))
    builder.Connect(desired_state.get_output_port(0), pid_controller.get_input_port(1))

    diagram = builder.Build()
    diagram.set_name("diagram")

    # Plot the diagram if visualize is true.
    if visualize:
        display(HTML('<script src="https://unpkg.com/gojs/release/go.js"></script>' + GenerateHtml(diagram)))

    # Clear any samples from a previous run before simulating again.
    logger.reset()

    simulator = Simulator(diagram)
    context = simulator.get_mutable_context()

    # The simulator has three states since the plant has 2, and the PID controller
    # keeps an additional state for the integral term. We set all of them to zero
    # initialization.
    context.SetContinuousState([0., 0., 0.])

    return simulator, logger


# + [markdown] colab_type="text" id="2orRbwNpMVZv"
# Now let's visualize the diagram and see if everything is properly connected for simulation. Does this topologically resemble our diagram above?

# + colab={} colab_type="code" id="biHnaJtcHNMg"
gains = [5, 2, 1]
simulator, logger = BuildAndSimulate(0., p, gains, visualize = True)

# + [markdown] colab_type="text" id="kiR00H1zXeqp"
# ## Simulating the Closed-Loop System
#
# We are now ready to simulate our system with motor dynamics! Let's start off with the direct-drive case (i.e. `N=1`).

# + colab={} colab_type="code" id="t_dXpC95OrwZ"
q_d = (5./8.) * np.pi # Feel free to play around with different final positions.
gains = [5, 2, 1] # [P, I, D] gains. p["N"] = 1 simulator, logger = BuildAndSimulate(q_d, p, gains) simulator.Initialize() simulator.AdvanceTo(20.0) time = logger.sample_times() traj = logger.data() plt.figure() plt.plot(time, traj[0,:], 'b-') plt.plot(time, q_d * np.ones(traj.shape[1]), 'r--') plt.xlabel('time (s)') plt.ylabel('q (rads)') mpld3.display() # + [markdown] colab_type="text" id="rbe_hU7wfnWg" # You might have noticed that the closed-loop system is unstable for some desired joint configurations. For the gains above, if we ask the controller to stabilize to an upright position (do a swing-up), then the controller seems to be having trouble stabilizing. # # Let's try to analyze this effect a bit more systematically by plotting how the state behaves depending on how we set the desired position. # # # + colab={} colab_type="code" id="fGP8bnm7h_D7" plt.figure() p["N"] = 1 q_d_lst = np.linspace(-np.pi, np.pi, 10) for i in range(len(q_d_lst)): simulator, logger = BuildAndSimulate(q_d_lst[i], p, gains) simulator.Initialize() simulator.AdvanceTo(20.0) time = logger.sample_times() traj = logger.data() plt.plot(time, traj[0,:], 'b--') plt.plot(time, q_d_lst[i] * np.ones(traj.shape[1]), 'r--') plt.xlabel('time (s)') plt.ylabel('q (rads)') mpld3.display() # + [markdown] colab_type="text" id="sGswFQm2ilQe" # Note how that the system not only destabilizes for more upright positions, but also shows **different behavior** depending on what angle we ask it to stabilize to. Lower angles don't have overshoots, while higher angles do. # # This speaks to the effect of the nonlinear, state-dependent gravity term # $-mgl\sin(q)$. # # + [markdown] colab_type="text" id="QumuOJNOgkrh" # ## Testing out Gearbox Case # # Now we will test the same dynamics with a high gear ratio of `N=160`, with the same gains. # + colab={} colab_type="code" id="jxfyWP-ufmji" q_d = np.pi/1 # Feel free to play around with different gains. 
gains = [5, 2, 1] # [P, I, D] gains p["N"] = 160 simulator, logger = BuildAndSimulate(q_d, p, gains) simulator.Initialize() simulator.AdvanceTo(20.0) time = logger.sample_times() traj = logger.data() plt.figure() plt.plot(time, traj[0,:], 'b-') plt.plot(time, q_d * np.ones(traj.shape[1]), 'r--') plt.xlabel('time (s)') plt.ylabel('q (rads)') mpld3.display() # + [markdown] colab_type="text" id="IRKy7cINhWqF" # Observe that with the same gains, the controller stabilizes is now able to stabilize to an upright position! Let's plot this over a range of different final joint positions as well. # + colab={} colab_type="code" id="Sq7Ev_lxja-J" plt.figure() p["N"] = 160 q_d_lst = np.linspace(-np.pi, np.pi, 10) for i in range(len(q_d_lst)): simulator, logger = BuildAndSimulate(q_d_lst[i], p, gains) simulator.Initialize() simulator.AdvanceTo(20.0) time = logger.sample_times() traj = logger.data() plt.plot(time, traj[0,:], 'b--') plt.plot(time, q_d_lst[i] * np.ones(traj.shape[1]), 'r--') plt.xlabel('time (s)') plt.ylabel('q (rads)') mpld3.display() # + [markdown] colab_type="text" id="k0KiqeyCjtCR" # This is looking great! Now we are able to stabilize to most configurations. But not only did we achieve stability, but observe how **consistent** the curves are regardless of what joint configuration we ask it to stabilize to. # # You will notice that with a higher gain, we might be able to achieve stability for the direct-drive case as well since we impose no torque bounds. But you are still likely to see that gravity affects the direct-drive case more than the gearbox case. # # # # + [markdown] colab_type="text" id="vohZdqXrkZJH" # ## Qualitative Problem ## # # Below, we have a question for you: # # **Why are we seeing such consistent behavior of closed-loop dynamics for the gearbox case, as opposed to the direct-drive case?** # # You can answer in words using 3-5 sentences. 
# + [markdown] colab_type="text" id="-xr2qacVj2kI" # ## Your Answer # # Answer the Question here, and copy-paste to the Gradescope 'written submission' section! # + [markdown] colab_type="text" id="zPmeRLtJk410" # ##How will this notebook be Graded?## # # If you are enrolled in the class, this notebook will be graded using [Gradescope](www.gradescope.com). You should have gotten the enrollement code on our announcement in Piazza. # # For submission of this assignment, you must do two things. # - Download and submit the notebook `reflected_inertia.ipynb` to Gradescope's notebook submission section, along with your notebook for the second problem. # - Copy and Paste your answer to the qualitative problem to Gradescope's written submission section. # # We will evaluate the local functions in the notebook to see if the function behaves as we have expected. For this exercise, the rubric is as follows: # - [5 pts] `pendulum_with_motor_dynamics` must be implemented correctly # - [5 pts] You must answer correctly on why the gearbox and direct-drive cases show different qualitative behavior.
exercises/robot/reflected_inertia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] id="I08sFJYCxR0Z"
# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)

# + [markdown] id="LKI5K1wQrSe9"
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/4.Clinical_DeIdentification.ipynb)
#

# + [markdown] id="Niy3mZAjoayg"
# # Clinical Deidentification

# + [markdown] id="okhT7AcXxben"
# ## Setup

# + id="I8Ytt2LLp2rj"
import sys
import json
import os

# Load the John Snow Labs license file and expose every key both as a
# top-level name (SECRET, JSL_VERSION, PUBLIC_VERSION, ...) and as an
# environment variable, as required by the Spark NLP Healthcare libraries.
with open('license.json') as f:
    license_keys = json.load(f)

# NOTE: at module level locals() is globals(), so this injects the license
# keys as the module-level names referenced below (SECRET, JSL_VERSION, ...).
locals().update(license_keys)
os.environ.update(license_keys)

# + id="Uq0mPLNNLuSy"
# Installing pyspark and spark-nlp
# ! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION

# Installing Spark NLP Healthcare (licensed; resolved via the SECRET index URL)
# ! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET

# + colab={"base_uri": "https://localhost:8080/", "height": 254} executionInfo={"elapsed": 26266, "status": "ok", "timestamp": 1649524923654, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="iUNHVaSdL0tO" outputId="255bec2f-c325-4dbb-eb4a-f13c709c53bc"
import json
import os

from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql import SparkSession

import sparknlp
import sparknlp_jsl

from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
from sparknlp.util import *
from sparknlp.pretrained import ResourceDownloader

from pyspark.sql import functions as F

import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
# Full option name: the abbreviated 'max_colwidth' relied on pandas'
# partial-name matching; 'display.max_colwidth' is unambiguous everywhere.
pd.set_option('display.max_colwidth', None)

import string
import numpy as np

# Start a licensed Spark NLP session with enough driver memory for the
# clinical embeddings and NER models used throughout this notebook.
params = {"spark.driver.memory": "16G",
          "spark.kryoserializer.buffer.max": "2000M",
          "spark.driver.maxResultSize": "2000M"}

spark = sparknlp_jsl.start(secret=SECRET, params=params)

print("Spark NLP Version :", sparknlp.version())
print("Spark NLP_JSL Version :", sparknlp_jsl.version())

spark

# + id="VtLIEJtPf88T"
# if you want to start the session with custom params as in start function above
from pyspark.sql import SparkSession


def start(SECRET):
    """Build (or reuse) a licensed local SparkSession with custom configuration.

    Parameters
    ----------
    SECRET : str
        John Snow Labs secret used to resolve the spark-nlp-jsl jar URL.
        PUBLIC_VERSION and JSL_VERSION are read from the module scope
        (injected from license.json above).

    Returns
    -------
    pyspark.sql.SparkSession
        The active (or newly created) Spark session.
    """
    builder = SparkSession.builder \
        .appName("Spark NLP Licensed") \
        .master("local[*]") \
        .config("spark.driver.memory", "16G") \
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
        .config("spark.kryoserializer.buffer.max", "2000M") \
        .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:" + PUBLIC_VERSION) \
        .config("spark.jars", "https://pypi.johnsnowlabs.com/" + SECRET + "/spark-nlp-jsl-" + JSL_VERSION + ".jar")
    return builder.getOrCreate()

#spark = start(SECRET)

# + [markdown] id="rFQifkFYihOc"
# # Deidentification Model

# + [markdown] id="2eY0jM7kf88f"
# 
Protected Health Information: # - individual’s past, present, or future physical or mental health or condition # - provision of health care to the individual # - past, present, or future payment for the health care # # Protected health information includes many common identifiers (e.g., name, address, birth date, Social Security Number) when they can be associated with the health information. # + [markdown] id="SLSV0tNjMk2L" # |index|model|index|model|index|model| # |-----:|:-----|-----:|:-----|-----:|:-----| # | 1| [bert_token_classifier_ner_deid]()| 2| [ner_deidentify_dl]()| 3| [ner_deid_subentity_glove]()| # | 4| [ner_deid_subentity_augmented_i2b2_pipeline]()| 5| [ner_deid_subentity_augmented_pipeline]()| 6| [ner_deid_sd]()| # | 7| [ner_deid_generic_augmented_pipeline]()| 8| [ner_deid_enriched_biobert]()| 9| [ner_deidentify_dl_pipeline]()| # | 10| [ner_deid_sd_large]()| 11| [ner_deid_enriched_pipeline]()| 12| [ner_deid_large]()| # | 13| [ner_deid_biobert_pipeline]()| 14| [ner_deid_subentity_augmented]()| 15| [bert_token_classifier_ner_deid_pipeline]()| # | 16| [ner_deid_sd_pipeline]()| 17| [ner_deid_biobert]()| 18| [ner_deid_enriched]()| # | 19| [ner_deid_synthetic]()| 20| [ner_deid_augmented]()| 21| [ner_deid_sd_large_pipeline]()| # | 22| [ner_deid_large_pipeline]()| 23| [ner_deid_enriched_biobert_pipeline]()| 24| [ner_deid_subentity_augmented_i2b2]()| # | 25| [ner_deid_augmented_pipeline]()| 26| [ner_deid_generic_glove]()| 27| []()| # + [markdown] id="UctroTvbKT7C" # |index| model | language |index| model | language | # |----:|:-----------------------------------|--:|----:|:-----------------------------------|--:| # | 1|[ner_deid_generic]() | it| 2|[ner_deid_generic]() | es| # | 3|[ner_deid_subentity]() | it| 4|[ner_deid_generic_roberta]() | es| # | 5|[ner_deid_generic]() | fr| 6|[ner_deid_generic_augmented]() | es| # | 7|[ner_deid_subentity]() | fr| 8|[ner_deid_generic_roberta_augmented]() | es| # | 9|[ ner_deid_generic]() | de|10|[ner_deid_subentity]() | es| 
# |11|[ner_deid_generic_pipeline]() | de|12|[ner_deid_subentity_roberta]() | es| # |13|[ner_deid_subentity]() | de|14|[ner_deid_subentity_augmented]() | es| # |15|[ner_deid_subentity_pipeline]() | de|16|[ner_deid_subentity_roberta_augmented]()| es| # + [markdown] id="XpIWbIQ-UkJb" # You can find German, Spanish, French, and Italian deidentification models and pretrained pipeline examples in these notebooks: # # # * [Clinical Deidentification in German notebook](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/4.1.Clinical_Deidentification_in_German.ipynb) # * [Clinical Deidentification in Spanish notebook](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/4.2.Clinical_Deidentification_in_Spanish.ipynb) # * [Clinical Deidentification in French notebook](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/4.5.Clinical_Deidentification_in_French.ipynb) # * [Clinical Deidentification in Italian notebook](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/4.6.Clinical_Deidentification_in_Italian.ipynb) # + [markdown] id="ZKln9C7Ef88f" # Load NER pipeline to identify protected entities: # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 93653, "status": "ok", "timestamp": 1649525026535, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="qHzG7Wvhgcex" outputId="61f0a705-35a0-4633-c953-396b69417b7f" documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") # Sentence Detector annotator, processes various sentences per line sentenceDetector = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentence") # Tokenizer splits words in a relevant format for NLP tokenizer = Tokenizer()\ .setInputCols(["sentence"])\ .setOutputCol("token") # 
Clinical word embeddings trained on PubMED dataset word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") # NER model trained on n2c2 (de-identification and Heart Disease Risk Factors Challenge) datasets) clinical_ner = MedicalNerModel.pretrained("ner_deid_generic_augmented", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("ner") ner_converter = NerConverterInternal()\ .setInputCols(["sentence", "token", "ner"])\ .setOutputCol("ner_chunk") nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, clinical_ner, ner_converter]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = nlpPipeline.fit(empty_data) # + [markdown] id="woAJhVv6f88m" # ### Pretrained NER models extracts: # # - Name # - Profession # - Age # - Date # - Contact(Telephone numbers, FAX numbers, Email addresses) # - Location (Address, City, Postal code, Hospital Name, Employment information) # - Id (Social Security numbers, Medical record numbers, Internet protocol addresses) # + id="Fxdh4CBBf88n" text =''' Record date : 2093-01-13 , <NAME> , M.D . , Name : <NAME> , MR # 7194334 Date : 01/13/93 . PCP : Oliveira , 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital , 0295 Keats Street , Phone (302) 786-5227 . 
''' # + id="AE7cILT6f88q" result = model.transform(spark.createDataFrame([[text]]).toDF("text")) # + id="5cxcMyvLf88u" result_df = result.select(F.explode(F.arrays_zip('token.result', 'ner.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("token"), F.expr("cols['1']").alias("ner_label")) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 14987, "status": "ok", "timestamp": 1649525190788, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="Nunb_ZZpar4q" outputId="cb4b5fc6-3649-4ea4-8940-97c927483b2d" result_df.select("token", "ner_label").groupBy('ner_label').count().orderBy('count', ascending=False).show(truncate=False) # + [markdown] id="DJvt31qlf880" # ### Check extracted sensetive entities # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1956, "status": "ok", "timestamp": 1649525214573, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="fLuOc_dQa9as" outputId="31a608d8-2d48-4703-f595-39fc56ef1bb2" result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("chunk"), F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False) # + [markdown] id="EkD3zogqf89Q" # ### Excluding entities from deidentification # + [markdown] id="my04HL3rf89Q" # Sometimes we need to leave some entities in the text, for example, if we want to analyze the frequency of the disease by the hospital. In this case, we need to use parameter **`setWhiteList()`** to modify `ner_chunk` output. This parameter having using a list of entities type to deidentify as an input. 
So, if we want to leave the location in the list we need to remove this tag from the list: # + id="3Bj066u9f89R" ner_converter = NerConverterInternal()\ .setInputCols(["sentence", "token", "ner"])\ .setOutputCol("ner_chunk") \ .setWhiteList(['NAME', 'PROFESSION', 'ID', 'AGE', 'DATE']) nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, clinical_ner, ner_converter]) empty_data = spark.createDataFrame([[""]]).toDF("text") model_with_white_list = nlpPipeline.fit(empty_data) # + id="oWNZR2trf89U" result_with_white_list = model_with_white_list.transform(spark.createDataFrame([[text]]).toDF("text")) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2488, "status": "ok", "timestamp": 1649525238337, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="u37rm6C3bePi" outputId="ffd07a1f-50fe-42d2-fc58-71a17aff9043" print("All Labels :") result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("chunk"), F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False) print("WhiteListed Labels: ") result_with_white_list.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("chunk"), F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False) # + [markdown] id="rckvYUKVf89b" # ## Masking and Obfuscation # + [markdown] id="HLt9SFX5f89b" # ### Replace this enitites with Tags # + id="WaFe5bZzhEXT" ner_converter = NerConverterInternal()\ .setInputCols(["sentence", "token", "ner"])\ .setOutputCol("ner_chunk") deidentification = DeIdentification() \ .setInputCols(["sentence", "token", "ner_chunk"]) \ .setOutputCol("deidentified") \ .setMode("mask")\ .setReturnEntityMappings(True) # return a new column to save the mappings between the mask/obfuscated entities and original entities. 
#.setMappingsColumn("MappingCol") # change the name of the column, 'aux' is default deidPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, clinical_ner, ner_converter, deidentification]) empty_data = spark.createDataFrame([[""]]).toDF("text") model_deid = deidPipeline.fit(empty_data) # + id="CZ3QgzH2AD-h" result = model_deid.transform(spark.createDataFrame([[text]]).toDF("text")) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1562, "status": "ok", "timestamp": 1649525247207, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="JR12_5hPdkg5" outputId="049a8f7b-dc38-453c-a9c0-51f7682504af" result.show() # + colab={"base_uri": "https://localhost:8080/", "height": 174} executionInfo={"elapsed": 1644, "status": "ok", "timestamp": 1649525248849, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="sI1C9hJzbyl3" outputId="30e257f4-3267-4c55-fb73-57d3e3ee711a" result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + [markdown] id="GHx17rSxOPeJ" # We have three modes to mask the entities in the Deidentification annotator. You can select the modes using the `.setMaskingPolicy()` parameter. The methods are the followings: # # **“entity_labels”**: Mask with the entity type of that chunk. (default) <br/> # **“same_length_chars”**: Mask the deid entities with same length of asterix ( * ) with brackets ( [ , ] ) on both end. <br/> # **“fixed_length_chars”**: Mask the deid entities with a fixed length of asterix ( * ). The length is setting up using the `setFixedMaskLength()` method. <br/> # Let's try each of these and compare the results. 
# + id="ULCxSmu6OIcE"
# Build three DeIdentification stages that differ only in masking policy so
# their outputs can be compared side by side on the same input text.

# deid model with "entity_labels": mask each chunk with its entity type (the default policy)
deid_entity_labels = DeIdentification()\
    .setInputCols(["sentence", "token", "ner_chunk"])\
    .setOutputCol("deid_entity_label")\
    .setMode("mask")\
    .setReturnEntityMappings(True)\
    .setMaskingPolicy("entity_labels")

# deid model with "same_length_chars": mask with asterisks matching the chunk length, wrapped in brackets
deid_same_length = DeIdentification()\
    .setInputCols(["sentence", "token", "ner_chunk"])\
    .setOutputCol("deid_same_length")\
    .setMode("mask")\
    .setReturnEntityMappings(True)\
    .setMaskingPolicy("same_length_chars")

# deid model with "fixed_length_chars": mask with a fixed number of asterisks (4 here)
deid_fixed_length = DeIdentification()\
    .setInputCols(["sentence", "token", "ner_chunk"])\
    .setOutputCol("deid_fixed_length")\
    .setMode("mask")\
    .setReturnEntityMappings(True)\
    .setMaskingPolicy("fixed_length_chars")\
    .setFixedMaskLength(4)

# One pipeline that runs all three masking policies over the same NER chunks.
deidPipeline = Pipeline(stages=[
      documentAssembler,
      sentenceDetector,
      tokenizer,
      word_embeddings,
      clinical_ner,
      ner_converter,
      deid_entity_labels,
      deid_same_length,
      deid_fixed_length])

empty_data = spark.createDataFrame([[""]]).toDF("text")

model_deid = deidPipeline.fit(empty_data)

# + id="i0dOky9hOIV4"
policy_result = model_deid.transform(spark.createDataFrame([[text]]).toDF("text"))

# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1858, "status": "ok", "timestamp": 1649525883654, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="SpGRUvM7OINj" outputId="530760c3-2468-48dd-8b23-d5fa26d523f5"
policy_result.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 306} executionInfo={"elapsed": 2522, "status": "ok", "timestamp": 1649525895377, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="AKXgnr8EOW3j" outputId="534cef9b-697c-49e6-e692-daeeebaf7b3b"
# Zip the sentences with each policy's output so every row shows the original
# sentence next to its three masked variants.
policy_result.select(F.explode(F.arrays_zip('sentence.result', 'deid_entity_label.result', 'deid_same_length.result', 'deid_fixed_length.result')).alias("cols")) \
             .select(F.expr("cols['0']").alias("sentence"),
                     F.expr("cols['1']").alias("deid_entity_label"),
                     F.expr("cols['2']").alias("deid_same_length"),
                     F.expr("cols['3']").alias("deid_fixed_length")).toPandas()

# + [markdown] id="yxv9bu4Ifweb"
# ### Mapping Column

# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 931, "status": "ok", "timestamp": 1649525900300, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="zcaXaxRFd-n6" outputId="d58949e9-e7f7-46ac-f554-0b2bae22482f"
# The 'aux' column holds the mappings between masked labels and the original
# chunks (populated because setReturnEntityMappings(True) was set earlier).
result.select("aux").show(truncate=False)

# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1981, "status": "ok", "timestamp": 1649525904619, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="rF1YR12bb7U3" outputId="f2dd938f-b364-4a89-8b41-b0a6f41fd59c"
# Flatten the mapping metadata: original chunk text and offsets alongside the
# replacement label and its offsets in the deidentified text.
result.select(F.explode(F.arrays_zip('aux.metadata', 'aux.result', 'aux.begin', 'aux.end')).alias("cols")) \
      .select(F.expr("cols['0']['originalChunk']").alias("chunk"),
              F.expr("cols['0']['beginOriginalChunk']").alias("beginChunk"),
              F.expr("cols['0']['endOriginalChunk']").alias("endChunk"),
              F.expr("cols['1']").alias("label"),
              F.expr("cols['2']").alias("beginLabel"),
              F.expr("cols['3']").alias("endLabel")).show(truncate=False)

# + [markdown] id="Kz7YLxOlpJPQ"
# ## Reidentification
#
# We can use `ReIdentification` annotator to go back to the original sentence.
# + id="7HMTqWaApIw3" reIdentification = ReIdentification()\ .setInputCols(["aux","deidentified"])\ .setOutputCol("original") # + id="Etn6kW6hqJGx" reid_result = reIdentification.transform(result) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 910, "status": "ok", "timestamp": 1649525918235, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="vGLfKOGGqSwy" outputId="5560a919-75ea-4355-b700-f1799a3de8aa" reid_result.show() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1060, "status": "ok", "timestamp": 1649525924265, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="cfhFb1aOcFgI" outputId="fd3e1b53-5cae-4e0c-f739-08a0aab699fe" print(text) reid_result.select('original.result').show(truncate=False) # + [markdown] id="OSKDCxV0NEJc" # ## Using multiple NER in the same pipeline # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 12440, "status": "ok", "timestamp": 1649525990278, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="7lF9lrIWNJMi" outputId="2d0f6596-fe93-4ec6-8504-eec4eb600cd0" from sparknlp_jsl.annotator import * documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentenceDetector = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentence") tokenizer = Tokenizer()\ .setInputCols(["sentence"])\ .setOutputCol("token") word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") deid_ner = MedicalNerModel.pretrained("ner_deid_generic_augmented", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("ner_generic") ner_converter = NerConverter()\ .setInputCols(["sentence", "token", "ner_generic"])\ .setOutputCol("ner_generic_chunk")\ .setWhiteList(['ID', 'DATE', 
'AGE', 'NAME', 'PROFESSION'])# CONTACT and LOCATION is removed deid_ner_enriched = MedicalNerModel.pretrained("ner_deid_subentity_augmented", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("ner_subentity") ner_converter_enriched = NerConverter() \ .setInputCols(["sentence", "token", "ner_subentity"]) \ .setOutputCol("ner_subentity_chunk")\ .setWhiteList(['COUNTRY', 'CITY', 'HOSPITAL', 'STATE', 'STREET', 'ZIP']) # we can also add PATIENT and DOCTOR entities and remove NAME entity from the other NER model chunk_merge = ChunkMergeApproach()\ .setInputCols("ner_subentity_chunk","ner_generic_chunk")\ .setOutputCol("deid_merged_chunk") deidentification = DeIdentification() \ .setInputCols(["sentence", "token", "deid_merged_chunk"]) \ .setOutputCol("deidentified") \ .setMode("mask")\ .setIgnoreRegex(True) nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, deid_ner, ner_converter, deid_ner_enriched, ner_converter_enriched, chunk_merge, deidentification]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = nlpPipeline.fit(empty_data) # + id="b2rR2tTgP5N5" text =''' Record date : 2093-01-13 , <NAME> , M.D . , Name : <NAME> , MR # 7194334 Date : 01/13/93 . PCP : Oliveira , 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital , 0295 Keats Street , Phone 302-786-5227. 
''' # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3137, "status": "ok", "timestamp": 1649526045939, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="ByIjftyJd8iP" outputId="17d9d274-045d-4a31-81b6-e286f5524419" result = model.transform(spark.createDataFrame([[text]]).toDF("text")) # ner_deid_generic_augmented result.select(F.explode(F.arrays_zip('ner_generic_chunk.result', 'ner_generic_chunk.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("chunk"), F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1611, "status": "ok", "timestamp": 1649526102374, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="aKn2yotweUTS" outputId="9cc283e2-f4d5-41c0-e89e-a5d450236062" # ner_deid_subentity_augmented result.select(F.explode(F.arrays_zip('ner_subentity_chunk.result', 'ner_subentity_chunk.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("chunk"), F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1658, "status": "ok", "timestamp": 1649526104867, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="ILAeyE39rudh" outputId="5fe1323d-92cc-4a52-eb8e-ffc38572fb50" # merged chunk result.select(F.explode(F.arrays_zip('deid_merged_chunk.result', 'deid_merged_chunk.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("chunk"), F.expr("cols['1']['entity']").alias("ner_label")).show(truncate=False) # + colab={"base_uri": "https://localhost:8080/", "height": 174} executionInfo={"elapsed": 1423, "status": "ok", "timestamp": 1649526110525, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="6dheTxaJeYg9" outputId="4e33b506-eff0-4d46-a3de-d50c4a54ebd8" 
result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + [markdown] id="1MvCZkTyN0rQ" # ### Enriching with Regex and Override NER # # # # + id="v3ws_gQRQ5FS" # PHONE deidentification = DeIdentification()\ .setInputCols(["sentence", "token", "deid_merged_chunk"])\ .setOutputCol("deidentified")\ .setMode("mask") \ .setRegexOverride(True) pipeline = Pipeline(stages=[ nlpPipeline, deidentification ]) model_default_rgx = pipeline.fit(empty_data) # + colab={"base_uri": "https://localhost:8080/", "height": 174} executionInfo={"elapsed": 2244, "status": "ok", "timestamp": 1649526132365, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="tTKB14F7hX6a" outputId="2bd5336a-760b-4031-dbc1-233750225de5" result = model_default_rgx.transform(spark.createDataFrame([[text]]).toDF("text")) result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 248, "status": "ok", "timestamp": 1649526154753, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="CK0s144yN7Tp" outputId="30a96491-2857-4644-efc0-062f7f79a7ec" rgx = '''NUMBER ([0-9]{2}) NUMBER (\d{7})''' with open("./custom_regex.txt", "w") as f: f.write(rgx) f = open("./custom_regex.txt", "r") print(f.read()) # + [markdown] id="Z771fExYOgiW" # We see that two entities have conflict between the regex and the NER. NER has the priroty as a default. 
We can change this `setRegexOverride` param # + id="J8fl19b2OY1-" deidentification_rgx = DeIdentification()\ .setInputCols(["sentence", "token", "deid_merged_chunk"])\ .setOutputCol("deidentified")\ .setMode("mask") \ .setRegexPatternsDictionary("./custom_regex.txt")\ .setRegexOverride(True) nlpPipeline_rgx = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, deid_ner, ner_converter, deid_ner_enriched, ner_converter_enriched, chunk_merge, deidentification_rgx]) empty_data = spark.createDataFrame([[""]]).toDF("text") model_rgx = nlpPipeline_rgx.fit(empty_data) # + colab={"base_uri": "https://localhost:8080/", "height": 267} executionInfo={"elapsed": 2371, "status": "ok", "timestamp": 1649526177215, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="tifAsxnFiiWg" outputId="0b951da5-97c8-4e33-d776-8362a05d2821" text ='''Record date : 2093-01-13 , <NAME> , M.D . , Name : Hendrickson , Ora MR . # 7194334 Date : 01/13/93 PCP : Oliveira , 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital . 0295 Keats Street.''' result = model_rgx.transform(spark.createDataFrame([[text]]).toDF("text")) result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + [markdown] id="0mkstH6JIuQr" # `.setBlackList()` parameter so that not deidentifiy the labels that are specified in the list. This parameter filters just the detected Regex Entities. 
# + id="eKuVjZDlIsG7" ner_converter = NerConverterInternal()\ .setInputCols(["sentence", "token", "ner"])\ .setOutputCol("ner_chunk")\ .setWhiteList(["NAME", "LOCATION"]) # DATE, PHONE, URL, EMAIL, ZIP, DATE, SSN, PASSPORT, DLN, NPI, C_CARD, EMAIL, IBAN, DEA deidentification = DeIdentification() \ .setInputCols(["sentence", "token", "ner_chunk"]) \ .setOutputCol("deidentified") \ .setMode("mask")\ .setRegexOverride(True)\ .setBlackList(["DATE", "PHONE"]) deidPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, clinical_ner, ner_converter, deidentification]) empty_data = spark.createDataFrame([[""]]).toDF("text") model_deid = deidPipeline.fit(empty_data) # + id="8M49yTi8Ir9n" text =''' Record date : 2093-01-13 , <NAME> , M.D . , Name : Hendrickson , Ora MR . # 7194334 Date : 01/13/93 PCP : Oliveira , 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital . 0295 Keats Street. Phone (302) 786-5227. ''' result = model_deid.transform(spark.createDataFrame([[text]]).toDF("text")) # + colab={"base_uri": "https://localhost:8080/", "height": 299} executionInfo={"elapsed": 1208, "status": "ok", "timestamp": 1649526218251, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="nSYNou-aIr4o" outputId="84b59d45-decc-4f8d-b43e-6209325c61fc" result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + [markdown] id="HJokGuGxf89k" # ## Obfuscation mode # + [markdown] id="KG0Bvua7f89l" # In the obfuscation mode **DeIdentificationModel** will replace sensetive entities with random values of the same type. 
# # + id="TOYTW3ryTVOI" obs_lines = """<NAME>#PATIENT <NAME>#PATIENT <NAME>#PATIENT <NAME>#PATIENT Inci FOUNTAIN#PATIENT <NAME>#DOCTOR <NAME>#DOCTOR <NAME>#DOCTOR <NAME>#DOCTOR""" with open ('obfuscation.txt', 'w') as f: f.write(obs_lines) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3436, "status": "ok", "timestamp": 1649526252541, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="s6OKIrTUSz0O" outputId="e1971be3-29a3-4bf4-b976-d4e8f2a2f045" deid_ner = MedicalNerModel.pretrained("ner_deid_subentity_augmented", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("ner") ner_converter = NerConverterInternal()\ .setInputCols(["sentence", "token", "ner"])\ .setOutputCol("ner_chunk") obfuscation = DeIdentification()\ .setInputCols(["sentence", "token", "ner_chunk"]) \ .setOutputCol("deidentified") \ .setMode("obfuscate")\ .setObfuscateDate(True)\ #.setObfuscateRefFile('obfuscation.txt')\ #.setObfuscateRefSource("both") #default: "faker" pipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, deid_ner, ner_converter, obfuscation ]) obfuscation_model = pipeline.fit(empty_data) # + colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"elapsed": 14107, "status": "ok", "timestamp": 1649526268130, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="dW2y14eCpm2A" outputId="45b7c426-f5ec-4283-ada8-729262968c21" text =''' Record date : 2093-01-13 , <NAME> , M.D . , Name : Hendrickson , Ora MR # 7194334 Date : 01/13/93 . Patient : Oliveira, 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital . 
0295 Keats Street ''' result = obfuscation_model.transform(spark.createDataFrame([[text]]).toDF("text")) result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 392, "status": "ok", "timestamp": 1649526268517, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="lkQ4ojPftJh0" outputId="d0f4a545-c74c-482d-e46d-76c89a8ea67f" result.select("ner_chunk").collect() # + colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"elapsed": 1300, "status": "ok", "timestamp": 1649526269816, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="bv42DZOxi-lP" outputId="96168752-62cd-461e-b9aa-e33535278674" obfuscation = DeIdentification()\ .setInputCols(["sentence", "token", "ner_chunk"]) \ .setOutputCol("deidentified") \ .setMode("obfuscate")\ .setObfuscateDate(True)\ .setObfuscateRefFile('obfuscation.txt')\ .setObfuscateRefSource("file") pipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, deid_ner, ner_converter, obfuscation ]) obfuscation_model = pipeline.fit(empty_data) result = obfuscation_model.transform(spark.createDataFrame([[text]]).toDF("text")) result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + [markdown] id="9vL7_bDHWZw8" # ## Faker mode # + [markdown] id="sE0P0vXWuJAS" # The faker module allow to the user to use a set of fake entities that are in the memory of the spark-nlp-internal. You can setting up this module using the the following property setObfuscateRefSource('faker'). 
# # If we select the setObfuscateRefSource('both') then we choose randomly the entities using the faker and the fakes entities from the obfuscateRefFile. # # # The entities that are allowed right now are the followings: # # * Location # * Location-other # * Hospital # * City # * State # * Zip # * Country # * Contact # * Username # * Phone # * Fax # * Url # * Email # * Profession # * Name # * Doctor # * Patient # * Id # * Idnum # * Bioid # * Age # * Organization # * Healthplan # * Medicalrecord # * Ssn # * Passport # * DLN # * NPI # * C_card # * IBAN # * DEA # * Device # # # # + id="w5P97F6jVkNB" obfuscation = DeIdentification()\ .setInputCols(["sentence", "token", "ner_chunk"]) \ .setOutputCol("deidentified") \ .setMode("obfuscate")\ .setObfuscateDate(True)\ .setObfuscateRefSource("faker") \ pipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, deid_ner, ner_converter, obfuscation ]) obfuscation_model = pipeline.fit(empty_data) # + colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"elapsed": 1128, "status": "ok", "timestamp": 1649526304978, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="lf6OBIjvjWFd" outputId="6e082188-5be9-4f7e-eee5-23f75f488193" text =''' Record date : 2093-01-13 , <NAME> , M.D . , Name : Hendrickson , Ora MR # 7194334 Date : 01/13/93 . Patient : Oliveira, 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital . 
0295 Keats Street ''' result = obfuscation_model.transform(spark.createDataFrame([[text]]).toDF("text")) result.select(F.explode(F.arrays_zip('sentence.result', 'deidentified.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("sentence"), F.expr("cols['1']").alias("deidentified")).toPandas() # + [markdown] id="9jAMC21Hf89v" # ## Use full pipeline in the Light model # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 588, "status": "ok", "timestamp": 1649526309505, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="q4mg2R-yW4nz" outputId="0f019d83-2ace-44d8-85ef-86278c7e0457" light_model = LightPipeline(model) annotated_text = light_model.annotate(text) annotated_text['deidentified'] # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 934, "status": "ok", "timestamp": 1649526314108, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="rGALkSGQjf4h" outputId="b8ad1342-ada0-4e72-ece9-e62fdc89aace" obf_light_model = LightPipeline(obfuscation_model) annotated_text = obf_light_model.annotate(text) annotated_text['deidentified'] # + [markdown] id="zwM9GsKnXPOg" # # Structured Deidentification # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1742, "status": "ok", "timestamp": 1649526323752, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="JLY-pfeYXQAY" outputId="0dc78e2c-005e-45a7-8704-a3990e7d80b6" # !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/hipaa-table-001.txt df = spark.read.format("csv") \ .option("sep", "\t") \ .option("inferSchema", "true") \ .option("header", "true") \ .load("hipaa-table-001.txt") df = df.withColumnRenamed("PATIENT","NAME") df.show(truncate=False) # + id="F9bl5oo0ZhD_" from sparknlp_jsl.structured_deidentification import StructuredDeidentification # + 
colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 527, "status": "ok", "timestamp": 1649526331463, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="PQ84IQTGcFAd" outputId="e256a7be-2369-4c5a-ab65-224ba7523e25" obfuscator = StructuredDeidentification(spark,{"NAME":"PATIENT","AGE":"AGE"}, obfuscateRefSource = "faker") obfuscator_df = obfuscator.obfuscateColumns(df) obfuscator_df.show(truncate=False) # + id="-GmMv8T-I0CX" obfuscator_unique_ref_test = '''<NAME>#PATIENT <NAME>#PATIENT <NAME>#PATIENT <NAME>GAN#PATIENT ALTHEA COLBURN#PATIENT <NAME>#PATIENT Inci FOUNTAIN#PATIENT <NAME>#PATIENT <NAME>#PATIENT Mahmood ALBURN#PATIENT <NAME>#PATIENT <NAME>#PATIENT <NAME>DER#PATIENT Darene GEORGIOUS#PATIENT <NAME>#PATIENT <NAME>#PATIENT <NAME>#PATIENT <NAME>#PATIENT Aberdeen#CITY Louisburg St#STREET France#LOC <NAME>#DOCTOR 5552312#PHONE St James Hospital#HOSPITAL Calle del Libertador#ADDRESS 111#ID Will#DOCTOR 20#AGE 30#AGE 40#AGE 50#AGE 60#AGE ''' with open('obfuscator_unique_ref_test.txt', 'w') as f: f.write(obfuscator_unique_ref_test) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 652, "status": "ok", "timestamp": 1649526340198, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="PTwdR034I70n" outputId="d2bb1ffd-a840-4cba-ae92-d91cf9b3e1f8" # obfuscateRefSource = "file" obfuscator = StructuredDeidentification(spark,{"NAME":"PATIENT","AGE":"AGE"}, obfuscateRefFile = "/content/obfuscator_unique_ref_test.txt", obfuscateRefSource = "file", columnsSeed={"NAME": 23, "AGE": 23}) obfuscator_df = obfuscator.obfuscateColumns(df) obfuscator_df.select("NAME","AGE").show(truncate=False) # + [markdown] id="w8tW5fFafQmL" # We can **shift n days** in the structured deidentification through "days" parameter when the column is a Date. 
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 247, "status": "ok", "timestamp": 1649526349096, "user": {"displayName": "<NAME>", "userId": "08787989274818793476"}, "user_tz": -180} id="iBzoNufMfYCn" outputId="6be869c1-1aaf-45ab-9cf6-5b8dc6cd1a34" df = spark.createDataFrame([ ["<NAME>", "13/02/1977", "711 Nulla St.", "140", "673 431234"], ["<NAME>", "23/02/1977", "1 Green Avenue.", "140", "+23 (673) 431234"], ["<NAME>", "11/04/1900", "Calle del Libertador, 7", "100", "912 345623"] ]).toDF("NAME", "DOB", "ADDRESS", "SBP", "TEL") df.show(truncate=False) # + id="Fez0Rqq_fX3C" obfuscator = StructuredDeidentification(spark=spark, columns={"NAME": "ID", "DOB": "DATE"}, columnsSeed={"NAME": 23, "DOB": 23}, obfuscateRefSource="faker", days=5 ) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 624, "status": "ok", "timestamp": 1649526462072, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="HgNuMKhngFq2" outputId="3540839f-a580-4328-cf74-6abc758744da" result = obfuscator.obfuscateColumns(df) result.show(truncate=False) # + [markdown] id="vI2Lkwf5zuUm" # # Pretrained Deidentification Pipeline # # This pipeline can be used to deidentify PHI information from medical texts. The PHI information will be masked and obfuscated in the resulting text. The pipeline can mask and obfuscate `AGE`, `CONTACT`, `DATE`, `ID`, `LOCATION`, `NAME`, `PROFESSION`, `CITY`, `COUNTRY`, `DOCTOR`, `HOSPITAL`, `IDNUM`, `MEDICALRECORD`, `ORGANIZATION`, `PATIENT`, `PHONE`, `PROFESSION`, `STREET`, `USERNAME`, `ZIP`, `ACCOUNT`, `LICENSE`, `VIN`, `SSN`, `DLN`, `PLATE`, `IPADDR` entities. 
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 108774, "status": "ok", "timestamp": 1649526576272, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="hQu0sQ6Tz1Fd" outputId="895c8c9d-9cf4-428f-f481-1a4856ecd983" from sparknlp.pretrained import PretrainedPipeline deid_pipeline = PretrainedPipeline("clinical_deidentification", "en", "clinical/models") # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 246, "status": "ok", "timestamp": 1649526585969, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="J7W8O9R2V8tE" outputId="ab3e3239-da48-4100-cf9a-bdb9abb47b34" deid_pipeline.model.stages # + id="tUo4PG4APG7M" text= """Name : <NAME>, Record date: 2093-01-13, Age: 25, # 719435. Dr. <NAME>, ID: 1231511863, IP 192.168.3.11. He is a 60-year-old male was admitted to the Day Hospital for cystectomy on 01/13/93. Patient's VIN : 1HGBH41JXMN109286, SSN #333-44-6666, Driver's license no:A334455B. 
Phone (302) 786-5227, 0295 Keats Street, San Francisco, E-MAIL: <EMAIL>""" # + id="qKxhE_2_gB_9" deid_res= deid_pipeline.annotate(text) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 4, "status": "ok", "timestamp": 1649526596000, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="3fYhKkfv0bZ0" outputId="1cd463f5-06a9-4375-c581-ff0b20ace338" deid_res.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 493} executionInfo={"elapsed": 4, "status": "ok", "timestamp": 1649526598984, "user": {"displayName": "Monster C", "userId": "08787989274818793476"}, "user_tz": -180} id="GyMU5j6YfL5b" outputId="93c87418-062e-4e4d-be35-239b147dac24" pd.set_option("display.max_colwidth", 100) df= pd.DataFrame(list(zip(deid_res["sentence"], deid_res["masked"], deid_res["masked_with_chars"], deid_res["masked_fixed_length_chars"], deid_res["obfuscated"])), columns= ["Sentence", "Masked", "Masked with Chars", "Masked with Fixed Chars", "Obfuscated"]) df
jupyter/enterprise/healthcare/4.Clinical_DeIdentification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import trackpal as tp # # Simple track features # ## 1. Create some simulated tracks # 1. Simulate tracks # * brownian motion (pure random walk) # * linear motion with some randomness # # + trj_brownian = tp.simulate.brownian(n_tracks=10) trj_linear = tp.simulate.brownian_linear(n_tracks=10) trj_brownian # - # ## 2. Add information to tables and concatenate tracks # 1. Define column identifiers # 2. Give tracks a tag `"label"` for later use # 3. Merge the two track tables into one # + trackid = "TrackID" frameid = "FRAME" coords_xy = ["Position X", "Position Y"] trj_brownian["label"] = 0 trj_linear["label"] = 1 # concatenate (note: the trackids need to be relabeled) trj = tp.concat_relabel([trj_linear, trj_brownian], trackid=trackid) trj # - # ## 3. Plot tracks trj.groupby(trackid).apply(tp.visu.plot_trj, coords=coords_xy, line_fmt=".-"); # ## 3. Compute simple track features # * Confinement ratio (ratio of start/end net distance vs. total track distance) # * Statistics of the instantaneous speed # + # prepare feature factory feature_factory = tp.features.Features(frame=frameid, coords=coords_xy) # compute two features conf_ratio = feature_factory.get("confinement_ratio") speed_stats = feature_factory.get("speed_stats") conf_ratio_res = trj.groupby(trackid).apply(conf_ratio.compute) speed_stats_res = trj.groupby(trackid).apply(speed_stats.compute) # retrieve labels assignment y = trj.groupby(trackid)["label"].first() # merge into single DataFrame features = pd.concat([conf_ratio_res, speed_stats_res, y], axis=1) # - # ## Visualize two features as scatter plot # * Confinement ratio # * speed average # # and color code each tracks motion type. 
# # The two motion types have similar average speed, but the confinement ratio discriminates between them # plot with pandas features.plot.scatter(x="confinement_ratio", y="speed_stats_mean", c="label", cmap="coolwarm");
examples/01_track_features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- if 'google.colab' in str(get_ipython()): # !pip install -r https://raw.githubusercontent.com/abbbe/eye-on-stick/main/requirements.txt # !git clone https://github.com/abbbe/eye-on-stick # %cd eye-on-stick # + # %load_ext autoreload # %autoreload 2 import numpy as np import os, urllib, time os.environ["MLFLOW_TRACKING_URI"] = "sqlite:///mlruns/db.sqlite" import mlflow, git mlflow_client = mlflow.tracking.MlflowClient() from stable_baselines.common.cmd_util import make_vec_env from stable_baselines.common.vec_env import VecNormalize from stable_baselines import PPO2, SAC import matplotlib.pyplot as plt # %matplotlib inline from lib.viz import showarray from IPython import display from lib import eos from lib.eos2d import EyeOnStickEnv2D from lib.pyb.eos3d import EyeOnStickEnv3D # - #ENV = EyeOnStickEnv2D ENV = EyeOnStickEnv3D N_ENVS = 1 with git.Repo() as repo: git_info = f'{repo.active_branch.name}/{repo.git.rev_parse(repo.head.object.hexsha, short=4)}' if repo.is_dirty(): git_info = f'*{git_info}' # + import json from lib.run import run_env_nsteps def log_metrics(metrics, step): # log the content of metrics dict as mlflow metrics for key, value in metrics.items(): mlflow.log_metric(key=key, value=value, step=step) def save_and_register_model(model, params, saved_models_dir, era, model_name, mlflow_run): # save the trained models, each era separately model_fname = f'{saved_models_dir}/{era}' model.save(model_fname) params_fname = f'{saved_models_dir}/{era}.json' with open(params_fname, 'w') as fp: json.dump(params, fp) # register the trained model return mlflow_client.create_model_version(name=model_name, source=model_fname, run_id=mlflow_run.info.run_id) # - def learn_and_run( exp_id, run_name, model_name, n_joints, n_eras, n_learn_episodes, params, 
gym_policy_class=SAC, gym_model_name='MlpPolicy', displayfunc=None, start_version=None ): """ 1. Instanciates environment with n_joints. 2. Train the model for N_LEARN_EPOCHS epochs. 3. Save the model as mlflow artefact, named by 'name'. 2. Step through the environment for N_ERAS x N_STEPS steps, collecting metrics and rendering it (if 'display' is set). 3. Log metrics into mlflow runs (parent run gets '{n_joints}J {name}' name). 7. Returns file name to load the model from. """ env = make_vec_env(lambda: ENV(n_joints, params), n_envs=N_ENVS) #env = VecNormalize(env) n_steps = params.get('MAX_NSTEPS') # create new mlflow run which will become a parent of per-era runs with mlflow.start_run(run_name=run_name, experiment_id=exp_id) as parent_run: # log gym params mlflow.log_param("gym_policy_class", gym_policy_class.__name__) mlflow.log_param("gym_model_name", gym_model_name) mlflow.log_param("start_version", start_version) for key, value in params.items(): mlflow.log_param(key, value) # arrange tensorboard logs mlflow_artifacts_dir = urllib.request.url2pathname(urllib.parse.urlparse(mlflow.get_artifact_uri()).path) tensorboard_logdir = os.path.join(mlflow_artifacts_dir, "tensorboard_log") os.makedirs(tensorboard_logdir, exist_ok=False) # create gym model and directory to save it if start_version: registered_model = mlflow_client.get_model_version(model_name, start_version) model = gym_policy_class.load(registered_model.source) model.set_env(env) else: model = gym_policy_class(gym_model_name, env, verbose=0, tensorboard_log=tensorboard_logdir) saved_models_dir = os.path.join(mlflow_artifacts_dir, "saved_models") os.makedirs(saved_models_dir, exist_ok=False) ## run eras loop metrics = None for era in range(n_eras): child_run_name = f'era={era}' with mlflow.start_run(run_name=child_run_name, experiment_id=exp_id, nested=True) as child_run: model.learn(n_learn_episodes * n_steps) registered_model = save_and_register_model(model, params, saved_models_dir, era, model_name, 
child_run) mlflow.log_metric("model_version", registered_model.version) env.env_method('set_render_info', {'model_name': registered_model.name, 'model_version': registered_model.version, 'start_version': start_version}) metrics, _data = run_env_nsteps(env, model, n_steps, displayfunc=displayfunc) log_metrics(metrics, step=era) # log to the parent run if metrics: log_metrics(metrics, step=None) env.close() # + # SAC(policy, env, gamma=0.99, learning_rate=0.0003, buffer_size=50000, learning_starts=100, train_freq=1, # batch_size=64, tau=0.005, ent_coef='auto', target_update_interval=1, gradient_steps=1, target_entropy='auto', action_noise=None, # random_exploration=0.0, verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None) # + #raise None # - # ### Train to find the target and aim coarsely # + exp_id = mlflow.get_experiment_by_name("PYB-6J-3S-1A") model_name='eos3d.6j-coarse-aim' run_name= f'{model_name} 008 start-rand target-xz a-25 e-5 alpha_cm alpha-scalar 4p' learn_and_run( exp_id=exp_id.experiment_id, run_name=run_name, model_name=model_name, n_joints=6, n_eras=150, n_learn_episodes=500, params={'MAX_NSTEPS': 150, 'ALPHA_MAXDIFF_GOAL': 20, 'EYE_LEVEL_MAXDIFF_GOAL': 5}, ) # - raise None # ### First decently working 3J policy NJ = 3 # + # we run N_ERAS eras (=mlflow runs) in total: # first we let the agent learn for N_LEARN_EPISODES * MAX_NSTEPS # then we run it one episode and log metrics # + tags=[] N_ERAS = 25 # eras N_LEARN_EPISODES = 100 MAX_NSTEPS = 150 # episode will end after so many steps for _ in range(10): learn_and_run( n_joints=NJ, n_eras=N_ERAS, n_learn_episodes=N_LEARN_EPISODES, params={'MAX_NSTEPS': MAX_NSTEPS, 'ALPHA_MAXDIFF_GOAL': 3, 'EYE_LEVEL_MAXDIFF_GOAL': 3}, name='no eye_phi obs') # - # ### Long run # + N_ERAS = 10 # eras N_LEARN_EPISODES = 2000 MAX_NSTEPS = 150 # episode will end after so many steps #exp_id = mlflow.create_experiment("Train 3J for 3M steps") 
exp_id = mlflow.get_experiment_by_name("Train 3J for 3M steps") learn_and_run(n_joints=NJ, n_eras=N_ERAS, n_learn_episodes=N_LEARN_EPISODES, params={'MAX_NSTEPS': MAX_NSTEPS, 'ALPHA_MAXDIFF_GOAL': 3, 'EYE_LEVEL_MAXDIFF_GOAL': 3}, name='training', exp_id=exp_id.experiment_id, displayfunc=None) # -
learn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

"""Training utilities for the U-NET Carvana segmentation notebook.

Provides checkpoint save/load, DataLoader construction, pixel-accuracy /
Dice evaluation, and dumping of prediction images.
"""

import os

import torch
import torchvision
from ipynb.fs.full.dataset import CarvanaDataset
from torch.utils.data import DataLoader


def save_checkpoint(state, filename="my_checkpoint.pth.tar"):
    """Serialize a training-state dict (model/optimizer state) to `filename`."""
    print("=> Saving checkpoint")
    torch.save(state, filename)


def load_checkpoint(checkpoint, model):
    """Restore model weights from a checkpoint dict made by `save_checkpoint`."""
    print("=> Loading checkpoint")
    model.load_state_dict(checkpoint["state_dict"])


def get_loaders(
    train_dir,
    train_maskdir,
    val_dir,
    val_maskdir,
    batch_size,
    train_transform,
    val_transform,
    num_workers=4,
    pin_memory=True,
):
    """Build train/validation DataLoaders over CarvanaDataset.

    Returns:
        (train_loader, val_loader): the training loader reshuffles every
        epoch; the validation loader keeps a fixed order.
    """
    train_ds = CarvanaDataset(
        image_dir=train_dir,
        mask_dir=train_maskdir,
        transform=train_transform,
    )
    train_loader = DataLoader(
        train_ds,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
        shuffle=True,
    )
    val_ds = CarvanaDataset(
        image_dir=val_dir,
        mask_dir=val_maskdir,
        transform=val_transform,
    )
    val_loader = DataLoader(
        val_ds,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
        shuffle=False,
    )
    return train_loader, val_loader


def check_accuracy(loader, model, device="cuda"):
    """Print pixel accuracy and mean Dice score of `model` over `loader`.

    Assumes binary segmentation: one sigmoid logit per pixel and {0, 1}
    target masks of shape (N, H, W) -- TODO confirm against the dataset.
    """
    num_correct = 0
    num_pixels = 0
    dice_score = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device).unsqueeze(1)  # (N, H, W) -> (N, 1, H, W) to match preds
            preds = torch.sigmoid(model(x))
            preds = (preds > 0.5).float()  # hard threshold at p = 0.5
            num_correct += (preds == y).sum()
            num_pixels += torch.numel(preds)
            # Dice = 2|A∩B| / (|A| + |B|); epsilon guards the all-empty case
            dice_score += (2 * (preds * y).sum()) / (
                (preds + y).sum() + 1e-8
            )
    print(
        f"Got {num_correct}/{num_pixels} with acc {num_correct/num_pixels*100:.2f}"
    )
    print(f"Dice score: {dice_score/len(loader)}")
    model.train()


def save_predictions_as_imgs(
    loader, model, folder="saved_images/", device="cuda"
):
    """Write thresholded predictions (`pred_<idx>.png`) and ground-truth
    masks (`<idx>.png`) for every batch in `loader` into `folder`.

    Paths are built with os.path.join so `folder` works with or without a
    trailing slash (the original string concatenation was inconsistent and
    produced a double slash for prediction files).
    """
    model.eval()
    for idx, (x, y) in enumerate(loader):
        x = x.to(device=device)
        with torch.no_grad():
            preds = torch.sigmoid(model(x))
            preds = (preds > 0.5).float()
        torchvision.utils.save_image(
            preds, os.path.join(folder, f"pred_{idx}.png")
        )
        torchvision.utils.save_image(y.unsqueeze(1), os.path.join(folder, f"{idx}.png"))
    model.train()
Neural Networks/U-NET (Image Segmentation)/utils.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py36] # language: python # name: conda-env-py36-py # --- import numpy as np import matplotlib.pyplot as plt import csv # %matplotlib inline RCfolder = 'C:/Users/good_user/Documents/GitHub/RapidCell/RapidCell1.4.2/' fileNameIndiv = 'individuals.out' fileNameAve = 'averages.out' A = np.loadtxt(RCfolder + fileNameIndiv) xmax, ymax = 5, 5 #mm, parameters from RapidCell simulation, Main tab. n_readouts = 7 #parameter from RapidCell simulation, Output tab. n_cells = int((A.shape[1]-1)/n_readouts) t_arr = A[:,0] X_arr = A[:,1::n_readouts] Y_arr = A[:,2::n_readouts] Ori_arr = A[:,3::n_readouts] CheA_arr = A[:,4::n_readouts] CheY_arr = A[:,5::n_readouts] Meth_arr = A[:,6::n_readouts] Mbias_arr = A[:,7::n_readouts] # + fig, ax = plt.subplots(2, 3, figsize = (16,8)) ax[0,0].plot(t_arr,X_arr.mean(axis=1)) ax[0,0].fill_between(t_arr, X_arr.mean(axis=1) - X_arr.std(axis=1), X_arr.mean(axis=1) + X_arr.std(axis=1),facecolor='gray') ax[0,0].set_ylim([0, xmax]) ax[0,0].set_ylabel('X(t), mm') ax[0,0].set_title("Mean and std of pos. 
X(t) of the population") ax[0,1].plot(t_arr,Ori_arr.mean(axis=1)%(2*np.pi),lw=0.5) ax[0,1].fill_between(t_arr, Ori_arr.mean(axis=1)%(2*np.pi) - Ori_arr.std(axis=1)%(2*np.pi), Ori_arr.mean(axis=1)%(2*np.pi) + Ori_arr.std(axis=1)%(2*np.pi),facecolor='gray') ax[0,1].set_ylabel('ori(t), rad') ax[0,1].set_title("Mean and std orientation(t), mod(2*pi)") ax[0,2].plot(t_arr,Mbias_arr.mean(axis=1)) ax[0,2].fill_between(t_arr, Mbias_arr.mean(axis=1) - Mbias_arr.std(axis=1), Mbias_arr.mean(axis=1) + Mbias_arr.std(axis=1),facecolor='gray') ax[0,2].set_ylabel('mb(t)') ax[0,2].set_title("Mean and std of motor bias(t)") ax[1,0].plot(t_arr,CheA_arr.mean(axis=1)) ax[1,0].fill_between(t_arr, CheA_arr.mean(axis=1) - CheA_arr.std(axis=1), CheA_arr.mean(axis=1) + CheA_arr.std(axis=1),facecolor='gray') ax[1,0].set_ylabel('mean CheA-P(t)') ax[1,0].set_xlabel('time, s') ax[1,0].set_title("Mean and std CheA-P(t)") ax[1,1].plot(t_arr,CheY_arr.mean(axis=1)) ax[1,1].fill_between(t_arr, CheY_arr.mean(axis=1) - CheY_arr.std(axis=1), CheY_arr.mean(axis=1) + CheY_arr.std(axis=1),facecolor='gray') ax[1,1].set_ylabel('CheY-P(t)') ax[1,1].set_xlabel('time, s') ax[1,1].set_title("Mean and std CheY-P(t)") ax[1,2].plot(t_arr,Meth_arr.mean(axis=1)) ax[1,2].fill_between(t_arr, Meth_arr.mean(axis=1) - Meth_arr.std(axis=1), Meth_arr.mean(axis=1) + Meth_arr.std(axis=1),facecolor='gray') ax[1,2].set_ylabel('mean m(t)') ax[1,2].set_xlabel('time, s') ax[1,2].set_title("Mean and std methylation(t)") # - #model the gradient max_concentration_mM = 0.1 sigma_mm = 1.0 x_center, y_center = xmax/2.0, ymax/2.0 x = np.linspace(0,xmax,100) y = np.linspace(0,ymax,100) x, y = np.meshgrid(x, y) G = max_concentration_mM*np.exp( - (((x-x_center)**2)/(2*sigma_mm**2) + ((y-y_center)**2)/(2*sigma_mm**2))) fig = plt.figure(figsize=(10,8)) plt.imshow(G, extent=(0, xmax, 0, ymax)) plt.plot(X_arr,Y_arr,lw=0.8) plt.axis("square") plt.xlim([0, xmax]) plt.ylim([0, ymax]) plt.xlabel('X(t), mm') plt.ylabel('Y(t), mm') 
plt.title("Trajectories of individual cells over time") plt.colorbar()
analysis_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # import libraries needed import numpy as np import pandas as pd import matplotlib.pyplot as plt # import csv file df = pd.read_csv('pokemon.csv') # basic examination of the data df.head() # method head() by default shows us the first 5 data in our dataset df.tail() # used to get the last n rows. This function returns last n rows from the object based on position. df.info() # used to print a concise summary of a DataFrame. This method prints information about a DataFrame including the index # dtype and column dtypes, non-null values and memory usage. df.describe() # used to view some basic statistical details like percentile, mean, std etc. of a data frame or a series of numeric # values. # **Exploring Your Dataset** # value_counts() used to get a Series containing counts of unique values, in this case "Name" df["Name"].value_counts() # value_counts() used to get a Series containing counts of unique values, in this case "Type 1" df["Type 1"].value_counts() # **Cleaning** # + # Check and get a good sense of how many null values the dataset has, according to the result, there is only one # column with no values which is the type two There is almost 50% null values. 
# df.isna().sum() df.isnull().sum(axis=0) # df[df.'Type 2'.isna()] # - # add fillna to handle missing values df['Type 2'].fillna(df['Type 2'].mode()[0],inplace=True) # data['Native Country'].fillna(data['Native Country'].mode()[0], inplace=True) # Re-checking if the dataframe has missing values df.isna().sum() # **Manipulating DataFrame columns** # Turn the Legendary column into ones and zeros instead of True / False df['Legendary'] = df['Legendary'].astype(int) # Sorting values by name (Aplhabetical) df.sort_values('Name', ascending=True) # Delete the pokedex index column df.drop(df.columns[[0]], axis=1, inplace=True) # + # Add the stats together and create 'Total' value, which give us the insight about the strongest Pokemon df['Total'] = df['HP'] + df['Attack'] + df['Defense'] + df['Sp. Atk'] + df['Sp. Def'] + df['Speed'] # df['Total'] = df.iloc[:, 4:10].sum(axis=1) df.sort_values('Total', ascending=False) # - # **Querying the DataFrame** # Shows the ammount of Pokemons with HP Larger than 150 df.query('HP > 150') # Shows the ammount of Pokemons with Defense Larger than 150 df.query('Defense > 150') # Shows the ammount of Pokemons with Speed Larger than 150 df.query('Speed > 150') # Shows the ammount of Legendaries based on the Generation df.groupby(['Generation'])['Legendary'].count() # Shows the ammount of Type 1 Legendary Pokemon df.groupby(['Type 1'])['Legendary'].count() # Shows the ammount of Type 2 Legendary Pokemon df.groupby(['Type 2'])['Legendary'].count() # **Visualization using Matplotlib** # + # Matplotlib is a 2-D plotting library that helps in visualizing figures. # Matplotlib emulates Matlab like graphs and visualizations. We use histogram. # A histogram shows the frequency on the vertical axis and the horizontal axis is another dimension. df[['Total','Legendary']].hist(figsize=(14,9),bins=16,linewidth='1',edgecolor='k',grid=False) plt.show()
pokemon.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + pycharm={"name": "#%%\n"}

# + pycharm={"name": "#%%\n"}
import cv2
import time
import numpy as np
import PIL.Image
from io import BytesIO
import IPython.display
import ipywidgets as widgets
import threading


# + pycharm={"name": "#%%\n"}
def showarray(a, prev_display_id=None, fmt='jpeg'):
    """Render a numpy image array inline in the notebook.

    If `prev_display_id` is given, update that display in place and return
    the same id; otherwise create a new display and return its handle.
    """
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    obj = IPython.display.Image(data=f.getvalue())
    if prev_display_id is not None:
        IPython.display.update_display(obj, display_id=prev_display_id)
        return prev_display_id
    else:
        return IPython.display.display(obj, display_id=True)


# +
def get_frame(cam):
    """Grab a single frame from an open cv2.VideoCapture."""
    # Capture frame-by-frame
    ret, frame = cam.read()
    # flip image for natural viewing
    # frame = cv2.flip(frame, 1)
    return frame


# + pycharm={"name": "#%%\n"}
def get_camera_indexes(max=10):
    """Probe device indexes [0, max) and return those that open successfully."""
    # ref: https://stackoverflow.com/a/53310665/3553367
    arr = []
    for index in range(0, max):
        cap = cv2.VideoCapture()
        cap.open(index)
        if cap.isOpened():
            arr.append(index)
        cap.release()
    return arr


# +
camera_indexes = get_camera_indexes()
print(camera_indexes)


# + pycharm={"name": "#%%\n"}
cameras = []

def init_cameras():
    """Open a capture for every detected index and record it in `cameras`."""
    for camera_index in camera_indexes:
        cam = cv2.VideoCapture(camera_index)
        # earlier resolution/exposure experiments kept for reference:
        # cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        # cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
        cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
        # cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)  # ref: https://github.com/opencv/opencv/issues/9738#issuecomment-346584044
        # cam.set(cv2.CAP_PROP_EXPOSURE, 0.01)
        cameras.append({
            "camera_index": camera_index,
            'cam': cam,
            'display_id': None,
        })

def stop_cameras():
    """Release every capture opened by init_cameras."""
    for camera in cameras:
        cam = camera.get('cam')
        cam.release()


# +
from inference.inference import *

weights = './model/yolor/new_class_1_2/best_257.pt'
cfg = './Yolor/cfg/yolor_p6.cfg'

cap = cv2.VideoCapture(0)
model = setting_model(cfg, weights)
print("test")

init_cameras()
total_cameras = len(camera_indexes)
frames_per_camera = 100
capture_image = False

fps_output = widgets.Output()
display(fps_output)

progress = widgets.FloatProgress(value=0.0, min=0.0, max=1.0)
current_frame_number = 0

def work(progress):
    """Background poller that advances the notebook progress bar.

    Fixed: compare the ints with != -- the original used `is not`, an
    identity check that only happened to work via CPython's small-int cache.
    NOTE(review): current_frame_number is never incremented anywhere in this
    script, so the loop runs for the notebook's lifetime -- confirm intent.
    """
    while current_frame_number != frames_per_camera:
        time.sleep(1)
        progress.value = current_frame_number / frames_per_camera

thread = threading.Thread(target=work, args=(progress,))
display(progress)
thread.start()

while True:
    t1 = time.time()
    for camera_num, camera in enumerate(cameras):
        cam = camera.get('cam')
        # Capture frame-by-frame
        frame = get_frame(cam)
        print("test2")
        # Run detection, then convert OpenCV BGR to RGB display order
        output, ori_image, trans_image = inference(frame, model)
        result_img = post_processing(output, ori_image, trans_image)
        frame = cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
        display_id = camera.get('display_id')
        if display_id is not None:
            showarray(result_img, display_id)
        else:
            display_handle = showarray(result_img)
            camera['display_id'] = display_handle.display_id
        t2 = time.time()
        # ref: https://github.com/jupyter-widgets/ipywidgets/issues/1744#issuecomment-335179855
        with fps_output:
            # print("Camera: %d of %d" % (camera_num + 1, total_cameras))
            print("%f FPS" % (1 / (t2 - t1)))
            # Display the frame info until new frame is available
            IPython.display.clear_output(wait=True)

# stop_cameras()
# with fps_output:
#     print("Stream stopped")
# -
.ipynb_checkpoints/run-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PyCharm (Project PRML)
#     language: python
#     name: pycharm-ddcd9f7a
# ---

# # PRML Chapter 1
# please follow book along with this notebook.

import numpy as np
import matplotlib.pyplot as plt

# ## Figure 1.2 -
# generate sample data of 10 points
# due to random noise, you might not find the plot exactly same as book.

# + code_folding=[0, 3, 6, 17] pycharm={"name": "#%%\n"}
def generate_x(size):
    """Return `size` evenly spaced sample points on [0, 1]."""
    return np.linspace(0, 1, size)

def true_function(x):
    """Ground-truth curve sin(2*pi*x) used throughout chapter 1."""
    return np.sin(np.pi * 2 * x)

def noise(mean, sigma, size):
    """Gaussian noise samples with the given mean and standard deviation."""
    return np.random.normal(loc=mean, scale=sigma, size=size)

def generate_data(no_of_sample):
    """
    figure 1.2 has single dimension data.
    data generation rule : t = sin(2*pi*x) + gaussian noise with mean 0 and
    standard deviation 0.25 (the original comment said "variance", but the
    value is passed as np.random.normal's `scale`, i.e. the std).
    """
    x = generate_x(no_of_sample)
    t = true_function(x) + noise(0, 0.25, no_of_sample)
    return x, t

def plot_fig_1_2():
    """Scatter the noisy training data against the true sine curve."""
    # training data for scatter plot
    x, t = generate_data(10)
    # test data for green line, the true function without noise.
    x_test = np.linspace(0, 1, 100)
    y_test = true_function(x_test)
    # plot
    plt.scatter(x, t, marker="o", facecolor="none", color='b', label="training data")
    plt.plot(x_test, y_test, color='g', label='true function')
    plt.xlabel("x ( feature dimension )")
    plt.ylabel("t ( labels / training label ) ")
    plt.legend()
    plt.show()

#----------------------
# plot now
#----------------------
plot_fig_1_2()
# -

# ## Figure 1.3 :
# The error function (1.2) corresponds to the sum of the squares of the displacements of each data point from the function y(x,w)
#
# E(w) = 0.5 * sum over data points { y(x_n, w) - t_n } ** 2

# + code_folding=[0, 3] pycharm={"name": "#%%\n"}
def prediction_function(x):
    """Illustrative model y(x, w) = x, used only for figure 1.3."""
    return x

def plot_fig_1_3():
    """Reproduce figure 1.3: vertical displacements between t_n and y(x_n, w)."""
    # the prediction function y(x,w)
    sample_size = 5
    x = generate_x(sample_size)
    y = prediction_function(x)
    plt.plot(x, y, color="r")
    # point 1
    x = np.array([0.2, 0.2])
    y = np.array([0.2, 0.8])
    plt.plot(x, y, color='g')
    plt.scatter(x, y, color="b")
    # point 2
    x = np.array([0.4, 0.4])
    y = np.array([0.4, 0.3])
    plt.plot(x, y, color='g')
    plt.scatter(x, y, color="b")
    # point 3
    x = np.array([0.8, 0.8])
    y = np.array([0.8, 1])
    plt.plot(x, y, color='g')
    plt.scatter(x, y, color="b")
    plt.text(0.8, 0.7, "y(x_n, w)")
    plt.text(0.82, 1, "t_n")
    plt.xlabel("x")
    plt.ylabel("t")
    plt.show()
    plt.close()

plot_fig_1_3()
# -

# ## Figure 1.4
# a polynomial regression on multiple M's : 0, 1, 3, 9

# + code_folding=[] pycharm={"name": "#%%\n"}
def convert_to_polynomial(x, degree):
    """Map a 1-D sample vector to its polynomial design matrix.

    Returns an N x (degree + 1) matrix whose column j holds x**j
    (column 0 is the bias column of ones).
    """
    assert x.ndim == 1, " this is written for conversion of only single dimension x"
    assert degree >= 0, "degree must be positive"
    x = x[:, np.newaxis]
    N = x.shape[0]
    # X will be a matrix of shape N * (degree + 1)
    X = np.ones(shape=(N, 1 + degree))
    for degree_dim in range(1, degree + 1):
        X[:, degree_dim] = np.power(x[:, 0], degree_dim)
    return X


# + code_folding=[4]
x_train, y_train = generate_data(10)
x_test = generate_x(100)
y_test = true_function(x_test)
print(f'''
x_train : {x_train.shape}
y_train : {y_train.shape}
x_test : {x_test.shape}
y_test : {y_test.shape}
''')


# + code_folding=[]
def find_optimal_weights(X, y):
    """
    optimal weights via simple inversion methods.
    not recommended for large datasets due to matrix inversion complexity.
    """
    if y.ndim == 1:
        y = y[:, np.newaxis]
    W = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
    return W


# + code_folding=[]
def predict(x, W):
    """
    W : Features * 1 matrix
    x : N * features matrix (a 1-D vector is promoted to a column matrix)
    forward : x * W => N * 1 matrix
    """
    if x.ndim == 1:
        # bug fix: the original line was `X = X[:, np.newaxis]`, which
        # referenced an undefined name (NameError on 1-D input) and never
        # reshaped `x` itself.
        x = x[:, np.newaxis]
    return x.dot(W)


# +
fig, axes = plt.subplots(2, 2, figsize=(20, 20))
degrees = [0, 1, 3, 9]
for i in range(0, 2):
    for j in range(0, 2):
        degree = degrees[2 * i + j]
        # convert to the polynomial
        X = convert_to_polynomial(x_train, degree)
        # find the weights
        W = find_optimal_weights(X, y_train)
        # predict
        X = convert_to_polynomial(x_test, degree)
        y_pred = predict(X, W)
        print(f"weights found are {W.squeeze()}")
        # plots
        axes[i][j].scatter(x_train, y_train, color="b", facecolor="none", label="train data")
        axes[i][j].plot(x_test, y_test, color="g", label="true function")
        axes[i][j].plot(x_test, y_pred.squeeze(), color="r", label=f"predicted fn of degree {degree}")
        axes[i][j].legend()
plt.show()

# + [markdown] heading_collapsed=true
# ## Figure 1.5
# generate graph of root mean square error for training and test data for degrees 0 to 9 inclusive.
# + hidden=true
def calculateRMS(y_predicted, y_true):
    """Root-mean-square error between predictions and targets.

    Both inputs are squeezed to 1-D so that (N,) and (N, 1) shapes
    compare correctly regardless of which argument carries the extra
    axis. (The original only squeezed when y_predicted was 2-D; a 1-D
    prediction against an (N, 1) target silently broadcast into an
    (N, N) matrix and returned a vector instead of a scalar.)
    """
    y_predicted = np.squeeze(y_predicted)
    y_true = np.squeeze(y_true)
    return np.sqrt(np.mean((y_predicted - y_true) ** 2, axis=0))


# + hidden=true
calculateRMS(y_pred, y_test)

# + hidden=true
# fig, axes = plt.subplots(2, 2, figsize=(20,20))
degrees = list(np.arange(0, 10))
rms_train = []
rms_test = []
for degree in degrees:
    # convert to the polynomial
    X = convert_to_polynomial(x_train, degree)
    # find the weights
    W = find_optimal_weights(X, y_train)
    y_predicted_on_train = predict(X, W)
    rms_train.append(calculateRMS(y_predicted_on_train, y_train))
    # predict on the held-out test points with the same weights
    X = convert_to_polynomial(x_test, degree)
    y_predicted_on_test = predict(X, W)
    rms_test.append(calculateRMS(y_predicted_on_test, y_test))

# plots (a stray bare `print` statement that sat here was a no-op
# leftover and has been removed)
plt.scatter(degrees, rms_train, color="b", facecolor="none")
plt.plot(degrees, rms_train, color="b", label="train rms")
plt.scatter(degrees, rms_test, color="r", facecolor="none")
plt.plot(degrees, rms_test, color="r", label="test rms")
plt.xlabel("M")
plt.ylabel("ERMS")
plt.legend()
plt.show()

# + [markdown] heading_collapsed=true
# ## Figure 1.6
# Effect of datasize on overfitting for M = 9

# + hidden=true
degree = 9
x_test = generate_x(100)
y_test = true_function(x_test)

for N in [15, 100]:
    x_train, y_train = generate_data(N)
    # convert to the polynomial
    X = convert_to_polynomial(x_train, degree)
    # find the weights
    W = find_optimal_weights(X, y_train)
    # predict
    X = convert_to_polynomial(x_test, degree)
    y_pred = predict(X, W)
    # print(f"weights found are {W.squeeze()}")
    # plots
    plt.figure(figsize=(10, 5))
    plt.scatter(x_train, y_train, color="b", facecolor="none", label="train data")
    plt.plot(x_test, y_test, color="g", label="true function")
    plt.plot(x_test, y_pred.squeeze(), color="r", label=f"predicted fn of degree {degree} and training N = {N}")
    plt.legend()
    plt.show()

# + [markdown] heading_collapsed=true
# ## Figure 1.7 and 1.8

# + hidden=true
def find_optimal_weights_with_regularisation(X, y, lambda_):
    """Ridge-regularised least squares: W = (lambda_*I + X^T X)^-1 X^T y.

    lambda_ penalises large weights, which tames overfitting at high
    polynomial degrees. Closed-form inversion is fine for these tiny
    demos but not recommended for large feature counts.

    X       : (N, features) design matrix
    y       : (N,) or (N, 1) targets; a 1-D y is promoted to a column
    lambda_ : non-negative regularisation strength
    """
    if y.ndim == 1:
        y = y[:, np.newaxis]
    temp = X.T.dot(X)
    I = np.eye(temp.shape[0])
    W = np.linalg.inv(lambda_ * I + temp).dot(X.T).dot(y)
    return W


# + [markdown] heading_collapsed=true hidden=true
# ### Figure 1.7
# Effect of regularisation on overfitting for N=10, M=9

# + hidden=true
degree = 9
N = 10
x_test = generate_x(100)
y_test = true_function(x_test)

for lambda_ in [np.exp(-18), 1]:
    x_train, y_train = generate_data(N)
    # convert to the polynomial
    X = convert_to_polynomial(x_train, degree)
    # find the weights
    W = find_optimal_weights_with_regularisation(X, y_train, lambda_)
    # predict
    X = convert_to_polynomial(x_test, degree)
    y_pred = predict(X, W)
    # print(f"weights found are {W.squeeze()}")
    # plots
    plt.figure(figsize=(10, 5))
    plt.scatter(x_train, y_train, color="b", facecolor="none", label="train data")
    plt.plot(x_test, y_test, color="g", label="true function")
    plt.plot(x_test, y_pred.squeeze(), color="r", label=f"predicted for N={N}, degree={degree}, lambda={lambda_}")
    plt.legend()
    plt.show()

# + [markdown] hidden=true
# ### Figure 1.8
# effect of lambda on ERMS for a range of lambda's

# + hidden=true
# fig, axes = plt.subplots(2, 2, figsize=(20,20))
lambdas = [np.exp(x) for x in range(-40, 5, 5)]
degree = 9
rms_train = []
rms_test = []
for lambda_ in lambdas:
    x_train, y_train = generate_data(10)
    # convert to the polynomial
    X = convert_to_polynomial(x_train, degree)
    # find the weights
    W = find_optimal_weights_with_regularisation(X, y_train, lambda_)
    y_predicted_on_train = predict(X, W)
    rms_train.append(calculateRMS(y_predicted_on_train, y_train))
    # predict
    X = convert_to_polynomial(x_test, degree)
    y_predicted_on_test = predict(X, W)
    rms_test.append(calculateRMS(y_predicted_on_test, y_test))

# plots (removed another stray no-op bare `print` statement)
plt.scatter(np.log(lambdas), rms_train, color="b", facecolor="none")
plt.plot(np.log(lambdas), rms_train, color="b", label="train rms")
plt.scatter(np.log(lambdas), rms_test, color="r", facecolor="none")
plt.plot(np.log(lambdas), rms_test, color="r", label="test rms")
plt.xlabel("ln lambda")
plt.ylabel("ERMS")
plt.legend()
plt.show()

# + [markdown] hidden=true
#
Chapter 1/Chapter 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Ruby 2.7.0 # language: ruby # name: ruby # --- # # TodoMvc Testers Toolbox- Part 1 # ## Automation Enabled Testing # # **Inspired by Alan Richardson (@EvilTester) in his Test Automation U JavaScript Course** # # **[Automating in the Browser Using JavaScript](https://testautomationu.applitools.com/automating-in-the-browser-using-javascript/)** # # Instead of using JavaScript inside Chrome DevTools we'll use: # - Jupyter Notebooks # - Ruby # - Watir (a Ruby Selenium Wrapper) # # To build a toolbox for the Tester while testing and run a bot # #### Setup Watir & Selenium # # The cell below will need to be run before running any other cells # + require 'watir' todo_mvc_app_url= "http://todomvc.com/examples/vanillajs/" selenium_grid_host="http://hub:4444/wd/hub" browser = Watir::Browser.new(:chrome, {url: selenium_grid_host}) browser.goto todo_mvc_app_url # - # ## Add Todo # # **To add a new todo** set the value of `todo_text` in quotes then run cell # + todo_text="hello 2" todo_text_box = browser.text_field(class: 'new-todo') todo_text_box.set(todo_text) todo_text_box.send_keys(:enter) # - # ## Update Todo # # **To Update** set the value of `update_todo_number` and/or `replacement_text` then run cell # # This will find the todo by numeric position the Replace text in input box using javascript # # # + update_todo_number =2 replacement_text ="replacing todo #{update_todo_number} text" todo_to_update= browser.element(css:".todo-list li:nth-child(#{update_todo_number})") todo_to_update.double_click update_value_script=" var inputSelector ='ul.todo-list > li.editing > input.edit'; document.querySelector(inputSelector).value='#{replacement_text}'; document.querySelector(inputSelector).dispatchEvent(new Event('blur'));" browser.execute_script(update_value_script) # - # ## Toggle All Todo's browser.element(id:
"toggle-all").click # ## Delete a Todo # **To Delete** set the value of `delete_todo_number` then run cell # # This will find the todo by numeric position then Hover over todo then click `x` button # + delete_todo_number = 1 browser.element(css: ".todo-list li:nth-child(#{delete_todo_number})").hover browser.button(class: "destroy").click # - # ## Delete All Todos # *This isn't visible functionality in the application* # # Using javascript command to clear local storage then refresh browser.execute_script("localStorage.clear();") browser.refresh()
ruby/notebooks/TodoMvc-ToolBox-part1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py35] # language: python # name: conda-env-py35-py # --- # # Overview # Basically, the idea of the Numerai tournament is to let data scientists use machine learning techniques to earn money by creating predictions for Numerai’s hedge fund. # # The machine learning techniques can be neural networks, random forests, support vector machines, etc… . In this article I will focus on using neural networks. These neural networks will be programmed and trained using Python, Keras and scikit-learn (sklearn). # # In addition to creating predictions for the regular tournament, it is also possible use our predictions to participate in a ‘staked’ tournament. We can stake the cryptocurrency numeraire (NMR) on our predictions in order to earn additional money. In this article, though, I will only cover how to create predictions for the regular tournament. # # Pre-requisites # In this article the following programs and libraries need to be installed: # # * Python 3 (from Anaconda) # * Keras # * scikit-learn # * Pandas # * Numpy # * Jupyter Notebook # # We will also need to have an account on Numerai. import numpy as np import pandas as pd # %matplotlib inline # # Exploring the Datasets # After downloading the current datasets, we can have a look in the unzipped directory. Of relevance are the two CSV files: # * `numerai_training_data.csv` # * `numerai_tournament_data.csv` # # The first file contains the training data and the second file the tournament data. training_data = pd.read_csv('numerai_training_data.csv', header=0) tournament_data = pd.read_csv('numerai_tournament_data.csv', header=0) # Let’s have a quick look at the data in the files using Pandas. training_data.head() # The above statement shows us the first 5 rows of the training data. Here we can see the names of the columns. 
Take note of the era and data_type columns. print(training_data.era.unique()) print(training_data.data_type.unique()) # Using the above statements we can see all the unique names for eras and data types in our data. In the training data there are 85 eras represented and all data is indeed training data. We can do something similar for the tournament data: tournament_data.head() # Now, we get the first 5 rows of our tournament data. print(tournament_data.era.unique()) print(tournament_data.data_type.unique()) # The above statements show that the tournament data contains more diverse data types — validation, test, live. The eras contained are from 86 to 97 and some additional one called ‘eraX’. Let’s dig a bit deeper into this with the following: print(tournament_data.era[tournament_data.data_type=='validation'].unique()) print(tournament_data.era[tournament_data.data_type=='test'].unique()) print(tournament_data.era[tournament_data.data_type=='live'].unique()) # Now we can see that the tournament data of the ‘validation’ type contains the eras from 86 to 97 and that the data of type ‘test’ and ‘live’ contain the ‘eraX’ era. # In the documentation on the Numerai help page, it is mentioned that the validation data contains the targets (like the training data does). Let’s verify this: print(tournament_data.target[tournament_data.data_type=='validation'].unique()) print(tournament_data.target[tournament_data.data_type=='test'].unique()) print(tournament_data.target[tournament_data.data_type=='live'].unique()) # The validation data does indeed contain the targets [0., 1.], whereas the test and live data contain [nan]. # Given that we cannot ever have too much training data, combining all the eras into one ‘complete’ training set — training data plus validation data —  is the thing to do. We will therefore disregard the warning made by Numerai: # # > We recommend you do not train on the validation data even though you have the targets. 
# # But, won’t this give us problems with overfitting? Not necessarily, given that we will use cross-validation to test the performance of our neural network models. # Let’s create this ‘complete’ training set: validation_data = tournament_data[tournament_data.data_type=='validation'] complete_training_data = pd.concat([training_data, validation_data]) # And let’s check to see we have the correct eras: complete_training_data.era.unique() # This gives us all eras from 1 to 97, as it is supposed to. # We can now create our features (X) and labels(Y) for training our neural network: features = [f for f in list(complete_training_data) if "feature" in f] X = complete_training_data[features] Y = complete_training_data["target"] X.head() Y.head() # # Performing Predictions with Keras and scikit-learn # Using the Keras neural network library for Python we can define some simple neural network model which we can train on our complete training set. First we define the neural network model in a function. # # Then we create a wrapper for the neural network. This is needed to create a bridge between Keras and scikit-learn. We’ll tell it to run for 10 epochs, with a batch size of 128. Verbosity is set to 0, because we don’t need to see how far the network has been trained. # # Now, from the Numerai documentation we can learn that: # # > For cross-validation, it’s better to hold out a random sample of eras rather than a random sample rows. Using a random sample of rows tends to over fit. # # Therefore, we should use a special cross-validation method called group-k-fold. In our case it is set to do k-fold over the individual eras rather than over the individual rows in the complete training set. We shall use the GroupKFold class from scikit-learn for this. First we create an instance of this class and tell it to create 5 folds. Then using the split method we tell the object to split the training data based on the eras. 
# # Using GridSearchCV from scikit-learn we can find good hyper-parameters for our neural network model. Here we try to find out what works best — 10 neurons or 14 and a dropout probability of 0.01 or 0.26. This gives a parameter grid with a total of 4 combinations to try out. # # Create an instance of GridSearchCV with the neural network model we defined above, the parameter grid with the 4 combinations, a scoring function that matches the loss function of the neural network, one thread and a verbose level of 2. Then we tell it to fit our training data. # # + import numpy from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score from sklearn.model_selection import GroupKFold from keras.models import Sequential from keras.layers import Dense, BatchNormalization, Dropout, Activation from keras.wrappers.scikit_learn import KerasClassifier def create_model(neurons=200, dropout=0.2): model = Sequential() model.add(Dense(neurons, input_shape=(50,), kernel_initializer='glorot_uniform', use_bias=False)) model.add(BatchNormalization()) model.add(Dropout(dropout)) model.add(Activation('relu')) model.add(Dense(1, activation='sigmoid', kernel_initializer='glorot_normal')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['binary_crossentropy', 'accuracy']) return model model = KerasClassifier(build_fn=create_model, epochs=8, batch_size=128, verbose=0) neurons = [10, 14] dropout = [0.01, 0.26] param_grid = dict(neurons=neurons, dropout=dropout) gkf = GroupKFold(n_splits=5) kfold_split = gkf.split(X, Y, groups=complete_training_data.era) grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=kfold_split, scoring='neg_log_loss',n_jobs=1, verbose=3) grid_result = grid.fit(X.values, Y.values) print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_['mean_test_score'] stds = grid_result.cv_results_['std_test_score'] params = grid_result.cv_results_['params'] 
for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) # - # When we run the fit method of the GridSearchCV object, we are told the following: # # > Fitting 5 folds for each of 4 candidates, totalling 20 fits # # This means that our neural network will be trained 20 times. For each of the 4 combinations of our parameter grid, 5 different folds of training data will be used. The latter refers to the 5-fold cross validation. # # Yes, this process takes a long time even with a new high end GPU. On my PC with a Nvidia GTX 1080 GPU every ‘fit’ takes roughly 3.1 min, giving a total of 64.0 min. grid.best_estimator_.model.save('./my_model_2017-11-07_IV.h5') grid.best_estimator_.model.summary() # # Checking the Performance # We can now check the performance for the tournament of the best model found by the GridSearchCV object. Let’s go over the items mentioned in the Numerai documentation: # # > The leaderboard displayed is based only on validation data. To be on the leaderboard, models are required to have concordance, originality, and consistency. # # In order for us to earn money in the regular tournament, we need to be on the leaderboard. Hence, our model needs to satisfy the following 3 criteria — concordance, originality and consistency: # # > Concordance is a measure of whether predictions on the validation set, test set, and live set appear to be generated by the same model. # # This should not be a problem for our model. # # > Originality is a measure of whether a set of predictions is uncorrelated with predictions already submitted. # # This is the most tricky part. If you submit early in the round, then chances are very high that your submission will be original. On the other hand, this probability drops over time. This is why I put values like 0.01 and 0.26 instead 0.0 and 0.25 for the dropout rates in parameter grid. Chances are small that anyone else has these values for their model. 
This increases the uniqueness of tour model. # # > Consistency measures the percentage of eras in which a model achieves a logloss < -ln(0.5). … Only models with consistency above 75% are considered consistent. # # We can check the consistency with our own function. # + def check_consistency(model, valid_data): eras = valid_data.era.unique() count = 0 count_consistent = 0 for era in eras: count += 1 current_valid_data = valid_data[validation_data.era==era] features = [f for f in list(complete_training_data) if "feature" in f] X_valid = current_valid_data[features] Y_valid = current_valid_data["target"] loss = model.evaluate(X_valid.values, Y_valid.values, batch_size=128, verbose=0)[0] if (loss < -np.log(.5)): consistent = True count_consistent += 1 else: consistent = False print("{}: loss - {} consistent: {}".format(era, loss, consistent)) print ("Consistency: {}".format(count_consistent/count)) check_consistency(grid.best_estimator_.model, validation_data) # - # Our model achieves a consistency above 75%, so our submission passes this criterion. # # Submitting the Predictions # We’ll use the following statements to create predictions and write them to a CSV file. # + from time import gmtime, strftime x_prediction = tournament_data[features] t_id = tournament_data["id"] y_prediction = grid.best_estimator_.model.predict_proba(x_prediction.values, batch_size=128) results = np.reshape(y_prediction,-1) results_df = pd.DataFrame(data={'probability':results}) joined = pd.DataFrame(t_id).join(results_df) # path = "predictions_w_loss_0_" + '{:4.0f}'.format(history.history['loss'][-1]*10000) + ".csv" path = 'predictions_{:}'.format(strftime("%Y-%m-%d_%Hh%Mm%Ss", gmtime())) + '.csv' print() print("Writing predictions to " + path.strip()) # # Save the predictions out to a CSV file joined.to_csv(path,float_format='%.15f', index=False) # - # Now we can upload the CSV file to Numerai and see which score we get. 
# # Further Explorations # The most obvious thing to try now, is of course to change the lists of hyper-parameter values. Perhaps some other values as well, like the number layers in the neural network or the type of optimizer used. You can find some inspiration for this here: https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/ # # For the cross-validation you could try 10-fold cross-validation instead of the 5-fold cross-validation which I showed here. Do take into account that it will take at least twice as long to run. But is the extra running time worth it? # # Use RandomizedSearchCV from the scikit-learn library instead of GridSearchCV. Or, write your own search function. A reason why you would want to do that, is so that you can save the progress as check-points after every fold. More on that here: https://machinelearningmastery.com/check-point-deep-learning-models-keras/ # # Try other machine learning methods like random forests or SVMs. # # References # https://keras.io/layers/core/ # # http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html # # http://scikit-learn.org/stable/modules/cross_validation.html#group-k-fold # # https://numer.ai/help # # https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/ # # https://machinelearningmastery.com/check-point-deep-learning-models-keras/
NumeraiArticle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Natural Gas Fuel Cell Flowsheet Example # # 1. Introduction # This example demonstrates a steady-state simulation of a natural gas fuel cell (NGFC) power plant. Natural gas is reformed into syngas which is directly converted to electricity by the solid oxide fuel cells (SOFC). SOFCs are a type of high temperature fuel cell where oxide ions diffuse through a solid ceramic electrolyte from the cathode (supplied with air) to the anode (supplied with fuel). Since SOFCs do not generate electricity via combustion they are not subject to the Carnot efficiency of conventional generation technologies. SOFCs can theoretically achieve efficiencies greater than 70%. Due to their potential for generating efficient, low pollution power, advanced NGFC systems are being studied as a new addition to the electricity fleet. # # The development of this flowsheet is funded by the ARPA-E DIFFERENTIATE project “Machine Learning for Natural Gas to Electric Power System Design” Project number: DE-FOA-0002107-1625. This project is a collaborative effort between the National Energy Technology Laboratory, Pacific Northwest National Laboratory, and the University of Washington to develop NGFC systems with efficiencies greater than 70% and CO2 emissions less than 30 g/kWh. This flowsheet is an initial example of an NGFC system without carbon capture. Future work will add a carbon capture system and explore a broad design space via superstructure optimization. # # The flowsheet includes two main sections, the autothermal reformer and the SOFC power island. The flowsheet contains first principles models for most of the unit operations and includes a surrogate model for the SOFC system, an SOFC reduced order model (ROM) has been created by PNNL. 
The ROM was constructed from the results of thousands of simulations of PNNL's SOFC Multiphysics simulation. It allows us to model the detailed behavior of the SOFC in a computationally efficient way. # # This notebook works by importing and running functions from ```NGFC_flowsheet.py```. Please look at this file for more details on how the sections of the flowsheet are structured. The process of building and solving the flowsheet will be outlined in more detail below. # # # ## 1.1 Autothermal Reformer # # In the reformer, natural gas is reacted with steam and air to produce syngas. A recuperator uses the hot syngas to pre-heat the incoming natural gas. Finally, the natural gas bypass stream is also included to enable a certain percentage of internal reformation by the SOFC. The main unit operations are: Autothermal reformer, preheater, and natural gas expander. # # ## 1.2 SOFC Power Island # # In the power island, syngas is supplied to the anode side of the SOFC and air is supplied to cathode. A portion of the anode exhaust is recycled with the remainder being combusted. Both the combustor and cathode exhaust supply heat to a heat recovery steam generator (HRSG) which is used to power a steam turbine. The steam turbine is not explicitly modeled but is assumed to have a thermal efficiency of 38%. # # ### SOFC ROM # # The ROM has been developed using Kriging technique and it takes eight inputs (detailed in section 2.1) and calculates the outlet temperature of the anode and the voltage of the SOFC stack. The DC power output of the SOFC is the voltage times the current (calculated from O2 flowrate across the electrolyte). The energy balance around the SOFC is completed by ensuring that the anode heat duty and the sum of the DC power and cathode heat duty are equal in magnitude. Calling ```build_SOFC_ROM()``` reads in the kriging parameters from a data file and builds constraints for the kriging calculations. 
It also creates a set of constraints calculating the inputs from the current state of the flowsheet. from IPython.display import Image Image("NGFC_block_flow_diagram.png") # # 2. Problem Statement # # For a given current density, fuel temperature, internal reformation percentage, air temperature, oxygen to carbon ratio (OTC), fuel utilization, and air utilization calculate the net power output, efficiency, and carbon emissions of the NGFC system. # # The inlet natural gas flowrate and composition must be fixed for the results of the ROM to be accurate. # # # ## 2.1 Main Inputs # # - Current density (A/m^2) of the SOFC. The default value is 4000. The range is 2000 - 6000. # - Fuel temperature (C) of the stream entering the SOFC power island. This input is adjusted by changing the outlet temperature of on the hot side of the reformer recuperator. The range is 15 - 600. # - Internal reformation percentage. Due to their high operating temperatures SOFCs are capable of on-cell reformation of methane. The internal reformation percentage is linked to the split fraction of natural gas bypassing the reformer. The range is 0 - 1. # - Air temperature (C) of the stream entering the cathode. This input is controlled by the outlet temperature of the cathode heat exchanger. The range is 550 - 800. # - Cathode recirculation fraction. The cathode recirculation fraction is equal to cathode recycle split fraction. The range is 0 - 0.8. # - OTC, the molar ratio of oxygen to carbon in the stream entering the anode. OTC is controlled by changing the anode recycle split fraction. The range is 1.5 - 3. # - Fuel utilization, the percentage of fuel entering the anode side of the power island that is consumed by the SOFC. In this model the fuel utilization is controlled by changing the amount of oxygen flowing across the electrolyte. The range is 0.40 - 0.95. # - Air utilization, the percentage of air entering the cathode power island that is used by the SOFC. 
The air utilization is controlled by changing the air inlet flowrate. The range is 0.125 - 0.833. # # # ## 2.2 Main Outputs # # - Net power output in MW # - HHV Efficiency of the NGFC # - CO2 emissions # # These results can be viewed in the SVG file that is generated after the simulation is run. # # 3. Custom Initialization # # Since the flowsheet has multiple recycle streams, the flowsheet initialization consists of splitting the flowsheet in two subsystems (Autothermal reformer and SOFC power island). # # Step 1: Build reformer and initialize all the units # # ```build_reformer()``` creates the unit models in the reformer section and connects them. ```set_reformer_inputs()``` fixes the variables of the inlet streams and unit operations to zero degrees of freedom. ```initialize_reformer()``` uses an initial guess of the reformer outlet to initialize the model. Then the solver is run to get the final solution for this section. # # Step 2: Power Island # # The sequence of ```build_power_island()```, ```set_power_island_inputs()```, and ```initialize_power_island()``` creates and initializes the unit models in the power island. At this stage, the inlet to the anode side of the power island is a guess of the syngas conditions. Then the solver is called to finalize the solution to the power island. The solution to the reformer section remains unaffected. # # To combine the two sections ```connect_reformer_to_power_island()``` is called. The funtion unfixes the guess for the inlet to power island and connects the outlet of the reformer section to it. Then the solver is called a third time on the fully connected flowsheet. # # Step 3: Initialize ROM # # The solver is called to solve the ROM. At this point the ROM is not affecting any unit model variables. # # ```add_anode_temp_constraint()``` unfixes the outlet temperature of the anode and enforces a constraint equating the outlet temperature to the value calculated by the ROM. The solver is called again. 
# # ```add_cathode_heat_constraint()``` unfixes the heat duty of the cathode and enforces a constraint equating the heat duty to the value that closes the SOFC energy balance calculated using the ROM's stack voltage. The solver is called again. # # Finally, the ROM inputs are fixed and the corresponding flowsheet variables are unfixed to allow the user to control to simulation using the ROM inputs. # # Step 4 - solve full model: The final initialization step, we solve the complete system. # + import os # Import Pyomo libraries import pyomo.environ as pyo # Import IDAES core from idaes.core import FlowsheetBlock from idaes.core.util import model_serializer as ms # Import NGFC model components import NGFC_flowsheet as NGFC # + # create model and flowsheet m = pyo.ConcreteModel(name='NGFC no CCS') m.fs = FlowsheetBlock(default={"dynamic": False}) # create the solver solver = pyo.SolverFactory("ipopt") solver.options = {'bound_push':1e-16} # + if os.path.exists('NGFC_flowsheet_init.json'): # load model from json file if it exists NGFC.build_reformer(m) NGFC.build_power_island(m) NGFC.connect_reformer_to_power_island(m) NGFC.build_SOFC_ROM(m) NGFC.add_anode_temp_constraint(m) NGFC.add_cathode_heat_constraint(m) NGFC.add_result_constraints(m) ms.from_json(m , fname='NGFC_flowsheet_init.json') else: # this will take 15 - 20 minutes to solve # solve the reformer section NGFC.build_reformer(m) NGFC.set_reformer_inputs(m) NGFC.initialize_reformer(m) solver.solve(m, tee=True) print('Reformer Solved') # solve the power island section NGFC.build_power_island(m) NGFC.set_power_island_inputs(m) NGFC.initialize_power_island(m) solver.solve(m, tee=True) print('Power Island Solved') # combine reformer and power island NGFC.connect_reformer_to_power_island(m) solver.solve(m, tee=True) print('Combined Flowsheet Solved') # add SOFC ROM NGFC.build_SOFC_ROM(m) solver.solve(m, tee=True) print('ROM Solved') # complete SOFC energy balance NGFC.add_anode_temp_constraint(m) solver.solve(m, 
tee=True) NGFC.add_cathode_heat_constraint(m) solver.solve(m, tee=True) print('Energy Balance Solved') # calculate results NGFC.add_result_constraints(m) solver.solve(m, tee=True) print('Results Solved') ms.to_json(m, 'NGFC_flowsheet_init.json') # + # After the initial solve, setup the flowsheet to be controlled by changing the ROM inputs # current density m.fs.SOFC.current_density.fix(4000) # fuel temperature m.fs.reformer_recuperator.tube_outlet.temperature.unfix() m.fs.SOFC.fuel_temperature.fix(348.3) # internal reformation fraction m.fs.reformer_bypass.split_fraction[0, 'bypass_outlet'].unfix() m.fs.SOFC.internal_reforming.fix(0.6) # air temperature m.fs.cathode_hx.area.unfix() m.fs.SOFC.air_temperature.fix(617.3) # air recirculation fraction m.fs.cathode_recycle.split_fraction[0, 'recycle'].unfix() m.fs.SOFC.air_recirculation.fix(0.5) # oxygen to carbon ratio m.fs.anode_recycle.split_fraction[0, 'recycle'].unfix() m.fs.SOFC.OTC.fix(2.1) # fuel utilization m.fs.cathode.ion_outlet.flow_mol.unfix() m.fs.SOFC.fuel_util.fix(0.8) # air utilization m.fs.air_blower.inlet.flow_mol.unfix() m.fs.SOFC.air_util.fix(0.4488) status = solver.solve(m, tee=False) # + tags=["testing", "remove_cell"] # For testing purposes from pyomo.environ import TerminationCondition, value assert status.solver.termination_condition == TerminationCondition.optimal from idaes.core.util.model_statistics import degrees_of_freedom assert degrees_of_freedom(m) == 0 import pytest assert (pytest.approx(659.8e6, abs=1e5) == value(m.fs.net_power)) assert (pytest.approx(0.6248, abs=1e-4) == value(m.fs.HHV_efficiency)) assert (pytest.approx(291.2, abs=1e-1) == value(m.fs.CO2_emissions)) print("Problem solved successfully") # - # # 4 SVG Results # # The results of the simulation can be viewed in the SVG file below. # # It can be seen from the figure that the bulk of the power generated by the NGFC plant is from the SOFCs (541.9 MW). 
The steam turbine and natural gas expander both contribute smaller amounts at 107.1 and 21.1 MW, respectively. The auxiliary load from the recycle blowers and air compressors is only 10.3 MW. The net power of the system is 659.8 MW. # # The higher heating value of the natural gas feed is 908,839 J/mol. Multiplying by the inlet flowrate of 1,161 mol/s gives a total thermal input of 1056 MW. Based on the thermal input and the net generation the efficiency is 62.48%. The carbon emissions are 291.2 g/kWh. # # The results also show the closure of the energy balance around the SOFC. The heat duty of the anode is -673.02 MW and the duty of the cathode is 114.32 MW. When added together they produce the DC stack power of 558.7 MW. # prepare the SVG file with results NGFC.make_stream_dict(m) from idaes.core.util.tables import create_stream_table_dataframe df = create_stream_table_dataframe(streams=m._streams, orient="index") NGFC.pfd_result("NGFC_results.svg", m, df) # + # display the SVG file from IPython.display import Image, SVG, display display(SVG(filename="NGFC_results.svg")) # if SVG file doesn't display in-line you can open the file in a web browser to view results # -
jupyter/NGFC/NGFC_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Insurance All # ## Business Problem # # Insurance All is a company that provides health insurance to its customers and the product team is analyzing the possibility of offering policyholders a new product: auto insurance. # # As with health insurance, customers of this new auto insurance plan need to pay an amount annually to Insurance All to obtain an amount insured by the company, aimed at the costs of an eventual accident or damage to the vehicle. # # Insurance All conducted a survey of about 380,000 customers about their interest in joining a new auto insurance product last year. All customers expressed interest or not in purchasing auto insurance and these responses were saved in a database along with other customer attributes. # # The product team selected 127 thousand new customers who did not respond to the survey to participate in a campaign, in which they will receive the offer of the new auto insurance product. The offer will be made by the sales team through telephone calls. # # However, the sales team has the capacity to make 20 thousand calls within the campaign period. # # The challenge # In that context, it is necessary build a model that predicts whether or not the customer would be interested in auto insurance. # # With its solution, the sales team hopes to be able to prioritize the people with the greatest interest in the new product and optimize the campaign by making only contacts with customers most likely to make the purchase. # # As a result of the project, it is needed to deliver a report containing some analysis and answers to the following questions: # # - Main Insights about the most relevant attributes of customers interested in purchasing auto insurance. 
# # - What percentage of customers interested in purchasing auto insurance will the sales team be able to contact by making 20,000 calls? # # - And if the sales team's capacity increases to 40,000 calls, what percentage of customers interested in purchasing auto insurance will the sales team be able to contact? # # - How many calls does the sales team need to make to contact 80% of customers interested in purchasing auto insurance? # ## Data Base Credentials # # - Database: Postgres 12.0 - AWS # - Credentials: # - HOST = comunidade-ds-postgres.c50pcakiuwi3.us-east-1.rds.amazonaws.com # - PORT=5432 # - Database=comunidadedsdb # - Username=member # - Password=<PASSWORD> # - Presentation of PA004: https://docs.google.com/presentation/d/1gMo5xFPSvAEWsFGHyZKgLLQwzvg2lc65BSXuaLLmWyg/edit?usp=sharing # - Link for business probelm: https://sejaumdatascientist.com/como-usar-data-science-para-fazer-a-empresa-vender-mais/ # # Imports # + init_cell=true # databse conmection import psycopg2 from psycopg2 import OperationalError import pandas.io.sql as psql # data manipulation import pandas as pd # statistics import statistics as st # data visualization from matplotlib import pyplot as plt import seaborn as sns # data analysis from pandas_profiling import ProfileReport # data encoding from sklearn.preprocessing import OneHotEncoder # data reescaling from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler # data balancing from imblearn.combine import SMOTETomek # split dataset from sklearn.model_selection import train_test_split # Feature Selection from boruta import BorutaPy # machine learning from sklearn.dummy import DummyClassifier from xgboost import XGBClassifier from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression, SGDClassifier, RidgeClassifier from lightgbm import LGBMClassifier from sklearn.naive_bayes import GaussianNB from 
imblearn.ensemble import BalancedRandomForestClassifier from catboost import CatBoostClassifier # metrcis from sklearn.metrics import accuracy_score, balanced_accuracy_score, precision_score, recall_score from sklearn.metrics import roc_auc_score, f1_score, cohen_kappa_score, brier_score_loss, classification_report from sklearn.metrics import precision_recall_curve, roc_curve # confusion matrix from sklearn.metrics import plot_confusion_matrix # threshold from numpy import argmax #fine tuning from skopt import forest_minimize from sklearn.model_selection import GridSearchCV from skopt import BayesSearchCV # calibration from sklearn.calibration import CalibratedClassifierCV, calibration_curve # cross validation from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold # display from IPython.core.display import HTML from IPython.display import Image # save pickle objects import pickle # filter warnings import warnings warnings.filterwarnings('ignore') # - # ## Functions # + init_cell=true ########################################################################################################################### # Descripite Statistics of a Data Frame def descriptive( df ): # Central Tendency - mean, median, mode ct1 = pd.DataFrame( df.apply( np.mean) ).T ct2 = pd.DataFrame( df.apply( np.median) ).T ct3 = pd.DataFrame( df.apply( st.mode ) ).T # Dispersion d1 = pd.DataFrame(df.apply(np.std)).T d2 = pd.DataFrame(df.apply(min)).T d3 = pd.DataFrame(df.apply(max)).T d4 = pd.DataFrame(df.apply(lambda x: x.max() - x.min())).T d5 = pd.DataFrame(df.apply(lambda x: np.quantile(x, .25))).T d6 = pd.DataFrame(df.apply(lambda x: np.quantile(x, .75))).T d7 = pd.DataFrame(df.apply(lambda x: x.skew())).T d8 = pd.DataFrame(df.apply(lambda x: x.kurtosis())).T # Concatenate ds = pd.concat([ct1, ct2, ct3, d1, d2, d3, d4, d5, d6, d7, d8]).T.reset_index() ds.columns = ['attributes', 'mean', 'median', 'mode', 'std', 'min', 'max', 'range', 'Q1', 'Q3', 'skewness', 'kurtosis'] 
return ds ########################################################################################################################### # Feature Selection def feature_selection(model, x, y,): model.fit(x, y) # Feature importance data frame feat_imp = pd.DataFrame({'feature': x.columns, 'feature_importance': model.feature_importances_}) \ .sort_values('feature_importance', ascending=False) \ .reset_index(drop=True) # Plot feature importance plt.figure(figsize=(12,6)) sns.barplot(x='feature_importance', y='feature', data=feat_imp, orient='h', palette='magma') plt.title(type(model).__name__, size=16) plt.yticks(size=13) plt.xticks(size=13) plt.xlabel('Feature Importance', size=16, color='grey') plt.ylabel('Features', size=16, color='grey') ########################################################################################################################### # Machine Learning Performance def ml_performance(models, x_train, y_train, x_valid, y_valid, threshold): model_df = [] for model in models: print("Training " + type(model).__name__ + "...") model.fit(x_train, y_train) # predict positive probabilities according to a threshold y_proba = model.predict_proba(x_valid)[:, 1] y_hat = (y_proba >= threshold).astype(int) # metrics f1 = f1_score(y_valid, y_hat) accuracy = accuracy_score(y_valid, y_hat) bal_accuracy = balanced_accuracy_score(y_valid, y_hat) kappa = cohen_kappa_score(y_valid, y_hat) roc_auc = roc_auc_score(y_valid, y_hat) precision = precision_score(y_valid, y_hat) recall = recall_score(y_valid, y_hat) brier = brier_score_loss(y_valid, y_hat, pos_label=1) metrics = pd.DataFrame({'Model Name': type(model).__name__,'Accuracy': accuracy, 'Bal Accuracy': bal_accuracy, 'Precision': precision,'Recall': recall,'F1-Score': f1,'ROC-AUC': roc_auc,'Kappa': kappa, 'Brier': brier}, index = [0]).sort_values('F1-Score', ascending = False) model_df.append(metrics) final_model = pd.concat(model_df) return final_model.sort_values(by='F1-Score', ascending=False).round(5) 
###########################################################################################################################
# Confusion Matrix
def conf_matrix(model, x_train, y_train, x_valid, y_valid):
    """Fit `model` on the training split and plot its row-normalized
    confusion matrix on the validation split.

    Parameters
    ----------
    model : estimator
        Any scikit-learn-compatible classifier (implements fit/predict).
    x_train, y_train :
        Training features and target.
    x_valid, y_valid :
        Validation features and target used for the matrix.
    """
    model.fit(x_train, y_train)

    fig, ax = plt.subplots(figsize=(10, 10))
    # NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 0.24
    # and removed in 1.2 — migrate to ConfusionMatrixDisplay.from_estimator
    # when the environment is upgraded.
    plot_confusion_matrix(model, x_valid, y_valid, cmap='magma_r',
                          normalize='true', ax=ax)
    plt.title(type(model).__name__, size=20)
    plt.xlabel("Predicted label", size=20)
    plt.ylabel("True label", size=20)
    plt.grid(False)

###########################################################################################################################
# Cross Validation
def cross_validation(models, X, y, cv):
    """Cross-validate each model on (X, y) and summarize six metrics.

    For every model, each metric is reported as a "mean +/- std" string
    computed over the folds of `cv`.

    Parameters
    ----------
    models : iterable of estimators
        Scikit-learn-compatible classifiers to evaluate.
    X, y :
        Features and target.
    cv : int or cross-validation splitter
        Passed straight through to cross_val_score.

    Returns
    -------
    pandas.DataFrame
        One row per model; columns: 'Model Name' plus the six metric
        summaries (column labels kept byte-identical to the original,
        including the trailing spaces callers may rely on).
    """
    # Column label -> scikit-learn scoring name. Looping over this table
    # replaces six near-identical copy-pasted stanzas.
    scorings = [
        ('Accuracy (Avg+Std)', 'accuracy'),
        ('Bal Accuracy (Avg+Std)', 'balanced_accuracy'),
        ('Precision (Avg+Std) ', 'precision'),
        ('Recall (Avg+Std) ', 'recall'),
        ('F1-Score (Avg+Std)', 'f1'),
        ('ROC-AUC (Avg+Std)', 'roc_auc'),
    ]

    cv_df = []
    for model in models:
        # NOTE: each scoring triggers a full cross_val_score run (cv refits
        # per metric). sklearn.model_selection.cross_validate with a list of
        # scorers would fit each fold once — worth switching to, but that
        # would add a new import to the shared import cell.
        row = {'Model Name': type(model).__name__}
        for column, scoring in scorings:
            scores = cross_val_score(model, X, y, cv=cv, scoring=scoring,
                                     n_jobs=-1)
            # Bug fix: the original format string was "{:.4f} +/- %{:.4f}",
            # which printed a stray '%' before the std (e.g. "0.85 +/- %0.02").
            row[column] = "{:.4f} +/- {:.4f}".format(scores.mean(),
                                                     scores.std())
        # (Also renamed the loop-local that previously shadowed the imported
        # sklearn.metrics.f1_score function.)
        cv_df.append(pd.DataFrame(row, index=[0]))

    final_df = pd.concat(cv_df)
    return final_df
########################################################################################################################### # Jupyter Settings def jupyter_settings(): # %matplotlib inline # %pylab inline plt.style.use( 'seaborn-whitegrid' ) plt.rcParams['figure.figsize'] = [22,12] plt.rcParams['font.size'] = 20 #display( HTML( '<style>.container { width:80% !important; }</style>') ) pd.set_option('display.float_format', lambda x: '%.5f' % x) pd.set_option( 'display.expand_frame_repr', False ) sns.set() jupyter_settings() ########################################################################################################################### # - # # Load Dataset - SQL host = 'comunidade-ds-postgres.c50pcakiuwi3.us-east-1.rds.amazonaws.com' port = 5432 database = 'comunidadedsdb' user = 'member' pwd = '<PASSWORD>' # ## Check Database # Connect to an existing dataset conn = psycopg2.connect(user = user, password = <PASSWORD>, host = host, port = port, database = database) # ## Check Schemas # + # Create a cursor to perfor database operations cursor = conn.cursor() # Take a look at Schemas query_schema = """ SELECT nspname FROM pg_catalog.pg_namespace """ cursor.execute( query_schema ) record = cursor.fetchall() print( record ) # - # ## Select pa004 tables # + # Create a cursor to perfor database operations cursor = conn.cursor() # Take a look at Schemas query_tables = """ SELECT tablename FROM pg_tables WHERE schemaname='pa004' """ cursor.execute( query_tables ) record = cursor.fetchall() print( record ) # - # ## Check Tables # + init_cell=true # import the tables df_users = pd.read_sql("SELECT * FROM pa004.users", con=conn) df_vehicle = pd.read_sql("SELECT * FROM pa004.vehicle", con=conn) df_insurance = pd.read_sql("SELECT * FROM pa004.insurance", con=conn) # - # show users dataset df_users # show vehicle dataset df_vehicle # show insurance dataset df_insurance # ## Raw Data # merge all datasets df_raw = pd.merge( df_users, df_vehicle, how='inner', on='id' ) df_raw 
= pd.merge( df_raw, df_insurance, how='inner', on='id' ) # see the dataset df_raw # save the dataset pickle.dump(df_raw, open('data/df_raw.pkl', 'wb')) # empty auxiliary datasets df_insurance = None df_users = None df_vehicle = None # # Raw Dataset Analysis # #### Dataset Columns # # - Id: Customer ID # - Gender: Customer Gender # - Customer Age: Customer Age # - Region Code: The code of the region that customer lives # - Policy Sales Channel: The code for the customer disclosure channel (mail, phone, agents, etc.) # - Driving License: Customer has a license 1; customer has no license 0 # - Vehicle Age: The age of the vehicle # - Vehicle Damage: If the vehicle has been damaged in the past, yes or no # - Previously Insured: If the customer has a previous insurance, no: 0, yes: 1 # - Annual Premium: How much the customer paid the company for annual health insurance # - Vintage: Number of days the customer joined the company through the purchase of health insurance. # - Response: The customer has interest in buy the car insurance, no: 0, yes: 1 # # #### The dataset has 381109 rows × 12 columns profile = ProfileReport(df_raw, title='Insurance_All') profile.to_file(output_file='Insurance_All') # # Strings to Numerical Columns # + init_cell=true df1 = pickle.load(open('data/df_raw.pkl', 'rb')) # + # label enconding df1["gender"] = df1["gender"].apply(lambda x: 0 if x == 'Male' else 1).astype(int) df1["vehicle_damage"] = df1["vehicle_damage"].apply(lambda x: 0 if x == 'No' else 1).astype(int) # ordinal enconding df1["vehicle_age"] = df1["vehicle_age"].apply(lambda x: 0 if x == '< 1 Year' else 1 if x == '1-2 Year' else 2).astype(int) # + hide_input=false df1.head() # - # save dataset pickle.dump(df1, open('data/df1.pkl', 'wb')) # ## According to the Pandas Profile Report the following key points can be highlighted: # # - The data set has no missing cells or duplicate rows # - There are more men (54.1%) than women (45.9%) # - The age ranges from 20 to 85 years. 
The average age is 38 years and the distribution is positively skewed (0.67). There is a peak at the ages of 23 and 24, both representing 13.2% of all ages # - The most common region code is 28, representing 27.9% of all 53 codes # - The most common policy sales channel are 152 (35.4%), 26 (20.9%) and 124 (19.4%) # - Most customers have a driver's license (99.8%) # - Most vehicles are between 1 and 2 years old (52.6%), followed by vehicles under 1 year old (43.2%) and over 2 years old (4.2%) # - Most vehicles were damaged (50.5%) # - Most customers have already been assured (54.2%) # - The average of annual premium is 30564, in addition to a minimum of 2630 and a maximum of 540165. The distribution is highly skewed (1.76) and has a kurtosis of 34, which implies that there are many outliers and data is concentrated at the beginning of the curve # - The target variable (response) is quite unbalanced, since 87.7% of customers answered "no" to a new auto insurance proposal # # Mind Map Image('img/mind_map.jpg') # # Feature Engineering # + init_cell=true df2 = pickle.load(open('data/df1.pkl', 'rb')) # + # age damage age_damage = pd.DataFrame(df2.groupby('age')['vehicle_damage'].sum()).reset_index().rename(columns={'vehicle_damage':'age_damage'}) df2 = pd.merge(df2, age_damage, how='left', on='age') # vintage annual premium df2['vintage_annual_premium'] = (df2['annual_premium']) / (df2['vintage']) # age vintage df2['age_vintage'] = (df2['age']*365) / (df2['vintage']) # age_damage_premium df2['age_damage_premium'] = (df2['annual_premium']) / (df2['age_damage']) # - df2.head() df2.info() pickle.dump(df2, open( 'data/df2.pkl', 'wb' ) ) # # Exploratory Data Analysis - Insight Generation df3 = pickle.load(open('data/df2.pkl', 'rb')) # ## Univariate Analysis df3.hist(bins=35, figsize=(22,12)); aux = df3[['age', 'annual_premium', 'vintage', 'age_damage', 'vintage_annual_premium', 'age_vintage', 'age_damage_premium']] descriptive(aux) # ### Response Variable 
# Response-variable distribution: horizontal countplot with the share of each
# class annotated next to its bar.
plt.figure(figsize=(12, 5))
ax = sns.countplot(y='response', data=df3, palette='magma')
sns.set_style('whitegrid')
plt.title('Response Variable', size=20)
plt.xlabel('', color='white')
plt.ylabel('', color='white');
sns.despine(bottom=True, left=True)

# Annotate each bar with its percentage of the total number of customers.
total = df3['response'].size
for p in ax.patches:
    percentage = ' {:.1f}%'.format(100 * p.get_width() / total)
    x = p.get_x() + p.get_width() + 0.02
    y = p.get_y() + p.get_height() / 2
    ax.annotate(percentage, (x, y))

# ## Bivariate Analysis (Hypothesis Test - Insight Generation)

# ### Hypothesis List

# ### H1 - Older customers are more likely to take out a car insurance

# ### H2 - Women are more likely to take out a car insurance

# ### H3 - Customers with older cars are more likely to take out a car insurance

# ### H4 - Customers previously insured are more likely to take out a car insurance

# ### H5 - Customers with previously damaged cars are more likely to take out a car insurance

# ### H6 - Customers with higher annual premium are more likely to take out a car insurance

# ## H1 - Older customers are more likely to take out a car insurance
#
# #### False - People between 40 and 50 are more likely to take out a car insurance

# +
# Positive responses aggregated by age, then bucketed into 5-year bins.
aux1 = df3[df3['response'] == 1][['response', 'age']].groupby(['age']).sum().reset_index()

bins = list(np.arange(20, 90, 5))
# Bug fix: the original did `aux2 = aux1`, which aliases (not copies) the
# frame, so adding the 'age_bin' column silently mutated aux1 as well.
aux2 = aux1.copy()
aux2['age_bin'] = pd.cut(aux2['age'], bins=bins)
aux3 = aux2[['response', 'age_bin']].groupby(['age_bin']).sum().reset_index()

# Bug fix: the original called `plt.figure(figsize(20,8))`, which only works
# because %pylab injects a side-effecting figsize() helper that returns None;
# the explicit keyword form is what was intended.
plt.figure(figsize=(20, 8))

plt.subplot(121)
sns.barplot(x='age_bin', y='response', data=aux3, palette='magma')
sns.despine(bottom=True, left=True)
sns.set_style('whitegrid')
plt.title('Age by Response', size=20)
plt.xlabel('Age', color='grey', size=15)
plt.ylabel('Response', color='grey', size=15);
plt.xticks(rotation=75)

plt.subplot(122)
sns.heatmap(aux2.corr(method='pearson'), annot=True, cmap="magma")
sns.despine(bottom=True, left=True)
plt.title('Age by Response', size=20)
plt.xticks(size=15)
plt.yticks(size=15);
# -
# ## H2 - Women are more likely to take out a car insurance # # #### False - Women are more likely to take out a car insurance # + plt.figure(figsize(20,8)) plt.subplot(121) aux1 = df3[df3['response'] == 1 ] ax = sns.countplot(y='gender', data=aux1, palette='magma') sns.set_style('whitegrid') plt.title('Gender', size =20) plt.yticks(np.arange(2), ['Men', 'Women'], size = 15) plt.xlabel('', color='white') plt.ylabel('', color='white'); sns.despine( bottom=True, left=True) total = aux1['response'].size for p in ax.patches: percentage = ' {:.1f}%'.format(100 * p.get_width()/total) x = p.get_x() + p.get_width() + 0.02 y = p.get_y() + p.get_height()/2 ax.annotate(percentage, (x, y)) plt.subplot(122) aux2= aux1[['response', 'age']].groupby(['age']).sum().reset_index() sns.heatmap( aux2.corr( method='pearson' ), annot=True, cmap="magma") sns.despine( bottom = True, left = True) plt.title( 'Gender by Response', size=20) plt.xticks(size = 15) plt.yticks(size = 15); # - # ## H3 - Customers with older cars are more likely to take out a car insurance # # #### False - Customers with cars between 1 and 2 years are more likely to take out a car insurance # + plt.figure(figsize(20,8)) plt.subplot(121) aux1 = df3[df3['response'] == 1 ] ax = sns.countplot(y='vehicle_age', data=aux1, palette='magma') sns.set_style('whitegrid') plt.title('Vehicle Age by Response', size=20) plt.yticks(np.arange(3), ['< Year', '1-2 Years', '> 2 Years'], size = 15) plt.xlabel('', color='white') plt.ylabel('', color='white'); sns.despine( bottom=True, left=True) total = aux1['response'].size for p in ax.patches: percentage = ' {:.1f}%'.format(100 * p.get_width()/total) x = p.get_x() + p.get_width() + 0.02 y = p.get_y() + p.get_height()/2 ax.annotate(percentage, (x, y)) plt.subplot(122) aux2= aux1[['response', 'vehicle_age']].groupby(['vehicle_age']).sum().reset_index() sns.heatmap( aux2.corr( method='pearson' ), annot=True, cmap="magma") sns.despine( bottom = True, left = True) plt.title( 'Vehicle Age by 
Response', size=20) plt.xticks(size = 15) plt.yticks(size = 15); # - # ## H4 - Customers previously insured are more likely to take out a car insurance # # #### True - Customers previously insured are more likely to take out a car insurance # + plt.figure(figsize=(12,5)) aux1 = df3[df3['response'] == 1 ] ax = sns.countplot(y='previously_insured', data=aux1, palette='magma') sns.set_style('whitegrid') plt.title('Previously Insured by Response', size = 20) plt.yticks(np.arange(2), ['Insured', 'Uninsured'], size = 15) plt.xlabel('', color='white') plt.ylabel('', color='white'); sns.despine( bottom=True, left=True) total = aux1['response'].size for p in ax.patches: percentage = ' {:.1f}%'.format(100 * p.get_width()/total) x = p.get_x() + p.get_width() + 0.02 y = p.get_y() + p.get_height()/2 ax.annotate(percentage, (x, y)) # - # ## H5 - Customers with previously damaged cars are more likely to take out a car insurance # # #### True - Customers with previously damaged cars are more likely to take out a car insurance # + plt.figure(figsize=(12,5)) aux1 = df3[df3['response'] == 1 ] ax = sns.countplot(y='vehicle_damage', data=aux1, palette='magma') sns.set_style('whitegrid') plt.title('Vehicle Damaged by Response', size = 20) plt.yticks(np.arange(2), ['Damaged', 'Undamaged'], size = 15) plt.xlabel('', color='white') plt.ylabel('', color='white'); sns.despine( bottom=True, left=True) total = aux1['response'].size for p in ax.patches: percentage = ' {:.1f}%'.format(100 * p.get_width()/total) x = p.get_x() + p.get_width() + 0.02 y = p.get_y() + p.get_height()/2 ax.annotate(percentage, (x, y)) # - # ## H6 - Customers with higher annual premium are more likely to take out a car insurance # # #### False - Customers with lower annual premium are more likely to take out a car insurance # + plt.figure(figsize(20,8)) plt.subplot(121) sns.kdeplot(x='annual_premium', data=df3, hue='response', multiple="stack") plt.title( 'Annual Premium by Response', size=20); plt.subplot(122) aux2= 
aux1[['response', 'annual_premium']].groupby(['annual_premium']).sum().reset_index() sns.heatmap( aux2.corr( method='pearson' ), annot=True, cmap="magma") sns.despine( bottom = True, left = True) plt.title( 'Annual Premium by Response', size=20) plt.xticks(size = 15) plt.yticks(size = 15); # - # # Multivariate Analisys # ## Pearson Correlation - Numeraical Features # + # correlation corr = df3[['age', 'region_code', 'policy_sales_channel', 'vehicle_age', 'annual_premium', 'vintage', 'age_damage', 'vintage_annual_premium', 'age_vintage', 'age_damage_premium', 'response']].corr(method='pearson') # Creating a with mask for the superior triangle mask = np.zeros_like(corr) mask[np.triu_indices_from(mask)] = True with sns.axes_style("white"): # Creating the chart with mask plt.figure(figsize(18,12)) ax = sns.heatmap(corr, mask=mask, annot = True, cmap = 'magma') plt.xticks(size = 14, rotation=70) plt.yticks(size = 14) plt.title("Pearson Correlation of Numerical Variables", size=20); # - # ## Cramer's V (Pandas Profiling) profile = ProfileReport(df3, title='Insurance All 2') profile.to_file(output_file='Insurance_All_2') # Cramer's V Image('img/cramer.png') # # Reescaling df4 = pickle.load(open('data/df2.pkl', 'rb')) df4.hist(bins=35, figsize=(22,12)); # + # scalers ss = StandardScaler() rs = RobustScaler() mms = MinMaxScaler() # age - min-max df4['age'] = mms.fit_transform( df4[['age']].values ) # annual premium - robust df4['annual_premium'] = rs.fit_transform( df4[['annual_premium']].values ) # age_damage - min-max df4['age_damage'] = mms.fit_transform( df4[['age_damage']].values ) # vintage annual premium - robust df4['vintage_annual_premium'] = rs.fit_transform( df4[['vintage_annual_premium']].values ) # age vintage - robust df4['age_vintage'] = rs.fit_transform( df4[['age_vintage']].values ) # age damage premium - robust df4['age_damage_premium'] = rs.fit_transform( df4[['age_damage_premium']].values ) # - pickle.dump(df4, open('data/df4.pkl', 'wb')) # # Feature 
Selection # load dataset df5 = pickle.load( open( 'data/df4.pkl', 'rb' ) ) # split into x and y x = df5.drop(['id', 'response'], axis=1).copy() y = df5['response'].copy() # ## Random Forest # classifiers rf = RandomForestClassifier(n_jobs=-1) feature_selection(rf, x, y) # ### Based on the EDA and Random Forest It will be used the criterion of 0.06 for feature selection: # # - ['vintage_annual_premium', # - 'age_vintage', # - 'vintage', # - 'age_damage_premium', # - 'annual_premium', # - 'vehicle_damage', # - 'previously_insured'] # # Split into train (70%), validation (10%) and test (20%) # load dataset df6 = pickle.load(open('data/df4.pkl', 'rb')) # + # split into x and y x = df6[['vintage_annual_premium', 'age_vintage', 'vintage', 'age_damage_premium', 'annual_premium', 'vehicle_damage', 'previously_insured']].copy() y = df6['response'].copy() # + # train test split x_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=28, stratify=y) # train valid split X_train, X_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.10, random_state=28) # check balance of classes (0, 1) among y_train, y_test and y_valid print(f"y_train size: {np.bincount(y_train)}") print(f"y_test size: {np.bincount(y_test)}") print(f"y_valid size: {np.bincount(y_valid)}") # - # shape of trains, tests and valids print(X_train.shape) print(X_valid.shape) print(X_test.shape) print(y_train.shape) print(y_valid.shape) print(y_test.shape) # + # saving all data pickle.dump(X_train, open('data/X_train.pkl', 'wb')) pickle.dump(X_test, open('data/X_test.pkl', 'wb')) pickle.dump(X_valid, open('data/X_valid.pkl', 'wb')) pickle.dump(y_train, open('data/y_train.pkl', 'wb')) pickle.dump(y_test, open('data/y_test.pkl', 'wb')) pickle.dump(y_valid, open('data/y_valid.pkl', 'wb')) # - # # Balancing # load data X_train=pickle.load(open('data/X_train.pkl', 'rb')) y_train=pickle.load(open('data/y_train.pkl', 'rb')) # + # ratio ratio= {0: 334399, 1: 7*46710} 
smt=SMOTETomek(sampling_strategy=ratio, random_state=28, n_jobs=-1) # apply sampler X_train, y_train=smt.fit_resample(X_train, y_train) # - y_train.value_counts() # saving balanced train data pickle.dump(X_train, open('data/X_train.pkl', 'wb')) pickle.dump(y_train, open('data/y_train.pkl', 'wb')) # # Machine Learning # load data X_train=pickle.load(open('data/X_train.pkl', 'rb')) y_train=pickle.load(open('data/y_train.pkl', 'rb')) # + # algorithms models = [CatBoostClassifier(random_state=28, verbose=False), XGBClassifier(random_state=28), LGBMClassifier(random_state=28, n_jobs=-1), LogisticRegression(max_iter=220, random_state=28), RandomForestClassifier(random_state=28), BalancedRandomForestClassifier(random_state=28), KNeighborsClassifier(n_neighbors=5, weights='distance',n_jobs=-1), SGDClassifier(loss='log', random_state=28, n_jobs=-1), DummyClassifier(random_state=28), GaussianNB()] ml_performance(models, X_train, y_train, X_valid, y_valid, 0.5) # - X_train = pickle.load(open('data/X_train.pkl', 'rb')) y_train = pickle.load(open('data/y_train.pkl', 'rb')) X_valid = pickle.load(open('data/X_valid.pkl', 'rb')) y_valid = pickle.load(open('data/y_valid.pkl', 'rb')) # # Cross Validation Catboost Default Model # + editable=false run_control={"frozen": true} # # concat # X = pd.concat([X_train, X_valid], axis = 0) # y = pd.concat([y_train, y_valid], axis = 0) # # model = [CatBoostClassifier(random_state=28, verbose=False)] # # # cross validation # cross_validation(model, X, y, 5) # - # # Cross Validation LGBM Default Model # + # concat X = pd.concat([X_train, x_valid], axis=0) y = pd.concat([y_train, y_valid], axis=0) model=[LGBMClassifier(random_state=28, n_jobs=-1)] # cross validation cross_validation(model, X, y, 5) # - # # Cross Validation XGboost Default Model # + editable=false run_control={"frozen": true} # # concat # X = pd.concat([X_train, X_valid], axis = 0) # y = pd.concat([y_train, y_valid], axis = 0) # # model = [XGBClassifier(random_state=28)] # # # 
cross validation # cross_validation(model, X, y, 5) # - # ## Classification Report # + deletable=false editable=false run_control={"frozen": true} # model = CatBoostClassifier(random_state=28, verbose=False).fit(X_train, y_train) # # # predictions # y_model = model.predict(X_valid) # # # prints # print(type(model).__name__) # print(classification_report(y_valid, y_model)) # + deletable=false editable=false run_control={"frozen": true} # model = LGBMClassifier(random_state=28, n_jobs=-1).fit(X_train, y_train) # # # predictions # y_model = model.predict(X_valid) # # # prints # print(type(model).__name__) # print(classification_report(y_valid, y_model)) # + deletable=false editable=false run_control={"frozen": true} # model = model = XGBClassifier(random_state=28).fit(X_train, y_train) # # # predictions # y_model = model.predict(X_valid) # # # prints # print(type(model).__name__) # print(classification_report(y_valid, y_model)).fit(X_train, y_train) # # # predictions # y_model = model.predict(X_valid) # # # prints # print(type(model).__name__) # print(classification_report(y_valid, y_model)) # - # ## Confusion Matrix of Default Model conf_matrix(CatBoostClassifier(random_state=28, verbose=False), X_train, y_train, X_valid, y_valid) # ## Best Threshold for ROC AUC # + deletable=false editable=false run_control={"frozen": true} # # model # model = CatBoostClassifier(random_state=28, verbose=False).fit(X_train, y_train) # # # predict probabilities and get the positive outcome only # model_yhat = model.predict_proba(X_valid) # # # keep probabilities for the positive outcome only # model_yhat = model_yhat[:, 1] # # # calculate roc curves # fpr, tpr, thresholds = roc_curve(y_valid, model_yhat) # # # calculate the g-mean for each threshold # gmeans = sqrt(tpr * (1-fpr)) # # # locate the index of the largest g-mean # ix = argmax(gmeans) # print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[ix], gmeans[ix])) # # # plot the roc curve for the model # plt.figure(figsize=(15,8)) 
# plt.yticks(size=14) # plt.xticks(size=14) # plt.plot([0,1], [0,1], linestyle='--', label='No Skill') # plt.plot(fpr, tpr, marker='.', label='Logistic') # plt.scatter(fpr[ix], tpr[ix], marker='o', color='black', s=150, label='Best') # plt.xlabel('False Positive Rate',size =18) # plt.ylabel('True Positive Rate', size =18) # plt.legend(prop={'size': 14}) # plt.show() # - # ### Performance with best ROC Curve threshold # + models = [CatBoostClassifier(random_state=28, verbose=False)] ml_performance(models, X_train, y_train, X_valid, y_valid, 0.45) # - # ## Best Threshold for Precision Recall Curve # + deletable=false editable=false run_control={"frozen": true} # # model # model = CatBoostClassifier(random_state=28, verbose=False).fit(X_train, y_train) # # # predict probabilities and get the positive outcome only # model_yhat = model.predict_proba(X_valid) # # # keep probabilities for the positive outcome only # model_yhat = model_yhat[:, 1] # # # calculate roc curves # precision, recall, thresholds = precision_recall_curve(y_valid, model_yhat) # # # convert to f score # fscore = (2 * precision * recall) / (precision + recall) # # # locate the index of the largest f score # ix = argmax(fscore) # print('Best Threshold=%f, F1-Score=%.3f' % (thresholds[ix], fscore[ix])) # # # plot the roc curve for the model # plt.figure(figsize=(15,8)) # plt.yticks(size=14) # plt.xticks(size=14) # no_skill = len(y_valid[y_valid==1]) / len(y_valid) # plt.plot([0,1], [no_skill,no_skill], linestyle='--', label='No Skill') # plt.plot(recall, precision, marker='.', label='Logistic') # plt.scatter(recall[ix], precision[ix], marker='o', color='black', s=150, label='Best') # # # axis labels # plt.xlabel('Recall',size =18) # plt.ylabel('Precision', size =18) # plt.legend(prop={'size': 14}) # plt.show() # - # ### Performance with best Precision Recall Curve threshold # + # algorithm models = [CatBoostClassifier(random_state=28, verbose=False)] ml_performance(models, X_train, y_train, X_valid, 
y_valid, 0.55) # - # # Hyperparameter Optimization X_train = pickle.load(open('data/X_train.pkl', 'rb')) y_train = pickle.load(open('data/y_train.pkl', 'rb')) X_valid = pickle.load(open('data/X_valid.pkl', 'rb')) y_valid = pickle.load(open('data/y_valid.pkl', 'rb')) # ## Forest Minimize # # + def tune_model(params): depth = params[0] learning_rate = params[1] iterations = params[2] #Model model = CatBoostClassifier(depth=depth, learning_rate=learning_rate, iterations=iterations, random_state=28, verbose=False).fit(X_train, y_train) yhat = model.predict(X_valid) yhat_proba = model.predict_proba(X_valid)[:,1] roc_auc = roc_auc_score(y_valid, yhat_proba) return -roc_auc space = [(5,16), #depth (0.01,0.1), #learning_rate (10,500)] #interations result = forest_minimize(tune_model, space, random_state=28, n_random_starts=20, n_calls=20, verbose=0) result_list = result.x result_list # - # ## Bayes Search # + #Using BayesSearchCV cbc = CatBoostClassifier(random_state=28, verbose=False) # define search space params = dict() params['depth'] = (4, 16) params['learning_rate'] = (0.01, 0.9) params['interations'] = (10,3000) # define evaluation cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1) # define the search search = BayesSearchCV(estimator=cbc, search_spaces=params, n_jobs=-1, cv=cv) # perform the search search.fit(X_train, y_train) # report the best result print(search.best_score_) print(search.best_params_) # - # ## Tuned Model Performance # + # algorithm models = [CatBoostClassifier(depth=15, learning_rate=0.16, iterations=35, random_state=28, verbose=False)] ml_performance(models, X_train, y_train, X_valid, y_valid, 0.55) # - # ## Confusion Matrix of Tuned Balanced Random Forest # + model = CatBoostClassifier(depth=15, learning_rate=0.16, iterations=35, random_state=28, verbose=False) conf_matrix(model, X_train, y_train, X_valid, y_valid) # - # # Test Dataset Performance X_test = pickle.load(open('data/x_test.pkl', 'rb')) y_test = 
pickle.load(open('data/y_test.pkl', 'rb')) X_train = pickle.load(open('data/x_train.pkl', 'rb')) y_train = pickle.load(open('data/y_train.pkl', 'rb')) # + # algorithm models = [CatBoostClassifier(depth=10, learning_rate=0.03, iterations=100, random_state=28, verbose=False)] ml_performance(models, X_train, y_train, X_test, y_test, 0.52) # - # # Cross Validation Tuned Model - Test Dataset # + # concat X = pd.concat([X_train, X_test], axis = 0) y = pd.concat([y_train, y_test], axis = 0) model = [CatBoostClassifier(random_state=28, verbose=False)] # define evaluation cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=28) # cross validation cross_validation(model, X, y, cv) # - # # Calibration
old_notebooks/pa004_andre_rossi-cycle02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Simple ModelGrid Demo # # This demo shows how the ModelGrid classes can work, where all model grid functionality is in the model grid class. Only the geographic reference information and related methods remain in SpatialReference. # + import os import sys import numpy as np # run installed version of flopy or add local path try: from flopy.discretization.structuredgrid import StructuredGrid from flopy.utils.reference import SpatialReference except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) from flopy.discretization.structuredgrid import StructuredGrid from flopy.utils.reference import SpatialReference import matplotlib as mpl import matplotlib.pyplot as plt import flopy print(sys.version) print('numpy version: {}'.format(np.__version__)) print('matplotlib version: {}'.format(mpl.__version__)) print('flopy version: {}'.format(flopy.__version__)) # - # Technically you need to create both a SpatialReference object and a ModelGrid object, but in practice the code looks very similar and can easily be implemented in one line. mg = StructuredGrid(delc=np.ones(10), delr=np.ones(10), xoff=10, yoff=10, angrot=20, epsg=26715, lenuni=1) # ### Transform, xcenters, xedges # # Transform, xcenters, xedges behaves the same but is now a method of the ModelGrid, but has the same functionatily. mg.get_coords(10, 10) mg.get_local_coords(11.82170571129881, 13.906660505012088) # Methods to get cell centers of cells from the model grid are now in the ModelGrid classes. 
mg.xcellcenters[1, 7] # x-coordinate at row 1, column 7 mg.xcellcenters[[1, 3], [7, 7]] plt.scatter(mg.xcellcenters.ravel(), mg.ycellcenters.ravel()) plt.scatter(mg.xyzvertices[0].ravel(), mg.xyzvertices[1].ravel(), s=10) # ### Model Coordinates # # The model grid outputs everything in model coordinates if reference information is not given. mg.set_coord_info() plt.scatter(mg.xcellcenters.ravel(), mg.ycellcenters.ravel()) plt.scatter(mg.xyzvertices[0].ravel(), mg.xyzvertices[1].ravel(), s=10) # ### Grid lines def plot_lines(lines): fig, ax = plt.subplots() for ln in lines: plt.plot([ln[0][0], ln[1][0]], [ln[0][1], ln[1][1]]) plot_lines(mg.grid_lines) plt.ylim(11, -1) mg.set_coord_info(xoff=10, yoff=10, angrot=20, epsg=26715) plot_lines(mg.grid_lines) plt.scatter(11.9, 13.5, c='r') # ### Vertices mg.set_coord_info() mg.xyzvertices[0] mg.set_coord_info(xoff=10, yoff=10, angrot=20, epsg=26715) mg.xyzvertices[0] # ### Getting the Model Grid from the Model # + ml = flopy.modflow.Modflow(modelname='new_model', exe_name='mf2005', version='mf2005', model_ws=os.path.join('data')) dis = flopy.modflow.ModflowDis(ml,nlay=5,nrow=5,ncol=5,delr=[50,50,50,50,50], delc=[50,50,50,50,50],top=0.0,botm=-50.0,laycbd=0) ibound = np.ones((5, 5, 5), dtype=np.int) start = np.zeros((5, 5, 5), dtype=np.float) bas = flopy.modflow.ModflowBas(ml,ibound=ibound,strt=start) # - # Use the .modelgrid attribute to get the model grid from the model. ml.modelgrid.xyzvertices[0] # By default the flopy model class caches the model grid. If you later modify model discritization information the model grid can end up out of sync with the model discritization information. dis.delr=[10,10,10,10,10] ml.modelgrid.xyzvertices[0] # To resync the model grid with the latest model discritization information call update_modelgrid. Calling the update_modelgrid method clears the cached model grid and forces flopy to create a new model grid object on the next call to modelgrid. 
The current model discretization information is used to create the new model grid. ml.update_modelgrid() ml.modelgrid.xyzvertices[0]
examples/Notebooks/flopy3_demo_of_modelgrid_classes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sympy as sym import numpy as np import matplotlib.pyplot as plt # %matplotlib inline sym.init_printing() Omega = sym.symbols('Omega', real=True) k = sym.symbols('k', integer=True) # - # # Transformada de Fourier de tiempo discreto # La [Transformada de Fourier de tiempo discreto](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform) (DTFT) de una señal discreta $x[k]$ con $k \in \mathbb{Z}$ es: # # \begin{equation} # X(e^{j \Omega}) = \sum_{k = -\infty}^{\infty} x[k] \, e^{-j \Omega k} # \end{equation} # # donde $\Omega \in \mathbb{R}$ representa la frecuencia angular normalizada. Observe que $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$ es una función compleja y continua que contiene la información de la señal discreta $x[k]$, por lo cual se le denomina el "espectro" de $x[k]$. # # Distintos autores pueden definir esta transformada mediante otras expresiones y por lo tanto debe considerarse tales definiciones al interpretar sus propiedades. # # Una condición suficiente pero no necesaria para que exista la DTFT emerge de la [desigualdad triangular o desigualdad de Minkowski](https://en.wikipedia.org/wiki/Triangle_inequality). es: # # \begin{equation} # \left|X(e^{j \Omega})\right| = \left| \sum_{k = -\infty}^{\infty} x[k] \, e^{-j \Omega k} \right| # \leq \sum_{k = -\infty}^{\infty} \left| x[k] \right| < \infty # \end{equation} # # Por lo cual, si la señal $x[k]$ debe ser absolutamente sumable, existe su DTFT. # # La DTFT es periódica con periodo $T_\text{p} = 2 \pi$, # # \begin{equation} # X(e^{j \Omega}) = X(e^{j (\Omega + n \cdot 2 \pi)}) # \end{equation} # # con $n \in \mathbb{Z}$. 
Observe que $e^{j \Omega k} = e^{j (\Omega + 2 \pi) k}$ representa oscilaciones puras y también es periódica, por lo que solo es necesario analizar un periodo del espectro de una señal periodica. # # La transformada inversa $x[k] = \mathcal{F}_*^{-1} \{ X(e^{j \Omega}) \}$ se define como: # # \begin{equation} # x[k] = \frac{1}{2 \pi} \int_{-\pi}^{\pi} X(e^{j \Omega}) \, e^{j \Omega k} \; d \Omega # \end{equation} # # # ## Transformada del Impulso de Dirac $\delta[k]$ # # La tranasformada $\mathcal{F}_* \{ \delta[k] \}$ del [impulso de Dirac](../discrete_signals/standard_signals.ipynb#Dirac-Impulse) se obtiene al introducir $\delta[k]$ en la definición de DTFT # # La señal impulso de Dirac discreta es: # # $$ # \delta[k] = \left\{\begin{matrix} # 0 & k \neq 0 \\ # 1 & k = 0 # \end{matrix}\right. # $$ # # \begin{split} # \mathcal{F}_* \{ \delta[k] \} &= \sum_{k = -\infty}^{\infty} \delta[k] \, e^{-j \Omega k} \\ # & = ... +0e^{-j \Omega (-2)}+0e^{-j \Omega (-1)}+\underline{1}+0e^{-j \Omega 1}+0e^{-j \Omega 2}+... \\ # &= 1 # \end{split} # # Esto indica que todas las frecuencias normalizadas tienen el mismo peso en la señal impulso de Dirac. # # # ## Transformada del Impulso de Dirac desplazado $\delta[k- \kappa]$ # # La señal impulso de Dirac discreta es: # # $$ # \delta[k- \kappa] = \left\{\begin{matrix} # 0 & k \neq \kappa \\ # 1 & k = \kappa # \end{matrix}\right. # $$ # # \begin{split} # \mathcal{F}_* \{ \delta[k] \} &= \sum_{k = -\infty}^{\infty} \delta[k] \, e^{-j \Omega k} \\ # & = ... +0e^{-j \Omega (\kappa-2)}+0e^{-j \Omega (\kappa-1)}+1e^{-j \Omega (\kappa)}+0e^{-j \Omega (\kappa+1)}+0e^{-j \Omega (\kappa+2)}+... \\ # &= 1e^{-j \Omega (\kappa)} # \end{split} # # Esto indica que todas las frecuencias normalizadas tienen el mismo peso en la señal impulso de Dirac pero aparecen términos de desplazamientos temporales $e^{-j \Omega (\kappa)}$. # # # ## Transformada de la señal Rectangular $\text{rect}_N[k]$ # # Considere una señal rectangular $\text{rect}_N[k]$ . 
# # $$ # \text{rect}_N[k] = \left\{\begin{matrix} # 0 & k < 0 \\ # 1 & 0 \leq k < N \\ # 0 & k \geq N \\ # \end{matrix}\right. # $$ # # \begin{split} # \mathcal{F}_* \{ \text{rect}[k] \} &= \sum_{k = -\infty}^{\infty} \text{rect}[k] \, e^{-j \Omega k} \\ # &= \sum_{k = 0}^{N-1} e^{-j \Omega k} \\ # &= e^{-j \Omega 0} + e^{-j \Omega 1} + e^{-j \Omega 3} + ... + e^{-j \Omega (N-2)} + e^{-j \Omega (N-1)} \\ # \end{split} # # # Observe que la serie resultante es una [serie geométrica finita](https://en.wikipedia.org/wiki/Geometric_series) de la forma: # # $$ \sum_{k = 0}^{N} a r^k = a \frac{1-r^{N+1}}{1-r} $$ # # Por lo cual la transformada puede escribirse como: # # \begin{split} # \mathcal{F}_* \{ \text{rect}[k] \} &= \sum_{k = 0}^{N-1} 1 (e^{-j \Omega})^k \\ # &= 1 \frac{1-(e^{-j \Omega})^N}{1-e^{-j \Omega}} \\ # &= 1 \frac{1-e^{-j N \Omega}}{1-e^{-j \Omega}} \\ # \end{split} # # + N = sym.symbols('N', integer=True, positive =True) F_rect = sym.summation(sym.exp(-sym.I*Omega*k), (k, 0, N-1)) F_rect # + # Define una señal rectángular Nr = 5 # Valor N kr = range(-20, 20) # Rango de visualización x = list(kr) # inicia valores de la señal for ind,kx in enumerate(kr): if (kx>= 0) & ( kx < Nr): x[ind]=1 else: x[ind]=0 plt.rcParams['figure.figsize'] = 9, 3 plt.stem(kr, x) # - X = sym.summation(sym.exp(-sym.I*Omega*k), (k, 0, Nr-1)) X plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.re(X), (Omega, -20, 20), xlabel=r'$\Omega$', ylabel=r'$Re( X(e^{j \Omega}) )$') plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.im(X), (Omega, -20, 20), xlabel=r'$\Omega$', ylabel=r'$Im( X(e^{j \Omega}) )$') plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.Abs(X), (Omega, -20, 20), xlabel=r'$\Omega$', ylabel=r'$| X(e^{j \Omega}) |$') plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.arg(X), (Omega, -20, 20), xlabel=r'$\Omega$', ylabel=r'$| X(e^{j \Omega}) |$') # ¿Qué observa en las curvas de magnitud y la fase al incrementar o disminuir el tamaño de la señal rectangular $N$? 
# ## Transformada de la señal exponencial $e^{\Omega_0 k}\cdot \epsilon [k]$ # # La señal exponencial causal se define como: # # $$ # \left\{\begin{matrix} # 0 & k < 0 \\ # e^{\Omega_0 k} & k \geq 0 # \end{matrix}\right. # $$ # # La transformada es: # # \begin{split} # \mathcal{F}_* \{ e^{\Omega_0 k} \epsilon [k] \} &= \sum_{k = 0}^{\infty} e^{\Omega_0 k} \, e^{-j \Omega k} \\ # &= \sum_{k = 0}^{\infty} e^{(\Omega_0 - \Omega j)k} \\ # &= \frac{1}{1-e^{(\Omega_0 - \Omega j)}} \\ # \end{split} # # + # Define una señal rectángular kr = range(-20, 20) # Rango de visualización x = list(kr) # inicia valores de la señal W0 = -0.5 for ind,kx in enumerate(kr): if (kx>= 0): x[ind]=np.exp(W0*kx) else: x[ind]=0 plt.rcParams['figure.figsize'] = 9, 3 plt.stem(kr, np.real(x)) # - plt.rcParams['figure.figsize'] = 9, 3 plt.stem(kr, np.imag(x)) X = 1/(1-sym.exp(W0-1j*(Omega))) rW = (Omega, -2*sym.pi, 2*sym.pi) X plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.re(X), rW, xlabel=r'$\Omega$', ylabel=r'$Re( X(e^{j \Omega}) )$', ylim=(-1,3)) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.im(X), rW, xlabel=r'$\Omega$', ylabel=r'$Im( X(e^{j \Omega}) )$', ylim=(-1,3)) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.Abs(X), rW, xlabel=r'$\Omega$', ylabel=r'$| X(e^{j \Omega})|$', ylim=(-1,3)) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.arg(X), rW, xlabel=r'$\Omega$', ylabel=r'$arg( X(e^{j \Omega}) )$') # ## Transformada de la señal seno $\sin(\Omega_0 k)\cdot \epsilon [k]$ # # La señal seno causal se define como: # # $$ # \left\{\begin{matrix} # 0 & k < 0 \\ # \sin(\Omega_0 k) = \frac{e^{j\Omega_0 k}-e^{-j\Omega_0 k}}{2j} & k \geq 0 # \end{matrix}\right. 
# $$ # # # # La transformada es: # # \begin{split} # \mathcal{F}_* \{ \sin(\Omega_0 k)\cdot \epsilon [k] \} &= \mathcal{F}_* \{ \frac{e^{j\Omega_0 k}-e^{-j\Omega_0 k}}{2j}\}\\ # &= \frac{\mathcal{F}_* \{e^{j\Omega_0 k}\}-\mathcal{F}_* \{e^{-j\Omega_0 k}\}}{2j}\\ # &= \frac{\frac{1}{1-e^{(\Omega_0 - \Omega j)}} - \frac{1}{1-e^{(-\Omega_0 - \Omega j)}}}{2j} \\ # \end{split} # + # Define una señal rectángular kr = range(-20, 20) # Rango de visualización de instantes k x = list(kr) # inicia valores de la señal W0 = 0.1*np.pi # np.pi/4 for ind,kx in enumerate(kr): if (kx>= 0): x[ind]=np.sin(W0*kx) else: x[ind]=0 plt.rcParams['figure.figsize'] = 9, 3 plt.stem(kr, np.real(x)) # - plt.rcParams['figure.figsize'] = 9, 3 plt.stem(kr, np.imag(x)) X = (1/(1-sym.exp(1j*W0-1j*(Omega))) - 1/(1-sym.exp(-1j*W0-1j*(Omega))))/2j rW = (Omega, -2*sym.pi, 2*sym.pi) X = X.simplify() X plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.re(X), rW, xlabel=r'$\Omega$', ylabel=r'$Re( X(e^{j \Omega}) )$', ylim=(-10,10), adaptive = False) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.im(X), rW, xlabel=r'$\Omega$', ylabel=r'$Im( X(e^{j \Omega}) )$', ylim=(-1,10), adaptive = False) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.Abs(X), rW, xlabel=r'$\Omega$', ylabel=r'$| X(e^{j \Omega})|$', ylim=(-1,10), adaptive = False) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.arg(X), rW, xlabel=r'$\Omega$', ylabel=r'$arg( X(e^{j \Omega}) )$', adaptive = False) # Analice los espectros de la señal seno con frecuencias: # - $1$ # - $2$ # - $\pi/2$ # - $3\pi/2$ # ## Transformada de una señal arbitraria # + import soundfile as sf x, fs = sf.read('datos/JPDP001.wav')# Lee un archivo .wav x = x.T[0] #Selecciona solo un canal de grabación sf.write('temp.wav',x,fs) x = x[30:260:2] plt.rcParams['figure.figsize'] = 9, 3 plt.stem(x) len(x) # - X = 0 for ind,xk in enumerate(x): X = X + xk*sym.exp(1j*Omega*ind) plt.rcParams['figure.figsize'] = 9, 3 sym.plot(sym.Abs(X), rW, xlabel=r'$\Omega$', ylabel=r'$| 
X(e^{j \Omega})|$', ylim=(-1,8), adaptive = False) # - ¿Qué espera que pase si toma una señal de $440 000$ muestras? # ## Propiedades # # # | | $x[k]$ | $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$ | # |:---|:---:|:---:| # | Linealidad | $A \, x_1[k] + B \, x_2[k]$ | $A \, X_1(e^{j \Omega}) + B \, X_2(e^{j \Omega})$ | # | Convolución | $x[k] * h[k]$ | $X(e^{j \Omega}) \cdot H(e^{j \Omega})$ | # | Multiplicación | $x[k] \cdot h[k]$ | $ X(e^{j \Omega}) * H(e^{j \Omega})$ | # | Desplazamiento | $x[k - \kappa]$ | $e^{-j \Omega \kappa} \cdot X(e^{j \Omega})$ | # | Modulación | $e^{j \Omega_0 k} \cdot x[k]$ | $X(e^{j (\Omega- \Omega_0)})$ | # # # Con $A, B \in \mathbb{C}$, $\Omega_0 \in \mathbb{R}$ y $\kappa \in \mathbb{Z}$.
FourierDTFT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.2 64-bit
#     metadata:
#       interpreter:
#         hash: aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49
#     name: python3
# ---

# ![MLU Logo](../../data/MLU_Logo.png)

import numpy as np

# A single median of 1000 datapoints from a badly behaved distribution.
# -log(U) with U ~ Uniform(0, 1) draws from a standard Exponential(1),
# a long-tailed distribution.
d = -np.log(np.random.rand(1000))
print(np.median(d))

# +
# %matplotlib inline
import matplotlib.pyplot as plt

# Histogram of 1000 independent medians of a long tailed distribution (like house prices).
meds = [np.median(-np.log(np.random.rand(1000))) for _ in range(1000)]
plt.figure(figsize=(10, 10))
plt.hist(meds)

# +
# A single distribution of 1000 datapoints.
# (A stray extraction artifact previously fused this comment with the
# assignment below; they are separate statements.)
data = -np.log(np.random.rand(1000))

# 1000 bootstrap samples with replacement, then a histogram of their medians.
meds = [np.median(np.random.choice(data, 1000)) for _ in range(1000)]
plt.figure(figsize=(10, 10))
plt.hist(meds)
# -
notebooks/lecture_3/DTE-LECTURE-3-BOOTSTRAP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # 1. Tensor Operations
# * This notebook aims to collect codes for tensor operations.
#

# + pycharm={"name": "#%%\n"}
import torch

# + pycharm={"name": "#%%\n"}
# An uninitialized 5x3 tensor — contents are whatever happened to be in memory.
x = torch.empty(5, 3)
print(x)

# + pycharm={"name": "#%%\n"}
# A 5x3 tensor of uniform random values in [0, 1).
x = torch.rand(5, 3)
print(x)

# + pycharm={"name": "#%%\n"}
# A 5x3 tensor of zeros with integer (long) dtype.
x = torch.zeros(5, 3, dtype=torch.long)
print(x)

# + pycharm={"name": "#%%\n"}
# Build a tensor directly from a Python list.
x = torch.tensor([5.5, 3])
print(x)

# + pycharm={"name": "#%%\n"}
# Re-use an existing tensor: the new_* methods inherit properties
# (device, dtype) from the receiver unless explicitly overridden.
x = x.new_ones(5, 3, dtype=torch.double)  # new_*
print(x)

# randn_like keeps the shape but overrides the dtype here.
x = torch.randn_like(x, dtype=torch.float)
print(x)

# + pycharm={"name": "#%%\n"}
# Query the tensor's shape.
print(x.size())

# + pycharm={"name": "#%%\n"}
# Addition: the operator form and the functional form are equivalent.
y = torch.rand(5, 3)
print(x + y)
print(torch.add(x, y))

# + pycharm={"name": "#%%\n"}
# Methods ending in an underscore mutate their receiver in place.
y.add_(x)
print(y)

# + pycharm={"name": "#%%\n"}
# torch.view reshapes without copying; -1 lets one dimension be inferred.
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # size -1 deduced from other dim.
print(x.size(), y.size(), z.size())

# + pycharm={"name": "#%%\n"}
# .item() extracts the Python scalar from a one-element tensor.
x = torch.randn(1)
print(x)
print(x.item())

# + pycharm={"name": "#%%\n"}
# NumPy interop: from_numpy shares memory with the source array,
# so the in-place np.add on `a` is visible through `b` as well.
import numpy as np

a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)

# + pycharm={"name": "#%%\n"}
# CUDA
# is_available checks whether a CUDA device is usable;
# ``torch.device`` moves tensors between devices.
if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device)
    x = x.to(device)
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))

# + pycharm={"name": "#%%\n"}
0_tensor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp datasets
# -

# # datasets
#
# This contains the definitions for all the datasets used for the experiments.
#
# > API details.

# +
#export
from torch.utils.data import Dataset
from PIL import Image
import json
from collections import namedtuple
from pathlib import Path

# Lightweight record pairing an image filename with its list of caption token lists.
annotations = namedtuple('Annotations', ['image_id', 'sentences'])


class Flickr8k(Dataset):
    """for flickr 8k dataset."""

    def __init__(self, img_dir, ann_file, split='train', transform=None, target_transform=None):
        """
        Args:
            img_dir (str): The directory that contains the Flickr images.
            ann_file (str): The file that contains the annotations for the images
                (Karpathy-style JSON with an 'images' list).
            split ['train', 'val', 'test']: This decides which partition to load.
            transform: Transforms for image.
            target_transform: transforms for sentences.
        """
        self.img_dir = Path(img_dir)
        assert split in ['train', 'test', 'val']
        self.split = split
        self.transform = transform
        self.target_transform = target_transform
        self.annotations = list()

        # (start, end) indices when splitting the json file.
        # NOTE(review): assumes the JSON lists images in train/val/test order —
        # the per-image assert below double-checks this.
        split_ranges = {'train': (0, 6000), 'val': (6000, 7000), 'test': (7000, 8000)}
        m, n = split_ranges[split]

        # Use a distinct name for the file handle so the `ann_file` argument
        # is not shadowed.
        with open(ann_file, 'r') as fp:
            ann_json = json.load(fp)

        for image in ann_json['images'][m:n]:
            image_id = image['filename']
            sentences_list = [sentence['tokens'] for sentence in image['sentences']]
            self.annotations.append(annotations(image_id, sentences_list))
            # Guard: the slice boundaries must agree with the JSON's own split labels.
            assert image['split'] == self.split

        print('loading %s complete' % (self.split))

    def __len__(self):
        """Number of annotated images in the selected split."""
        return len(self.annotations)

    def __getitem__(self, index):
        """Return (image_id, image, captions) for the given index."""
        img_id = self.annotations[index].image_id
        img = Image.open(self.img_dir / img_id).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)

        # Captions
        target = self.annotations[index].sentences
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img_id, img, target
# -

flickr8k_dir = '/home/jithin/datasets/imageCaptioning/flicker8k/Flicker8k_Dataset'
captions_file = '/home/jithin/datasets/imageCaptioning/captions/dataset_flickr8k.json'

dataset = Flickr8k(flickr8k_dir, captions_file, split='val')

len(dataset)

dataset[0]
nbs/01_Datasets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import squidpy as sq import scanpy as sc import skimage import numpy as np import os import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import time # load data img = sq.datasets.visium_hne_image_crop() adata = sq.datasets.visium_hne_adata_crop() # define workflow def time_image_workflow(img, adata, n_jobs=1): start = time.time() #sq.im.process(img, method="smooth", sigma=2) sq.im.segment(img, layer="image", method="watershed", thresh=None, n_jobs=n_jobs, size=2000) sq.im.calculate_image_features(adata,img,layer="image",key_added='features', features=['summary', 'histogram'], n_jobs=n_jobs, spot_scale=1, scale=1.0, mask_circle=True) end = time.time() return end - start res = [] # + # calculate execution time for different datasets dataset = 'fluo_crop' img = sq.datasets.visium_fluo_image_crop() adata = sq.datasets.visium_fluo_adata_crop() for run in range(3): for n_jobs in [1,2,3,4]: duration = time_image_workflow(img, adata, n_jobs=n_jobs) res_dict = { 'dataset': dataset, 'n_pixels': img['image'].size, 'shape': str(img['image'].shape), 'n_jobs': n_jobs, 'run': run, 'time': duration, } res.append(res_dict) # - dataset = 'hne_crop' img = sq.datasets.visium_hne_image_crop() adata = sq.datasets.visium_hne_adata_crop() for run in range(3): for n_jobs in [1,2,3,4]: duration = time_image_workflow(img, adata, n_jobs=n_jobs) res_dict = { 'dataset': dataset, 'n_pixels': img['image'].size, 'shape': str(img['image'].shape), 'n_jobs': n_jobs, 'run': run, 'time': duration, } res.append(res_dict) dataset = 'hne' img = sq.datasets.visium_hne_image() adata = sq.datasets.visium_hne_adata() for run in range(3): for n_jobs in [1,2,3,4]: duration = time_image_workflow(img, adata, n_jobs=n_jobs) res_dict = { 'dataset': dataset, 'n_pixels': 
img['image'].size, 'shape': str(img['image'].shape), 'n_jobs': n_jobs, 'run': run, 'time': duration, } res.append(res_dict) df = pd.DataFrame(res) df df.to_csv('figures/feature_extraction_benchmark.csv') # # Make figure # + jupyter={"outputs_hidden": true} df = pd.read_csv('figures/feature_extraction_benchmark.csv', index_col=0) df # - df_grouped = df.groupby(['dataset','n_jobs']).mean().reset_index(drop=False) df_grouped df_grouped.loc[df_grouped['dataset'] == 'fluo_crop', 'dataset'] = 'fluo small ($16\cdot10^7$ px)' df_grouped.loc[df_grouped['dataset'] == 'hne', 'dataset'] = 'H&E large ($40\cdot10^7$ px)' df_grouped.loc[df_grouped['dataset'] == 'hne_crop', 'dataset'] = 'H&E small ($4\cdot10^7$ px)' fig, ax = plt.subplots(1,1, figsize=(5,3), dpi=180, tight_layout=True) sns.lineplot(data=df_grouped, hue='dataset', x='n_jobs', y='time', marker='o', ax=ax) ax.set_xticks([1,2,3,4]) ax.set_ylabel("runtime [s]") ax.set_xlabel("#tasks") ax.get_legend().set_title(None) plt.grid() _ = ax.set_title("runtime of feature extraction workflow") plt.savefig('figures/benchmark_image.png')
notebooks/supp_figures/benchmarks/benchmark_image.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # YAML component place and route # # # We can define a place and route component by a netlist in YAML format # + attributes={"classes": [], "id": "", "n": "5"} import pp yaml = """ instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 """ c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) # - c.instances c.instances['mmi_long'].x = 100 pp.show(c) pp.plotgds(c) # + import pp yaml = """ instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: x: 100 y: 100 """ c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) # + import pp yaml = """ instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: x: 100 y: 100 routes: mmi_short,E1: mmi_long,W0 """ c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) # - # You can **rotate** and instance specifying the angle in degrees # + import pp yaml = """ instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: rotation: 180 x: 100 y: 100 routes: mmi_short,E1: mmi_long,E0 """ c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) # - # You can also define ports for the component # + import pp yaml = """ instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: rotation: 180 x: 100 y: 100 routes: mmi_short,E1: mmi_long,E0 ports: E0: mmi_short,W0 W0: mmi_long,W0 """ c 
= pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) # - r = c.routes['mmi_short,E1:mmi_long,E0'] r r.parent.length c.instances c.routes pp.write_gds(c, add_ports_to_all_cells=True)
notebooks/10_YAML_component.ipynb
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -------------------------------------- # # This notebook is similar in functionality to [this python script](https://github.com/amygdala/tensorflow-workshop/blob/master/workshop_sections/mnist_series/mnist_tflearn.py), and is used with [this README](https://github.com/amygdala/tensorflow-workshop/blob/master/workshop_sections/mnist_series/02_README_mnist_tflearn.md). It shows how to use TensorFlow's high-level apis, in `contrib.tflearn`, to easily build a classifier with multiple hidden layers. # # First, do some imports and set some variables: # + from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import time import numpy import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data # comment out for less info during the training runs. tf.logging.set_verbosity(tf.logging.INFO) # + DATA_DIR = "/tmp/MNIST_data" # read in data, downloading first as necessary DATA_SETS = input_data.read_data_sets(DATA_DIR) # define a utility function for generating a new directory in which to save # model information, so multiple training runs don't stomp on each other. 
def getNewPath(name=""): base="/tmp/tfmodels/mnist_tflearn" logpath = os.path.join(base, name + "_" + str(int(time.time()))) print("Logging to {}".format(logpath)) return logpath # - # Next create an input function, using the `tf.train.shuffle_batch` function to take care of the batching and shuffling of the input data. # + BATCH_SIZE = 40 # call with generate_input_fn(DATA_SETS.train) or generate_input_fn(DATA_SETS.test) def generate_input_fn(dataset, batch_size=BATCH_SIZE): def _input_fn(): X = tf.constant(dataset.images) Y = tf.constant(dataset.labels.astype(numpy.int64)) image_batch, label_batch = tf.train.shuffle_batch([X,Y], batch_size=batch_size, capacity=8*batch_size, min_after_dequeue=4*batch_size, enqueue_many=True ) return {'pixels': image_batch} , label_batch return _input_fn # - # We'll first define a function that adds a LinearClassifier and runs its `fit()` method, which will train the model. Note that we didn't need to explicitly define a model graph or a training loop ourselves. # # Once we've trained the model, we run the `evaluate()` method, which uses the trained model. To do this, it loads the most recent checkpointed model info available. The model checkpoint(s) will be generated during the training process. def define_and_run_linear_classifier(num_steps, logdir, batch_size=BATCH_SIZE): """Run a linear classifier.""" feature_columns = [tf.contrib.layers.real_valued_column("pixels", dimension=784)] classifier = tf.contrib.learn.LinearClassifier( feature_columns=feature_columns, n_classes=10, model_dir=logdir ) classifier.fit(input_fn=generate_input_fn(DATA_SETS.train, batch_size=batch_size), steps=num_steps) print("Finished training.") # Evaluate accuracy. accuracy_score = classifier.evaluate(input_fn=generate_input_fn(DATA_SETS.test, batch_size), steps=100)['accuracy'] print('Linear Classifier Accuracy: {0:f}'.format(accuracy_score)) # Next, add a function that defines a `DNNClassifier`, and runs its `fit()` method, which will train the model. 
Again note that we didn't need to explicitly define a model graph or a training loop ourselves. # # Then after we've trained the model, we run the classifier's `evaluate()` method, which uses the trained model. def define_and_run_dnn_classifier(num_steps, logdir, lr=.1, batch_size=40): """Run a DNN classifier.""" feature_columns = [tf.contrib.layers.real_valued_column("pixels", dimension=784)] classifier = tf.contrib.learn.DNNClassifier( feature_columns=feature_columns, n_classes=10, hidden_units=[128, 32], optimizer=tf.train.ProximalAdagradOptimizer(learning_rate=lr), model_dir=logdir ) # After you've done a training run with optimizer learning rate 0.1, # change it to 0.5 and run the training again. Use TensorBoard to take # a look at the difference. You can see both runs by pointing it to the # parent model directory, which by default is: # # tensorboard --logdir=/tmp/tfmodels/mnist_tflearn classifier.fit(input_fn=generate_input_fn(DATA_SETS.train, batch_size=batch_size), steps=num_steps) print("Finished running the deep training via the fit() method") accuracy_score = classifier.evaluate(input_fn=generate_input_fn(DATA_SETS.test, batch_size=batch_size), steps=100)['accuracy'] print('DNN Classifier Accuracy: {0:f}'.format(accuracy_score)) # Now we can call the functions that define and train our classifiers. Let's start with the LinearClassifier, which won't be very accurate. # print("Running Linear classifier ...") define_and_run_linear_classifier(num_steps=400, logdir=getNewPath("linear"), batch_size=40) # With 400 steps and a batch size of 40, we see accuracy of approx 87% # Now, let's run the DNN Classifier. First, let's try it with a .1 learning rate. print("Running DNN classifier with .1 learning rate...") classifier = define_and_run_dnn_classifier(2000, getNewPath("deep01"), lr=.1) # With 2000 steps and a batch size of 40, we see accuracy of approx 95% # Now, let's run it with a .5 learning rate. 
# Run the DNN classifier with a .5 learning rate (the message previously
# said ".1", which did not match the lr=.5 actually passed below).
print("Running DNN classifier with .5 learning rate...")
classifier = define_and_run_dnn_classifier(2000, getNewPath("deep05"), lr=.5)

# With 2000 steps and a batch size of 40, we see accuracy of approx 91%, though sometimes it does not converge at all.

# To compare your results, start up TensorBoard as follows in a new terminal window. (If you get a 'not found' error, make sure you've activated your virtual environment in that new window):
#
# ```sh
# $ tensorboard --logdir=/tmp/tfmodels/mnist_tflearn
# ```
workshop_sections/mnist_series/mnist_tflearn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Robertson kinetics demo: a classic stiff 3-species reaction system solved
# with chempy + pyodesys (CVODE integrator), plus interactive exploration.
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from ipywidgets import interact
import matplotlib.pyplot as plt
from chempy import Reaction, Substance, ReactionSystem
from chempy.kinetics.ode import get_odesys
from chempy.kinetics.analysis import plot_reaction_contributions
from chempy.printing.tables import UnimolecularTable, BimolecularTable
from chempy.util.graph import rsys2graph
import sympy
sympy.init_printing()
# %matplotlib inline

# Substances; D is only used at the very end to demonstrate highlighting of
# a species that takes part in no reaction.
A, B, C, D = map(Substance, 'ABCD')
One = sympy.S.One  # exact SymPy 1 so rate constants stay symbolic/exact
reactions = r0, r1, r2 = [
    Reaction({'A'}, {'B'}, 4*One/100, name='R1: A cons.'),
    Reaction({'B', 'C'}, {'A', 'C'}, 10**(4*One), name='R2: A reform.'),
    Reaction({'B': 2}, {'B', 'C'}, 3*10**(7*One), name='R3: C form.')
]
rsys = ReactionSystem(reactions, (A, B, C))
rsys

# Tabular overviews of the reaction set; the assert documents that every
# reaction is covered by one of the two tables.
uni, not_uni = UnimolecularTable.from_ReactionSystem(rsys)
bi, not_bi = BimolecularTable.from_ReactionSystem(rsys)
assert not (not_uni & not_bi), "There are only uni- & bi-molecular reactions in this set"
uni
bi

# Render the reaction graph to a file and display it inline.
rsys2graph(rsys, 'robertson.png', save='.')
from IPython.display import Image; Image('robertson.png')

# Build the ODE system and inspect expressions / Jacobian.
odesys, extra = get_odesys(rsys, include_params=True)
odesys.exprs
odesys.get_jac()

# Integrate from pure A over 10 decades of time.
c0 = defaultdict(float, {'A': 1})
result = odesys.integrate(1e10, c0, integrator='cvode', nsteps=2000)
{k: v for k, v in result.info.items() if not k.startswith('internal')}
extra['rate_exprs_cb'](result.xout, result.yout)
result.plot(xscale='log', yscale='log')

# Per-reaction contributions to d[A]/dt and d[B]/dt, absolute and relative.
fig, axes = plt.subplots(2, 2, figsize=(14, 6))
plot_reaction_contributions(result, rsys, extra['rate_exprs_cb'], 'AB', axes=axes[0, :])
plot_reaction_contributions(result, rsys, extra['rate_exprs_cb'], 'AB', axes=axes[1, :],
                            relative=True, yscale='linear')
plt.tight_layout()

# We could also have parsed the reactions from a string:
str_massaction = """
A -> B; 'k1'
B + C -> A + C; 'k2'
2 B -> B + C; 'k3'
"""

rsys3 = ReactionSystem.from_string(str_massaction, substance_factory=lambda formula: Substance(formula))
rsys3.substance_names()

# include_params=False keeps k1..k3 as free parameters of the ODE system.
odesys3, extra3 = get_odesys(rsys3, include_params=False, lower_bounds=[0, 0, 0])
extra3['param_keys'], extra3['unique']
odesys3.exprs, odesys3.params, odesys3.names, odesys3.param_names


def integrate_and_plot(A0=1.0, B0=0.0, C0=0.0, lg_k1=-2, lg_k2=4, lg_k3=7, lg_tend=9):
    """Integrate rsys3 for the given initial concentrations and log10 rate
    constants / end-time, and plot the full trajectory plus an early-time
    zoom on B. Intended to be driven by ipywidgets.interact."""
    plt.figure(figsize=(14, 4))
    tout, yout, info = odesys3.integrate(
        10**lg_tend, {'A': A0, 'B': B0, 'C': C0},
        {'k1': 10**lg_k1, 'k2': 10**lg_k2, 'k3': 10**lg_k3},
        integrator='cvode', nsteps=3000)
    plt.subplot(1, 2, 1)
    odesys3.plot_result(xscale='log', yscale='log')
    plt.legend(loc='best')
    plt.subplot(1, 2, 2)
    plt.plot(tout[tout < .05], yout[tout < .05, odesys3.names.index('B')])
    # BUG FIX: was plt.legend('best') — the first positional argument of
    # legend() is the *labels* iterable, so 'best' produced labels
    # 'b','e','s','t'; the location must be passed as loc='best'.
    _ = plt.legend(loc='best')


interact(integrate_and_plot)  #, **kw)

# We could also have used SymPy to construct symbolic rates:
import sympy
rsys_sym = ReactionSystem.from_string("""
A -> B; sp.Symbol('k1')
B + C -> A + C; sp.Symbol('k2')
2 B -> B + C; sp.Symbol('k3')
""", rxn_parse_kwargs=dict(globals_={'sp': sympy}), substance_factory=lambda formula: Substance(formula))
odesys_sym, _ = get_odesys(rsys_sym, params=True)
for attr in 'exprs params names param_names'.split():
    print(getattr(odesys_sym, attr))

# For larger systems it is easy to loose track of what substances are actually
# playing a part, here the html tables can help (note the yellow background color):
rsys.substances['D'] = D
uni, not_uni = UnimolecularTable.from_ReactionSystem(rsys)
uni
bi, not_bi = BimolecularTable.from_ReactionSystem(rsys)
bi
examples/Robertson_kinetics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Preprocessing

# +
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf

# Import and read the charity_data.csv.
application_df = pd.read_csv("Resources/charity_data.csv")
application_df.head()
# -

# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
application_df = application_df.drop(['EIN','NAME'], axis=1)
application_df

# Determine the number of unique values in each column.
print(application_df.nunique())

# Look at APPLICATION_TYPE value counts for binning
print(application_df['APPLICATION_TYPE'].value_counts())

# +
# Choose a cutoff value and create a list of application types to be replaced
# use the variable name `application_types_to_replace`
application_types_to_replace = ["T9", "T13", "T12", "T2", "T25", "T14", "T15", "T29", "T17"]

# Bin the rare application types into a catch-all "Other" category.
for app in application_types_to_replace:
    application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,"Other")

# Check to make sure binning was successful
application_df['APPLICATION_TYPE'].value_counts()
# -

# +
# Look at CLASSIFICATION value counts for binning
class_values = application_df['CLASSIFICATION'].value_counts()
print(class_values)
# -

# You may find it helpful to look at CLASSIFICATION value counts >1
application_df['CLASSIFICATION'].value_counts().loc[lambda x: x>1]

# +
# Choose a cutoff value and create a list of classifications to be replaced
# use the variable name `classifications_to_replace`
classifications_to_replace = ['C7000', 'C1700', 'C4000', 'C5000', 'C1270', 'C2700', 'C2800',
                              'C7100', 'C1300', 'C1280', 'C1230', 'C1400', 'C7200', 'C2300',
                              'C1240', 'C8000', 'C7120', 'C1500', 'C6000', 'C1800', 'C1250',
                              'C8200', 'C1278', 'C1238', 'C1237', 'C1235', 'C7210', 'C2400',
                              'C1720', 'C4100', 'C1257', 'C1600', 'C1260', 'C0', 'C2710',
                              'C3200', 'C1256', 'C1246', 'C1234', 'C1267', 'C4120', 'C1820',
                              'C2150', 'C5200', 'C1370', 'C1283', 'C2600', 'C1732', 'C3700',
                              'C1728', 'C4200', 'C8210', 'C1580', 'C2561', 'C4500', 'C1248',
                              'C2170', 'C1245', 'C2380', 'C2570', 'C2500', 'C1236', 'C1900',
                              'C6100', 'C1570', 'C2190']

# Bin the rare classifications into "Other".
for cls in classifications_to_replace:
    application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,"Other")

# Check to make sure binning was successful
application_df['CLASSIFICATION'].value_counts()
# -

# +
# Convert categorical data to numeric with `pd.get_dummies`
dummies_df = pd.get_dummies(application_df, prefix_sep='',
                            columns=['APPLICATION_TYPE', 'AFFILIATION', 'CLASSIFICATION',
                                     'USE_CASE', 'ORGANIZATION', 'INCOME_AMT',
                                     'SPECIAL_CONSIDERATIONS'])
dummies_df.head()
# -

# +
# Split our preprocessed data into our features and target arrays
X = dummies_df.drop(['IS_SUCCESSFUL'], axis=1)
y = dummies_df['IS_SUCCESSFUL']

# Split the preprocessed data into a training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# -

# +
# Create a StandardScaler instances
scaler = StandardScaler()

# Fit the StandardScaler
X_scaler = scaler.fit(X_train)

# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# -

# ## Compile, Train and Evaluate the Model

# +
# Define the model - deep neural net, i.e., the number of input features and
# hidden nodes for each layer.
# Initialise the Sequential class
nn = tf.keras.models.Sequential()

# First hidden layer.
# BUG FIX: input_dim was len(X_train[0]) — X_train is a DataFrame, so [0]
# looks up a column labelled 0 (KeyError). The intent is the number of input
# features, i.e. the width of the scaled feature matrix.
nn.add(tf.keras.layers.Dense(units=44, input_dim=X_train_scaled.shape[1], activation="relu"))

# Second hidden layer
nn.add(tf.keras.layers.Dense(units=30, activation="relu"))

# Output layer (binary classification -> single sigmoid unit)
nn.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))

# Check the structure of the model
nn.summary()
# -

# Compile the model
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

# Train the model, checkpointing weights every 5 epochs.
# NOTE(review): `period=` is the legacy Keras argument (removed in newer
# tf.keras in favour of save_freq) — kept for compatibility with the Keras
# version this notebook was written against.
import keras
from keras.callbacks import Callback
mc = keras.callbacks.ModelCheckpoint('weights{epoch:08d}.h5', save_weights_only=True, period=5)
fit_model = nn.fit(X_train_scaled, y_train, callbacks=[mc], epochs=20)

# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")

# Export our model to HDF5 file
nn.save('AlphabetSoupCharity.h5')
.ipynb_checkpoints/Starter_Code-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # 词向量
# ## 词的向量表征
#
# one-hot vector,即每个词被表示是一个实数向量,长度为字典大小,每个维度对应一个字典里的每个词,该词的维度是1,其他都是0.只是该表示的缺点是每个词本身的信息量都太小。这种表示方式没法表示“皇上”跟“黄袍”的近似关系,因为这两个词对应的one-hot vectors之间的距离,无论是欧式距离还是余弦相似度都太远了。
#
# 这样词向量模型就出场了,通过词向量模型可以将one-hot vector映射到一个维度更低的实数向量(embedding vector),如“皇上”=[0.1,2.3,-2.1,...],“黄袍”=[0.2,3.1,-1.3,...]。这样的实数向量中,两个语义上相似的词对应的词向量就“更像”了,“皇上”和“黄袍”的对应词向量的余弦就不再是零了。
#
# ## 神经网络的词向量模型
# 词向量模型可以是概率模型、共生矩阵(co-occurrence matrix)模型或神经元网络模型。我们选用基于神经网络的词向量模型,该模型通过学习语义信息得到词向量,可以解决之前的概率模型或者共生矩阵模型的矩阵稀疏问题、矩阵维度太高、需要去停用词等问题。
#
# ## 训练词向量的模型
# 训练词向量的模型有三个:N-gram模型,CBOW模型和Skip-gram模型。
#
# ### N-gram neural model
# 在计算语言学中,n-gram是一种重要的文本表示方法,表示一个文本中连续的n个项。基于具体的应用场景,每一项可以是一个字母、单词或者音节。 n-gram模型也是统计语言模型中的一种重要方法,用n-gram训练语言模型时,一般用每个n-gram的历史n-1个词语组成的内容来预测第n个词。
#
# 这种条件概率建模语言模型,即一句话中的第t个词的概率和该句话的前t-1个词语相关。可实际上越远的词语其实对该词的影响越小,那么如果考虑一个n-gram,每个词只受前面n-1个词的影响。
#
# ### Continuous Bag of Words model(CBOW)
# CBOW模型通过一个词的上下文(各N个词)预测当前词。当N=2时,不考虑上下文词语的输入顺序,CBOW是用上下文词语的词向量的均值预测当前词的。即:
# context = (x_{-2} + x_{-1} + x_{+1} + x_{+2}) / 4。
#
# ### Skip-gram model
# CBOW的好处是对上下文词语的分布在词向量上平滑、去噪了,因此在小数据集上很有效。而在大数据集上,采用Skip-gram的方法更有效,因为Skip-gram的方法中,是用一个词预测其上下文,得到了当前词上下文的很多样本,因此可用于更大的数据集。
#
# # code
#
16paddle/word2v/word2vec.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="UrjQGgr5nUHC"
# <h1> Imports and Installs

# + id="S2WfmJiVTDpE"
# Install required packages.
# !pip install -q torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cu113.html
# !pip install -q torch-sparse -f https://pytorch-geometric.com/whl/torch-1.10.0+cu113.html
# !pip install -q git+https://github.com/rusty1s/pytorch_geometric.git

# + id="eGl9mcc0nOMP"
import matplotlib.pyplot as plt
import numpy as np

# Required imports for neural network
import torch.nn as nn
import torch
from torch.autograd import Variable
import random

# For GNNs
from torch.nn import Linear
from torch.nn import BatchNorm1d
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from torch_geometric.nn import GraphConv
from torch_geometric.nn import GraphNorm
from torch_geometric.nn import global_mean_pool
from torch_geometric.nn import global_max_pool
import torch.nn as nn

domain_type = "graph_classification"

# +
# !pip3 install higher
from higher import innerloop_ctx
import warnings

# The code includes extensive warnings when run so have used this to ignore them
warnings.filterwarnings("ignore")

# Set random seeds for reproducibility of results
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)

# set GPU or CPU depending on available hardware
# help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Available device: {device}")

# BUG FIX: the original compared a torch.device to the string "cuda:0",
# which is never equal, so the default-tensor-type branch never ran.
if device.type == "cuda":
    # set default so all tensors are on GPU, if available
    # help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

# + [markdown] id="T3KVOwFXFOY0"
# <h1> Data Loading and Generation
#
# Reptile for regression task using GNNs
#
# Some common GNN datasets are here:
# https://pytorch-geometric.readthedocs.io/en/latest/modules/datasets.html#torch_geometric.datasets.GNNBenchmarkDataset
#
# For classification we use the TUDataset --> ENZYMES

# + [markdown] id="jvvZSOiQe-Ne"
# ENZYMES has 6 classes
#
# According to Reptile paper notation
#
# C = Total number of classes in dataset (ENZYMES C=6)
# N = Number of selected classes (We will use C=N=6)
# K = Number of examples given to the model for each class (In the paper they use K+1 instead of K for some reason)
#
# 5-shot learning (5 updates of testing class)
#
# In general we train on N classes with K examples from each, then train on a single unseen example from a randomly selected class.

# + id="PIExsutGTQcB"
import torch
from torch_geometric.datasets import TUDataset
dataset = TUDataset(root='data/TUDataset', name='ENZYMES')

# + id="LxK2rDRNTSxd"
# This function is based on https://pytorch-geometric.readthedocs.io/en/latest/notes/colabs.html
# Function to display properties of the dataset (it is not necessary for anything)
def display_graph_dataset_properties(dataset):
    """Print summary statistics of a graph dataset and of its first graph."""
    print()
    print(f'Dataset: {dataset}:')
    print('====================')
    print(f'Number of graphs: {len(dataset)}')
    print(f'Number of features: {dataset.num_features}')
    print(f'Number of classes: {dataset.num_classes}')

    data = dataset[0]  # Get the first graph object.
    print()
    print('Look at a sample graph of the dataset')
    print(data)
    print('=============================================================')
    # Gather some statistics about the first graph.
    print(f'Number of nodes: {data.num_nodes}')
    print(f'Number of edges: {data.num_edges}')
    print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}')
    print(f'Has isolated nodes: {data.has_isolated_nodes()}')
    print(f'Has self-loops: {data.has_self_loops()}')
    print(f'Is undirected: {data.is_undirected()}')


# + colab={"base_uri": "https://localhost:8080/"} id="GC6T1VZPF9Ba" outputId="5240f583-e193-4858-c1cf-177c8bb781c9"
display_graph_dataset_properties(dataset)

# + id="lD_MRHC8T8Za"
# Separate the graphs into one list per class label (0..5).
dataset_list_0 = []
dataset_list_1 = []
dataset_list_2 = []
dataset_list_3 = []
dataset_list_4 = []
dataset_list_5 = []
_class_lists = (dataset_list_0, dataset_list_1, dataset_list_2,
                dataset_list_3, dataset_list_4, dataset_list_5)

# Loop over the dataset and separate the different classes into different lists
for i in range(len(dataset)):
    _class_lists[int(dataset[i].y[0].item())].append(dataset[i])

# + id="wwVU9xu9NCdg"
# Shuffle each per-class list before splitting.
random.shuffle(dataset_list_0)
random.shuffle(dataset_list_1)
random.shuffle(dataset_list_2)
random.shuffle(dataset_list_3)
random.shuffle(dataset_list_4)
random.shuffle(dataset_list_5)

# + id="a3X51uGHDvSV"
# Split into train and test
training_proportion = 0.99


def _train_slice(graphs):
    """Leading `training_proportion` fraction of one class list."""
    return graphs[:int(np.floor(len(graphs) * training_proportion))]


GRAPH_TRAIN = (_train_slice(dataset_list_0) + _train_slice(dataset_list_1)
               + _train_slice(dataset_list_2) + _train_slice(dataset_list_3)
               + _train_slice(dataset_list_4) + _train_slice(dataset_list_5))
# One held-out graph per class for testing.
GRAPH_TEST = [dataset_list_0[-1], dataset_list_1[-1], dataset_list_2[-1],
              dataset_list_3[-1], dataset_list_4[-1], dataset_list_5[-1]]
random.shuffle(GRAPH_TRAIN)
random.shuffle(GRAPH_TEST)


# + [markdown] id="cu4urLF7Q88A"
# <h1> Neural Network Model

# + id="R1B0YTz6ytyN"
class GNN(torch.nn.Module):
    """Three-layer GraphConv network with graph-norm, max pooling and a
    softmax head producing per-graph class probabilities."""

    def __init__(self, input_dim=3, hidden_dim=100, output_dim=6):
        super(GNN, self).__init__()
        # Hidden Layers
        self.hidden1 = GraphConv(input_dim, hidden_dim)
        self.hidden2 = GraphConv(hidden_dim, hidden_dim)
        self.hidden3 = GraphConv(hidden_dim, output_dim)
        self.norm = GraphNorm(hidden_dim)
        # Activation Function
        self.relu = nn.ReLU()
        # FIX: explicit dim — implicit-dim nn.Softmax() is deprecated; after
        # global pooling the tensor is [batch, classes], so softmax over dim 1.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input_x, edge_index, batch):
        # Standard forward
        x = self.hidden1(input_x, edge_index)
        x = self.norm(x)
        x = self.relu(x)
        x = self.hidden2(x, edge_index)
        x = self.norm(x)
        x = self.relu(x)
        x = self.hidden3(x, edge_index)
        x = self.relu(x)
        # Global max pool across batches (despite the original comment saying
        # "mean", global_max_pool is what is used)
        x = global_max_pool(x, batch)
        x = self.softmax(x)
        return x


# + [markdown] id="G-ExWACxQ3mt"
# <h1> Helper functions

# + id="1zyNHFXdOnug"
# Binary cross-entropy against the one-hot label is used as the loss.
criterion = nn.BCELoss()


def transform_label(label):
    """One-hot encode a 0..5 class-index tensor as a [1, 6] float tensor."""
    return torch.eye(6)[int(label.item())][None, :].float()


def copy_existing_model(model):
    """Return a fresh GNN initialised with `model`'s current parameters."""
    new_model = GNN()
    new_model.load_state_dict(model.state_dict())
    return new_model


def initialization_to_store_meta_losses():
    """(Re)create the global lists that accumulate meta train/test losses."""
    global store_train_loss_meta
    store_train_loss_meta = []
    global store_test_loss_meta
    store_test_loss_meta = []


def test_set_validation(model, new_model, graph, lr_inner, k, store_test_loss_meta):
    """Fine-tune a copy of `model` on `graph` and record the resulting loss.
    Does not affect the main algorithm — evaluation only."""
    new_model = training(model, graph, lr_inner, k)
    loss = evaluation(new_model, graph)
    store_test_loss_meta.append(loss)


def train_set_evaluation(new_model, graph, store_train_loss_meta):
    """Record the task-adapted model's loss on its training graph."""
    loss = evaluation(new_model, graph)
    store_train_loss_meta.append(loss)


def print_losses(epoch, store_train_loss_meta, store_test_loss_meta, printing_step=1000):
    """Log running average meta losses every `printing_step` epochs."""
    if epoch % printing_step == 0:
        # FIX: corrected "Epochh" typo in the log message.
        print(f'Epoch : {epoch}, Average Train Meta Loss : {np.mean(store_train_loss_meta)}, Average Test Meta Loss : {np.mean(store_test_loss_meta)}')


# This is based on the paper update rule: we calculate the difference between
# parameters and then this is used by the optimizer, rather than doing the
# update by hand.
def reptile_parameter_update(model, new_model):
    """Write (theta - theta_adapted) into model.grad for the meta-optimizer."""
    zip_models = zip(model.parameters(), new_model.parameters())
    for parameter, new_parameter in zip_models:
        if parameter.grad is None:
            # FIX: was torch.tensor(torch.zeros_like(parameter)) — wrapping an
            # existing tensor in torch.tensor() makes a needless (and warned-
            # against) copy; zeros_like already returns a fresh tensor.
            parameter.grad = torch.zeros_like(parameter)
        # Here we are adding the gradient that will later be used by the optimizer
        parameter.grad.data.add_(parameter.data - new_parameter.data)


# Define commands in order needed for the metaupdate
# Note that if we change the order it doesn't behave the same
def metaoptimizer_update(metaoptimizer):
    """Apply the accumulated Reptile pseudo-gradient, then clear it."""
    metaoptimizer.step()
    metaoptimizer.zero_grad()


def metaupdate(model, new_model, metaoptimizer):
    """Combine gradient computation and optimizer step into one meta-update."""
    reptile_parameter_update(model, new_model)
    metaoptimizer_update(metaoptimizer)


def evaluation(new_model, graph, item=True):
    """BCE loss of `new_model` on one graph. item=True detaches to a float
    (for logging); item=False keeps the tensor (for backprop)."""
    prediction = new_model(graph.x, graph.edge_index, graph.batch)
    if item == True:
        loss = criterion(prediction, transform_label(graph.y)).item()
    else:
        loss = criterion(prediction, transform_label(graph.y))
    return loss


def training(model, graph, lr_k, k):
    """Return a copy of `model` fine-tuned on `graph` with k SGD steps."""
    new_model = copy_existing_model(model)
    koptimizer = torch.optim.SGD(new_model.parameters(), lr=lr_k)
    # Update the model multiple times, note that k>1 (do not confuse k with K)
    for i in range(k):
        koptimizer.zero_grad()
        loss = evaluation(new_model, graph, item=False)
        loss.backward()
        koptimizer.step()
    return new_model


# for MAML -- see MAML cell for additional citations around structure inspiration
def task_specific_train_and_eval(model, T_i, inner_loop_optimizer, N=1):
    """Differentiably adapt `model` to task T_i for N inner steps (via the
    `higher` library) and return the post-adaptation loss, the per-step
    losses, and the adapted functional model."""
    # Description of the loop formulation from https://higher.readthedocs.io/en/latest/toplevel.html
    with innerloop_ctx(model, inner_loop_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
        per_step_loss = []
        for _ in range(N):
            task_specifc_loss = evaluation(fmodel, T_i, item=False)
            # Step through the inner gradient
            diffopt.step(task_specifc_loss)
            per_step_loss.append(task_specifc_loss.item())
        held_out_task_specific_loss = evaluation(fmodel, T_i, item=False)
        return held_out_task_specific_loss, per_step_loss, fmodel


# + [markdown] id="-4Ps8P2IRCmF"
# <h1> Reptile

# + id="8ogpg_DHizlC"
# Define important variables
epochs = int(1e5)    # number of epochs
lr_meta = 0.001      # Learning rate for meta model (outer loop)
printing_step = 100  # how many epochs should we wait to print the loss
lr_k = 0.0005        # Internal learning rate
k = 5                # Number of internal updates for each task

# Initializations
initialization_to_store_meta_losses()
model = GNN()
metaoptimizer = torch.optim.Adam(model.parameters(), lr=lr_meta)
"https://localhost:8080/", "height": 1000} id="-4-zQWWKFt3s" outputId="004e43ec-cdd5-4b66-c5f8-5e8061dacec9" # Training loop for epoch in range(epochs): # Sample a sine graph (Task from training data) graph = random.sample(GRAPH_TRAIN, 1) # Update model predefined number of times based on k new_model = training(model, graph[0], lr_k, k) # Evalaute the loss for the training data train_set_evaluation(new_model,graph[0],store_train_loss_meta) #Meta-update --> Get gradient for meta loop and update metaupdate(model,new_model,metaoptimizer) # Evalaute the loss for the test data # Note that we need to sample the graph from the test data graph = random.sample(GRAPH_TEST, 1) test_set_validation(model,new_model,graph[0],lr_k,k,store_test_loss_meta) # Print losses every 'printing_step' epochs print_losses(epoch,store_train_loss_meta,store_test_loss_meta,printing_step) # + [markdown] id="bQjoz6FYctJM" # <h1> Few Shot learning with new meta-model # + [markdown] id="m-SPUG5Bfpe9" # The model performs good few shot learning # + colab={"base_uri": "https://localhost:8080/", "height": 314} id="GY84TNs8JXVH" outputId="c2d4bdd0-ad29-4d70-88c9-55dd93844c5c" graph = GRAPH_TEST[0] k_shot_updates = 5 initialization_to_store_meta_losses() for shots in range(k_shot_updates): new_model = training(model, graph, lr_k, shots) train_set_evaluation(new_model,graph,store_train_loss_meta) plt.plot(store_train_loss_meta,label = 'Loss') plt.legend() plt.xlabel('k shots') # - # ## Second-Order MAML # + ''' Handling computation graphs and second-order backprop help and partial inspiration from: - https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2 - https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3 - https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf - https://www.youtube.com/watch?v=IkDw22a8BDE - 
https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2 - https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3 - https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html - https://higher.readthedocs.io/en/latest/toplevel.html Neural network configuration and helper class functions copied directly from -https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters Sometimes called "inner" and "outer" loop, respectively Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code) ''' #Instantiate the model network model = GNN() # move to the current device (GPU or CPU) # help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu model.to(device) T = 25 # num tasks N = 1 # number of inner loop steps (notation from: https://www.bayeswatch.com/2018/11/30/HTYM/) num_samples = 10 # number of samples to draw from the task lr_task_specific = 0.01 # task specific learning rate lr_meta = 0.001 # meta-update learning rate num_epochs = 10000#70001 #Number of iterations for outer loop printing_step = 5000 # show log of loss every x epochs #Used to store the validation losses metaLosses = [] metaValLosses = [] #Meta-optimizer for the outer loop meta_optimizer = torch.optim.Adam(model.parameters(), lr = lr_meta) #Inner optimizer, we were doing this by hand previously inner_loop_optimizer = torch.optim.SGD(model.parameters(), lr = lr_task_specific) for epoch in range(num_epochs): # store loss over all tasks to then do a large meta-level update of initial params # idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE meta_loss = None #Sample a new wave each time train_tasks = random.sample(GRAPH_TRAIN, T)#[SineWaveTask_multi() for _ in range(T)] #Loop through all of the tasks for i, 
T_i in enumerate(train_tasks): held_out_task_specific_loss, _, _ = task_specific_train_and_eval(model, T_i, inner_loop_optimizer, N) if meta_loss is None: meta_loss = held_out_task_specific_loss else: meta_loss += held_out_task_specific_loss meta_optimizer.zero_grad() meta_loss /= T meta_loss.backward() meta_optimizer.step() metaLosses.append(meta_loss.item()) # validation val_task = GRAPH_TEST[0] # our own addition -- can vary val_loss, _, _ = task_specific_train_and_eval(model, val_task, inner_loop_optimizer, N) metaValLosses.append(val_loss.item()) if epoch % printing_step == 0: print("Iter = ", epoch, " Current Loss", np.mean(metaLosses), " Val Loss: ", np.mean(metaValLosses)) # saving model help from: # https://pytorch.org/tutorials/beginner/saving_loading_models.html torch.save(model.state_dict(), f"{domain_type}_maml_model.pt") # idea: if very good "off-the-bat", likely because of a strong inductive bias??? # - # ## Few Shot learning with new meta-model (MAML) # + # run k-shot to check how rapidly we are able to adapt to unseen tasks # starting w/ a single unseen task test_task = GRAPH_TEST[0] num_k_shots = 10 # use model returned from earlier optimization inner_loop_optimizer = torch.optim.SGD(model.parameters(), lr = lr_task_specific) held_out_task_specific_loss, metaTrainLosses, _ = task_specific_train_and_eval(model, test_task, inner_loop_optimizer, num_k_shots) plt.plot(metaTrainLosses) plt.xlim([0,num_k_shots]) # -
archive/Domain_GraphNN_Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # This notebook provides the functionality to build, train, and test a CNN for predicting mosquito age, grouped age, species, and status.
#
# ## Structure:
# * Import packages to be used.
# * Load mosquito data.
# * Define functions for plotting, visualisation, and logging.
# * Define a function to build the CNN.
# * Define a function to train the CNN.
# * Main section to organise data, define the CNN, and call the building and training of the CNN.

# +
import pylab as pl
import datetime
import pandas as pd
import itertools
from itertools import cycle
import pickle
import random as rn
import os
from time import time
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib
font = {'weight' : 'normal', 'size' : 18}
matplotlib.rc('font', **font)

from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import normalize, StandardScaler
from sklearn.utils import resample

import tensorflow as tf
import keras
from keras.models import Sequential, Model
from keras import layers, metrics
from keras.layers import Input
from keras.layers.merge import Concatenate
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.models import model_from_json, load_model
from keras.regularizers import *
from keras.callbacks import CSVLogger
from keras import backend as K
# -

# rand_seed = np.random.randint(low=0, high=100)
rand_seed = 16
print(rand_seed)

# +
os.environ['PYTHONHASHSEED'] = '0'

## The below is necessary for starting Numpy generated random numbers in a well-defined initial state.
np.random.seed(rand_seed)

## The below is necessary for starting core Python generated random numbers in a well-defined state.
rn.seed(12345)

## Force TensorFlow to use single thread.
## Multiple threads are a potential source of
## non-reproducible results.
## For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
# session_conf = tf.ConfigProto(device_count = {'GPU':0}, intra_op_parallelism_threads=4)
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# session_conf = tf.ConfigProto(device_count = {'GPU':0})
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
#session_conf.gpu_options.per_process_gpu_memory_fraction = 0.5

## The below tf.set_random_seed() will make random number generation
## in the TensorFlow backend have a well-defined initial state.
## For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)

# Cap the GPU memory this TF1 session may claim so other jobs can share the card.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.35)
sess = tf.Session(graph=tf.get_default_graph(), config=tf.ConfigProto(gpu_options=gpu_options))
K.set_session(sess)
# -

# ## Load the data
#
# The data file is created using Loco Mosquito:
# https://github.com/magonji/MIMI-project/blob/master/Loco%20mosquito%204.0.ipynb
#
# ### The data file has headings: Species - Status - RearCnd - Age - Country - Frequencies

# +
class data_loader_class():
    """Load the mosquito spectra table and build train/test splits.

    Age groups: 0 = age <= 4 days, 1 = 5..10 days, 2 = > 10 days.
    Lab (TL) and field (TF) mosquitoes are sampled into the training set only;
    validation-field (VF, status 'UN') mosquitoes are split between training
    (a `valid_perc` fraction) and the held-out test set.

    Exposes: X, y_age_groups, y_species (train) and X_vf, y_age_groups_vf,
    y_species_vf (test) as numpy arrays.
    """

    def __init__(self, valid_perc):
        df = pd.read_csv("/home/josh/Documents/Mosquito_Project/MIMI-Analysis/Data/mosquitoes_country_LM_5_0.dat", '\t')
        # Derive the 3-way age grouping from the raw age in days.
        df['AgeGroup'] = np.where(df['Age'] > 10, 2, np.where(df['Age'] > 4, 1, 0))

        # Validation-field mosquitoes (unknown status) are handled separately.
        df_vf = df[df['RearCnd'] == 'VF']
        df_vf = df_vf[df_vf['Status'] == 'UN']
        df = df[df['RearCnd'] != 'VF']
        df = df[df['Status'] != 'UN']

        # Lab-reared (TL) groups by country and species.
        # NOTE(review): the original also inspected Country 'S' lab groups via
        # groupby counts only — they were never added to any split, so they are
        # omitted here.
        df_l = df[df['RearCnd'] == 'TL']
        df_l_t = df_l[df_l['Country'] == 'T']
        df_l_t_a = df_l_t[df_l_t['Species'] == 'AA']
        df_l_t_g = df_l_t[df_l_t['Species'] == 'AG']
        df_l_b = df_l[df_l['Country'] == 'B']
        df_l_b_g = df_l_b[df_l_b['Species'] == 'AG']
        df_l_b_c = df_l_b[df_l_b['Species'] == 'AC']

        # Field (TF) groups; there are no 'AG' TF mosquitoes from Country 'T'.
        df_f = df[df['RearCnd'] == 'TF']
        df_f_t = df_f[df_f['Country'] == 'T']
        df_f_t_a = df_f_t[df_f_t['Species'] == 'AA']
        df_f_b = df_f[df_f['Country'] == 'B']
        df_f_b_g = df_f_b[df_f_b['Species'] == 'AG']
        df_f_b_c = df_f_b[df_f_b['Species'] == 'AC']

        # Validation-field (VF) groups.
        df_vf_t = df_vf[df_vf['Country'] == 'T']
        df_vf_t_a = df_vf_t[df_vf_t['Species'] == 'AA']
        df_vf_t_g = df_vf_t[df_vf_t['Species'] == 'AG']
        df_vf_b = df_vf[df_vf['Country'] == 'B']
        df_vf_b_g = df_vf_b[df_vf_b['Species'] == 'AG']
        df_vf_b_c = df_vf_b[df_vf_b['Species'] == 'AC']

        # Per-age-group count of VF mosquitoes to move into training:
        # valid_perc of each country's VF pool, split over 2 species
        # and 3 age groups.
        VF_size_t = len(df_vf_t)
        VF_size_b = len(df_vf_b)
        val_group_size_t = int((((valid_perc * VF_size_t) / 2) / 3))
        val_group_size_b = int((((valid_perc * VF_size_b) / 2) / 3))

        # Assemble the splits group by group, preserving the original
        # concatenation order (lab 400/age, field 300/age, then VF).
        train_parts = []
        test_parts = []
        for df_group in (df_l_t_a, df_l_t_g, df_l_b_g, df_l_b_c):
            tr, _ = self._split_age_groups(df_group, 400)
            train_parts += tr
        for df_group in (df_f_t_a, df_f_b_g, df_f_b_c):
            tr, _ = self._split_age_groups(df_group, 300)
            train_parts += tr
        for df_group, label in ((df_vf_t_a, 'Tanzania Arabiensis'),
                                (df_vf_t_g, 'Tanzania Gambie')):
            tr, te = self._split_age_groups(df_group, val_group_size_t, warn_label=label)
            train_parts += tr
            test_parts += te
        for df_group, label in ((df_vf_b_g, 'Bobo Gambie'),
                                (df_vf_b_c, 'Bobo Colluzzi')):
            tr, te = self._split_age_groups(df_group, val_group_size_b, warn_label=label)
            train_parts += tr
            test_parts += te
        df_train = pd.concat(train_parts)
        df_test = pd.concat(test_parts)

        print('Percentage of field mosquitoes inc {} - Num mosquitoes {} / {}'.format(valid_perc*100, len(df_train[df_train['RearCnd']=='VF']), len(df_vf)))
        print('Total number of mosquitoes in the Train set {}'.format(len(df_train)))

        # Columns 6:-1 hold the spectral frequencies (features).
        X = df_train.iloc[:, 6:-1]
        self.X = np.asarray(X)
        self.y_age_groups = np.asarray(df_train["AgeGroup"])
        self.y_species = np.asarray(df_train["Species"])

        X_vf = df_test.iloc[:, 6:-1]
        self.X_vf = np.asarray(X_vf)
        self.y_age_groups_vf = np.asarray(df_test["AgeGroup"])
        self.y_species_vf = np.asarray(df_test["Species"])

    @staticmethod
    def _split_age_groups(df_group, n_take, warn_label=None):
        """Per age group (0..2), shuffle with the fixed seed 42 and take the
        first `n_take` rows for training; the remainder goes to test.

        Returns (train_frames, test_frames) as lists of DataFrames in age
        order. When `warn_label` is given (VF groups), warn if a group is
        smaller than the requested sample size.
        """
        train_frames, test_frames = [], []
        for age in range(3):
            df_age = df_group[df_group['AgeGroup'] == age]
            order = np.arange(len(df_age))
            if warn_label is not None and len(order) < n_take:
                print('Warning {} VF group {} smaller than amount requested'.format(warn_label, age))
            # Re-seed before every shuffle to reproduce the original sampling.
            np.random.seed(42)
            np.random.shuffle(order)
            train_frames.append(df_age.iloc[order[:n_take]])
            test_frames.append(df_age.iloc[order[n_take:]])
        return train_frames, test_frames
# -


def data_loader(valid_perc):
    """Factory wrapper kept for backward compatibility with existing callers."""
    return data_loader_class(valid_perc)

# ## Function used to create a new folder for the CNN outputs.
# Useful to stop forgetting to name a new folder when trying out a new model varient and overwriting a days training.
def build_folder(fold, to_build=False):
    """Check for (and optionally create) the output directory *fold*.

    Parameters
    ----------
    fold : str
        Path of the directory to check / create.
    to_build : bool
        When True, create the directory; when False, only report if it
        is missing.

    Raises
    ------
    NameError
        If the directory already exists and ``to_build`` is True — this
        deliberately guards against overwriting a previous training run.
    """
    if not os.path.isdir(fold):
        if to_build:
            os.mkdir(fold)
        else:
            print('Directory does not exists, not creating directory!')
    else:
        if to_build:
            raise NameError('Directory already exists, cannot be created!')


# ## Function for plotting confusion matrices
# This normalizes the confusion matrix and ensures neat plotting for all outputs.

def plot_confusion_matrix(cm, classes, output, save_path, model_name, fold,
                          normalize=True,
                          title='Confusion matrix',
                          cmap=None,
                          printout=False):
    """Render a confusion matrix as a colour-mapped image and save it as PNG.

    The file is written to
    ``<save_path>Confusion_Matrix_<model_name>_<fold>_<output[1:]>.png``
    (the first character of *output* — the leading 'x' tag such as in
    'xAgeGroup' — is stripped for the filename).

    Parameters
    ----------
    cm : array-like, shape (n_classes, n_classes)
        Raw confusion matrix (e.g. from ``sklearn.metrics.confusion_matrix``).
    classes : sequence of str
        Tick labels for both axes, in matrix order.
    output, save_path, model_name, fold : str
        Components of the output filename.
    normalize : bool
        Normalize each row to sum to 1 before plotting.
    title : str
        Kept for backward compatibility; currently unused (the title call
        was commented out in the original).
    cmap : matplotlib colormap or None
        Colormap for the image.  Defaults to ``plt.cm.Blues``; resolved
        lazily so matplotlib is only required at call time (the original
        default ``cmap=plt.cm.Blues`` was evaluated at import time).
    printout : bool
        When True, print the (normalized) matrix to stdout.
    """
    if cmap is None:
        cmap = plt.cm.Blues

    font = {'weight': 'normal', 'size': 18}
    matplotlib.rc('font', **font)

    if normalize:
        # Row-normalize: each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        if printout:
            print("Normalized confusion matrix")
    else:
        if printout:
            print('Confusion matrix, without normalization')
    if printout:
        print(cm)

    plt.figure(figsize=(8, 8))
    plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate every cell, flipping text colour for readability on dark cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout(pad=2)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig((save_path+"Confusion_Matrix_"+model_name+"_"+fold+"_"+output[1:]+".png"))
    plt.close()

# ## Function used for visualizing outputs
# This splits the output data into the four categories before plotting the confusion matrices.
## for visualizing losses and metrics once the neural network fold is trained
def visualize(histories, save_path, model_name, fold, classes, outputs, predicted, true):
    """Plot one confusion matrix per output head of the model.

    *predicted* and *true* are parallel lists (one entry per output head)
    of one-hot / probability arrays; *classes* and *outputs* give the tick
    labels and output name for each head.  The original loop variables
    shadowed the ``classes`` and ``outputs`` parameters — renamed here.
    """
    for head_pred, head_true, head_classes, head_output in zip(predicted, true, classes, outputs):
        # Collapse one-hot / softmax vectors to class indices.
        classes_pred = np.argmax(head_pred, axis=-1)
        classes_true = np.argmax(head_true, axis=-1)
        cnf_matrix = confusion_matrix(classes_true, classes_pred)
        plot_confusion_matrix(cnf_matrix, head_classes, head_output, save_path, model_name, fold)

# ## Data logging

## for logging data associated with the model
def log_data(log, name, fold, save_path):
    """Write a numeric log array to ``<save_path><name>_<fold>_log.txt``.

    Uses a context manager so the file handle is closed even if
    ``np.savetxt`` raises (the original opened/closed it manually).
    """
    with open((save_path+name+'_'+str(fold)+'_log.txt'), 'w') as f:
        np.savetxt(f, log)


def save_obj(obj, name, savedir_main):
    """Pickle *obj* to ``<savedir_main><name>.pkl`` (highest protocol)."""
    with open(savedir_main + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


def load_obj(name, savedir_main):
    """Load and return the object previously written by :func:`save_obj`."""
    with open(savedir_main + name + '.pkl', 'rb') as f:
        return pickle.load(f)

# ## Function for graphing the training data
# This function creates tidy graphs of loss and accuracy as the models are training.

def graph_history(history, model_name, model_ver_num, fold, save_path):
    """Save one PNG per training metric, overlaying its validation curve.

    *history* is a Keras ``History`` object; every key in
    ``history.history`` that is not prefixed with ``val`` is plotted
    together with its ``val_``-prefixed counterpart.
    """
    font = {'weight': 'normal', 'size': 18}
    matplotlib.rc('font', **font)

    not_validation = [k for k in history.history.keys() if k[0:3] != "val"]
    for metric in not_validation:
        plt.figure(figsize=(15, 7))
        plt.plot(history.history[metric], label=metric)
        plt.plot(history.history["val_"+metric], label="val_"+metric)
        plt.legend()
        plt.xlabel("epoch")
        plt.ylabel(metric)
        plt.savefig(save_path + model_name+"_"+str(model_ver_num)+"_"+str(fold)+"_"+metric)
        plt.close()

# ## Function to create the CNN
# This function takes as an input a list of dictionaries. Each element in the list is a new hidden layer in the model.
# For each layer the dictionary defines the layer to be used.
#
# ### Available options are:
# Convolutional Layer:
# * type = 'c'
# * filter = optional number of filters
# * kernel = optional size of the filters
# * stride = optional size of stride to take between filters
# * pooling = optional width of the max pooling
# * {'type':'c', 'filter':16, 'kernel':5, 'stride':1, 'pooling':2}
#
# dense layer:
# * type = 'd'
# * width = option width of the layer
# * {'type':'d', 'width':500}

# +
def create_models(model_shape, input_layer):
    """Build and compile the multi-output 1-D CNN described by *model_shape*.

    Parameters
    ----------
    model_shape : list of dict
        One dict per hidden layer; see the markdown above for the
        'c' (Conv1D) and 'd' (Dense) layer options.
    input_layer : int
        Length of the 1-D input vector fed to the network.

    Returns
    -------
    A compiled Keras ``Model`` with two softmax heads:
    ``age_group`` (3 classes) and ``species`` (3 classes).
    """
    regConst = 0.02
    sgd = keras.optimizers.SGD(lr=0.003, decay=1e-5, momentum=0.9,
                               nesterov=True, clipnorm=1.)
    cce = 'categorical_crossentropy'

    # Bug fix: the original ignored the ``input_layer`` argument and read
    # the global ``input_layer_dim`` instead; callers pass that same value
    # positionally, so using the parameter is equivalent and removes the
    # hidden global dependency.
    input_vec = Input(name='input', shape=(input_layer, 1))

    xd = input_vec
    for i, spec in enumerate(model_shape):
        if spec['type'] == 'c':
            xd = Conv1D(name=('Conv'+str(i+1)),
                        filters=spec['filter'],
                        kernel_size=spec['kernel'],
                        strides=spec['stride'],
                        activation='relu',
                        kernel_regularizer=l2(regConst),
                        kernel_initializer='he_normal')(xd)
            xd = BatchNormalization(name=('batchnorm_'+str(i+1)))(xd)
            xd = MaxPooling1D(pool_size=(spec['pooling']))(xd)
        elif spec['type'] == 'd':
            if i == 0:
                # First hidden layer is dense: Dense -> BatchNorm -> Dropout.
                xd = Dense(name=('d'+str(i+1)), units=spec['width'],
                           activation='relu',
                           kernel_regularizer=l2(regConst),
                           kernel_initializer='he_normal')(xd)
                xd = BatchNormalization(name=('batchnorm_'+str(i+1)))(xd)
                xd = Dropout(name=('dout'+str(i+1)), rate=0.5)(xd)
            else:
                if model_shape[i-1]['type'] == 'c':
                    # Conv -> Dense transition: flatten then regularise.
                    # NOTE(review): in the collapsed original the Dropout
                    # appears grouped with Flatten inside this branch —
                    # confirm it was not meant for every dense layer.
                    xd = Flatten()(xd)
                    xd = Dropout(name=('dout'+str(i+1)), rate=0.5)(xd)
                xd = Dense(name=('d'+str(i+1)), units=spec['width'],
                           activation='relu',
                           kernel_regularizer=l2(regConst),
                           kernel_initializer='he_normal')(xd)
                xd = BatchNormalization(name=('batchnorm_'+str(i+1)))(xd)

    # Two classification heads (the 17-way raw-age head was disabled in the
    # original and is not recreated here).
    xAgeGroup = Dense(name='age_group', units=3,
                      activation='softmax',
                      kernel_regularizer=l2(regConst),
                      kernel_initializer='he_normal')(xd)
    xSpecies = Dense(name='species', units=3,
                     activation='softmax',
                     kernel_regularizer=l2(regConst),
                     kernel_initializer='he_normal')(xd)

    # Fragile ``locals()[name]`` lookup replaced by an explicit list.
    outputs = [xAgeGroup, xSpecies]

    model = Model(inputs=input_vec, outputs=outputs)
    model.compile(loss=cce, metrics=['acc'], optimizer=sgd)
    print(model.metrics)
    return model
# -

# ## Function to train the model
#
# This function will split the data into training and validation and call the create models function. This function returns the model and training history.
# +
def train_models(model_to_test, save_path, SelectFreqs=False):
    """Train the CNN with 10-fold cross validation and keep the best fold.

    Parameters
    ----------
    model_to_test : dict
        Experiment description; each value is wrapped in a single-element
        list (``model_shape``, ``model_name``, ``input_layer_dim``,
        ``labels``, ``features``, ...).
    save_path : str
        Directory the per-fold outputs (``out/``) and the final
        ``Best_Model.h5`` are written to.
    SelectFreqs : bool
        Kept for interface compatibility; currently unused.

    Returns
    -------
    (out_model, out_history)
        The model/history of the fold with the highest combined
        validation accuracy.
    """
    out_path = save_path+'out/'
    build_folder(out_path, True)

    # Unpack the experiment description.  Several entries (fold, classes,
    # outputs, compile_*) are part of the dict contract but unused here.
    model_shape = model_to_test["model_shape"][0]
    model_name = model_to_test["model_name"][0]
    # Bug fix: read the input dimension from the experiment dict (the caller
    # supplies it) instead of relying on the global ``input_layer_dim``.
    input_layer_dim = model_to_test["input_layer_dim"][0]
    model_ver_num = model_to_test["model_ver_num"][0]
    fold = model_to_test["fold"][0]
    label = model_to_test["labels"][0]
    features = model_to_test["features"][0]
    classes = model_to_test["classes"][0]
    outputs = model_to_test["outputs"][0]
    compile_loss = model_to_test["compile_loss"][0]
    compile_metrics = model_to_test["compile_metrics"][0]

    ## Kfold training
    # ``rand_seed`` is a module-level constant defined elsewhere in the file.
    seed = rand_seed
    kfold = KFold(n_splits=10, shuffle=True, random_state=seed)

    ## Split data into test and train
    model_ver_num = 0
    cv_scores = []
    best_score = 0
    out_model = None
    out_history = None
    for train_index, val_index in kfold.split(features):
        print('Fold {} Running'.format(model_ver_num))
        X_train, X_val = features[train_index], features[val_index]
        # ``label`` is a list of per-head target arrays; index each head.
        y_train = [y[train_index] for y in label]
        y_val = [y[val_index] for y in label]

        model = create_models(model_shape, input_layer_dim)
        if model_ver_num == 0:
            model.summary()

        history = model.fit(x=X_train,
                            y=y_train,
                            batch_size=128*16,
                            verbose=0,
                            epochs=8000,
                            validation_data=(X_val, y_val),
                            callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss',
                                                                     patience=400, verbose=0, mode='auto'),
                                       CSVLogger(out_path+model_name+"_"+str(model_ver_num)+'.csv',
                                                 append=True, separator=';')])
        scores = model.evaluate(X_val, y_val)
        model.save((out_path+model_name+"_"+str(model_ver_num)+'_Model.h5'))
        graph_history(history, model_name, model_ver_num, 0, out_path)

        # scores[3] + scores[4] combines the two head accuracies
        # (NOTE(review): index meaning assumed from the two-head model's
        # metric ordering — confirm against ``model.metrics_names``).
        fold_score = scores[3] + scores[4]
        # Bug fix: ``best_score`` was never updated in the original, so the
        # *last* fold with a positive score was kept rather than the best.
        if out_model is None or fold_score > best_score:
            best_score = fold_score
            out_model = model
            out_history = history
        model_ver_num += 1

    out_model.save((save_path+'Best_Model.h5'))
    graph_history(out_history, 'Best_Model', 0, 0, save_path)

    return out_model, out_history
# -

# ## Main section
#
# Functionality:
# * Organises the data into a format of lists of data, classes, labels.
# * Define the CNN to be built.
# * Define the KFold validation to be used.
# * Build a folder to output data into.
# * Standardize and organise data into training/testing.
# * Call the model training.
# * Organize outputs and call visualization for plotting and graphing.
#
# ## 4 layers

# +
## Name a folder for the outputs to go into
outdir = "Results_Paper/"
build_folder(outdir, False)

# Accumulators: per-run validation metrics keyed by Keras metric name,
# plus the raw training histories.
val_results = {'loss':[], 'age_group_loss':[], 'species_loss':[], 'age_group_acc':[], 'species_acc':[]}
histories = []

start_time = time()

# Only the 0% field-data inclusion run is active; the swept values are kept
# commented out from the original study.
# for valid_inc_perc in [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]:
for valid_inc_perc in [0]:
    savedir = (outdir+"TEST/")
    build_folder(savedir, True)

    data_extract = data_loader(valid_inc_perc)

    ## Input CNN Size
    input_layer_dim = len(data_extract.X[0])

    ## Transform Data
    # Wrap each scalar label in a list so MultiLabelBinarizer one-hot encodes it.
    y_age_groups_list = [[age] for age in data_extract.y_age_groups]
    y_species_list = [[species] for species in data_extract.y_species]

    age_groups = MultiLabelBinarizer().fit_transform(np.array(y_age_groups_list))
    age_group_classes = ["1-4", "5-10", "11-17"]
    species = MultiLabelBinarizer().fit_transform(np.array(y_species_list))
    species_classes = list(np.unique(y_species_list))

    y_age_groups_list_vf = [[age] for age in data_extract.y_age_groups_vf]
    y_species_list_vf = [[species] for species in data_extract.y_species_vf]

    age_groups_vf = MultiLabelBinarizer().fit_transform(np.array(y_age_groups_list_vf))
    species_vf = MultiLabelBinarizer().fit_transform(np.array(y_species_list_vf))

    ## Labels
    labels_default, classes_default, outputs_default = [age_groups, species], [age_group_classes, species_classes], ['xAgeGroup', 'xSpecies']
    labels_default_vf, classes_default_vf, outputs_default_vf = [age_groups_vf, species_vf], [age_group_classes, species_classes], ['xAgeGroup', 'xSpecies']

    ## Declare and train the model
    model_size = [{'type':'c', 'filter':16, 'kernel':8, 'stride':1, 'pooling':1},
                  {'type':'c', 'filter':16, 'kernel':8, 'stride':2, 'pooling':2},
                  {'type':'c', 'filter':16, 'kernel':3, 'stride':1, 'pooling':1},
                  {'type':'c', 'filter':16, 'kernel':6, 'stride':2, 'pooling':2},
                  # {'type':'c', 'filter':16, 'kernel':5, 'stride':1, 'pooling':2},
                  {'type':'d', 'width':400}]

    ## Name the model
    model_name = ('Valid_Inc_'+str(valid_inc_perc))

    ## Scale train, test
    # The scaler is fitted on train+validation-field spectra jointly
    # (NOTE(review): fitting on the test portion leaks scaling statistics —
    # presumably intentional here, but worth confirming).
    scl = StandardScaler()
    features_scl = scl.fit(X=np.vstack((data_extract.X, data_extract.X_vf)))
    X_train = features_scl.transform(X=data_extract.X)
    X_test = features_scl.transform(X=data_extract.X_vf)

    ## Split data into test and train
    y_train, y_test = list(map(lambda y:y, labels_default)), list(map(lambda y:y, labels_default_vf))

    # Conv1D expects a trailing channel axis.
    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    model_to_test = {
        "model_shape" : [model_size], # defines the hidden layers of the model
        "model_name" : [model_name],
        "input_layer_dim" : [input_layer_dim], # size of input layer
        "model_ver_num" : [0],
        "fold" : [0], # kf.split number on
        "labels" : [y_train],
        "features" : [X_train],
        "classes" : [classes_default],
        "outputs" : [outputs_default],
        "compile_loss": [{'age': 'categorical_crossentropy'}],
        "compile_metrics" :[{'age': 'accuracy'}]
    }

    ## Call function to train all the models from the dictionary
    model, history = train_models(model_to_test, savedir)
    histories.append(history)

    predicted_labels = list([] for i in range(len(y_train)))
    true_labels = list([] for i in range(len(y_train)))

    # Evaluate on the held-out field spectra and log every metric.
    y_predicted = model.predict(X_test)
    temp_eval = model.evaluate(X_test, y_test)
    for metric, res in zip(model.metrics_names, temp_eval):
        val_results[metric].append(res)
    print(val_results)

    # Collect per-head predictions / truths into plain lists for plotting.
    predicted_labels = [x+[y] for x,y in zip(predicted_labels,y_predicted)]
    true_labels = [x+[y] for x,y in zip(true_labels,y_test)]
    predicted_labels = [predicted_labels[i][0].tolist() for i in range(len(predicted_labels))]
    true_labels = [true_labels[i][0].tolist() for i in range(len(true_labels))]

    ## Visualize the results
    visualize(histories, savedir, model_name, "0", classes_default, outputs_default, predicted_labels, true_labels)

    # Clear the Keras session, otherwise it will keep adding new
    # models to the same TensorFlow graph each time we create
    # a model with a different set of hyper-parameters.
    K.clear_session()

    # Delete the Keras model with these hyper-parameters from memory.
    del model

end_time = time()
print('Run time : {} s'.format(end_time-start_time))
print('Run time : {} m'.format((end_time-start_time)/60))
print('Run time : {} h'.format((end_time-start_time)/3600))

# save_obj(val_results, 'Validation_Results_Dict_4_layers', savedir_main)

# +
font = {'weight' : 'normal', 'size' : 18}
matplotlib.rc('font', **font)

## Size / Accuracy Age / Accuracy Species
# Hard-coded results from earlier runs: parameter counts (thousands) and
# accuracies (%) for the 2/3/4/5-layer variants.
# size_results = [[200952, 395256, 641878, 1594506],[33,32.7,34.7,32.7],[38.3,38,37.7,38.7]]
size_results = [[201, 395, 642, 1595],[33,32.7,34.7,32.7],[38.3,38,37.7,38.7]]

plt.figure(figsize=(15,7))
plt.scatter(size_results[0], size_results[1], s=80, c='b', label='Grouped Age Accuracy')
plt.scatter(size_results[0], size_results[2], s=80, c='g', label='Species Accuracy')
# Linear trend lines through each accuracy series.
z = np.polyfit(size_results[0], size_results[1], 1)
p = np.poly1d(z)
plt.plot(size_results[0],p(size_results[0]),"b--")
z = np.polyfit(size_results[0], size_results[2], 1)
p = np.poly1d(z)
plt.plot(size_results[0],p(size_results[0]),"g--")
plt.legend()
plt.xlabel("Number of Trainable Parameters (Thousands)")
plt.ylabel('Accuracy')
plt.xlim([150,1650])
plt.savefig('Results_Paper/Molel_Size_Study/Model_Size_Study_Results.png')
plt.close()
# -

# ## Re-doing model size reduction plot using accuracy predicted by the model not the confusion matrices.
#
# Using confusion matrices doesn't work as it assumes a balanced number of each class in the test set where in reality this is not the case and each accuracy needs scaling by percentage of total test samples

# +
# Re-evaluate the saved 2/3/4/5-layer models directly, recording the
# trainable-parameter count and Keras-reported metrics for each.
val_results = {'params':[], 'loss':[], 'age_group_loss':[], 'species_loss':[], 'age_group_acc':[], 'species_acc':[]}

for model_name in ['Train_5_layers/Valid_Inc_0_Model.h5', 'Train_4_layers/Best_Model.h5', 'Train_3_layers/Valid_Inc_0_Model.h5', 'Train_2_layers/Valid_Inc_0_Model.h5']:
    model = load_model(('/home/josh/Documents/Mosquito_Project/MIMI-Analysis/Models/Neural_Networks/CNN/Paper/Results_Paper/Molel_Size_Study/'+model_name))

    valid_inc_perc =0
    data_extract = data_loader(valid_inc_perc)

    ## Input CNN Size
    input_layer_dim = len(data_extract.X[0])

    ## Transform Data
    # Same label preparation as the training section above.
    y_age_groups_list = [[age] for age in data_extract.y_age_groups]
    y_species_list = [[species] for species in data_extract.y_species]

    age_groups = MultiLabelBinarizer().fit_transform(np.array(y_age_groups_list))
    age_group_classes = ["1-4", "5-10", "11-17"]
    species = MultiLabelBinarizer().fit_transform(np.array(y_species_list))
    species_classes = list(np.unique(y_species_list))

    y_age_groups_list_vf = [[age] for age in data_extract.y_age_groups_vf]
    y_species_list_vf = [[species] for species in data_extract.y_species_vf]

    age_groups_vf = MultiLabelBinarizer().fit_transform(np.array(y_age_groups_list_vf))
    species_vf = MultiLabelBinarizer().fit_transform(np.array(y_species_list_vf))

    ## Labels
    labels_default, classes_default, outputs_default = [age_groups, species], [age_group_classes, species_classes], ['xAgeGroup', 'xSpecies']
    labels_default_vf, classes_default_vf, outputs_default_vf = [age_groups_vf, species_vf], [age_group_classes, species_classes], ['xAgeGroup', 'xSpecies']

    ## Scale train, test
    scl = StandardScaler()
    features_scl = scl.fit(X=np.vstack((data_extract.X, data_extract.X_vf)))
    X_train = features_scl.transform(X=data_extract.X)
    X_test = features_scl.transform(X=data_extract.X_vf)

    ## Split data into test and train
    y_train, y_test = list(map(lambda y:y, labels_default)), list(map(lambda y:y, labels_default_vf))

    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    predicted_labels = list([] for i in range(len(y_train)))
    true_labels = list([] for i in range(len(y_train)))

    # Total trainable parameter count of the loaded model.
    trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
    val_results['params'].append(trainable_count)

    y_predicted = model.predict(X_test)
    temp_eval = model.evaluate(X_test, y_test)
    # Metrics are appended under the names Keras reports
    # (loss, age_group_loss, species_loss, age_group_acc, species_acc).
    for metric, res in zip(model.metrics_names, temp_eval):
        val_results[metric].append(res)
    print(val_results)
# -

# +
font = {'weight' : 'normal', 'size' : 18}
matplotlib.rc('font', **font)

## Size / Accuracy Age / Accuracy Species
# Legacy hard-coded numbers kept for reference; the plot below uses the
# freshly measured ``val_results`` instead.
# size_results = [[200952, 395256, 641878, 1594506],[33,32.7,34.7,32.7],[38.3,38,37.7,38.7]]
size_results = [[201, 395, 642, 1595],[33,32.7,34.7,32.7],[38.3,38,37.7,38.7]]

plt.figure(figsize=(15,7))
# Parameters plotted in thousands, accuracies in percent.
plt.scatter([val/1000 for val in val_results['params']], [val*100 for val in val_results['age_group_acc']], s=80, c='b', label='Grouped Age Accuracy')
plt.scatter([val/1000 for val in val_results['params']], [val*100 for val in val_results['species_acc']], s=80, c='g', label='Species Accuracy')
z = np.polyfit([val/1000 for val in val_results['params']], [val*100 for val in val_results['age_group_acc']], 1)
p = np.poly1d(z)
plt.plot([val/1000 for val in val_results['params']],p([val/1000 for val in val_results['params']]),"b--")
z = np.polyfit([val/1000 for val in val_results['params']], [val*100 for val in val_results['species_acc']], 1)
p = np.poly1d(z)
plt.plot([val/1000 for val in val_results['params']],p([val/1000 for val in val_results['params']]),"g--")
plt.legend()
plt.xlabel("Number of Trainable Parameters (Thousands)")
plt.ylabel('Accuracy')
plt.xlim([150,1650])
plt.savefig('Results_Paper/Molel_Size_Study/Model_Size_Study_Results_v2.png')
plt.close()
# -
code/CNN/CNN-model/DL-MIRS-CNN-Lab-Only-Size-Reduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Count touching neighbors
#
# In this notebook we visualize which cells in a tissue are neighbors by drawing a mesh. Furthermore, we can change the intensity of the mesh so that it corresponds to the distance between the centroids of the cells.

import pyclesperanto_prototype as cle
import numpy as np
import pandas as pd

# # Test data
# Let's generate some tissue-like structure consisting of cells which typically have approximately 6 neighbors.

# +
# Synthetic label image: roughly hexagonally packed cells with positional jitter.
cells = cle.artificial_tissue_2d(
    delta_x=48,
    delta_y=32,
    random_sigma_x=7,
    random_sigma_y=7,
    width=250,
    height=250)

cle.imshow(cells, labels=True)
# -

# ## Mesh between neighboring cells
#
# Before counting neighbors, we should visualize neighbor-relationships. We can do this by drawing a mesh between centroids of touching neighbor cells.

# +
mesh = cle.draw_mesh_between_touching_labels(cells)

cle.imshow(mesh)
# -

# We can also combine both visualizations in one image. Note, these images should not be used any further for quantitative analysis. It just serves visualization purposes.
#
# ## Centroid connections and cell borders
# A common way for visualizing tissues in this context is by drawing cell-borders and the centroid mesh in different colours.

# +
# Mesh weighted 2x so it stands out from the (value 1) cell edges in the jet colormap.
visualization = mesh * 2 + cle.detect_label_edges(cells)

cle.imshow(visualization, color_map='jet')
# -

# ## Analyze and visualize number of touching neighbors
# We can also count the touching neighbors and visualize the result as parametric image in colours.

# +
neighbor_count_image = cle.touching_neighbor_count_map(cells)

cle.imshow(neighbor_count_image, color_map='jet', colorbar=True, min_display_intensity=0)
# -

# Note, the numbers along the image border may not be accurate. Hence, we should exclude the corresponding cells from the further analysis.

# +
cells_ex_border = cle.exclude_labels_on_edges(cells)

cle.imshow(cells_ex_border, labels=True)
# -

# After correcting the label image, we can also correct the parametric image.

# +
# Mask the count map to the surviving (non-border) labels.
neighbor_count_image_ex_border = neighbor_count_image * (cells_ex_border != 0)

cle.imshow(neighbor_count_image_ex_border, color_map='jet', colorbar=True, min_display_intensity=0)
# -

# Now, we can measure the number of neighbors. We can either just read those numbers and put them in a list ...

cle.read_intensities_from_map(cells_ex_border, neighbor_count_image_ex_border)

# ... we can also read these values together with all other statistics and put them in a pandas DataFrame.

# +
statistics = cle.statistics_of_labelled_pixels(neighbor_count_image_ex_border, cells_ex_border)
table = pd.DataFrame(statistics)

# rename a column
table = table.rename(columns={"mean_intensity": "number_of_neighbors"})

# only filter out a subset of all columns; only what we care
table = table[["label", "number_of_neighbors", "centroid_x", "centroid_y"]]

table
# -

# # Exercise
# Analyze a larger field of view with more cells and vary the parameters `random_sigma_x` and `random_sigma_y` of the `artificial_tissue_2d` function. Use a touching-neighbor-count map to count the number of touching neighbors before and after applying a median filter to the map.
docs/25_neighborhood_relationships_between_cells/05_count_touching_neighbors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from PIL import Image import numpy as np import matplotlib.pyplot as plt import cv2 # - pil=Image.open("../images/thor.jpg") pil arr=np.array(pil) red=arr.copy() red[:,:,[0,1]]=255 plt.imshow(red) frame=cv2.imread("../images/thor.jpg") plt.imshow(frame) rgb= cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
Projects/ Image Processing with NumPy and OpenCV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Computer Science Fundamentals and Programming # # # **Essential Skills Needed by Machine Learning Engineers** # 1. Computer Science Fundamentals # 2. Probability and Statistics # 3. Data Modeling and Evaluation # 4. Applying Machine Learning Algorithms and Libraries # 5. Software Engineering and System Design # ### 1. Computer Science Fundamentals and Programming # # **Topics:** # - Data structures: Lists, stacks, queues, strings, hash maps, vectors, matrices, classes & objects, trees, graphs, etc. # - Algorithms: Recursion, searching, sorting, optimization, dynamic programming, etc. # - Computability and complexity: P vs. NP, NP-complete problems, big-O notation, approximate algorithms, etc. # - Computer architecture: Memory, cache, bandwidth, threads & processes, deadlocks, etc. # # **Questions:** # - How would you check if a linked list has cycles? # - Given two elements in a binary search tree, find their lowest common ancestor. # - Write a function to sort a given stack. # - What is the time complexity of any comparison-based sorting algorithm? Can you prove it? # - How will you find the shortest path from one node to another in a weighted graph? What if some weights are negative? # - Find all palindromic substrings in a given string. # # For all such questions, you should be able to: # 1. Reason about the time and space complexity of your approach (usually in big-O notation). # 2. Try to aim for the lowest complexity possible. # # **Extensive practice** is the only way to familiarize yourself with the different classes of problems so that you can quickly converge on an efficient solution. # # Coding/interview prep platforms like InterviewBit, LeetCode and Pramp are highly beneficial for this purpose. # ### 2. 
Probability and Statistics # # **Topics:** # - Basic probability: Conditional probability, Bayes rule, likelihood, independence, etc. # - Probabilistic models: Bayes Nets, Markov Decision Processes, Hidden Markov Models, etc. # - Statistical measures: Mean, median, mode, variance, population parameters vs. sample statistics etc. # - Proximity and error metrics: Cosine similarity, mean-squared error, Manhattan and Euclidean distance, log-loss, etc. # - Distributions and random sampling: Uniform, normal, binomial, Poisson, etc. # - Analysis methods: ANOVA, hypothesis testing, factor analysis, etc. # # **Questions:** # - The mean heights of men and women in a population were calculated to be M and W. What is the mean height of the total population? # - A recent poll revealed that a third of the cars in Italy are Ferraris, and that half of those are red. If you spot a red car approaching from a distance, what is the likelihood that it is a Ferrari? # - **You’re trying to find the best place to put in an advertisement banner on your website. You can make the size (thickness) small, medium or large, and choose vertical position top, middle or bottom. At least how many total page visits (n) and ad clicks (m) do you need to say with 95% confidence that one of the designs performs better than all the other possibilities?** # - The time period between consecutive eruptions of the Old Faithful geyser in Yellowstone National Park is found to have the following distribution. How would you describe/characterize it? What can you infer from it? # # ![4.4%20Probability_and_statistics.JPG](attachment:4.4%20Probability_and_statistics.JPG) # # Remember: many machine learning algorithms have a basis in probability and statistics. Conceptual clarity of these fundamentals is extremely important, but at the same time, you must be able to relate abstract formulae with real-world quantities. # ### 3. 
Data Modeling and Evaluation # # **Topics:** # - Data preprocessing: Munging/wrangling, transforming, aggregating, etc. # - Pattern recognition: Correlations, clusters, trends, outliers & anomalies, etc. # - Dimensionality reduction: Eigenvectors, Principal Component Analysis, etc. # - Prediction: Classification, regression, sequence prediction, etc.; suitable error/accuracy metrics. # - Evaluation: Training-testing split, sequential vs. randomized cross-validation, etc. # # **Questions:** # - A dairy farmer is trying to understand the factors that affect milk production of her cattle. She has been keeping logs of the daily temperature (usually 30-40°C), humidity (60-90%), feed consumption (2000-2500 kgs), and milk produced (500-1000 liters). # # - How would you begin processing the data in order to model it, with the goal of predicting liters of milk produced in a day? # - What kind of machine learning problem is this? # # - Your company is building a facial expression coding system, which needs to take input images from a standard HD 1920x1080 pixel webcam, and continuously tell whether the user is in one of the following states: neutral, happy, sad, angry or afraid. When the user’s face is not visible in the camera frame, it should indicate a special state: none. # # - What class of machine learning problems does this belong to? # - If each pixel is made up of 3 values (for red, green, blue channels), what is the raw input data complexity (no. of dimensions) for processing each image? Is there a way to reduce the no. of dimensions? # - How would you encode the output of the system? Explain why. # # - Climate data collected over the past century reveals a cyclic pattern of rising and falling temperatures. How would you model this data (a sequence of average annual temperature values) to predict the average temperature over the next 5 years? 
# - Your job at an online news service is to collect text reports from around the world, and present each story as a single article with content aggregated from different sources. How would you go about designing such a system? What ML techniques would you apply? # ### 4. Applying Machine Learning Algorithms and Libraries # # **Topics:** # - Models: Parametric vs. nonparametric, decision tree, nearest neighbor, neural net, support vector machine, ensemble of multiple models, etc. # - Learning procedure: Linear regression, gradient descent, genetic algorithms, bagging, boosting, and other model-specific methods; regularization, hyperparameter tuning, etc. # - Tradeoffs and gotchas: Relative advantages and disadvantages, bias and variance, overfitting and underfitting, vanishing/exploding gradients, missing data, data leakage, etc. # # **Questions:** # - You’re trying to classify images of cats and dogs. Plotting the images in some transformed 2-dimensional feature space reveals the following pattern (on the left). In some other space, images of dogs and wolves show a different pattern (on the right). # - What model would you use to classify cats vs. dogs, and what would you use for dogs vs. wolves? Why? # ![4.4.2%20ML_Algo.JPG](attachment:4.4.2%20ML_Algo.JPG) # # - I’m trying to fit a single hidden layer neural network to a given dataset, and I find that the weights are oscillating a lot over training iterations (varying wildly, often swinging between positive and negative values). What parameter do I need to tune to address this issue? **Kernel functions** # - When training a support vector machine, what value are you optimizing for? # - Lasso regression uses the L1-norm of coefficients as a penalty term, while ridge regression uses the L2-norm. Which of these regularization methods is more likely to result in sparse solutions, where one or more coefficients are exactly zero? 
**Lasso Regression** — the L1 penalty can shrink coefficients to exactly zero (sparse solutions); ridge (L2) only shrinks them toward zero
# - When training a 10-layer neural net using backpropagation, I find that the weights for the top 3 layers are not changing at all! The next few layers (4-6) are changing, but very slowly. What’s going on and how do I fix this? **Vanishing gradients** (possibly also dead ReLU neurons); use **Leaky ReLU**, better weight initialization, or normalization layers
# - I’ve found some data about wheat-growing regions in Europe that includes annual rainfall (R, in inches), mean altitude (A, in meters) and wheat output (O, in kgs/km2). A rough analysis and some plots make me believe that output is related to the square of rainfall, and log of altitude: O = β0 + β1 × R2 + β2 × loge(A)
#     - Can I fit the coefficients (β) in my model to the data using linear regression?
#
# Machine Learning challenges such as those on Kaggle are a great way to get exposed to different kinds of problems and their nuances. Try to participate in as many as you can, and apply different machine learning models.

# ### 5. Software Engineering and System Design
#
# **Topics:**
# - Software interface: Library calls, REST APIs, data collection endpoints, database queries, etc.
# - User interface: Capturing user inputs & application events, displaying results & visualization, etc.
# - Scalability: Map-reduce, distributed processing, etc.
# - Deployment: Cloud hosting, containers & instances, microservices, etc.
#
# **Questions:**
# - You run an ecommerce website. When a user clicks on an item to open its details page, you would like to suggest 5 more items that the user may be interested in, based on item features as well as the user’s purchase history, and display them at the bottom of the page. What services and database tables would you need to support this behavior? Assuming they’re available, write a query or procedure to fetch the 5 items to suggest.
# - What data would you like to collect from an online video player (like YouTube) to measure user engagement and video popularity?
# - A very simple spam detection system works as follows: It processes one email at a time and counts the number of occurrences of each unique word in it (term frequency), and then it compares those counts with those of previously seen emails which have been marked as spam or not. In order to scale up this system to handle a large volume of email traffic, can you design a map-reduce scheme that can run on a cluster of computers? # - You want to generate a live visualization of what portion of a webpage users are currently viewing and clicking, sort of like a heat map. What components/services/APIs do you need in place, on the client and server end, to enable this? # # ## Additional Resources # # **General Interview Advice** <br> # Inside the Mind of a Recruiter Check out Udacity's interview with <NAME>, a head recruiter, to get the inside scoop on what he looks for in job candidates. # https://www.udacity.com/blog/2017/07/inside-the-mind-of-a-recruiter.html # # Acing Your Interview This blog post outlines guidelines for success including preparation, strategic responses, and appropriate follow-up. It also includes a bonus webinar recording from Udacity Careers VP, <NAME>, Udacity Engineer, <NAME>, and Data Scientist, <NAME>. # https://career-resource-center.udacity.com/interviews/acing-your-interview # # **Phone Screening**<br> # Phone interviews are often the first stage of the hiring process – doing well will increase your odds of being called back for an on-site interview! Check out these tips for success when you get the call. # # How to Ace a Developer Phone Interview Learn from Palantir how to rock a phone interview and make it to the next phase of interviewing. # # **Onsite Interview**<br> # The final step before receiving a job offer is an interview with the team you would be working with in your new job. This final interview is usually on-site and comprises a behavioral and technical portion. 
# # These interviews can be intimidating – it’s okay to feel nervous, everyone does! To make sure you're well prepared on the interview day, begin practicing for interviews well before you begin your job search to refine your interviewing skills and address anything you need more practice on. # # Perfecting Body Language Feeling nervous about your interview? This article details how to have body language that communicates confidence and calmness while you are interviewing. # # **Technical Questions**<br> # Coding Interview Tips Interview Cake describes easy-to-adopt behaviors that will help you succeed in the coding interview. <br> # https://www.interviewcake.com/coding-interview-tips # # The Coding Interview Palantir's guide on preparing for the coding portion of your technical interview. # # 21 Machine Learning Interview Questions and Answers Here are some sample questions from EliteData Science to help keep your mind in shape.<br> # **https://elitedatascience.com/machine-learning-interview-questions-answers** # # **More Practice!** <br> # LeetCode LeetCode has over 950 practice questions organized by difficulty, topic, and company. # https://leetcode.com/ # # Interviewing.io Practice interviewing with engineers from top companies, anonymously. # https://interviewing.io/ # # InterviewBit Practice with coding interview questions asked historically and get job referrals. # https://www.interviewbit.com/ # # **Books** <br> # Cracking the Coding Interview This best-selling book from <NAME> offers 189 programming questions and solutions to help you practice coding and answer technical interview questions with confidence. # # Programming Interviews Exposed: Coding Your Way Through the Interview This popular guide to programming interviews includes code examples, information on the latest languages, chapters on sorting and design patterns, tips on using LinkedIn, and a downloadable app to help prepare applicants for the interview. 
# # Elements of Programming Interviews: The Insiders’ Guide This book from <NAME>, <NAME>, and <NAME> features a great compilation of programming-related problems for interview prep and general refreshers. # #
B-Machine-Learning-Interview-Preparation/4. Practice Questions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: lanfactory
#     language: python
#     name: lanfactory
# ---

# Inspect LAN training runs: plot the validation-loss curve of every
# stored training-history CSV, then pick the best network (lowest
# final-epoch validation loss) per model type and architecture.

import ssms
import lanfactory
import os
import sys
import pickle
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

# Directory holding trained torch networks plus their training-history
# CSVs and network-config pickles.
files_ = os.listdir('/users/afengler/data/proj_lan_pipeline/LAN_scripts/data/torch_models')
files_csv = ['/users/afengler/data/proj_lan_pipeline/LAN_scripts/data/torch_models/' + file_ for file_ in files_ if 'csv' in file_]

# Overlay the validation-loss trajectory of every run (skip the first 10
# epochs so early transients don't dominate the y-axis).
for file_tmp in files_csv:
    print(file_tmp)
    data = pd.read_csv(file_tmp)
    plt.plot(data['val_loss'].values[10:])

files_csv

# Peek at one stored network configuration.
pickle.load(open('/users/afengler/data/proj_lan_pipeline/LAN_scripts/data/torch_models/db415ca6008311ec8d90a0423f3e9b42_ddm_torch__network_config.pickle', 'rb'))

# Final Huber / MSE validation losses for every run.
myl = []
for idx, file_ in enumerate(files_csv):  # enumerate replaces the manual idx counter
    tmp = pd.read_csv(file_)
    myl.append([idx, tmp['val_Huber'].values[-1], tmp['val_MSE'].values[-1]])

# +
# TORCH
import os
import pandas as pd
import pickle
from copy import deepcopy

# Earlier model batches, kept for reference:
# models = ['weibull', 'levy', 'ornstein',
#           'ddm', 'angle',
#           'par2_no_bias', 'seq2_no_bias', 'mic2_no_bias',
#           'par2_angle_no_bias', 'seq2_angle_no_bias', 'mic2_angle_no_bias',
#           'par2_weibull_no_bias', 'seq2_weibull_no_bias', 'mic2_weibull_no_bias']
# models = ['lca_no_bias_4', 'lca_no_bias_angle_4', 'race_no_bias_4', 'race_no_bias_angle_4']
models = ['ddm_mic2_adj_no_bias', 'ddm_mic2_adj_weibull_no_bias', 'ddm_mic2_adj_angle_no_bias']
filters_ = models
path_ = '/users/afengler/data/proj_lan_pipeline/LAN_scripts/data/torch_models'

model_wise_pds = []
for filter_ in filters_:
    # Collect training histories for this model type, keyed by model id
    # (the uuid prefix of the filename).
    pds = {}
    models_tmp = deepcopy(models)
    models_tmp.remove(filter_)
    for file_ in os.listdir(path_):
        # NOTE(review): `filter_ in models_tmp` is a list-membership test and
        # is always False after the remove() above; presumably
        # `any(m in file_ for m in models_tmp)` was intended to exclude files
        # of overlapping model names — confirm before relying on this filter.
        if filter_ in file_ and not (filter_ in models_tmp):
            if 'training_history' in file_:
                pd_tmp = pd.read_csv(path_ + '/' + file_)
                pd_tmp['model_id'] = file_[:file_.find('_')]
                pd_tmp['filename'] = file_
                pd_tmp['path'] = path_ + '/' + file_
                pd_tmp['model_type'] = filter_
                pds[file_[:file_.find('_')]] = pd_tmp

    # Attach architecture info (depth / hidden-layer width) from the
    # matching network-config pickle.
    for m_id in pds.keys():
        for file_ in os.listdir(path_):
            if m_id in file_ and 'network_config' in file_:
                network_config_tmp = pickle.load(open(path_ + '/' + file_, 'rb'))
                pds[m_id]['n_hidden_layers'] = len(network_config_tmp['layer_sizes']) - 1
                pds[m_id]['size_hidden_layers'] = network_config_tmp['layer_sizes'][0]

    training_dat = pd.concat([pds[m_id] for m_id in pds.keys()]).reset_index(drop = True)

    # Best run (lowest final-epoch validation loss) per architecture.
    best_models_pds = []
    for n_h_l in training_dat['n_hidden_layers'].unique():
        for s_h_l in training_dat['size_hidden_layers'].unique():
            training_dat_sub = training_dat.loc[(training_dat['n_hidden_layers'] == n_h_l) & \
                                                (training_dat['size_hidden_layers'] == s_h_l) & \
                                                (training_dat['epoch'] == training_dat['epoch'].max()), :]
            val_loss_min = training_dat_sub['val_loss'].min()
            best_models_pds.append(training_dat_sub.loc[training_dat_sub['val_loss'] == val_loss_min, :])
    best_models_dat = pd.concat(best_models_pds)
    model_wise_pds.append(best_models_dat)

full_dat = pd.concat(model_wise_pds).reset_index(drop = True).drop(['Unnamed: 0'], axis = 1)
# -

# Make dataframe that splits by unique n_hidden layers and unique size hidden layers
# --> Stores min val loss
# --> Stores filename

# Inspect the winners per model type.
full_dat.loc[full_dat['model_type'] == 'ddm_mic2_adj_no_bias', :]

full_dat.loc[full_dat['model_type'] == 'ddm_mic2_adj_no_bias', :].path.values

full_dat.loc[full_dat['model_type'] == 'ddm_mic2_adj_weibull_no_bias', :]

full_dat.loc[full_dat['model_type'] == 'ddm_mic2_adj_weibull_no_bias', :].path.values

full_dat.loc[full_dat['model_type'] == 'ddm_mic2_adj_angle_no_bias', :]

full_dat.loc[full_dat['model_type'] == 'ddm_mic2_adj_angle_no_bias', :].path.values

full_dat.loc[full_dat['model_type'] == 'par2_angle_no_bias', :]['path'].values

# Removed three stray scratch cells that were left at the end of the
# notebook: a bare uuid literal (a SyntaxError as Python source), an
# expression referencing the undefined name `training_data` (typo of
# `training_dat`), and a bare `os` expression.
check_network_performance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from gs_quant.markets.report import FactorRiskReport from gs_quant.session import GsSession, Environment client = 'ENTER CLIENT ID' secret = 'ENTER CLIENT SECRET' GsSession.use(Environment.PROD, client_id=client, client_secret=secret, scopes=()) # + pycharm={"name": "#%%\n"} factor_risk_report = FactorRiskReport(risk_model_id='BARRA_USSLOWL', fx_hedged=True) factor_risk_report.set_position_source('MP0P2141Y8B2SYDN') factor_risk_report.save() factor_risk_report.schedule() print(f"Factor risk report '{factor_risk_report.id}' scheduled.")
gs_quant/documentation/03_portfolios/examples/marquee/00_create_factor_risk_report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Linear Algebra
#
# - Why we need Linear Algebra
#
# 1- To solve a system of linear equations
#
# 2- Most of machine learning models and almost all deep learning models need it
#
#
# ## Matrix-Vector and Matrix-Matrix Multiplication
#
# - On piece of paper, multiply the following matrix-vector:
#
# <img src="matrix_matrix.png" width="300" height="300">
#
# - On piece of paper, multiply the following matrices:
#
# <img src="matrix_vector.png" width="300" height="300">

# ## Verify your answer in Python using Numpy

# +
import numpy as np

# (3x2) matrix times (2x1) column vector -> (3x1) result
A = np.array([[1, 2], [0, 1], [2, 3]])
v = np.array([[5], [7]])
print(np.dot(A, v))  # matrix-vector product

# +
# The number of columns of the 1st matrix must equal the number of rows
# of the 2nd matrix; B is the identity here, so both products return A.
A = np.array([[1, 2], [0, 1]])
B = np.array([[1, 0], [0, 1]])
print(np.dot(B, A))  # matrix-matrix product
print(np.dot(A, B))
# -

# ## Question: Can we express linear regression in matrix-vector format?

# +
import numpy as np
import matplotlib.pyplot as plt

# Running Distance in Mile
x = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
              7.042,10.791,5.313,7.997,5.654,9.27,3.1])

# Water Drinks in Litre
y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
              2.827,3.465,1.65,2.904,2.42,2.94,1.3])
# -

# ## We can obtain the prediction vector as the following:

# Fitted slope and intercept of the regression line
w1 = 0.25163494
w0 = 0.79880123
y_pred = [w1*i + w0 for i in x]
print(y_pred)

# ## Also, we can define our feature matrix as X and weight vector as w

# print(np.ones((len(x), 1)))
# print(np.transpose([x]))
# Concatenate the two matrices column-wise: X is (17, 2) with a ones
# column so the intercept is absorbed into the weight vector.
X = np.concatenate((np.transpose([x]), np.ones((len(x), 1))), axis=1)
print(X)

X.shape

w = np.array([w1, w0])

np.dot(X, w)

# ## Another way

v_ones = np.ones((1, len(x)))
print(v_ones)

# Stack x and a ones row to get a (2, 17) matrix.
# fixed: np.ones((1, len(x))) here created a ragged [(17,), (1, 17)]
# pair, which modern NumPy refuses to turn into an array.
X = np.array([x, np.ones(len(x))])
print(X)

w = np.array([w1, w0])

np.dot(w, X)

# ## Transpose of a Matrix or a Vector
#
# - In linear algebra, the transpose of a matrix is an operator which switches the row and column indices of the matrix by producing another matrix denoted as Aᵀ
#
# <img src="matrix-transpose.jpg" width="300" height="300">

# ## Transpose the Matrix in Numpy

A = np.array([[1, 2, 3], [4, 5, 6]])
print(A)
print("\n")
print(A.T)  # rows become columns

# ## Norm of a vector
#
# - We have different norm, here we mean L2-norm, which measures the length of a vector from origin

# ## Activity: what is the length of the following vector:
#
# - v = [3, 4]

from numpy import linalg as LA

v = np.array([3, 4])
LA.norm(v)

# ## Activity: Show that norm of a vector is the sqrt of V*V.T

v = np.array([3, 4])
np.sqrt(np.dot(v,v.T))

# ## Activity: The distance between two vector u and v
#
# <img src="norm.png" width="500" height="500">

u = np.array([1, 1])
v = np.array([2, 2])
r = np.array([3, 8])
print(LA.norm(u - v))
print(LA.norm(u - r))
print(LA.norm(v - r))

# ## Resources:
#
# - https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html
#
# - http://matrixmultiplication.xyz
Notebooks/Linear_Algebra/linear_algebra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Henry's constant calculations # # In this notebook we'll calculate the characteristic Henry's constant at zero loading. Unlike fitting the entire isotherm with a Henry model, these methods will attempt to fit a straight line only on the initial part of the isotherm. First, make sure the data is imported. # %run import.ipynb # ### Slope method # # The slope method of calculating Henry's fits a linear henry model to the isotherm points. If the model does not fit, it goes to progresivelly lower pressures until it finds a good fit. For data that is very non-linear, this fit might be between p=0 and the first isotherm point. # + # Slope method isotherm = next(i for i in isotherms_n2_77k if i.material=='MCM-41') h1 = pygaps.initial_henry_slope(isotherm, max_adjrms=0.01, logx=True, verbose=True) isotherm = next(i for i in isotherms_n2_77k if i.material=='UiO-66(Zr)') h2 = pygaps.initial_henry_slope(isotherm, max_adjrms=0.01, logx=True, verbose=True) print(h1,h2) # - # ### Virial method # The virial method uses a virial model to fit the data and then obtain the Henry constant from the value of the virial function at n=0. If the data can be fit well by a virial model, the resulting Henry constant will be very accurate. # + # Virial method isotherm = next(i for i in isotherms_n2_77k if i.material=='MCM-41') h1 = pygaps.initial_henry_virial(isotherm, verbose=True) isotherm = next(i for i in isotherms_n2_77k if i.material=='UiO-66(Zr)') h2 = pygaps.initial_henry_virial(isotherm, verbose=True) print(h1,h2) # - # More information about the functions and their use can be found in the [manual](../manual/characterisation.rst).
docs/examples/initial_henryc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Split Data (Python) # Importando as bicliotecas from sklearn.model_selection import train_test_split # Biblioteca utilizada para realizar split data import numpy as np import pandas as pd # Importando os dados df = pd.read_csv('dados/missing_data.csv') # Visualizando dados df.head() # Preenchendo missing values from sklearn.preprocessing import OneHotEncoder, LabelEncoder, Imputer # Criando o objeto resposável por preencher os dados imputer = Imputer(strategy='mean', missing_values='NaN', axis=0 ) # Treinando o modelo imputer.fit(df[['Age', 'Salary']]) # Realizando a conversão df[['Age', 'Salary']] = imputer.transform(df[['Age', 'Salary']]) # Viasualizando as modificações df # Criando objeto para transformação dos dados nominais para discretos le = LabelEncoder() # Aplicando transformação df.Country = le.fit_transform(df.Country) df.Purchased = le.fit_transform(df.Purchased) # Visualizando as tranformações df # + # Separando os dados em descritores e label X = df.iloc[:, :-1].values y = df.iloc[:, -1] y # - # Aplicando a técnica Dummy Coding dc = OneHotEncoder(categorical_features=[0]) # Realizando treinamento X = dc.fit_transform(X).toarray() # Visualizando dados X # Realiando splitting of data X_treino, X_teste, y_treino, y_teste = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y) X_treino
2 - Split Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Visualizzazione features di dataset iris. Sulla diagonale, distribuzione dei valori della feature nel training set import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.patches as mpatches import scipy.stats as st import seaborn as sns # + plt.style.use('ggplot') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.serif'] = 'Ubuntu' plt.rcParams['font.monospace'] = 'Ubuntu Mono' plt.rcParams['font.size'] = 10 plt.rcParams['axes.labelsize'] = 10 plt.rcParams['axes.labelweight'] = 'bold' plt.rcParams['axes.titlesize'] = 10 plt.rcParams['xtick.labelsize'] = 8 plt.rcParams['ytick.labelsize'] = 8 plt.rcParams['legend.fontsize'] = 10 plt.rcParams['figure.titlesize'] = 12 plt.rcParams['image.cmap'] = 'jet' plt.rcParams['image.interpolation'] = 'none' plt.rcParams['figure.figsize'] = (16, 8) plt.rcParams['lines.linewidth'] = 2 plt.rcParams['lines.markersize'] = 8 colors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09'] # - df = pd.read_csv('../dataset/iris.csv', delimiter=';') features=[ x for x in df.columns if x!='class'] classes=[ x for x in df['class'].unique()] nclasses=len(classes) nfeatures=len(features) data=np.array([np.array([np.array(df[df['class']==c][f]) for f in features]) for c in classes]) g = sns.pairplot(data=df, hue="class", height=4, plot_kws=dict(alpha=.7, s=60), diag_kws=dict(shade=True, alpha=.7))
codici/.ipynb_checkpoints/iris-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8
#     language: python
#     name: python3
# ---

# Gold & silver price prediction with ARIMA on 5 years of Yahoo Finance
# daily history.
# NOTE(review): `statsmodels.tsa.arima_model.ARIMA` is the legacy API
# (removed in statsmodels >= 0.13); this notebook requires an older
# statsmodels — confirm the pinned version before running.

# !pip install pmdarima
# !pip install yfinance

import pandas as pd
import matplotlib.pyplot as plt
import yfinance as yf
from pmdarima import auto_arima
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt

# <b> Importing data from yahoo Finance </b>

# Gold futures (ticker GC=F), last 5 years of daily bars.
Gold = yf.Ticker("GC=F")
Gold_info = Gold.info
Gold_data = Gold.history(period="5Y")
Gold_data.reset_index(inplace=True)
Gold_data.tail()

Gold_data.drop(['Dividends','Stock Splits'], inplace=True, axis=1)

# <b> Gold Data info of 5 years </b>

Gold_data.info()

Gold_data.describe()

# <b>How Gold Data Varied in 5 years</b>

Gold_data['Close'].plot(figsize=(12,5))

Gold_data.dropna()

# Let auto_arima search for a good (p, d, q) order on the close prices.
best_order = auto_arima(Gold_data['Close'], trace=True)

Close_gold_data = pd.DataFrame(Gold_data['Close'])
Close_gold_data

# <b> Spliting Training and Testing dataset in the model </b>

# Chronological 80/20 split — no shuffling, since this is a time series.
Train, Test = Close_gold_data[0:int(len(Close_gold_data)*0.80)], Close_gold_data[int(len(Close_gold_data)*0.80):]

Train

Test

# <b>Preparation of Model</b>

model = ARIMA(Train, order=(0,1,0))
model = model.fit()
model.summary()

# <b> Prediction via Data Visualization</b>

# Forecast over the held-out index range and compare against the test data.
start = len(Train)
end = len(Train) + len(Test) - 1
prediction = model.predict(start=start, end=end, typ='levels').rename("ARIMA Predictions")
prediction.plot(legend=True, figsize=(15,7))
Test['Close'].plot(legend=True)

# RMSE of the forecast, and its size relative to the mean test price.
rmse = sqrt(mean_squared_error(Test, prediction))
rmse

average = Test.mean()
Percentage_error = (rmse/average)*100
print('Error is', round(Percentage_error[0]), '%')

# In-sample predictions over (nearly) the whole series.
start = 1
end = 1245
prediction = model.predict(start=start, end=end, typ='levels').rename("ARIMA Predictions")
prediction.plot(legend=True, figsize=(15,7))
Test['Close'].plot(legend=True)

start = 1
end = 1245
prediction = model.predict(start=start, end=end, typ='levels')
prediction.plot(legend=True, color='red', linestyle='dashed', marker='o', label='Predicted Price', figsize=(15,7))
Test['Close'].plot(legend=True, color='blue', label='Tested Data')
Train['Close'].plot(legend=True, color='orange', label='Trained Data')

# <b> Price Predication for next 30 days</b>

# Extend the forecast horizon 30 steps past the end of the data.
start = 1
end = 1275
prediction = model.predict(start=start, end=end, typ='levels')
prediction.plot(legend=True, color='red', linestyle='dashed', marker='o', label='Predicted Price', figsize=(15,7))
Test['Close'].plot(legend=True, color='blue', label='Tested Data')
Train['Close'].plot(legend=True, color='orange', label='Trained Data')

# <b>If we see the graph there is a fall in graph from data index 1000 to 1200 and the reason behind it is COVID 19 impact on world economy. We can see from here that the start date is september 2017 when covid 19 impact was on its peak thus we can also analyse a data too</b>

Gold_data[1000:1200]

# <b> Importing silver data from yahoo finance and plotting its prediction</b>

# Silver futures (ticker SI=F), same 5-year window and pipeline as gold.
Silver = yf.Ticker("SI=F")
Silver_info = Silver.info
Silver_data = Silver.history(period="5Y")
Silver_data.reset_index(inplace=True)
Silver_data.tail()

Silver_data.drop(['Dividends','Stock Splits'], inplace=True, axis=1)

Silver_data['Close'].plot(figsize=(12,5))

Silver_data.dropna()

best_order = auto_arima(Silver_data['Close'], trace=True)

Close_silver_data = pd.DataFrame(Silver_data['Close'])

Train1, Test1 = Close_silver_data[0:int(len(Close_silver_data)*0.80)], Close_silver_data[int(len(Close_silver_data)*0.80):]

model = ARIMA(Train1, order=(3,1,2))
model = model.fit()
model.summary()

start = 1
end = 1245
prediction = model.predict(start=start, end=end, typ='levels')
prediction.plot(legend=True, color='red', linestyle='dashed', marker='o', label='Predicted Price', figsize=(15,7))
Test1['Close'].plot(legend=True, color='blue', label='Tested Data')
Train1['Close'].plot(legend=True, color='orange', label='Trained Data')

# Out-of-sample forecast for silver over its held-out range.
# fixed: this cell previously computed start/end from the GOLD split
# (len(Train)/len(Test)) while plotting the SILVER test set.
start = len(Train1)
end = len(Train1) + len(Test1) - 1
prediction = model.predict(start=start, end=end, typ='levels').rename("ARIMA Predictions")
prediction.plot(legend=True, figsize=(15,7))
Test1['Close'].plot(legend=True)
Gold & Silver Price Prediction using Python(ARIMA).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python API # <!-- BEGIN MODULEGEN --> # ```{toctree} # :maxdepth: 3 # widgets/core # widgets/datagrid # widgets/dvcs # widgets/html # widgets/json_e # widgets/json_schema_form # widgets/jsonld # widgets/lab # widgets/notebooks # widgets/svg # widgets/tpl_jinja # widgets/yaml # ``` # <!-- END MODULEGEN --> #
docs/widgets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# ## Homework3 - Seminar Series Neural Networks for Finance
#

# + slideshow={"slide_type": "slide"}
import torch
import math
import matplotlib.pyplot as plt
import numpy as np

# Target function y = tan(x) sampled on [0, 1.55] — just below pi/2,
# where tan diverges.
n_samples = 5000
x = torch.linspace(0, 1.55, steps=n_samples, requires_grad=False)
y = torch.tan(x)

plt.figure(1)
plt.plot(x.detach(), y.detach())
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title(r'$y=\tan(x)$')  # fixed: the data is tan(x), the title said cos(x)
plt.grid()
# -

# **Split the data set into two parts, training and test**

# +
np.random.seed(0)  # reproducible shuffle
indices = np.arange(0, n_samples)
np.random.shuffle(indices)  # shuffle the indices

index_split = math.floor(0.8*n_samples)  # 80/20 train/test split
indices_train = indices[0:index_split]
# fixed off-by-one: [index_split:-1] silently dropped the last shuffled
# sample, leaving the test set one element short
indices_test = indices[index_split:]

## create a training part and a test part; x becomes a (n_samples, 1)
## column so it can feed a torch Linear layer directly
x = torch.reshape(x, (-1, 1))
x_train = x[indices_train]
y_train = y[indices_train]
x_test = x[indices_test]
y_test = y[indices_test]
# (Removed the redundant map(torch.tensor, ...) — these are already
# tensors, and re-wrapping tensors triggers a copy-construct warning.)
# -

# **Create your neural networks**

# +
## Here build and train your ANNs
homework3-code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Test libcurve.cpp - Image 2D # %load_ext autoreload # %autoreload 2 import numpy as np np.random.seed(0) bool_img = np.random.randint(0, 2, (20,20), dtype=bool) neigh1 = np.array([[True, False],[True, False]]) neigh2 = np.array([[False, True],[True, False]]) img = np.random.randint(0, 256, (10,10), dtype=int) img[1, 1] = 243 from nice.utils import print_dir from nice.plot import matplotlib_plot, imshow_colorspace, line import euchar.cppbinding.utils as u import euchar.cppbinding.curve as c from euchar.display import euler_curve_plot fig, ax = matplotlib_plot(figsize=(2,2), facecolor="g") imshow_colorspace(ax, img, "BGR", title="") print_dir(c) # ### naive_image_2d vs image_2d # + ecc_naive = np.array(c.naive_image_2d(img, 256)) vector_changes = u.vector_of_euler_changes_2d() ecc = np.array(c.image_2d(img, vector_changes, 256)) # - fig, ax = matplotlib_plot(figsize=(6,3)) bins = np.arange(len(ecc)) euler_curve_plot(fig, ax, bins, ecc, xticks=[0, 50, 100, 150, 200, 255], yticks=[-4, -2, 0, 2, 4, 6, 8, 10], xlim=[-5, 300], ylim=[-5, 12], x_arrow_head_width=0.8, x_arrow_head_length=6, y_arrow_head_width=5, y_arrow_head_length=1) # For many images images = [np.random.randint(0, 256, (32,32), dtype=int) for _ in range(20)] list_bool = [(np.array(c.naive_image_2d(img, 256)) == np.array(c.image_2d(img, vector_changes, 256))).all() for img in images] print(all(list_bool))
notebooks/test_curve_image_2d.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="rNdWfPXCjTjY" colab_type="text" # ##### Copyright 2019 The TensorFlow Authors. # + id="I1dUQ0GejU8N" colab_type="code" colab={} cellView="form" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="c05P9g5WjizZ" colab_type="text" # # Classify structured data # + [markdown] colab_type="text" id="zofH_gCzgplN" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/alpha/tutorials/keras/feature_columns"> # <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> # View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/feature_columns.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> # Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/feature_columns.ipynb"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> # View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="K1y4OHpGgss7" # This tutorial demonstrates how to classify structured data (e.g. tabular data in a CSV). 
We will use [Keras](https://www.tensorflow.org/guide/keras) to define the model, and [feature columns](https://www.tensorflow.org/guide/feature_columns) as a bridge to map from columns in a CSV to features used to train the model. This tutorial contains complete code to: # # * Load a CSV file using [Pandas](https://pandas.pydata.org/). # * Build an input pipeline to batch and shuffle the rows using [tf.data](https://www.tensorflow.org/guide/datasets). # * Map from columns in the CSV to features used to train the model using feature columns. # * Build, train, and evaluate a model using Keras. # # ## The Dataset # # We will use a small [dataset](https://archive.ics.uci.edu/ml/datasets/heart+Disease) provided by the Cleveland Clinic Foundation for Heart Disease. There are several hundred rows in the CSV. Each row describe a patient, and each column describes an attribute. We will use this information to predict whether a patient has heart disease, which in this dataset is a binary classification task. # # Following is a [description](https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/heart-disease.names) of this dataset. Notice there are both numeric and categorical columns. 
# # >Column| Description| Feature Type | Data Type # >------------|--------------------|----------------------|----------------- # >Age | Age in years | Numerical | integer # >Sex | (1 = male; 0 = female) | Categorical | integer # >CP | Chest pain type (0, 1, 2, 3, 4) | Categorical | integer # >Trestbpd | Resting blood pressure (in mm Hg on admission to the hospital) | Numerical | integer # >Chol | Serum cholestoral in mg/dl | Numerical | integer # >FBS | (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false) | Categorical | integer # >RestECG | Resting electrocardiographic results (0, 1, 2) | Categorical | integer # >Thalach | Maximum heart rate achieved | Numerical | integer # >Exang | Exercise induced angina (1 = yes; 0 = no) | Categorical | integer # >Oldpeak | ST depression induced by exercise relative to rest | Numerical | integer # >Slope | The slope of the peak exercise ST segment | Numerical | float # >CA | Number of major vessels (0-3) colored by flourosopy | Numerical | integer # >Thal | 3 = normal; 6 = fixed defect; 7 = reversable defect | Categorical | string # >Target | Diagnosis of heart disease (1 = true; 0 = false) | Classification | integer # + [markdown] colab_type="text" id="VxyBFc_kKazA" # ## Import TensorFlow and other libraries # + colab={} colab_type="code" id="LuOWVJBz8a6G" # !pip install sklearn # + colab_type="code" id="9dEreb4QKizj" colab={} from __future__ import absolute_import, division, print_function import numpy as np import pandas as pd # !pip install tensorflow==2.0.0-alpha0 import tensorflow as tf from tensorflow import feature_column from tensorflow.keras import layers from sklearn.model_selection import train_test_split # + [markdown] colab_type="text" id="KCEhSZcULZ9n" # ## Use Pandas to create a dataframe # # [Pandas](https://pandas.pydata.org/) is a Python library with many helpful utilities for loading and working with structured data. We will use Pandas to download the dataset from a URL, and load it into a dataframe. 
# + colab_type="code" id="REZ57BXCLdfG" colab={} URL = 'https://storage.googleapis.com/applied-dl/heart.csv' dataframe = pd.read_csv(URL) dataframe.head() # + [markdown] colab_type="text" id="u0zhLtQqMPem" # ## Split the dataframe into train, validation, and test # # The dataset we downloaded was a single CSV file. We will split this into train, validation, and test sets. # + colab_type="code" id="YEOpw7LhMYsI" colab={} train, test = train_test_split(dataframe, test_size=0.2) train, val = train_test_split(train, test_size=0.2) print(len(train), 'train examples') print(len(val), 'validation examples') print(len(test), 'test examples') # + [markdown] colab_type="text" id="84ef46LXMfvu" # ## Create an input pipeline using tf.data # # Next, we will wrap the dataframes with [tf.data](https://www.tensorflow.org/guide/datasets). This will enable us to use feature columns as a bridge to map from the columns in the Pandas dataframe to features used to train the model. If we were working with a very large CSV file (so large that it does not fit into memory), we would use tf.data to read it from disk directly. That is not covered in this tutorial. 
# + colab_type="code" id="NkcaMYP-MsRe" colab={}
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
  """Wrap a Pandas dataframe in a batched (optionally shuffled) tf.data.Dataset.

  Args:
    dataframe: Pandas DataFrame containing the features plus a 'target' column.
    shuffle: If True, shuffle the rows (pass False for validation/test splits).
    batch_size: Number of rows per yielded batch.

  Returns:
    A tf.data.Dataset yielding (features-dict, label) batches.
  """
  # Copy first so popping 'target' does not mutate the caller's dataframe.
  dataframe = dataframe.copy()
  labels = dataframe.pop('target')
  ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
  if shuffle:
    # Buffer as large as the dataframe -> a full uniform shuffle.
    ds = ds.shuffle(buffer_size=len(dataframe))
  ds = ds.batch(batch_size)
  return ds


# + colab_type="code" id="CXbbXkJvMy34" colab={}
batch_size = 5 # A small batch size is used for demonstration purposes
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)

# + [markdown] colab_type="text" id="qRLGSMDzM-dl"
# ## Understand the input pipeline
#
# Now that we have created the input pipeline, let's call it to see the format of the data it returns. We have used a small batch size to keep the output readable.

# + colab_type="code" id="CSBo3dUVNFc9" colab={}
for feature_batch, label_batch in train_ds.take(1):
  print('Every feature:', list(feature_batch.keys()))
  print('A batch of ages:', feature_batch['age'])
  print('A batch of targets:', label_batch )

# + [markdown] colab_type="text" id="OT5N6Se-NQsC"
# We can see that the dataset returns a dictionary of column names (from the dataframe) that map to column values from rows in the dataframe.

# + [markdown] colab_type="text" id="ttIvgLRaNoOQ"
# ## Demonstrate several types of feature column
# TensorFlow provides many types of feature columns. In this section, we will create several types of feature columns, and demonstrate how they transform a column from the dataframe.
# + colab_type="code" id="mxwiHFHuNhmf" colab={} # We will use this batch to demonstrate several types of feature columns example_batch = next(iter(train_ds))[0] # + colab_type="code" id="0wfLB8Q3N3UH" colab={} # A utility method to create a feature column # and to transform a batch of data def demo(feature_column): feature_layer = layers.DenseFeatures(feature_column) print(feature_layer(example_batch).numpy()) # + [markdown] colab_type="text" id="Q7OEKe82N-Qb" # ### Numeric columns # The output of a feature column becomes the input to the model (using the demo function defined above, we will be able to see exactly how each column from the dataframe is transformed). A [numeric column](https://www.tensorflow.org/api_docs/python/tf/feature_column/numeric_column) is the simplest type of column. It is used to represent real valued features. When using this column, your model will receive the column value from the dataframe unchanged. # + colab_type="code" id="QZTZ0HnHOCxC" colab={} age = feature_column.numeric_column("age") demo(age) # + [markdown] colab_type="text" id="7a6ddSyzOKpq" # In the heart disease dataset, most columns from the dataframe are numeric. # + [markdown] colab_type="text" id="IcSxUoYgOlA1" # ### Bucketized columns # Often, you don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. Consider raw data that represents a person's age. Instead of representing age as a numeric column, we could split the age into several buckets using a [bucketized column](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column). Notice the one-hot values below describe which age range each row matches. 
# + colab_type="code" id="wJ4Wt3SAOpTQ" colab={} age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) demo(age_buckets) # + [markdown] colab_type="text" id="r1tArzewPb-b" # ### Categorical columns # In this dataset, thal is represented as a string (e.g. 'fixed', 'normal', or 'reversible'). We cannot feed strings directly to a model. Instead, we must first map them to numeric values. The categorical vocabulary columns provide a way to represent strings as a one-hot vector (much like you have seen above with age buckets). The vocabulary can be passed as a list using [categorical_column_with_vocabulary_list](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), or loaded from a file using [categorical_column_with_vocabulary_file](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_file). # + colab_type="code" id="DJ6QnSHkPtOC" colab={} thal = feature_column.categorical_column_with_vocabulary_list( 'thal', ['fixed', 'normal', 'reversible']) thal_one_hot = feature_column.indicator_column(thal) demo(thal_one_hot) # + [markdown] colab_type="text" id="dxQloQ9jOoXL" # In a more complex dataset, many columns would be categorical (e.g. strings). Feature columns are most valuable when working with categorical data. Although there is only one categorical column in this dataset, we will use it to demonstrate several important types of feature columns that you could use when working with other datasets. # + [markdown] colab_type="text" id="LEFPjUr6QmwS" # ### Embedding columns # Suppose instead of having just a few possible strings, we have thousands (or more) values per category. For a number of reasons, as the number of categories grow large, it becomes infeasible to train a neural network using one-hot encodings. We can use an embedding column to overcome this limitation. 
Instead of representing the data as a one-hot vector of many dimensions, an [embedding column](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column) represents that data as a lower-dimensional, dense vector in which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the example below) is a parameter that must be tuned. # # Key point: using an embedding column is best when a categorical column has many possible values. We are using one here for demonstration purposes, so you have a complete example you can modify for a different dataset in the future. # + colab_type="code" id="hSlohmr2Q_UU" colab={} # Notice the input to the embedding column is the categorical column # we previously created thal_embedding = feature_column.embedding_column(thal, dimension=8) demo(thal_embedding) # + [markdown] colab_type="text" id="urFCAvTVRMpB" # ### Hashed feature columns # # Another way to represent a categorical column with a large number of values is to use a [categorical_column_with_hash_bucket](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_hash_bucket). This feature column calculates a hash value of the input, then selects one of the `hash_bucket_size` buckets to encode a string. When using this column, you do not need to provide the vocabulary, and you can choose to make the number of hash_buckets significantly smaller than the number of actual categories to save space. # # Key point: An important downside of this technique is that there may be collisions in which different strings are mapped to the same bucket. In practice, this can work well for some datasets regardless. 
# + colab_type="code" id="YHU_Aj2nRRDC" colab={}
# Hash 'thal' into 1000 buckets instead of supplying an explicit vocabulary.
thal_hashed = feature_column.categorical_column_with_hash_bucket(
      'thal', hash_bucket_size=1000)
demo(feature_column.indicator_column(thal_hashed))

# + [markdown] colab_type="text" id="fB94M27DRXtZ"
# ### Crossed feature columns
# Combining features into a single feature, better known as [feature crosses](https://developers.google.com/machine-learning/glossary/#feature_cross), enables a model to learn separate weights for each combination of features. Here, we will create a new feature that is the cross of age and thal. Note that `crossed_column` does not build the full table of all possible combinations (which could be very large). Instead, it is backed by a `hashed_column`, so you can choose how large the table is.

# + colab_type="code" id="oaPVERd9Rep6" colab={}
# Cross the bucketized age column with the categorical thal column (hash-backed).
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
demo(feature_column.indicator_column(crossed_feature))

# + [markdown] colab_type="text" id="ypkI9zx6Rj1q"
# ## Choose which columns to use
# We have seen how to use several types of feature columns. Now we will use them to train a model. The goal of this tutorial is to show you the complete code (e.g. mechanics) needed to work with feature columns. We have selected a few columns to train our model below arbitrarily.
#
# Key point: If your aim is to build an accurate model, try a larger dataset of your own, and think carefully about which features are the most meaningful to include, and how they should be represented.
# + colab_type="code" id="4PlLY7fORuzA" colab={} feature_columns = [] # numeric cols for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']: feature_columns.append(feature_column.numeric_column(header)) # bucketized cols age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) feature_columns.append(age_buckets) # indicator cols thal = feature_column.categorical_column_with_vocabulary_list( 'thal', ['fixed', 'normal', 'reversible']) thal_one_hot = feature_column.indicator_column(thal) feature_columns.append(thal_one_hot) # embedding cols thal_embedding = feature_column.embedding_column(thal, dimension=8) feature_columns.append(thal_embedding) # crossed cols crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000) crossed_feature = feature_column.indicator_column(crossed_feature) feature_columns.append(crossed_feature) # + [markdown] colab_type="text" id="M-nDp8krS_ts" # ### Create a feature layer # Now that we have defined our feature columns, we will use a [DenseFeatures](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/DenseFeatures) layer to input them to our Keras model. # + colab_type="code" id="6o-El1R2TGQP" colab={} feature_layer = tf.keras.layers.DenseFeatures(feature_columns) # + [markdown] colab_type="text" id="8cf6vKfgTH0U" # Earlier, we used a small batch size to demonstrate how feature columns worked. We create a new input pipeline with a larger batch size. 
# + colab_type="code" id="gcemszoGSse_" colab={} batch_size = 32 train_ds = df_to_dataset(train, batch_size=batch_size) val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size) test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size) # + [markdown] colab_type="text" id="bBx4Xu0eTXWq" # ## Create, compile, and train the model # + colab_type="code" id="_YJPPb3xTPeZ" colab={} model = tf.keras.Sequential([ feature_layer, layers.Dense(128, activation='relu'), layers.Dense(128, activation='relu'), layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.fit(train_ds, validation_data=val_ds, epochs=5) # + colab_type="code" id="GnFmMOW0Tcaa" colab={} loss, accuracy = model.evaluate(test_ds) print("Accuracy", accuracy) # + [markdown] colab_type="text" id="3bdfbq20V6zu" # Key point: You will typically see best results with deep learning with much larger and more complex datasets. When working with a small dataset like this one, we recommend using a decision tree or random forest as a strong baseline. The goal of this tutorial is not to train an accurate model, but to demonstrate the mechanics of working with structured data, so you have code to use as a starting point when working with your own datasets in the future. # + [markdown] colab_type="text" id="SotnhVWuHQCw" # ## Next steps # The best way to learn more about classifying structured data is to try it yourself. We suggest finding another dataset to work with, and training a model to classify it using code similar to the above. To improve accuracy, think carefully about which features to include in your model, and how they should be represented.
site/en/r2/tutorials/keras/feature_columns.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Segment requirements # # <font color='red'>**This notebook is outdated as of 9 May 2021. # Please use more recent notebooks for help.**</font> # # First try at calculating the per-segment requirements, ploughing thruogh Laurent Pueyo's Mathematica notebook and getting results by scaling the modes to the target contrast. # # 1. set target contrast in code cell 2 (e.g. `1e-10`) # 2. set apodizer design in code cell 5 (e.g. `small`) # 3. comment in correct data directory in code cell 9 (`[...]/2020-01-27T23-57-00_luvoir-small`) # + # Imports import os import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm # %matplotlib inline from astropy.io import fits import astropy.units as u import hcipy from pastis.e2e_simulators.generic_segmented_telescopes import SegmentedMirror from pastis.config import CONFIG_PASTIS import pastis.util as util from pastis.e2e_simulators.luvoir_imaging import LuvoirAPLC # - eunit = 1e-9 c_target = 1e-10 nmodes = 120 # ## Instantiate Segmented mirror for plotting of primary # + # Load aperture files needed for SM nseg = 120 wvln = 638e-9 datadir = os.path.join(util.find_repo_location(), CONFIG_PASTIS.get('LUVOIR', 'optics_path_in_repo')) aper_path = 'inputs/TelAp_LUVOIR_gap_pad01_bw_ovsamp04_N1000.fits' aper_ind_path = 'inputs/TelAp_LUVOIR_gap_pad01_bw_ovsamp04_N1000_indexed.fits' aper_read = hcipy.read_fits(os.path.join(datadir, aper_path)) aper_ind_read = hcipy.read_fits(os.path.join(datadir, aper_ind_path)) pupil_grid = hcipy.make_pupil_grid(dims=aper_ind_read.shape[0], diameter=15) aper = hcipy.Field(aper_read.ravel(), pupil_grid) aper_ind = hcipy.Field(aper_ind_read.ravel(), pupil_grid) wf_aper = hcipy.Wavefront(aper, wvln) # Load segment positions from fits header hdr = 
fits.getheader(os.path.join(datadir, aper_ind_path)) poslist = [] for i in range(nseg): segname = 'SEG' + str(i+1) xin = hdr[segname + '_X'] yin = hdr[segname + '_Y'] poslist.append((xin, yin)) poslist = np.transpose(np.array(poslist)) seg_pos = hcipy.CartesianGrid(poslist) # - # Instantiate SM sm = SegmentedMirror(aper_ind, seg_pos) # ## Instantiate LUVOIR telescope for full functionality # + # Instantiate LUVOIR sampling = 4 apodizer_design = 'large' # This path is specific to the paths used in the LuvoirAPLC class optics_input = os.path.join(util.find_repo_location(), CONFIG_PASTIS.get('LUVOIR', 'optics_path_in_repo')) luvoir = LuvoirAPLC(optics_input, apodizer_design, sampling) # - # Make reference image luvoir.flatten() psf_unaber, ref = luvoir.calc_psf(ref=True) norm = ref.max() # + # Make dark hole dh_outer = hcipy.circular_aperture(2*luvoir.apod_dict[apodizer_design]['owa'] * luvoir.lam_over_d)(luvoir.focal_det) dh_inner = hcipy.circular_aperture(2*luvoir.apod_dict[apodizer_design]['iwa'] * luvoir.lam_over_d)(luvoir.focal_det) dh_mask = (dh_outer - dh_inner).astype('bool') plt.figure(figsize=(18, 6)) plt.subplot(131) hcipy.imshow_field(psf_unaber/norm, norm=LogNorm()) plt.subplot(132) hcipy.imshow_field(dh_mask) plt.subplot(133) hcipy.imshow_field(psf_unaber/norm, norm=LogNorm(), mask=dh_mask) # - dh_intensity = psf_unaber/norm * dh_mask baseline_contrast = util.dh_mean(dh_intensity, dh_mask) #np.mean(dh_intensity[np.where(dh_intensity != 0)]) print('contrast:', baseline_contrast) # ### Load the modes # + # Which directory are we working in? 
#savedpath = '/Users/ilaginja/Documents/data_from_repos/pastis_data/2020-01-27T23-57-00_luvoir-small' #savedpath = '/Users/ilaginja/Documents/data_from_repos/pastis_data/2020-01-28T02-17-18_luvoir-medium' savedpath = '/Users/ilaginja/Documents/data_from_repos/pastis_data/2020-01-28T04-45-55_luvoir-large' # Load PASTIS modes - piston value per segment per mode pastismodes = np.loadtxt(os.path.join(savedpath, 'results', 'pastis_modes.txt')) print('pastismodes.shape: {}'.format(pastismodes.shape)) # pastismodes[segs, modes] # Load PASTIS matrix pastismatrix = fits.getdata(os.path.join(savedpath, 'matrix_numerical', 'PASTISmatrix_num_piston_Noll1.fits')) # Load sigma vector sigmas = np.loadtxt(os.path.join(savedpath, 'results', 'mode_requirements_1e-10_uniform.txt')) #print(sigmas) # Load eigenvalues eigenvalues = np.loadtxt(os.path.join(savedpath, 'results', 'eigenvalues.txt')) # - # plot the PASTIS *mode* matrix, i.e. SegToModes in Mathematica plt.imshow(pastismodes) # + # Calculate the inverse of the PASTIS mode matrix # This is ModeToSegs in Mathematica modestosegs = np.linalg.pinv(pastismodes) # modestosegs[modes, segs] plt.imshow(modestosegs) # + # Calculate mean contrast of all modes with PASTIS matrix AND the sigmas, to make sure this works print(sigmas.shape) c_avg_sigma = [] for i in range(nmodes): c_avg_sigma.append(util.pastis_contrast(sigmas[i] * pastismodes[:,i]*u.nm, pastismatrix) + baseline_contrast) print(c_avg_sigma) # - # Comparing to expectation of error budget to make sure it is the same number: c_target/120 + baseline_contrast # + # Now calculate all mean contrasts of the pastis modes directly c_avg = [] for i in range(nmodes): c_avg.append(util.pastis_contrast(pastismodes[:,i]*u.nm, pastismatrix) + baseline_contrast) print('c_avg:') print(c_avg-baseline_contrast) print('eigenvalues: ') print(eigenvalues) # - # The average contrasts of the unscaled modes are simply the eigenvalues!! 
# # $$c_{avg}(mode1) = \mathbf{u}_1^T M \mathbf{u}_1 = \lambda_1$$ # # since $\mathbf{u}_1$ is an eigenmode of $M$ and $\lambda_1$ the corresponding eigenvalue. plt.plot(np.log10(c_avg)) # Calculate segment requirements mus = np.sqrt(((c_target-baseline_contrast)/nmodes)/(np.dot(c_avg, np.square(modestosegs)))) #* np.sqrt(3) print(mus) mus baseline_contrast
notebooks/LUVOIR-A/13_Calculate segment requirements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="DZ9Do7XN55B9" # # Neuromatch Academy 2020, Week 2, Day 2, Tutorial 3 # # # Combining determinism and stochasticity # # **Content Creators**: <NAME>, <NAME>, <NAME> # # **Content Reviewers**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # + [markdown] colab_type="text" id="Mjb7HtWuHclB" # --- # # Tutorial Objectives # # Time-dependent processes rule the world. # # Now that we've spent some time familiarizing ourselves with the behavior of such systems when their trajectories are (1) entirely predictable and deterministic, or (2) governed by random processes, it's time to consider that neither is sufficient to describe neuroscience. Instead, we are often faced with processes for which we know some dynamics, but there is some random aspect as well. We call these **dynamical systems with stochasticity**. # # This tutorial will build on our knowledge and gain some intuition for how deterministic and stochastic processes can both be a part of a dynamical system by: # * Simulating random walks # * Investigating the mean and variance of a Ornstein-Uhlenbeck (OU) process # * Quantifying the OU process's behavior at equilibrium. 
# + [markdown] colab_type="text" id="amN-ClPIetaF"
# ---
# # Setup

# + colab={} colab_type="code" id="9FKEVWEYeupS"
import numpy as np
import matplotlib.pyplot as plt

# + cellView="form" colab={} colab_type="code" id="46Ba9fOve2MP"
#@title Figure settings
import ipywidgets as widgets  # interactive display
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")

# + cellView="form" colab={} colab_type="code" id="YxMBSS8zfIjP"
# @title Helper Functions

# drift-diffusion model
# returns t, x
def ddm(T, x0, xinfty, lam, sig):
  """Simulate a drift-diffusion model for T time steps.

  x[k+1] = xinfty + lam*(x[k] - xinfty) + sig*noise, starting from x0.
  Returns (t, x) as numpy arrays of length T.
  """
  t = np.arange(0, T, 1.)
  x = np.zeros_like(t)
  x[0] = x0
  for k in range(len(t)-1):
    x[k+1] = xinfty + lam * (x[k] - xinfty) + sig * np.random.standard_normal(size=1)
  return t, x

# computes equilibrium variance of ddm
# returns variance
def ddm_eq_var(T, x0, xinfty, lam, sig):
  """Empirical equilibrium variance of one ddm simulation."""
  t, x = ddm(T, x0, xinfty, lam, sig)
  # returns variance of the second half of the simulation
  # this is a hack: assumes system has settled by second half
  return x[-round(T/2):].var()

def plot_random_walk_sims(sims, nsims=10):
  """Helper for exercise 3A: plot the first `nsims` trajectories in `sims`."""
  fig = plt.figure()
  # BUGFIX: plot the `sims` argument, not the global `sim` the original
  # referenced by accident (NameError when no global `sim` exists).
  plt.plot(sims[:nsims, :].T)
  plt.xlabel('time')
  plt.ylabel('position x')
  plt.show()

def plot_mean_var_by_timestep(mu, var):
  """Helper function for exercise 3A.2"""
  fig, (ah1, ah2) = plt.subplots(2)
  # plot mean of distribution as a function of time
  ah1.plot(mu)
  ah1.set(ylabel='mean')
  ah1.set_ylim([-5, 5])
  # plot variance of distribution as a function of time
  ah2.plot(var)
  ah2.set(xlabel='time')
  ah2.set(ylabel='variance')
  plt.show()

def plot_ddm(t, x, xinfty, lam, x0):
  """Overlay a ddm simulation (points) on its deterministic solution (line)."""
  fig = plt.figure()
  plt.plot(t, xinfty * (1 - lam**t) + x0 * lam**t, 'r')
  plt.plot(t, x, 'k.')  # simulated data pts
  plt.xlabel('t')
  plt.ylabel('x')
  # BUGFIX: use an ordered list, not a set -- set iteration order is
  # arbitrary, so the labels could attach to the wrong lines.
  plt.legend(['deterministic solution', 'simulation'])
  plt.show()

def var_comparison_plot(empirical, analytical):
  """Scatter empirical vs. analytical equilibrium variance with a 45-deg line."""
  fig = plt.figure()
  plt.plot(empirical, analytical, '.', markersize=15)
  # BUGFIX: axis labels now match what is plotted on each axis
  # (empirical on x, analytical on y).
  plt.xlabel('empirical equilibrium variance')
  plt.ylabel('analytic equilibrium variance')
  plt.plot(np.arange(8), np.arange(8), 'k', label='45 deg line')
  plt.legend()
  plt.grid(True)
  plt.show()

# + [markdown] colab_type="text" id="zGbdcHCkvz1c"
# ---
# # Section 1: Random Walks
#

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="335TVF-yV9U_" outputId="9b8ba00e-bd3a-4ce4-cdbe-fd49d5b5e6ec"
#@title Video 1: <NAME> and Random Walks
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="VHwTBCQJjfw", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video

# + [markdown] colab_type="text" id="z6cBBoCoJv5u"
# To begin, let's first take a gander at how life sometimes wanders around aimlessly. One of the simplest and best-studied living systems that has some interesting behaviors is the _E. coli_ bacterium, which is capable of navigating odor gradients on a substrate to seek a food source. Larger life (including flies, dogs, and blindfolded humans) sometimes use the same strategies to guide their decisions.
#
# Here, we will consider what the _E. coli_ does in the absence of food odors. What's the best strategy when one does not know where to head? Why, flail around randomly, of course!
#
# The **random walk** is exactly that --- at every time step, use a random process like flipping a coin to change one's heading accordingly. Note that this process is closely related to _Brownian motion_, so you may sometimes hear that terminology used as well.

# + [markdown] colab_type="text" id="xTS_dVVEMqsF"
# Let's start with a **one-dimensional random walk**. A bacterium starts at $x=0$. At every time step, it flips a coin (a very small, microscopic coin of protein mintage), then heads left $\Delta x = -1$ or right $\Delta x = +1$ with equal probability.
For instance, if at time step $1$ the result of the coin flip is to head right, then its position at that time step becomes $x_1 = x_0 + \Delta x = 1.$ Continuing in this way, its position at time step $k+1$ is given by # $$x_{k+1} = x_k + \Delta x $$ # # We will simulate this process below and plot the position of the bacterium as a function of the time step. **Run the code below.** # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 447} colab_type="code" id="0-vjGR8rJ0U_" outputId="8c1769f6-b237-4979-b0ae-6073b11fce5c" # @title Simulating the random walk # parameters of simulation T = 100 t = np.arange(T) x = np.zeros_like(t) np.random.seed(2020) # set random seed # initial position x[0] = 0 # step forward in time for k in range(len(t)-1): # choose randomly between -1 and 1 (coin flip) this_step = np.random.choice([-1,1]) # make the step x[k+1] = x[k] + this_step # plot this trajectory fig = plt.figure() plt.step(t, x) plt.xlabel('time') plt.ylabel('position x') # + [markdown] colab_type="text" id="s43fCS9rfncP" # ## Exercise 1 (3A Part 1): Random walk simulation # + [markdown] colab_type="text" id="QxCpaVP7p2dr" # In the previous plot, we assumed that the bacterium takes a step of size $1$ at every point in time. Let's let it take steps of different sizes! # # We will code a random walk where the steps have a standard normal distribution (with mean $\mu$ and standard deviation $\sigma$). Instead of running one trajectory at a time, we will write our code so that we can simulate a large number of trajectories efficiently. We will combine this all into a function ``random_walk_simulator`` that generates $N$ random walks each with $T$ time points efficiently. # # We will plot 10 random walks for 10000 time steps each. # # + colab={} colab_type="code" id="vIOyCKCwRz8o" def random_walk_simulator(N, T, mu=0, sigma=1): '''Simulate N random walks for T time points. 
At each time point, the step is drawn from a Gaussian distribution with mean mu and standard deviation sigma. Args: T (integer) : Duration of simulation in time steps N (integer) : Number of random walks mu (float) : mean of step distribution sigma (float) : standard deviation of step distribution Returns: (numpy array) : NxT array in which each row corresponds to trajectory ''' ############################################################################### ## TODO: Code the simulated random steps to take ## Hints: you can generate all the random steps in one go in an N x T matrix raise NotImplementedError('Complete random_walk_simulator_function') ############################################################################### # generate all the random steps for all steps in all simulations in one go # produces a N x T array steps = np.random.normal(..., ..., size=(..., ...)) # compute the cumulative sum of all the steps over the time axis sim = np.cumsum(steps, axis=1) return sim np.random.seed(2020) # set random seed # Uncomment the lines below once the function above is completed # simulate 1000 random walks for 10000 time steps # sim = random_walk_simulator(1000, 10000, mu=0, sigma=1) # take a peek at the first 10 simulations # plot_random_walk_sims(sim, nsims=10) # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 433} colab_type="text" id="HWdaTtVLgrQp" outputId="9bd22940-fa24-4bae-8139-a3278c5793a2" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_cb38054b.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_cb38054b_0.png> # # # + [markdown] colab_type="text" id="cNQRqkBCy13T" # We see that the trajectories all look a little different from each other. 
But there are some general observations one can make: at the beginning almost all trajectories are very close to $x=0$, which is where our bacterium started. As time progresses, some trajectories move further and further away from the starting point. However, a lot of trajectories stay close to the starting point of $x=0$. # # Now let's take a look at the distribution of positions of bacteria at different points in time, analyzing all the trajectories we just generated above. **Run the code below.** You do not have to modify anything. # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 431} colab_type="code" id="X8xtvxSAUThe" outputId="f3b65b30-9048-4716-b216-ec3cf82d43f8" #@title Distribution of bateria positions fig = plt.figure() # look at the distribution of positions at different times for i, t in enumerate([1000,2500,10000]): # get mean and standard deviation of distribution at time t mu = sim[:, t-1].mean() sig2 = sim[:, t-1].std() # make a plot label mytitle = '$t=${time:d} ($\mu=${mu:.2f}, $\sigma=${var:.2f})' # plot histogram plt.hist(sim[:,t-1], color=['blue','orange','black'][i], #make sure the histograms have the same bins! bins=np.arange(-300,300,20), # make histograms a little see-through alpha=0.6, # draw second histogram behind the first one zorder=3-i, label=mytitle.format(time=t, mu=mu, var=sig2)) plt.xlabel('position x') # plot range plt.xlim([-500, 250]) # add legend plt.legend(loc=2) # add title plt.title(r'Distribution of trajectory positions at time $t$') # + [markdown] colab_type="text" id="2xFHfbD1x23F" # At the beginning of the simulation, the distribution of positions is sharply peaked about $0$. As time progresses, the distribution becomes wider but its center stays closer to $0$. In other words, the mean of the distribution is independent of time, but the variance and standard deviation of the distribution scale with time. Such a process is called a **diffusive process**. 
# # + [markdown] colab_type="text" id="koYl4LzVhaR3" # ## Exercise 2 (3A Part 2): Random walk mean & variance # # Compute and then plot the mean and variance of our bacterium's random walk as a function of time. # + cellView="code" colab={} colab_type="code" id="cw6JmwSQrvNG" # simulate random walks np.random.seed(2020) # set random seed sim = random_walk_simulator(5000, 1000, mu=0, sigma=1) ############################################################################## # TODO: Insert your code here to compute the mean and variance of trajectory positions # at every time point: ############################################################################## # mu = ... # var = ... # Uncomment below once you've completed above task #plot_mean_var_by_timestep(mu, var) # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 433} colab_type="text" id="4ogjSU9Ik8qE" outputId="84ec5f03-61f0-4662-81ae-054224036dec" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_0605fd94.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_0605fd94_0.png> # # # + [markdown] colab_type="text" id="DGXjdMEdXv4G" # The expected value of $x$ stays close to 0, even for random walks of very long time. Cool! # # The variance, on the other hand, clearly increases with time. In fact, the variance seems to increase linearly with time! # # + [markdown] colab_type="text" id="0-AcghQ77d2y" # ## Interactive Demo: Influence of Parameter Choice # # How do the parameters $\mu$ and $\sigma$ of the Gaussian distribution from which we choose the steps affect the mean and variance of the bacterium's random walk? 
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 494, "referenced_widgets": ["f3b5a22097824f3abf9110a81009494c", "255212b862404f7d976b5c10accb7965", "bbf85bbf119540f6a248902ea51f9d4e", "ec916241d0404240a1af827e8e267226", "257683f1a54b444dbf7ae462d5b72af5", "6e3eded1b72a4f308b467093782ada48", "a9fc1ef4927d478096d783112312e36c", "9b834b54e5cb451f9bc004012d8fd858", "a072475acbe14cb082c2781f7c183a0b", "2c0b63019b0b47c38529fae34e5b0a24"]} colab_type="code" id="PAD9voJGiSqY" outputId="37a05dd9-dbba-4f60-daa1-df00cc7c6b7f" #@title #@markdown Make sure you execute this cell to enable the widget! @widgets.interact def plot_gaussian(mean=(-0.5, 0.5, .02), std=(.5, 10, .5)): sim = random_walk_simulator(5000, 1000, mu=mean, sigma=std) # compute the mean and variance of trajectory positions at every time point mu = np.mean(sim, axis=0) var = np.var(sim, axis=0) # make a figure fig, (ah1, ah2) = plt.subplots(2) # plot mean of distribution as a function of time ah1.plot(mu) ah1.set(ylabel='mean') # plot variance of distribution as a function of time ah2.plot(var) ah2.set(xlabel='time') ah2.set(ylabel='variance') # + [markdown] colab={} colab_type="text" id="wZE4vC16iS7J" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_0049e98b.py) # # # + [markdown] colab_type="text" id="rkoIRTkmJ0to" # --- # # Section 2: The Ornstein-Uhlenbeck (OU) process # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="uTXG7_q-YKN1" outputId="6f73b329-897a-4fa4-def5-df16039910c8" #@title Video 2: Combining Deterministic & Stochastic Processes # Insert the ID of the corresponding youtube video from IPython.display import YouTubeVideo video = YouTubeVideo(id="pDNfs5p38fI", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # + [markdown] colab_type="text" id="ZVA36nfLtAB4" # The random 
walk process we just explored is diffusive, and the distribution of possible trajectories _spreads_, taking on increasing variance with time. Even so, at least in one dimension, the mean remains close to the initial value (in the example above, 0). # # Our goal is now to build on this model to construct a **drift-diffusion** model (DDM). DDM is a popular model for memory, which as we all know, is often an exercise in hanging on to a value imperfectly. Decision-making and memory will be the topic for tomorrow, so here we build the mathematical foundations and develop some intuition for how such systems behave! # + [markdown] colab_type="text" id="y1SaXKqNvqBI" # To build such a model, let's combine the random walk model with the first differential equations we explored in Tutorial 1 earlier. Although those models had been written in continuous time as $\dot{x} = a x$, here let's consider the discrete version of the same system and write: # # $x_{k+1} = \lambda x_k$, # # whose solution can be written as # # $x_k = x_0 \lambda^k$, # # where $x_0$ is the value of $x$ at time $t=0$. # # Now, let's simulate and plot the solution of the discrete version of our first differential equation from Tutorial 1 below. **Run the code below.** # + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" id="lSnGf2yiwNCm" outputId="3c18784b-2cee-40ad-ed86-497506d735a3" # parameters lam = 0.9 T = 100 # total Time duration in steps x0 = 4. # initial condition of x at time 0 # initiatialize variables t = np.arange(0, T, 1.) 
x = np.zeros_like(t) x[0] = x0 # Step through in time for k in range(len(t)-1): x[k+1] = lam * x[k] # plot x as it evolves in time fig = plt.figure() plt.title('$\lambda=%0.1f$' % lam, fontsize=16) plt.plot(t, x0 * lam**t, 'r') # analytic solution plt.plot(t, x, 'k.') # simulated data pts plt.ylim(0, x0+1) plt.xlabel('t') plt.ylabel('x') plt.show() # + [markdown] colab_type="text" id="ZLPVCYTkyjHP" # Notice that this process decays towards position $x=0$. We can make it decay towards any position by adding another parameter $x_\infty$. The rate of decay is proportional to the difference between $x$ and $x_\infty$. Our new system is # # $x_{k+1} = x_\infty + \lambda(x_k - x_{\infty})$ # # We have to modify our analytic solution slightly to take this into account: # # $x_k = x_\infty(1 - \lambda^k) + x_0 \lambda^k$. # # Let's simulate and plot the dynamics of this process below. Hopefully, we see that it start at $x_0$ and decay towards $x_{\infty}.$ # # + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" id="Jw9PQESm0jgo" outputId="29a73576-6e09-4b1b-ca27-17950163247c" # parameters lam = 0.9 # decay rate T = 100 # total Time duration in steps x0 = 4. # initial condition of x at time 0 xinfty = 1. # x drifts towards this value in long time # initiatialize variables t = np.arange(0, T, 1.) x = np.zeros_like(t) x[0] = x0 # Step through in time for k in range(len(t)-1): x[k+1] = xinfty + lam * (x[k] - xinfty) # plot x as it evolves in time fig = plt.figure() plt.title('$\lambda=%0.1f$' % lam, fontsize=16) plt.plot(t, xinfty + (x0 - xinfty) * lam**t, 'r') # analytic solution plt.plot(t, x, 'k.') # simulated data pts plt.ylim(0, x0+1) plt.xlabel('t') plt.ylabel('x') plt.legend({'simulation', 'analytic solution'}) plt.show() # + [markdown] colab_type="text" id="mwDr4D6F4CWS" # Now we are ready to take this basic, deterministic difference equation and add a diffusion process on top of it! Fun times in Python land. 
# # As a point of terminology: this type of process is commonly known as a **drift-diffusion model** or **Ornstein-Uhlenbeck (OU) process**. The model is a combination of a _drift_ term toward $x_{\infty}$ and a _diffusion_ term that walks randomly. You may sometimes see them written as continuous stochastic differential equations, but here we are doing the discrete version to maintain continuity in the tutorial. The discrete version of our OU process has the following form: # # $x_{k+1} = x_\infty + \lambda(x_k - x_{\infty}) + \sigma \eta$ # # where $\eta$ is sampled from a standard normal distribution ($\mu=0, \sigma=1$). # # + [markdown] colab_type="text" id="g_XNSTGijOAc" # ## Exercise 3 (3B): Drift-diffusion model # # Modify the code below so that each step through time has a _deterministic_ part (_hint_: exactly like the code above) plus a _random, diffusive_ part that is drawn from from a normal distribution with standard deviation of $\sigma$ (sig in the code). It will plot the dynamics of this process. # + colab={} colab_type="code" id="7cFAsq7JVomr" def simulate_ddm(lam, sig, x0, xinfty, T): """ Simulate the drift-diffusion model with given parameters and initial condition. Args: lam (scalar): decay rate sig (scalar): standard deviation of normal distribution x0 (scalar): initial condition (x at time 0) xinfty (scalar): drift towards convergence in the limit T (scalar): total duration of the simulation (in steps) Returns: ndarray, ndarray: `x` for all simulation steps and the time `t` at each step """ # initiatialize variables t = np.arange(0, T, 1.) 
x = np.zeros_like(t) x[0] = x0 # Step through in time for k in range(len(t)-1): ############################################################################## ## TODO: Insert your code below then remove raise NotImplementedError("Student exercise: need to implement simulation") ############################################################################## # update x at time k+1 with a determinstic and a stochastic component # hint: the deterministic component will be like above, and # the stochastic component is drawn from a scaled normal distribution x[k+1] = ... return t, x lam = 0.9 # decay rate sig = 0.1 # standard deviation of diffusive process T = 500 # total Time duration in steps x0 = 4. # initial condition of x at time 0 xinfty = 1. # x drifts towards this value in long time # Uncomment once above is completed to plot x as it evolves in time # np.random.seed(2020) # t, x = simulate_ddm(lam, sig, x0, xinfty, T) # plot_ddm(t, x, xinfty, lam, x0) # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 433} colab_type="text" id="UA7duy-_crnp" outputId="047f6ea8-4ecc-42ff-856a-7eea46a80143" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_1c99ae40.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_1c99ae40_0.png> # # # + [markdown] colab_type="text" id="5JmpruDPmvl5" # ### Think # # Describe the behavior of your simulation by making some observations. How does it compare to the deterministic solution? How does it behave in the beginning of the stimulation? At the end? 
# + colab={} colab_type="code" id="mmhpNgQ3atf4" # Some space for your ideas # + [markdown] colab={} colab_type="text" id="2fcynpWTt3oH" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_301f6f83.py) # # # + [markdown] colab_type="text" id="WOY_GNjx3V_v" # --- # # Section 3: Variance of the OU process # # # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="f1CMODggZdJj" outputId="95284d8f-4331-4315-eef5-53db1ecf9583" #@title Video 3: Balance of Variances # Insert the ID of the corresponding youtube video from IPython.display import YouTubeVideo video = YouTubeVideo(id="49A-3kftau0", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # + [markdown] colab_type="text" id="06tHk5UBZU1A" # As we can see, the **mean** of the process follows the solution to the deterministic part of the governing equation. So far, so good! # # But what about the **variance**? # # Unlike the random walk, because there's a decay process that "pulls" $x$ back towards $x_\infty$, the variance does not grow without bound with large $t$. Instead, when it gets far from $x_\infty$, the position of $x$ is restored, until an equilibrium is reached. # # The equilibrium variance for our drift-diffusion system is # # Var $= \frac{\sigma^2}{1 - \lambda^2}$. # # Notice that the value of this equilibrium variance depends on $\lambda$ and $\sigma$. It does not depend on $x_0$ and $x_\infty$. # + [markdown] colab_type="text" id="JpHWgyePZ82j" # To convince ourselves that things are behaving sensibly, let's compare the empirical variances of the equilibrium solution to the OU equations with the expected formula. 
# # # + [markdown] colab_type="text" id="kzAEMU2RkC3L" # ## Exercise 4 (3C): Computing the variances empirically # # Write code to compute the analytical variance: Var $= \frac{\sigma^2}{1 - \lambda^2}$, and compare against the empirical variances (which is already provided for you using the helper function). You should see that they should be about equal to each other and lie close to the 45 degree ($y=x$) line. # + cellView="code" colab={} colab_type="code" id="SOY6CD0r-RsC" np.random.seed(2020) # set random seed # sweep through values for lambda lambdas = np.arange(0.05, 0.95, 0.01) empirical_variances = np.zeros_like(lambdas) analytical_variances = np.zeros_like(lambdas) sig = 0.87 # compute empirical equilibrium variance for i, lam in enumerate(lambdas): empirical_variances[i] = ddm_eq_var(5000, x0, xinfty, lambdas[i], sig) ############################################################################## ## Insert your code below to calculate the analytical variances ############################################################################## # Hint: you can also do this in one line outside the loop! # analytical_variances = ... 
# Uncomment this line to plot the empirical variance vs analytical variance # var_comparison_plot(empirical_variances, analytical_variances) # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 433} colab_type="text" id="t9FHP5oqZoyz" outputId="0638d0ea-2b6b-4143-f3e4-0b7752f85e86" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_247b1e1f.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_247b1e1f_0.png> # # # + [markdown] colab_type="text" id="weMb6g9YnUj3" # --- # # Summary # # In this tutorial, we have built and observed OU systems, which have both deterministic and stochastic parts. We see that they behave, on average, similar to our expectations from analyzing deterministic dynamical systems. # # Importantly, **the interplay between the deterministic and stochastic parts** serve to _balance_ the tendency of purely stochastic processes (like the random walk) to increase in variance over time. This behavior is one of the properties of OU systems that make them popular choices for modeling cognitive functions, including short-term memory and decision-making. # + colab={} colab_type="code" id="R9zNuLBvaiZo"
tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # 如果你的显示器分辨率很高,notebook在屏幕中显示太小,可以打开这段代码,其中的width表示,大小占据屏幕的百合比。100%就是完全占据 # from IPython.core.display import display, HTML # display(HTML("<style>.container { width:100% !important; }</style>")) # - # ## 运行代码前,请检查你的python版本,理论上兼容3.x版本。编写本文时所用版本为3.5.2。 #版本检查 import platform print ('python env:',platform.python_version() ) # # 引入概率 # ## 1.回归是怎么来的呢? # ### 英国人类学家F.Galton和统计学家<NAME>对上千个家庭的身高做了测量,发现儿子身高(Y)与父亲身高(X)存在一定的关系: # ### 高个子的父代的子代在成年之后的身高平均来说不是更高,而是稍矮于其父代水平,而矮个子父代的子代的平均身高不是更矮,而是稍高于其父代水平。 # ### Galton将这种趋向于种族稳定的现象称之“回归”。 # ### 目前,“回归”已成为表示变量之间某种数量依存关系的统计学术语,如研究糖尿病人血糖与其胰岛素水平的关系,研究儿童年龄与体重的关系等。 # ## 2.回归分析 # ### 回归分析就是一个函数估计的问题。目的是找出自变量X与因变量Y之间的关系。即:y = f(x) # ## 3.回归分为线性回归和非线性回归。 # ### 线性回归中的公式都是一次的,如一元一次方程,二元一次方程等。 # ### 非线性回归则可以有多种形式,如多元N次方程,log函数等。 # # 线性回归 # ## 1.一元线性回归 # ### 一元线性回归是研究一个自变量与一个因变量的统计关系Y=aX+b。 # ### 线性回归的任务:用恰当的方法,估计出参数a,b。回归问题从某种视角看,视同参数估计问题 # ## 2.经典案例引入 --房屋价格问题 # ### 假如你在在帝都的立水桥拥有一套100平的房子,现在你要出来创业,需要把房子卖掉,那么大概应该挂一个什么样的价格呢?于是收集了立水桥周边二手房的价格 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-d587d6412c243792.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 我们将这些点描绘在坐标轴上,尝试用一条直线(y = ax + b)去拟合这些点 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-99e1efdbda7cb3b9.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 现在我们将x=100代入这条拟合直线,那么我们可以求出一个估计值。 # ### 现在提出一个问题:如何保证估计值得准确性呢?要是低估了房屋价格,说不定会少得到好几百万。 # ### 要保证准确性,就要求找到的这条直线(y= ax+ b)与各个样本点的误差最小,也就是找到最佳参数 a,b。所以我们计 # ### 算出估计值与实际值之间的差值,加起来就得到误差总和,找到一组a,b 得到最小的误差总和 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-44524497083a8130.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 3.模型建立 # ### 实际中,影响房价的因素非常多,如房屋的地段、面积、朝向、所在小区、房间的个数等。考虑更多的情况,我们用x1,x2...xn 去描述这些影响房屋售价的因素(特征)。如x1=房间的面积,x2=房间的个数等等。 
# ### 为了使估价更为准确,我们决定考虑两个变量, 可以由三维坐标表示房屋售价。 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-10d6fd2a424f8d98.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 沿着上面的思路,当我们考虑两个变量时,也可以写出一个估计函数 y = ax1+ bx2 + c 这里为了后面的统一性,我们把公式写成 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-c62fda0f199b2fae.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # # ## 中心极限定理 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-d1ba21ca92f91893.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-ae736474b4b504de.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 接下来我们就可以计算概率密度函数 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-3811815b7c40f67b.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 似然函数 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-63c86579d735262c.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-64fc8e1a91885a58.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 这样我们就得到了日常所说的损失函数,至于为什么有二分之一,就是为了方便计算,后面我们会给出解释 # # 求解损失函数 这里给出两种求解思路 # ## 直接求极值法 # ### 上式中只有θ是未知数,如何求解函数的最小值。通常做法: # ### (1)对目标函数J(θ)求导; # ### (2)令导数为0,求得的点,即为极值点 # ### 上述方法即是最小二乘法偏差(偏差平方和最小)的思路。 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-d6a26d9beeb86eee.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-ae47e080317cff22.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 如果看着字母觉得抽象,这里附上一道简单的数学题,帮助理解 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-22b5bb8dc7e450f7.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 显然,上面的这种求解方式有很大的局限性,如果维素越多,越麻烦,所以我们需要找到一个通用的求解方法。 # ## 标准回归函数解析式求解 # 
![image.png](http://upload-images.jianshu.io/upload_images/1656466-aafb57fb8ce8cd84.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-a739177cf2af8388.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 过拟合与欠拟合 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-908897abab4a0054.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 形象的说就是:一个人上课老师讲的题,都掌握的很好,但是下课后的变式练习题做的很差。俗称书呆子。在这里也就是说模型的泛化能力弱。 # ## 最小二乘意义下的参数最优解 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-cce5fdd09790d1e8.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 加入λ扰动后 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-23c7f43fa2eafd84.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 正则项与防止过拟合 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-b3731e3bc72a6fd1.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### [详解L1,L2](http://blog.csdn.net/zouxy09/article/details/24971995) # ### 这里α就是我们要去调的超参数 # ## Ridge回归 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-990a1342bd25436d.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 其实在这里可以看出 Ridge回归就是我们的L2正则 # ## Lasso回归 # ### Lasso回归就是我们的L1正则,因为存在一个绝对值,所以在求解上相对的复杂。 # ### Lasso回归常用的解法:坐标轴下降法 # 为了找到一个函数的局部极小值,在每次迭代中可以在当前点处沿一个坐标方向进行一维搜索。在整个过程中循环使用不同的坐标方向。一个周期的一维搜索迭代过程相当于一个梯度迭代(如果你了解KD树算法,这里就是一样的,假设三维情况下,先沿x,在沿y,最后z。然后又从x开始,往复循环)。 其实,gradient descent 方法是利用目标函数的导数(梯度)来确定搜索方向的,而该梯度方向可能不与任何坐标轴平行。而坐标轴下降方法是利用当前坐标系统进行搜索,不需要求目标函数的导数,只按照某一坐标方向进行搜索最小值。坐标下降法在稀疏矩阵上的计算速度非常快,同时也是Lasso回归最快的解法。 # # 案例实战 # ## 标准回归函数找最佳拟合直线 # ### 我们先看看数据长啥样子 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-c919bcf92288fc28.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) import numpy as np import matplotlib from matplotlib import pyplot as plt # %matplotlib inline #将matplotlib的图表直接嵌入到Notebook之中 # ### 设置matplotlib全局配置用黑体显示中文 
(注意:linux系统如果没有这个字体需要另外调) matplotlib.rcParams['font.family'] = 'SimHei' # ### 获取训练数据 # #### dataList:训练数据的特征 # #### targetList:训练数据的目标值 def loadDataSet(filename): #获取特征维数 读取第一行数据,按\t切分数据,减1是因为最后一列是目标值 numFeat=len(open(filename).readline().split('\t'))-1 dataList=[]#存样本的特征 targetList=[]#存样本对应的类别标签 fr=open(filename) #按行获取样本数据:特征+目标值 for line in fr.readlines(): lineList=[] curLine=line.strip().split('\t')#strip函数去空格,按回车切分 #每个样本的特征(按行存) for i in range(numFeat): lineList.append(float(curLine[i]))#获取当前行第i列的特征 dataList.append(lineList)#按行累加存储样本特征 targetList.append(float(curLine[-1]))#[-1]取出的是最后一列,样本的类别标签 return dataList,targetList dataMatIn, classLabels = loadDataSet('ex0.txt') print(dataMatIn) print(classLabels) # ## 标准回归函数求解回归系数 # ### 样本特征数据xArr # ### 样本的目标值yArr # #### 说明 .T是转置 .I是求逆 def standRegres(xArr,yArr): #转成矩阵类型 xMat = np.mat(xArr) #行向量转置为列向量形式 yMat = np.mat(yArr).T xTx = xMat.T*xMat #对xTx求逆,需要满足其行列式不为0 if np.linalg.det(xTx) == 0.0: print("This matrix is singular, cannot do inverse") return #回归参数 ws = xTx.I * xMat.T*yMat return ws w_LS=standRegres(dataMatIn,classLabels) print('θ_0:',w_LS[0],'θ_1:',w_LS[1]) def standplot(xarr,yarr,w): xmat=np.mat(xarr) ymat=np.mat(yarr) #画点 fig=plt.figure() ax=fig.add_subplot(111) ax.scatter(xmat[:,1].flatten().A[0],ymat.T[:,0].flatten().A[0]) #画线,为了保证直线上的点是按顺序排列,需按升序排列 xCopy = xmat.copy() #排序 xCopy.sort(0) #排序 yHat = xCopy*w #预测值 ax.plot(xCopy[:,1],yHat,color = 'r',linewidth=2.0) plt.show() # ### 画出我们的样本点与拟合直线 # #### 蓝色点为样本点,红色直线为拟合直线 standplot(dataMatIn,classLabels,w_LS) # ### 测试 # testdata = [1.000000,0.427810] ypredict = testdata * w_LS print(ypredict,'error:',abs(ypredict -3.816464)) # #### 可以看到其实我们拟合的还不错,毕竟是一阶情况下,这个误差能接受 # ## 交叉验证 # ### 交叉验证可以帮助我们找到更优的参数。一般数据我们这样划分。大部分作为训练数据,普遍是70%-80%,小部分作为训练数据,20%-30%,当然这个值并没有固定的说法,你可以根据自己的经验或者喜好来。 # ### 具体做法就是,比如5折交叉验证,就是把训练数据分成五份,每次取其中4份训练,一份做验证,然后取5次结果的平均或者其他的经验方法。5折,10折都是比较常用的 # ## 局部加权线性回归 # ### 假设目标值复合线性模型,那么使用线性回归自然能拟合得很好,但是如果目标假设不是线性模型,比如忽上忽下的样本目标值,这时用线性模型就会拟合得很差。 # 
![image.png](http://upload-images.jianshu.io/upload_images/1656466-15f16ded3161e690.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-47de8fa4757a0193.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
# ### 权值
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-2e4443dc59388130.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-4a429bed16125e55.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-ce5235966cd46d68.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
#
# ### 求解
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-1bce05c68dd31975.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-cf903af4e4947b3e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
# ![image.png](http://upload-images.jianshu.io/upload_images/1656466-db60e6d1f13746b8.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
#
# ### 代码实践
# #### testPoint:测试样本点
# #### xArr:训练样本集
# #### yArr:训练样本集目标值
# 这里需要对k值调参,你可以加一个for循环,暴力的求解(0.01,1)范围内的最合适的k值。
def lwlr(testPoint,xArr,yArr,k):
    """Locally weighted linear regression: predict the target for one test point.

    Every training sample is weighted by a Gaussian kernel of bandwidth k
    centred on testPoint, then a weighted least-squares fit is solved in
    closed form.

    Args:
        testPoint: feature row (1 x d) to predict for
        xArr: training sample features (n x d)
        yArr: training sample target values (length n)
        k: kernel bandwidth; smaller k means more local (risk of overfitting)

    Returns:
        The predicted value, or None if the weighted normal matrix is singular.
    """
    xMat=np.mat(xArr)  # training sample features
    yMat=np.mat(yArr).T  # targets: row vector transposed to a column vector
    m=np.shape(xMat)[0]  # number of training samples
    weights=np.mat(np.eye(m))  # m x m identity; diagonal will hold per-sample weights
    # loop over all samples, computing each sample's weight w.r.t. the test point
    for i in range(m):
        # difference vector between the test point and training sample i
        diffMat=testPoint-xMat[i,:]
        # Gaussian kernel weight stored on the diagonal of `weights`
        weights[i,i]=np.exp(diffMat*diffMat.T/(-2.0*k**2))
    xTX=xMat.T*weights*xMat
    # determinant 0: the weighted normal matrix is singular, no inverse exists
    if np.linalg.det(xTX)==0.0:
        print("这个矩阵是奇异的,不能做逆")
        return
    # regression coefficients from the weighted normal equations
    ws=xTX.I*(xMat.T*weights*yMat)
    return testPoint*ws
def lwlrTest(testArr,xArr,yArr,k=1.0):
    """Run lwlr() on every row of testArr and collect the predictions.

    Args:
        testArr: test sample features (m x d)
        xArr: training sample features
        yArr: training sample target values
        k: kernel bandwidth forwarded to lwlr (default 1.0)

    Returns:
        numpy array with one predicted value per test sample.
    """
    # number of test samples
    m=np.shape(testArr)[0]
    yHat=np.zeros(m)
    # loop over all test samples
    for i in range(m):
        # one locally weighted regression fit per test point
        yHat[i]=lwlr(testArr[i],xArr,yArr,k)
    # return the predictions for all test samples
    return yHat
# ### 
图形化展示局部加权回归不同k值的回归结果,包括数据集及它的最佳拟合直线 # ### 这段代码仅限在pycharm等编译器中使用,在nootbook中显示有少许问题 # + # fig=plt.figure()#目的是只生成一个fig对象 # # def lwlrPlot(xArr,yArr,yHat,m): # #样本特征数据xArr # #样本的目标值yArr # #预测值yHat # xMat=np.mat(xArr) # srtInd=xMat[:,1].argsort(0) #等价于argsort(xMat[:,1],0) # xSort=xMat[srtInd][:,0,:] #等价于xMat[srtInd.flatten().A[0]] # #fig=plt.figure() # ax=fig.add_subplot(2,3,m) # #直线图plt.plot(),画plot前要排序 # #ax.plot(xMat[:,1],yHat[:].T) # ax.plot(xSort[:,1],yHat[srtInd]) # #画散点图不需要排序 # ax.scatter(xMat[:,1].flatten().A[0],np.mat(yHat).T.flatten().A[0],s=2,c='k') # ax.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r') #散点图plt.scatter() # def lwlrPlot3(xArr,yArr): #输入:xArr是n×d矩阵/数组/列表;yArr是n×1 # xMat=np.mat(xArr) # srtInd=xMat[:,1].argsort(0) #等价于argsort(xMat[:,1],0) # # argsort()函数是将x中的元素从小到大排列,提取其对应的index(索引),然后输出到y # xSort=xMat[srtInd][:,0,:] #等价于xMat[srtInd.flatten().A[0]] # yHat1=lwlrTest(xArr,xArr,yArr,1) #调用局部加权回归(lwlr)主函数 # yHat2=lwlrTest(xArr,xArr,yArr,0.01) # yHat3=lwlrTest(xArr,xArr,yArr,0.002) # fig=plt.figure() # ax1=fig.add_subplot(311) # ax2=fig.add_subplot(312) # ax3=fig.add_subplot(313) # #画直线图需要排序 # #直线图plt.plot(),plot前要排序 # #ax1.plot(xMat[:,1],yHat[:].T) # ax1.plot(xSort[:,1],yHat1[srtInd]) # ax2.plot(xSort[:,1],yHat2[srtInd]) # ax3.plot(xSort[:,1],yHat3[srtInd]) # #画散点图不需要排序 # ax1.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r',label=u'欠拟合') #散点图plt.scatter() # ax2.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r',label=u'最好') # ax3.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r',label=u'过拟合') # ax1.legend(loc='upper left') # ax2.legend(loc='upper left') # ax3.legend(loc='upper left') # plt.show() # - # ### notebook中运行下面的代码 # + #fig=plt.figure()#目的是只生成一个fig对象 def lwlrPlot(xArr,yArr,yHat,m): #样本特征数据xArr #样本的目标值yArr #预测值yHat xMat=np.mat(xArr) srtInd=xMat[:,1].argsort(0) #等价于argsort(xMat[:,1],0) xSort=xMat[srtInd][:,0,:] #等价于xMat[srtInd.flatten().A[0]] 
fig=plt.figure(figsize=(15,15)) # ax=fig.add_subplot(3,2,m) #直线图plt.plot(),画plot前要排序 #ax.plot(xMat[:,1],yHat[:].T) ax.plot(xSort[:,1],yHat[srtInd]) #画散点图不需要排序 ax.scatter(xMat[:,1].flatten().A[0],np.mat(yHat).T.flatten().A[0],s=2,color='k') ax.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,color='r') #散点图plt.scatter() #plt.show() def lwlrPlot3(xArr,yArr): #输入:xArr是n×d矩阵/数组/列表;yArr是n×1 xMat=np.mat(xArr) srtInd=xMat[:,1].argsort(0) #等价于argsort(xMat[:,1],0) # argsort()函数是将x中的元素从小到大排列,提取其对应的index(索引),然后输出到y xSort=xMat[srtInd][:,0,:] #等价于xMat[srtInd.flatten().A[0]] yHat1=lwlrTest(xArr,xArr,yArr,1) #调用局部加权回归(lwlr)主函数 yHat2=lwlrTest(xArr,xArr,yArr,0.01) yHat3=lwlrTest(xArr,xArr,yArr,0.002) fig=plt.figure(figsize=(5,10)) ax1=fig.add_subplot(311) ax2=fig.add_subplot(312) ax3=fig.add_subplot(313) #画直线图需要排序 #直线图plt.plot(),plot前要排序 #ax1.plot(xMat[:,1],yHat[:].T) ax1.plot(xSort[:,1],yHat1[srtInd]) ax2.plot(xSort[:,1],yHat2[srtInd]) ax3.plot(xSort[:,1],yHat3[srtInd]) #画散点图不需要排序 ax1.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r',label=u'欠拟合') #散点图plt.scatter() ax2.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r',label=u'最好') ax3.scatter(xMat[:,1].flatten().A[0],np.mat(yArr).T.flatten().A[0],s=2,c='r',label=u'过拟合') ax1.legend(loc='upper left') ax2.legend(loc='upper left') ax3.legend(loc='upper left') plt.show() # - print("局部加权线性回归法") k = [0.003,0.01, 0.05, 0.1, 0.5, 1] m = 1 for i in k: print(i) #plt.subplot(3, 4, m) lw_ls = lwlrTest(dataMatIn, dataMatIn, classLabels, i) print('m=',m) lwlrPlot(dataMatIn, classLabels, lw_ls,m) #plt.axis('off')#不显示轴 plt.title('k=%0.3f'%i)#设置标题,'k=%0.3f'表示在字符串中插入一个浮点型数据保留3位小数 m+=1 plt.suptitle('局部回归加权回归不同K值结果图', fontsize=10) lwlrPlot3(dataMatIn, classLabels) # ## 讲完造轮子,咱们来看看如果用别人造好的轮子 -- scikit-learn import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd # ### 导入我们的Lasso和Ridge模型 from sklearn.linear_model import Lasso, Ridge import sklearn 
print(sklearn.__version__) # ## 请务必检查你的scikit-learn版本,如果低于0.18版本,下面的导入语句会报错。 # #### from sklearn.grid_search import GridSearchCV #自版本0.18以来已弃用:此模块将以0.20移除。使用sklearn.model_selection.GridSearchCV来代替 from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV # ### pandas读入数据 data = pd.read_csv('Advertising.csv') # TV、Radio、Newspaper、Sales x = data[['TV', 'Radio', 'Newspaper']] y = data['Sales'] print(data.head(5))#预览前5行数据 # ### 默认test_size=0.25 # ### train_test_split将数组或矩阵分割成随机的训练子集和测试子集 # ### random_state属性值为空(默认为空)则随机分配测试集和训练集,随机分配会导致你的训练和测试误差一直在变,所以最好是固定一个随机数种子,任意数字都可以。 x_train, x_test, y_train, y_test = train_test_split(x, y,test_size=0.25,random_state=1) # ### 选取模型 也可以使用pipeline model = Lasso() #model = Ridge() alpha_can =np.logspace(-2,3,100) #在10的-2次方和10的3次方之间生成100个等差数 np.set_printoptions(suppress=True)#set_printoptions设置打印选项。参数suppress是否抑制小型浮点值的打印使用科学的符号(默认false)。 print(alpha_can) # ### GridSearchCV是sklearn.model_selection的自动调参方法, # ### cv表示cv:int,交叉验证生成器或可重复的,可选的确定跨验证分割策略,俗称k折交叉验证 # ### param_grid字典或字典列表带参数名(字符串)的字典作为键和列表参数设置作为值,或者是一个这样的列表字典,在这种情况下,每一个字典所覆盖的网格在列表中探索。 regression_model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=10)#就是做10折交叉验证,寻找在alpha_can中寻找最优的alpha # ### fit运行所有训练集 # ### regression_model.best_params_ 返回最好的超参alpha regression_model.fit(x_train, y_train) print('最优超参数:', regression_model.best_params_) regression_model # ### 我们打印regression_model,可以看到整个模型的参数设置。更详细的介绍请看[这里](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) # ### argsort函数返回的是数组值从小到大的索引值,axis=0表述列,axis=1表述行 order = y_test.argsort(axis=0) y_test = y_test.values[order] x_test = x_test.values[order,:] # ### 用predict进行预测 y_hat = regression_model.predict(x_test) r=regression_model.score(x_test, y_test)#返回模型的得分。其实质就是可决系数(R2,R方),值越接近于1,拟合程度越好。 print(r) mse = np.average((y_hat - np.array(y_test)) ** 2) # 均方误差 rmse = np.sqrt(mse) # 根均方误差 mse,rmse t = np.arange(len(x_test))#获取测试数据个数 
mpl.rcParams['font.sans-serif'] = [u'simHei'] mpl.rcParams['axes.unicode_minus'] = False plt.figure(facecolor='w',figsize=(10,5)) plt.plot(t, y_test, 'r-', linewidth=2, label=u'真实数据') plt.plot(t, y_hat, 'g-', linewidth=2, label=u'预测数据') plt.plot(0, 0, linewidth=0, label=u'alpha=%f' % regression_model.best_params_['alpha']) plt.plot(0, 0, linewidth=0, label=u'R2=%f' % r) plt.title(u'线性回归预测销量', fontsize=18) plt.legend(loc='upper right') plt.grid() plt.show() # ## 梯度下降 # ### 梯度是什么:梯度是一个向量(矢量),表示某一函数在该点处的方向导数沿着该方向取得最大值,即函数在该点处沿着该方向(此梯度的方向)变化最快,变化率最大(为该梯度的模)。 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-623d0173edf4d640.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 求代价函数的最小值的一种方法叫梯度下降法,也称为最速下降法,是一种迭代的搜索方法。朝着“最快下降”的方向进行搜索。 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-7891312fc61c0897.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-362c4db233285560.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 用大白话说就是:当我们随机找到一个起始点,在这个点处找到负梯度下降最快的方向,会有一个交点,再从这个交点出发,继续寻找下降最快的方向,一直做循环,直到某个时候,上一次的梯度和这次的梯度相差在一个很小的范围内,我们就认为找到了局部最优的点,至于是不是全局的最优点,还需要再探索。 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-85bcbe5ce18ed299.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 如图,可能会因为步长或者函数的形状等原因,我们有可能只能找到一个局部的最优点,如何跳出这个局部小坑呢,后面我们会继续说。 # ## 梯度下降法一般步骤 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-f736b12577466e0d.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 其中α是我们手动设置,θ也是我们随机找的一个点。 # ### 梯度如何求解呢 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-ab51bcc2be5999b5.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-6dc1f519c8872862.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 代码实现梯度下降 # 
![image.png](http://upload-images.jianshu.io/upload_images/1656466-9cb84d7a53db0b45.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # + from numpy import* def f(x): #f(x)=x^4-3x^3+2 return x**4-3*x**3+2 def f1(x): #f(x)的导函数 return 4*x**3-9*x**2 for j in range(-5,0): print('xold=',j) for i in range(1,8): xold=j xnew=i alpha=0.01 count = 0 while(abs(f(xnew)-f(xold))>0.000001): count += 1 xold=xnew xnew=xold-alpha*f1(xold) print ('i=',i,'f(new)=',f(xnew),'xnew=',xnew,'count=',count) # - # ### 批量梯度下降(BGD) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-43bc2040884a55c6.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 代码实现 # + from numpy import * #获取训练数据 #dataList:训练数据的特征 #targetList:训练数据的目标值 def loadDataSet(filename): #获取特征维数 numFeat=len(open(filename).readline().split('\t'))-1 dataList=[]#存样本的特征 targetList=[]#存样本对应的类别标签 fr=open(filename) #按行获取样本数据:特征+目标值 for line in fr.readlines(): lineList=[] curLine=line.strip().split('\t')#去空格,回车等 #每个样本的特征(按行存) for i in range(numFeat): lineList.append(float(curLine[i]))#获取当前行 dataList.append(lineList)#按行累加存储样本特征 targetList.append(float(curLine[-1]))#样本的类别标签 return dataList,targetList def gradDescent(dataMatIn, classLabels): #转为NumPy可识别的矩阵 dataMatrix = mat(dataMatIn) #为了便于计算,classLabels为行向量转为列向量 labelMat = mat(classLabels).transpose() #获取输入数据的条数m,特征数n m,n = shape(dataMatrix) #设定迭代的步长alpha alpha = 0.001 #设置循环次数500次,即训练次数,人为给定 maxCycles = 500 #权值初始化为1(就是让θ矩阵是全为1),后面根据样本数据调整 #训练结束得到最优权值 #weights为n行,1维。为列向量。 weights = ones((n,1))#array类型 #循环maxCycles次, #每次根据模型输出结果与真实值的误差,调整权值。 for k in range(maxCycles): #dataMatrix*weights矩阵的乘法。 #事实上包含600次的乘积 #h为模型给出的一个预测值 h = dataMatrix*weights #dataMatrix是全量数据 #计算误差,每条记录真实值与预测值之差 error = h-labelMat #权值调整(未知参数调整),强制转换为matrix类型 weights = weights - alpha * dataMatrix.transpose()* error #循环次数结束,返回回归系数 return weights #图形化显示标准线性回归结果,包括数据集及它的最佳拟合直线 def standplot(xarr,yarr,w): import matplotlib.pyplot as plt xmat=mat(xarr) ymat=mat(yarr) #画点 fig=plt.figure() 
ax=fig.add_subplot(111) ax.scatter(xmat[:,1].flatten().A[0],ymat.T[:,0].flatten().A[0]) #画线,为了保证直线上的点是按顺序排列,需按升序排列 xCopy = xmat.copy() #排序 xCopy.sort(0) #排序 yHat = xCopy*w #预测值 ax.plot(xCopy[:,1],yHat) plt.show() dataMatIn, classLabels = loadDataSet('ex0.txt') #批处理梯度下降法 w_gradDescent=gradDescent(dataMatIn, classLabels) print('w_gradDescent=') print(w_gradDescent) standplot(dataMatIn,classLabels,w_gradDescent) # - # ### 随机梯度下降(SGD) # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-4ce341a4cd0d4349.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ### 代码实现 def stocGradDescent0(dataMatrix, classLabels, numIter=100): #list类型转换array类型 dataMatrix=array(dataMatrix) #获取训练数据条数m,维度n m,n = shape(dataMatrix) #步长 alpha = 0.01 #权值初始化为1,后面根据样本数据调整 weights = ones(n) #array类型 #循环numIter次 for j in range(numIter): #遍历每一条数据 for i in range(m): #h为当前样本的预测值,批处理梯度上升算法的h为所有样本的模型输出 #此处h为一个值,即一次只有一个样本更新 #dataMatrix[i]*weights也为当前样本行乘以权值weights h = sum(dataMatrix[i]*weights) #array类型元素级运算 #误差,此处error为一个值 error = h-classLabels[i] #只选择当前样本进行权值更新 #array类型,元素级别运算 weights = weights - alpha * error*dataMatrix[i] #返回权值 return mat(weights).transpose() #随机梯度下降法 w_stocGradDescent0=stocGradDescent0(dataMatIn, classLabels) print('w_stocGradDescent0=') print(w_stocGradDescent0) standplot(dataMatIn,classLabels,w_stocGradDescent0) # ## 梯度下降法考虑因素 # ### 梯度下降法主要考虑两个方面问题:一是方向,二是步长。 # ### 方向决定是否走在最优化的道路上,而步长决定了要多久才能到达最优的地方。 # ### 对于第一方面,就是求梯度,多元函数求相应变量的偏导数;对于第二方面,如果步子太少,则需要很长的时间才能达到目的地,如果步子过大,可能导致在目的地周围来回震荡,所以步长选择比较关键。 # ![image.png](http://upload-images.jianshu.io/upload_images/1656466-70e87b43c2761bb2.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) # ## 求最优解方法: # ### 1、如果优化函数存在解析解。例如我们求最值一般是对优化函数求导,找到导数为0的点。如果代价函数能简单求导,并且求导后为0的式子存在解析解,那么我们就可以直接得到最优的参数。 # ### 2、如果式子很难求导,例如函数里面存在隐含的变量或者变量相互间存在耦合,互相依赖的情况。或者求导后式子得不到解释解,或者未知参数的个数大于方程组的个数等。这时候使用迭代算法来一步一步找到最优解。 # # 进阶知识点 # ### ridge回归代码实现 # + from cycler import cycler import numpy as np import matplotlib.pyplot as plt from sklearn 
import linear_model

# The design matrix X here is a Hilbert matrix:
# its element A(i, j) = 1 / (i + j - 1), where i and j are the row and column indices.
# Hilbert matrices are positive definite yet highly ill-conditioned: perturbing any single
# element changes the determinant and the inverse dramatically.
# This design matrix is 10x5: 10 samples, 5 variables.
# np.newaxis inserts a new axis so the sum broadcasts into a 2-D grid.
X = 1. / (np.arange(1, 6) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
print('设计矩阵为:')
print(X)

# 200 alpha values spaced logarithmically between 10^(-10) and 10^(-2)
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
print('\n alpha的值为:')
print(alphas)

# initialise a Ridge regression (no intercept term)
clf = linear_model.Ridge(fit_intercept=False)

# coefficient matrix: one coefficient vector per alpha
coefs = []
# train a model for each alpha and record its coefficients
for a in alphas:
    clf.set_params(alpha=a)
    clf.fit(X, y)
    coefs.append(clf.coef_)

# grab the current axes handle
ax = plt.gca()
# one colour per coefficient dimension
#ax.set_color_cycle(['b', 'r', 'g', 'c', 'k'])   # older matplotlib API; use it if the next line errors
ax.set_prop_cycle(cycler('color', ['b', 'r', 'g', 'c', 'k']))
# plot the relationship between alpha and the corresponding coefficients
ax.plot(alphas, coefs)
ax.set_xscale('log')                 # log-scaled x axis
ax.set_xlim(ax.get_xlim()[::-1])     # reverse the x axis for readability
plt.grid()
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
# -

# ## Coordinate descent

# +
import numpy as np
import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score

# generate sparse data
np.random.seed(1)
# 50 samples, 200-dimensional parameters
n_samples, n_features = 50, 200
# Gaussian features
x = np.random.randn(n_samples, n_features)
# one coefficient per variable
coef = 3 * np.random.randn(n_features)
# variable indices, randomly permuted
inds = np.arange(n_features)
np.random.shuffle(inds)
# keep only 10 non-zero coefficients, zero out the rest -> sparse ground truth
coef[inds[10:]] = 0
# target values y
y = np.dot(x, coef)
# Add noise to y.
# BUG FIX: the original np.random.normal((n_samples,)) passed the tuple as the MEAN
# (loc), drawing a single ~N(50, 1) value that was added to every sample instead of
# per-sample noise. size= draws one independent noise value per sample as intended.
y += 0.01 * np.random.normal(size=(n_samples,))

# split into training and test sets
# test_size defaults to 0.25; train_test_split splits arrays or matrices into random
# train/test subsets, and a fixed random_state makes the split reproducible
x_train, x_test, y_train, y_test = train_test_split(x, y,test_size=0.25,random_state=1)

# Lasso regularisation strength
alpha = 0.1
lasso = Lasso(max_iter=10000, alpha=alpha)
# Fit on the training data and predict the test set.
# scikit-learn's Lasso solver uses coordinate descent internally.
y_pred_lasso = lasso.fit(x_train, y_train).predict(x_test)

# R^2, the coefficient of determination: the share of the total variance that the
# regression explains. Larger is better (1 is a perfect fit); small values mean the
# model fits the observations poorly.
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print("测试集上的R2可决系数 : %f" % r2_score_lasso)

plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.show()
# -

# ## Least Angle Regression (LARS)

# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets

# load the data set: 442 samples, 10 features
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print(X.shape)

# Regularisation path of the parameters:
# the value each coefficient takes at every iteration of the LARS algorithm.
# x axis = progress of the iteration, y axis = coefficient values;
# with 10 features there are 10 regularisation curves.
print("基于LARS算法计算正则化路径:")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)

# normalise the iteration progress into [0, 1]
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]

plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
# -
linear regression/Linear regression 1/Linear regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd
pd.set_option('display.max_columns', None)  # show every column when previewing frames
import numpy as np
import random
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.4)
import copy
import pickle

# flights data with outliers already removed (IQR) and delay columns merged in
df = pd.read_csv("data/flights_cleaned_no_outlier_iqr_with_delays.csv")
df.head()

# # Prepare data for feature selection

# # Feature selection

# +
# https://scikit-learn.org/stable/modules/feature_selection.html

# +
## After testing, found most suitable columns and will remap for final modelling

# +
very_important_columns = [ # ran with what the test data can do
    'fl_date', # get month and bin
#     'op_unique_carrier', # most extensive name list
#     'origin', # need 'origin' to merge weather but already merged! ;)
#     'dest_airport_id', # not sure about this one
    'crs_dep_time', # bin times
#     'dep_time', # only using in TRAIN, to learn how other columns affect this
#     'crs_arr_time',
#     'arr_time', # only using in TRAIN, to learn how other columns affect this
    'weather_type', # add weight values
#     'passengers', # not sure about this one
    'arr_delay', # so we can make a target column...
    'distance',
    'air_time',
]
# important columns seem to be weather(4), time(bin), month(constant)
'''
According to plots:
Weather weight: Snow=10, Rain=5, Cloudy=2, Sunny=1
Time weight: 0-500 = 1, 501-1000 = 8, 1001-1500 = 10, 1501-2000 = 8, 2001 > = 5
Month weight = Oct = 1, Nov, Jan = 5, Dec = 10
'''
# -

# keep only the shortlisted columns
df_ = df.filter(items=very_important_columns)
df_.head()

# +
# make a copy of dataframe
scaled_df = df_.copy()
col_names = ['distance', 'air_time']
features = scaled_df[col_names]

# Use scaler of choice; here Standard scaler is used
# (fit + transform only the two continuous columns, leaving the rest untouched)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)

scaled_df[col_names] = features
# -

df_ = scaled_df

# # remapping crs_dep_time

# Time weight: 0-500 = 1, 501-1000 = 8, 1001-1500 = 10, 1501-2000 = 8, 2001 > = 5

# integer-divide HHMM times by 100 to get the hour of day (0-23)
df_.crs_dep_time = df_.crs_dep_time // 100

# hour -> hand-tuned delay weight (derived from the plots referenced above)
crs_dep_time_remap = {
    0: 0.10, 1: 0.10, 2: 0.10, 3: 0.10, 4: 0.10, 5: 0.10,
    6: 0.40, 7: 0.40, 8: 0.40, 9: 0.40, 10: 0.40,
    11: 0.50, 12: 0.50, 13: 0.50, 14: 0.50, 15: 0.50,
    16: 0.40, 17: 0.40, 18: 0.40, 19: 0.40, 20: 0.40,
    21: 0.20, 22: 0.20, 23: 0.20
}
df_["dep_time_hour_weight"] = df_.crs_dep_time.map(crs_dep_time_remap)
df_.head()

df_.isna().sum()

df_ = df_.dropna()

# # remapping fl_date to month

# slice characters 5:7 of the date string, i.e. the month ("MM" of "YYYY-MM-DD")
df_["month"] = [ i [5:7] for i in df_.fl_date ] # change to datetime and get day of week
df_.head()

# don't drop next time
df_ = df_.drop(labels="fl_date", axis=1)
df_

df_.month.unique()

# +
# # Month weight = Oct = 1, Nov, Jan = 5, Dec = 10
# month_remap = {
#     '10': 0.10,
#     '11': 0.50,
#     '12': 1,
#     '01': 0.50
# }

# df_["month_weight"] = df_.month.map(month_remap)

# +
# df_.head() # see month weight
# -

# # remapping weather

df_.weather_type.unique()

# Weather weight: Snow=10, Rain=5, Cloudy=2, Sunny=1
weather_remap = {
    "Rainy": 0.40,
    "Sunny": 0.05,
    "Snowy": 0.80,
    "Cloudy": 0.10
}
df_['weather_weight'] = df_.weather_type.map(weather_remap)

# one-hot encode the categoricals (drop_first avoids the dummy trap)
df_ = pd.get_dummies(df_, columns=['weather_type'], drop_first=True)
# df_ = pd.get_dummies(df_, columns=['op_unique_carrier'], drop_first=True)
df_ = pd.get_dummies(df_, columns=['month'], drop_first=True)
df_.head()

# +
# # Used dummies before, got 0.03 to 0.06 results. Trying feature selection/engineering next.
# df_dummies = pd.get_dummies(df_, columns=['weather_type'])
# df_dummies = pd.get_dummies(df_dummies, columns=['op_unique_carrier'])
# df_dummies = pd.get_dummies(df_dummies, columns=['origin'])
# -

df_.head()

sns.histplot(df_.arr_delay);

# # remove outliers

df_.shape

# +
# Get rid of 0 y's
# df_ = df_[df_['arr_delay'] != 0]
# -

df_checkpoint = df_.copy()

# df_checkpoint = df_checkpoint.sample(frac=0.5)

df_checkpoint.columns

df_checkpoint.head()

df_checkpoint.shape

df_checkpoint.arr_delay.describe()

# IQR-style outlier trim on the target, but using the 20th/80th percentiles
# instead of the usual 25th/75th
Q1_dep = df_checkpoint.arr_delay.quantile(0.20)
Q3_dep = df_checkpoint.arr_delay.quantile(0.80)
IQR_dep = Q3_dep - Q1_dep
df_checkpoint = df_checkpoint.loc[(df_checkpoint['arr_delay'] >= (Q1_dep - 1.5*IQR_dep))\
                                  & (df_checkpoint['arr_delay'] <= Q3_dep + 1.5*IQR_dep)]

df_checkpoint.arr_delay.describe()

df_checkpoint.shape

# features = every column except the target; target = arr_delay
X = df_checkpoint[df_checkpoint.columns.difference(['arr_delay'])]
y = df_checkpoint["arr_delay"]

print(X.shape)
print(y.shape)

y = pd.DataFrame(y)

# +
# make all y neg values, 0
# y[y < 0] = 0
# -

y.shape

sns.histplot(y);

# normality check on the target
# NOTE(review): y is a one-column DataFrame here, not a Series — confirm shapiro/qqplot
# handle that as intended
from scipy import stats
shapiro_test = stats.shapiro(y)
print(shapiro_test.statistic)
print(shapiro_test.pvalue)

# +
from numpy import mean
from numpy import std
print('mean=%.3f stdv=%.3f' % (mean(y), std(y)))
# -

from statsmodels.graphics.gofplots import qqplot
from matplotlib import pyplot
# q-q plot
qqplot(y, line='s')
plt.show()

# +
from scipy.stats import shapiro
stat, p = shapiro(y)
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpret
alpha = 0.05
if p > alpha:
    print('Sample looks Gaussian (fail to reject H0)')
else:
    print('Sample does not look Gaussian (reject H0)')
# -

# # Smote?

# +
# # check version number
# import imblearn
# # transform the dataset
# from collections import Counter
# from sklearn.datasets import make_classification
# from imblearn.over_sampling import SMOTE
# oversample = SMOTE()
# X, y = oversample.fit_resample(X, y)

# +
# print(X.shape)
# print(y.shape)

# +
# sns.histplot(y);

# +
# y.arr_delay.mean()

# +
# # remerge y to X... sample frac... resplit.
# X["arr_delay"] = y.arr_delay
# X_checkpoint = X.copy()
# X_checkpoint = X_checkpoint.sample(frac=0.5)

# +
# X = X_checkpoint[X_checkpoint.columns.difference(['arr_delay'])]
# y = X_checkpoint["arr_delay"]

# +
# y = pd.DataFrame(y)

# +
# print(X.shape)
# print(y.shape)
# -

# # Smote end

# ## Main Task: Regression Problem

# The target variable is ARR_DELAY. We need to be careful which columns to use and which don't. For example, DEP_DELAY is going to be the perfect predictor, but we can't use it because in real-life scenario, we want to predict the delay before the flight takes of --> We can use average delay from earlier days but not the one from the actual flight we predict.

# For example, variables CARRIER_DELAY, WEATHER_DELAY, NAS_DELAY, SECURITY_DELAY, LATE_AIRCRAFT_DELAY shouldn't be used directly as predictors as well. However, we can create various transformations from earlier values.

# We will be evaluating your models by predicting the ARR_DELAY for all flights 1 week in advance.
# #### linear / logistic / multinomial logistic regression
# #### Naive Bayes
# #### Random Forest
# #### SVM
# #### XGBoost
# #### The ensemble of your own choice

# +
# X = X.replace([np.inf, -np.inf], np.nan)
# X = X.dropna()

# +
# 75/25 train/test split with a fixed seed for reproducibility
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,train_size=0.75,random_state=42)
# -

from sklearn.linear_model import Lasso, Ridge, SGDRegressor, ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
from numpy import absolute
from numpy import mean
from numpy import std

# ## feature selection test

# +
# ANOVA feature selection for numeric input and categorical output
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
# define feature selection (keep the 4 highest-scoring features)
fs = SelectKBest(score_func=f_classif, k=4)
# apply feature selection
X_selected = fs.fit_transform(X, y)
print(X_selected.shape)
# -

# # Linear Regression with MinMax Pipeline

from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
# prepare the model with input scaling
pipeline = Pipeline(steps=[('normalize', MinMaxScaler()), ('model', LinearRegression())])
# fit pipeline
pipeline.fit(X_train, y_train)
# make predictions
y_pred = pipeline.predict(X_test)
from sklearn import metrics
# NOTE(review): this reports the TRAIN R^2, not a test-set score — verify intent
print(pipeline.score(X_train, y_train))

# ## Naive Bayes Model

# +
# 0.0361 score
from sklearn import naive_bayes
gnb = naive_bayes.GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
from sklearn import metrics
print(metrics.accuracy_score(y_test, y_pred))

# save the model to disk
filename = 'finalized_Naive_Bayes_imb_all.sav'
pickle.dump(gnb, open(filename, 'wb'))
# -

# ## Lasso (not good)

# +
# # 0.060 score unscaled: scaled data 0.041: after trimming huge 0.034
# model = Lasso(alpha=0.5)
# cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=42)
# scores = cross_val_score(model, X_train, y_train, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
# # force scores to be positive
# scores = absolute(scores)
# print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores)))
# -

# ## Random Forest Classifier Model

# +
# 0.036
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
clf = RandomForestClassifier(max_depth=3, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

# 0.03 score
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test,y_pred)
print(accuracy)

# save the model to disk
filename = 'finalized_Random_forest_imb_all.sav'
pickle.dump(clf, open(filename, 'wb'))
# -

# ## Gridsearch cells. Do not run.

# +
# # parameter grid
# parameter_candidates = {
#     'n_estimators':[270, 285, 300],
#     'max_depth':[3]
# }

# from sklearn import datasets, svm
# from sklearn.model_selection import GridSearchCV
# grid_result = GridSearchCV(clf, param_grid=parameter_candidates, n_jobs=-1)
# the_fit = grid_result.fit(X_train, y_train.values.ravel())
# bestresult = grid_result.best_estimator_

# +
# # View the accuracy score best run: MD3, nest300 score:0.04
# print('Best score for data1:', grid_result.best_score_)
# print(grid_result.best_params_)
# print(bestresult)
# grid_result.score(X_train, y_train)
# -

# ## Random Forest tuned

# +
# # 0.043
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.datasets import make_classification
# clf2 = RandomForestClassifier(max_depth=3, n_estimators=285, random_state=42)
# clf2.fit(X_train, y_train)
# y_pred = clf2.predict(X_test)

# # score
# from sklearn.metrics import accuracy_score
# accuracy = accuracy_score(y_test,y_pred)
# print(accuracy)

# # save the model to disk? No. Same as first RF
# -

# ## Linear/Log Regression

# +
# 0.05
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(X_train, y_train)
print(reg.score(X_train, y_train))

# save the model to disk
filename = 'finalized_LinReg_imb_all.sav'
pickle.dump(reg, open(filename, 'wb'))
# -

reg.coef_

reg.intercept_

# ## Decision Tree

# +
# 0.08
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
clf_dt = DecisionTreeClassifier()
clf_dt = clf_dt.fit(X_train,y_train)
y_pred = clf_dt.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))

# save the model to disk
filename = 'finalized_Decision_Tree_imb_all.sav'
pickle.dump(clf_dt, open(filename, 'wb'))
# -

y_pred[:50]

# ## SVM (do not run)

from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer

scaler = StandardScaler()
scaler.fit(df_checkpoint)
# NOTE(review): scaler was fitted on ALL columns of df_checkpoint but is applied to a
# frame without 'arr_delay' — this mismatched-column transform looks like it would error,
# and its result is immediately overwritten below anyway. Confirm and remove.
X = scaler.transform(df_checkpoint.loc[:, df_checkpoint.columns != 'arr_delay'])

X = df_checkpoint[df_checkpoint.columns.difference(['arr_delay'])]
y = df_checkpoint["arr_delay"]

from sklearn import svm
clf = svm.SVC(kernel='poly')
clf.fit(X_train, y_train.values.ravel())
y_pred = clf.predict(X_test)

# +
# from sklearn.metrics import confusion_matrix
# confusion_matrix(y_test, y_pred)

# +
# clf2 = svm.SVC(kernel='rbf')
# clf2.fit(X_train, y_train.values.ravel())
# y_pred2 = clf2.predict(X_test)

# +
# from sklearn.metrics import confusion_matrix
# confusion_matrix(y_test, y_pred2)

# +
# clf3 = svm.SVC(kernel='sigmoid')
# clf3.fit(X_train, y_train.values.ravel())
# y_pred3 = clf3.predict(X_test)

# +
# from sklearn.metrics import confusion_matrix
# confusion_matrix(y_test, y_pred3)

# +
# from sklearn import metrics
# print("Accuracy poly:",metrics.accuracy_score(y_test, y_pred))
# print("Accuracy rbg:",metrics.accuracy_score(y_test, y_pred2))
# print("Accuracy sigmoid:",metrics.accuracy_score(y_test, y_pred3))
# -

# ## XGBoost

# +
# import xgboost as xgb
# from sklearn.metrics import mean_squared_error

# data_dmatrix = xgb.DMatrix(data=X_train, label=y_train, enable_categorical=True)

# xg_reg = xgb.XGBRegressor(objective ='reg:linear', # not XGBClassifier() bc regression.
#                           colsample_bytree = 0.3,
#                           learning_rate = 0.1,
#                           max_depth = 3,
#                           alpha = 10,
#                           n_estimators = 250) # err about categorical values
ML_play_around.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/4_image_classification_zoo/Classifier%20-%20Vehicle%20Make%20and%20Model%20type%20classification%20-%20Ensemble%20of%20classifiers%20VS%20Multi-Label%20classifier.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # Table of contents # # # ## Install Monk # # # ## Using pretrained model for classifying elements in cifar-10 dataset # # # ## Training a classifier from scratch - Ensemble of clssifiers # # # ## Training a classifier from scratch - Single multi-label classifiers # <a id='0'></a> # # Install Monk # ## Using pip (Recommended) # # - colab (gpu) # - All bakcends: `pip install -U monk-colab` # # # - kaggle (gpu) # - All backends: `pip install -U monk-kaggle` # # # - cuda 10.2 # - All backends: `pip install -U monk-cuda102` # - Gluon bakcned: `pip install -U monk-gluon-cuda102` # - Pytorch backend: `pip install -U monk-pytorch-cuda102` # - Keras backend: `pip install -U monk-keras-cuda102` # # # - cuda 10.1 # - All backend: `pip install -U monk-cuda101` # - Gluon bakcned: `pip install -U monk-gluon-cuda101` # - Pytorch backend: `pip install -U monk-pytorch-cuda101` # - Keras backend: `pip install -U monk-keras-cuda101` # # # - cuda 10.0 # - All backend: `pip install -U monk-cuda100` # - Gluon bakcned: `pip install -U monk-gluon-cuda100` # - Pytorch backend: `pip install -U monk-pytorch-cuda100` # - Keras backend: `pip install -U monk-keras-cuda100` # # # - cuda 9.2 # - All backend: `pip install -U monk-cuda92` # - Gluon bakcned: `pip install -U monk-gluon-cuda92` # - Pytorch backend: `pip install -U monk-pytorch-cuda92` # - Keras backend: `pip install -U 
monk-keras-cuda92` # # # - cuda 9.0 # - All backend: `pip install -U monk-cuda90` # - Gluon bakcned: `pip install -U monk-gluon-cuda90` # - Pytorch backend: `pip install -U monk-pytorch-cuda90` # - Keras backend: `pip install -U monk-keras-cuda90` # # # - cpu # - All backend: `pip install -U monk-cpu` # - Gluon bakcned: `pip install -U monk-gluon-cpu` # - Pytorch backend: `pip install -U monk-pytorch-cpu` # - Keras backend: `pip install -U monk-keras-cpu` # ## Install Monk Manually (Not recommended) # # ### Step 1: Clone the library # - git clone https://github.com/Tessellate-Imaging/monk_v1.git # # # # # ### Step 2: Install requirements # - Linux # - Cuda 9.0 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` # - Cuda 9.2 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` # - Cuda 10.0 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` # - Cuda 10.1 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` # - Cuda 10.2 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` # - CPU (Non gpu system) # - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` # # # - Windows # - Cuda 9.0 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` # - Cuda 9.2 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` # - Cuda 10.0 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` # - Cuda 10.1 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` # - Cuda 10.2 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` # - CPU (Non gpu system) # - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` # # # - Mac # - CPU (Non gpu system) # - `cd monk_v1/installation/Mac && pip install -r 
requirements_cpu.txt`
#
#
# - Misc
#     - Colab (GPU)
#         - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
#     - Kaggle (GPU)
#         - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`
#
#
#
# ### Step 3: Add to system path (Required for every terminal or kernel run)
# - `import sys`
# - `sys.path.append("monk_v1/");`

# # Used trained classifier for demo - Ensemble

# +
# Using mxnet-gluon backend

# When installed using pip
from monk.gluon_prototype import prototype

# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype

# +
# Download trained weights
# -

# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1Ojb_O7zJuzynRFyABJ75UHO0rrl-mC-N' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1Ojb_O7zJuzynRFyABJ75UHO0rrl-mC-N" -O cls_vehicle_trained.zip && rm -rf /tmp/cookies.txt

# ! unzip -qq cls_vehicle_trained.zip

# ! ls workspace/Project-Vehicle

# +
# Load the four trained experiments in inference-only mode (one classifier per attribute)
gtf1 = prototype(verbose=0);
gtf1.Prototype("Project-Vehicle", "Classify-Make-Type", eval_infer=True);

gtf2 = prototype(verbose=0);
gtf2.Prototype("Project-Vehicle", "Classify-Model-Type", eval_infer=True);

gtf3 = prototype(verbose=0);
gtf3.Prototype("Project-Vehicle", "Classify-Drive-Type", eval_infer=True);

gtf4 = prototype(verbose=0);
gtf4.Prototype("Project-Vehicle", "Classify-Body-Style", eval_infer=True);

# +
# Run all four classifiers on the same test image and show the ensemble's answers
img_name = "workspace/test/test1.jpg";
print("Img name - ", img_name);
output1 = gtf1.Infer(img_name=img_name)
output2 = gtf2.Infer(img_name=img_name)
output3 = gtf3.Infer(img_name=img_name)
output4 = gtf4.Infer(img_name=img_name)

print("Make Type-  ", output1["predicted_class"]);
print("Model Type- ", output2["predicted_class"]);
print("Drive Type- ", output3["predicted_class"]);
print("Body Style - ", output4["predicted_class"]);

from IPython.display import Image
Image(filename=img_name)

# +
img_name = "workspace/test/test2.jpg";
print("Img name - ", img_name);
output1 = gtf1.Infer(img_name=img_name)
output2 = gtf2.Infer(img_name=img_name)
output3 = gtf3.Infer(img_name=img_name)
output4 = gtf4.Infer(img_name=img_name)

print("Make Type-  ", output1["predicted_class"]);
print("Model Type- ", output2["predicted_class"]);
print("Drive Type- ", output3["predicted_class"]);
print("Body Style - ", output4["predicted_class"]);

from IPython.display import Image
Image(filename=img_name)

# +
img_name = "workspace/test/test3.jpg";
print("Img name - ", img_name);
output1 = gtf1.Infer(img_name=img_name)
output2 = gtf2.Infer(img_name=img_name)
output3 = gtf3.Infer(img_name=img_name)
output4 = gtf4.Infer(img_name=img_name)

print("Make Type-  ", output1["predicted_class"]);
print("Model Type- ", output2["predicted_class"]);
print("Drive Type- ", output3["predicted_class"]);
print("Body Style - ", output4["predicted_class"]);

from IPython.display import Image
Image(filename=img_name)

# +
img_name = "workspace/test/test4.jpg";
print("Img name - ", img_name);
output1 = gtf1.Infer(img_name=img_name)
output2 = gtf2.Infer(img_name=img_name)
output3 = gtf3.Infer(img_name=img_name)
output4 = gtf4.Infer(img_name=img_name)

print("Make Type-  ", output1["predicted_class"]);
print("Model Type- ", output2["predicted_class"]);
print("Drive Type- ", output3["predicted_class"]);
print("Body Style - ", output4["predicted_class"]);

from IPython.display import Image
Image(filename=img_name)
# -

# # Dataset
#
# - Credits: https://github.com/nicolas-gervais/predicting-car-price-from-scraped-data/tree/master/picture-scraper

# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1TQQuT60bddyeGBVfwNOk6nxYavxQdZJD' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1TQQuT60bddyeGBVfwNOk6nxYavxQdZJD" -O thecarconnectionpicturedataset.rar && rm -rf /tmp/cookies.txt

# ! sudo apt-get install unrar

# ! mkdir thecarconnectionpicturedataset

# !
# ! unrar x thecarconnectionpicturedataset.rar thecarconnectionpicturedataset/
# NOTE(review): the line above is a shell command, not Python; it must run with
# a leading "!" inside the notebook (left commented out so the exported .py
# file stays syntactically valid).

# # Training a classifier from scratch - Ensemble of classifiers

import os

import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm

img_list = os.listdir("thecarconnectionpicturedataset")
len(img_list)


def _build_label_csv(file_names, field_index, out_csv, replacements=None):
    """Extract one metadata field from each image filename and write an ID/Labels CSV.

    Filenames encode vehicle metadata as underscore-separated fields before the
    extension (field 0 = make, 1 = model, 12 = drive type, 15 = body style).

    Parameters
    ----------
    file_names : list[str]
        Image file names to label.
    field_index : int
        Index of the underscore-separated field holding the label.
    out_csv : str
        Path of the CSV written (columns: ID, Labels).
    replacements : dict[str, str] | None
        Literal substring expansions applied to the raw field value
        (e.g. {"FWD": "front_wheel_drive"}) before spaces become underscores.

    Returns
    -------
    list[str]
        The unique labels encountered, in first-seen order.
    """
    replacements = replacements or {}
    labels = []
    combined = []
    for name in tqdm(file_names):
        field = name.split(".")[0].split("_")[field_index]
        for old, new in replacements.items():
            field = field.replace(old, new)
        field = field.replace(" ", "_")
        if field not in labels:
            labels.append(field)
        combined.append([name, field])
    pd.DataFrame(combined, columns=["ID", "Labels"]).to_csv(out_csv, index=False)
    return labels


# Bug fix: the original cells appended to make_list / model_list /
# drive_type_list / body_style_list without ever initializing them in this
# section (they were only created later, in the multi-label section), so
# running the notebook top-to-bottom raised NameError. The helper above owns
# its accumulator and returns the unique-label list explicitly.
make_list = _build_label_csv(img_list, 0, "vehicles_make.csv")
model_list = _build_label_csv(img_list, 1, "vehicles_model.csv")
drive_type_list = _build_label_csv(
    img_list, 12, "vehicles_drive_type.csv",
    replacements={
        "FWD": "front_wheel_drive",
        "RWD": "rear_wheel_drive",
        "AWD": "all_wheel_drive",
        "4WD": "four_wheel_drive",
    },
)
body_style_list = _build_label_csv(
    img_list, 15, "vehicles_body_style.csv",
    replacements={"2dr": "2_door_car", "3dr": "3_door_car", "4dr": "4_door_car"},
)

# Using mxnet-gluon backend
from monk.gluon_prototype import prototype
# For pytorch backend
#from monk.pytorch_prototype import prototype
# For Keras backend
#from monk.keras_prototype import prototype


def _train_classifier(experiment, csv_file, model_name):
    """Train one monk classifier experiment on the car dataset.

    Parameters
    ----------
    experiment : str
        Monk experiment name under project "Project-Vehicle".
    csv_file : str
        ID/Labels CSV produced by _build_label_csv above.
    model_name : str
        Backbone name understood by monk (e.g. "resnet50_v1").
    """
    gtf = prototype(verbose=1)
    gtf.Prototype("Project-Vehicle", experiment)
    gtf.Default(dataset_path="thecarconnectionpicturedataset/",
                path_to_csv=csv_file,
                model_name=model_name,
                freeze_base_network=False,
                num_epochs=2)
    gtf.update_batch_size(128)
    gtf.Reload()
    gtf.Train()


# ## Vehicle Make
# NOTE(review): the make classifier used resnet50_v2 while the other three
# used resnet50_v1 in the original — preserved as-is; confirm it is intended.
_train_classifier("Classify-Make-Type", "vehicles_make.csv", "resnet50_v2")

# ## Vehicle Model
_train_classifier("Classify-Model-Type", "vehicles_model.csv", "resnet50_v1")

# ## Vehicle Drive type
_train_classifier("Classify-Drive-Type", "vehicles_drive_type.csv", "resnet50_v1")

# ## Vehicle Body Style
_train_classifier("Classify-Body-Style", "vehicles_body_style.csv", "resnet50_v1")

# ## Run Inference

# Load each trained experiment back in inference-only mode.
gtf1 = prototype(verbose=0)
gtf1.Prototype("Project-Vehicle", "Classify-Make-Type", eval_infer=True)
gtf2 = prototype(verbose=0)
gtf2.Prototype("Project-Vehicle", "Classify-Model-Type", eval_infer=True)
gtf3 = prototype(verbose=0)
gtf3.Prototype("Project-Vehicle", "Classify-Drive-Type", eval_infer=True)
gtf4 = prototype(verbose=0)
gtf4.Prototype("Project-Vehicle", "Classify-Body-Style", eval_infer=True)

import time

# Run all four classifiers on one sample image and time the whole ensemble.
img_name = "thecarconnectionpicturedataset/" + img_list[190]
print("Img name - ", img_name)

start = time.time()
output1 = gtf1.Infer(img_name=img_name)
output2 = gtf2.Infer(img_name=img_name)
output3 = gtf3.Infer(img_name=img_name)
output4 = gtf4.Infer(img_name=img_name)
end = time.time()

print("Time taken for predictions - ", end - start)
print("Make Type- ", output1["predicted_class"])
print("Model Type- ", output2["predicted_class"])
print("Drive Type- ", output3["predicted_class"])
print("Body Style - ", output4["predicted_class"])

from IPython.display import Image
Image(filename=img_name)

# ## Stats at prediction time
#
# - Memory taken on GPU - 1805 MB
# - Avg prediction time - 0.05 sec

# # Training
a classifier from scratch - Single multi-label classifier # ## Data-label CSV generation import os import cv2 import numpy as np from tqdm import tqdm img_list = os.listdir("thecarconnectionpicturedataset"); len(img_list) # + make_list = []; model_list = []; drive_type_list = []; #passenger_capacity_list = []; #passenger_doors_list = []; body_style_list = []; combined = []; for i in tqdm(range(len(img_list))): splits = img_list[i].split(".")[0].split("_"); make = splits[0]; model = splits[1]; drive_type = splits[12]; #passenger_capacity = splits[13]; #passenger_doors = splits[14]; body_style = splits[15]; drive_type = drive_type.replace("FWD", "front_wheel_drive"); drive_type = drive_type.replace("RWD", "rear_wheel_drive"); drive_type = drive_type.replace("AWD", "all_wheel_drive"); drive_type = drive_type.replace("4WD", "four_wheel_drive"); body_style = body_style.replace("2dr", "2_door_car"); body_style = body_style.replace("3dr", "3_door_car"); body_style = body_style.replace("4dr", "4_door_car"); make = make.replace(" ", "_"); model = model.replace(" ", "_"); drive_type = drive_type.replace(" ", "_"); #passenger_capacity = passenger_capacity.replace(" ", "_"); #passenger_doors = passenger_doors.replace(" ", "_"); body_style = body_style.replace(" ", "_"); if make not in make_list: make_list.append(make); if model not in model_list: model_list.append(model); if drive_type not in drive_type_list: drive_type_list.append(drive_type); #if passenger_capacity not in passenger_capacity_list: # passenger_capacity_list.append(passenger_capacity); #if passenger_doors not in passenger_doors_list: # passenger_doors_list.append(passenger_doors); if body_style not in body_style_list: body_style_list.append(body_style); wr_labels = make + " " + model + " " + drive_type + " " + body_style; combined.append([img_list[i], wr_labels]) # - import pandas as pd df = pd.DataFrame(combined, columns = ['ID', 'Labels']) df.to_csv("vehicles.csv", index = False) # ## Training #Using 
mxnet-gluon backend from monk.gluon_prototype import prototype #Create project and experiment ptf = prototype(verbose=1); ptf.Prototype("Project-Vehicle", "Multi-Label-Classifier"); # + #Set data parameters ptf.Dataset_Params(dataset_path="./thecarconnectionpicturedataset", path_to_csv="./vehicles.csv", delimiter = " ", split=0.9, input_size=224, batch_size=128, shuffle_data=True, num_processors=3); # - #Apply data transformations ptf.apply_random_horizontal_flip(train=True, val=True); ptf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True); #Load dataset ptf.Dataset(); # Set model parameters ptf.Model_Params(model_name="resnet50_v1", freeze_base_network=False, use_gpu=True, use_pretrained=True); # Load model ptf.Model(); # Set Training parameters ptf.Training_Params(num_epochs=2, display_progress=True, display_progress_realtime=True, save_intermediate_models=False, save_training_logs=True); #optimizer ptf.optimizer_rmsprop(0.001); #learning rate scheduler ptf.lr_fixed(); #Loss function ptf.loss_sigmoid_binary_crossentropy() #Start Training ptf.Train(); # ## Inference # + #Imports from monk.gluon_prototype import prototype # + # Load experiment in evaluation and inferencing mode ptf = prototype(verbose=1); ptf.Prototype("Project-Vehicle", "Multi-Label-Classifier", eval_infer=True); # + import time img_name = "thecarconnectionpicturedataset/" + img_list[1490]; start = time.time(); output = ptf.Infer(img_name=img_name, return_raw=True) end = time.time(); print("Time take to process - ", end-start) from IPython.display import Image Image(filename=img_name) # - # ## Stats at prediction time # # - Memory taken on GPU - 1181 MB # - Avg prediction time - 0.04 sec
study_roadmaps/4_image_classification_zoo/Classifier - Vehicle Make and Model type classification - Ensemble of classifiers VS Multi-Label classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- squares = [i**2 for i in range(5)] print(squares) # + squares = [] for i in range(5): squares.append(i**2) print(squares) # - odds = [i for i in range(10) if i % 2 == 1] print(odds) # + odds = [] for i in range(10): if i % 2 == 1: odds.append(i) print(odds) # - odd_even = ['odd' if i % 2 == 1 else 'even' for i in range(10)] print(odd_even) # + odd_even = [] for i in range(10): if i % 2 == 1: odd_even.append('odd') else: odd_even.append('even') print(odd_even) # - odd10 = [i * 10 if i % 2 == 1 else i for i in range(10)] print(odd10) matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] flat = [x for row in matrix for x in row] print(flat) # + flat = [] for row in matrix: for x in row: flat.append(x) print(flat) # - cells = [(row, col) for row in range(3) for col in range(2)] print(cells) cells = [(row, col) for row in range(3) for col in range(2) if col == row] print(cells) cells = [(row, col) for row in range(3) if row % 2 == 0 for col in range(2) if col % 2 == 0] print(cells) l_str1 = ['a', 'b', 'c'] l_str2 = ['x', 'y', 'z'] l_zip = [(s1, s2) for s1, s2 in zip(l_str1, l_str2)] print(l_zip) # + l_zip = [] for s1, s2 in zip(l_str1, l_str2): l_zip.append((s1, s2)) print(l_zip) # - l_enu = [(i, s) for i, s in enumerate(l_str1)] print(l_enu) # + l_enu = [] for i, s in enumerate(l_str1): l_enu.append((i, s)) print(l_enu) # - l_zip_if = [(s1, s2) for s1, s2 in zip(l_str1, l_str2) if s1 != 'b'] print(l_zip_if) l_int1 = [1, 2, 3] l_int2 = [10, 20, 30] l_sub = [i2 - i1 for i1, i2 in zip(l_int1, l_int2)] print(l_sub)
notebook/list_comprehension.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analysis of contigs mapped to viral CDDs # # We used contigs assembled using SKESA from ~3000 SRR metagenomes (55,503,968 contigs). These metagenomes were selected based on its viral content. See [here](https://github.com/NCBI-Hackathons/VirusDiscoveryProject/tree/master/DataSelection) for details on how these metagenomes were selected. Thesse contigs were then used as queries to search using RPSTBLASTN against CDD database that had been filtered to only have viral specific CDDs. These list were also generated in previous hackathon. See [here](https://github.com/NCBI-Hackathons/VirusDiscoveryProject/tree/master/DomainLabeling/RPSTBLN/CDDlabels) for the list the ids. # importing relvant python packages import pandas as pd import seaborn as sns import sklearn.decomposition import matplotlib.pyplot as plt sns.set_color_codes() # Read in the rps blast result dataframe and parse information on contig lengths rps_cdd_df = pd.read_csv("cdd_data_forindex_original.tsv", sep="\t", index_col=0) rps_cdd_df[['contig_id_only','start_end']] = rps_cdd_df['contig_id'].str.split(":", expand=True) rps_cdd_df[['cstart', 'contig_length']] = rps_cdd_df['start_end'].str.split(".", expand=True) rps_cdd_df = rps_cdd_df.drop(['start_end', 'cstart'], axis=1) rps_cdd_df.head() # # of CDD rps_cdd_df.groupby(by="CDD").size().shape rps_cdd_df_filtered = rps_cdd_df[rps_cdd_df['evalue'] < 1.0e-10] rps_cdd_df_filtered.to_csv("test.txt", sep="\t", index=False) rps_cdd_df_filtered.head() # # of CDD rps_cdd_df_filtered.groupby(by="CDD").size().shape rps_cdd_df_filtered.groupby(by="CDD").size().sort_values(ascending=False) # # of contigs with CDD rps_cdd_df_filtered.groupby(by="contig_id").size().shape rps_cdd_df_filtered.groupby(by='SRR').size().shape # of SRR represented 
rps_cdd_df.groupby(by='SRR').size().shape # Number of contigs representated cdd_percontig = pd.DataFrame(rps_cdd_df.groupby(by='contig_id').size()) cdd_percontig = cdd_percontig.rename(columns={0:"cdd_per_contigs"}) cdd_percontig.head() # Number of contigs that were processed cdd_percontig.shape # Top 10 contig with highest hit cdd_percontig.sort_values(['cdd_per_contigs'], ascending=False).head(n=10) # Number of contigs representated cdd_percontig_filtered = pd.DataFrame(rps_cdd_df_filtered.groupby(by='contig_id').size()) cdd_percontig_filtered = cdd_percontig_filtered.rename(columns={0:"cdd_per_contigs"}) cdd_percontig_filtered.sort_values(['cdd_per_contigs'], ascending=False).head(n=10) cdd_percontig_filtered[cdd_percontig_filtered['cdd_per_contigs'] < 2 ] # + # adding a column with density of CDD per 1Kb # generating a separate table rps_cdd_df_index = rps_cdd_df # converting a column to index rps_cdd_df_index.index = rps_cdd_df['contig_id'] rps_cdd_df_index = rps_cdd_df_index.drop(columns=["contig_id"]) rps_cdd_df_density = rps_cdd_df_index.merge(cdd_percontig, how="left", left_index=True, right_index=True) rps_cdd_df_density['contig_length'] = rps_cdd_df_density['contig_length'].astype(int) rps_cdd_df_density['cdd_per_contigs'] = rps_cdd_df_density['cdd_per_contigs'].astype(int) rps_cdd_df_density['cdd_per_kb'] = (rps_cdd_df_density['cdd_per_contigs']/rps_cdd_df_density['contig_length'])*1000 rps_cdd_df_density.head() # - contig_length_density = rps_cdd_df_density[['contig_length', 'cdd_per_kb']].drop_duplicates() contig_length_density.head() sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.set_style("white") sns.scatterplot(x=contig_length_density["contig_length"], y=contig_length_density["cdd_per_kb"], color="r") sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.set_style("white") contig_2000 = contig_length_density[contig_length_density["contig_length"] < 20000] sns.scatterplot(x=contig_2000["contig_length"], y=contig_2000["cdd_per_kb"], color="r") # + # adding 
a column with density of CDD per 1Kb # generating a separate table rps_cdd_df_index_filtered = rps_cdd_df_filtered # converting a column to index rps_cdd_df_index_filtered.index = rps_cdd_df_filtered['contig_id'] rps_cdd_df_index_filtered = rps_cdd_df_index_filtered.drop(columns=["contig_id"]) rps_cdd_df_density_filtered = rps_cdd_df_index_filtered.merge(cdd_percontig, how="left", left_index=True, right_index=True) rps_cdd_df_density_filtered['contig_length'] = rps_cdd_df_density_filtered['contig_length'].astype(int) rps_cdd_df_density_filtered['cdd_per_contigs'] = rps_cdd_df_density_filtered['cdd_per_contigs'].astype(int) rps_cdd_df_density_filtered['cdd_per_kb'] = (rps_cdd_df_density_filtered['cdd_per_contigs']/rps_cdd_df_density_filtered['contig_length'])*1000 rps_cdd_df_density_filtered.head() # - # Distribution of number of CDD per 1 KB sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.set_style("white") sns.distplot(rps_cdd_df_density['cdd_per_kb'], color="g") sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.set_style("white") sns.distplot(rps_cdd_df_density_filtered['cdd_per_kb'], color="g") rps_cdd_df_density_filtered[rps_cdd_df_density_filtered.cdd_per_kb > 400] rps_cdd_df_density_filtered.cdd_per_kb[rps_cdd_df_density_filtered.contig_id == "NC_001479.1:1.7835"] # count the number of CDDs per SRR ids srr_cdd_df = rps_cdd_df_density[["CDD", "SRR"]] srr_cdd_df.head() srr_cdd_count = srr_cdd_df.groupby(['SRR', 'CDD']).size() srr_cdd_count.head() # count the number of CDDs per SRR ids srr_cdd_df_filtered = rps_cdd_df_density_filtered[["CDD", "SRR"]] srr_cdd_count_filtered = srr_cdd_df_filtered.groupby(['SRR', 'CDD']).size() srr_cdd_count_filtered.head() # + # pivot the table to sample as index and then CDD ids as coumn header srr_cdd_table = pd.DataFrame(srr_cdd_count).reset_index() srr_cdd_pivot_table = srr_cdd_table.pivot(index="SRR", columns="CDD", values=0).fillna(0) srr_cdd_pivot_table.head() # + # pivot the table to sample as index and then CDD ids as coumn 
# headers (filtered counts)
srr_cdd_table_filtered = pd.DataFrame(srr_cdd_count_filtered).reset_index()
srr_cdd_pivot_table_filtered = srr_cdd_table_filtered.pivot(index="SRR", columns="CDD", values=0).fillna(0)
srr_cdd_pivot_table_filtered.head()

# +
# distribution of number of CDDs per sample
sns.distplot(srr_cdd_pivot_table.sum(axis=1), bins=70)
# sns.clustermap(srr_cdd_pivot_table)
# -

# Top 25 SRR with most CDD hits (not unique CDD hits)
srr_cdd_pivot_table.sum(axis=1).sort_values(ascending=False).head(n=25)

# Keep samples with at least 100 total hits for the PCA
filtered_srr_cds = srr_cdd_pivot_table[srr_cdd_pivot_table.sum(axis = 1) > 99]

# +
# PCA retaining only the top 2 components (all hits, high-count samples)
sklearn_pca = sklearn.decomposition.PCA(n_components=2)
sklearn_pca.fit(filtered_srr_cds)

# Project the data into this 2D space and convert it back to a tidy dataframe
df_2D = pd.DataFrame(sklearn_pca.transform(filtered_srr_cds), columns=['PCA1', 'PCA2'])

# Keep the sample accession alongside its coordinates
df_2D['SRR'] = filtered_srr_cds.index

df_2D.head()

# +
# PCA retaining only the top 2 components (e-value-filtered hits)
sklearn_pca = sklearn.decomposition.PCA(n_components=2)
sklearn_pca.fit(srr_cdd_pivot_table_filtered)

df_2D_filtered = pd.DataFrame(sklearn_pca.transform(srr_cdd_pivot_table_filtered), columns=['PCA1', 'PCA2'])
df_2D_filtered['SRR'] = srr_cdd_pivot_table_filtered.index
df_2D_filtered.head()
# -

# Scatter of the filtered-hit PCA projection
plt.plot(df_2D_filtered.PCA1, df_2D_filtered.PCA2, 'o', alpha=0.7)
plt.legend(loc=0)
plt.margins(0.05)
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')

# Scatter of the all-hit PCA projection
plt.plot(df_2D.PCA1, df_2D.PCA2, 'o', alpha=0.7)
plt.legend(loc=0)
plt.margins(0.05)
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')

# Most CDD hits in SRR5678966

# Bug fix: the remaining cells referenced an undefined name `df`; the frame
# holding the per-hit rpsblast results is rps_cdd_df (its contig_id column is
# still present — only the aliased copy dropped it).
# Plots of distribution of scores.
# NOTE(review): assumes the rpsblast TSV carries a percent-identity column
# named 'pident' — confirm against the input file's header.
rps_cdd_df['pident'].plot.hist(bins=25)

rps_cdd_df.groupby("contig_id").count().sort_values(["CDD"], ascending=False)

# Distribution of number of domains per contig
rps_cdd_df.groupby("contig_id").count()['CDD'].plot.hist(range=(0,20000), log=True)

# Restrict to assembled contigs (ids beginning "Contig_")
contig_df = rps_cdd_df[rps_cdd_df.contig_id.str.contains('^Contig_')]
contig_df.head()

contig_df.groupby("contig_id").count().sort_values(["CDD"], ascending=False)['pident'].plot.hist(range=(0,250),log=True)
post_analysis/jupyter_notebook/CDD_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PyCharm (My_3D_DeTracker) # language: python # name: pycharm-6c5cad46 # --- import torch import torch.nn as nn import torch.functional as F import numpy as np import tensorboardX import os import sys import dla34 heads = {'hm':3, 'dim':3, 'depthmap':1, 'orientation':8} the_dla34 = dla34.DLASeg('dla34', heads, '../../model/') the_dla34 dir(the_dla34) a = the_dla34.modules() DLA = the_dla34._modules['base'] base_layer = DLA._modules['base_layer'] origin_conv = base_layer[0] weights = torch.cat((origin_conv.weight, origin_conv.weight), dim=1) origin_state = origin_conv.state_dict() origin_state new_conv = nn.Conv2d(6, 16, kernel_size=7, stride=1, padding=3, bias=False) new_conv.load_state_dict({'weight':weights}) base_layer[0] = new_conv the_dla34 model = nn.Sequential(the_dla34) model[0] for i, val in enumerate(models): print(val) dummy_input = np.random.random((1, 6, 384, 1280)) dummy_input = torch.from_numpy(dummy_input) dummy_input = torch.tensor(dummy_input, dtype=torch.float32) from tensorboardX import SummaryWriter with SummaryWriter(comment='dla34_2') as w: w.add_graph(the_dla34, (dummy_input,))
network/test_dir/test_dla34.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # Speech Emotion Recognition using the RAVDESS AND TESS dataset

# + [markdown]
# For this task, I have used 5252 samples from
#
# - the Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS) dataset
# - the Toronto emotional speech set (TESS) dataset
#
# The samples include:
#
# **1440** speech files and **1012** song files from **RAVDESS**. This dataset
# includes recordings of 24 professional actors (12 female, 12 male),
# vocalizing two lexically-matched statements in a neutral North American
# accent. Speech includes calm, happy, sad, angry, fearful, surprise, and
# disgust expressions, and song contains calm, happy, sad, angry, and fearful
# emotions. Each file was rated 10 times on emotional validity, intensity, and
# genuineness by 247 untrained adult research participants from North America;
# a further 72 participants provided test-retest data. High levels of
# emotional validity, interrater reliability, and test-retest intrarater
# reliability were reported.
#
# **2800** files from **TESS**. A set of 200 target words were spoken in the
# carrier phrase "Say the word _____" by two actresses (aged 26 and 64 years),
# each portraying seven emotions (anger, disgust, fear, happiness, pleasant
# surprise, sadness, and neutral). Both actresses speak English as their first
# language, are university educated, and have musical training.
# Audiometric testing indicated that both actresses have thresholds within the
# normal range.

# + [markdown]
# Mounting Drive so that we can access the contents directly from the drive
# (only needed when running on Google Colab).

# +
#from google.colab import drive
#drive.mount('/content/drive')

# + [markdown]
# Importing the librosa library (a python package) for extracting important
# features of the audio like the pitch and tone.

# +
# #!pip install librosa

# + [markdown]
# Loading one audio file from the drive using librosa.
# Librosa loads the audio file as a floating point time series.

# +
import librosa
from librosa import display

data, sampling_rate = librosa.load('C:/Users/vip/Documents/speech_final/audio files/RAVADES/Actor_01/03-01-02-02-01-01-01.wav')

# + [markdown]
# Plotting the audio file just loaded with librosa's waveplot function, which
# plots the amplitude envelope of a waveform.

# +
import matplotlib.pyplot as plt

plt.figure(figsize=(12, 4))
# NOTE(review): librosa.display.waveplot was removed in librosa 0.10 (renamed
# waveshow) — confirm the installed librosa version still provides it.
librosa.display.waveplot(data, sr=sampling_rate)

# + [markdown]
# # Load all files
#
# We will create our numpy array by extracting Mel-frequency cepstral
# coefficients (MFCCs), while the classes to predict are extracted from the
# name of each file (see the introductory section for the naming convention of
# this dataset's files).

# +
import time
import os
import numpy as np

path ='C:/Users/vip/Documents/speech_final/audio files/'
lst = []

start_time = time.time()

for subdir, dirs, files in os.walk(path):
    for file in files:
        try:
            # Load librosa array, obtain the MFCCs, and store the mean MFCC
            # vector together with the file's emotion label in a new array.
            X, sample_rate = librosa.load(os.path.join(subdir,file), res_type='kaiser_fast')
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)
            # The instruction below converts the labels (from 1 to 8) to a
            # series from 0 to 7, because the predictor's classes must start
            # at 0 (otherwise it would also try to predict class 0).
            # NOTE(review): int(file[7:8]) assumes RAVDESS-style
            # "03-01-EE-..." names; any file whose 8th character is not a
            # digit raises ValueError below and is silently skipped —
            # confirm the TESS files are actually being ingested.
            file = int(file[7:8]) - 1
            arr = mfccs, file
            lst.append(arr)
        # If the file is not valid, skip it
        except ValueError:
            continue

print("--- Data loaded. Loading time: %s seconds ---" % (time.time() - start_time))

# +
# Creating X and y: zip makes a list of all the first elements, and a list of
# all the second elements.
X, y = zip(*lst)

import numpy as np

# Feature matrix (n_samples, 40 MFCC means) and integer emotion labels.
X = np.asarray(X)
y = np.asarray(y)
X.shape, y.shape

# Saving joblib files to not load them again with the loop above
import joblib

X_name = 'X.joblib'
y_name = 'y.joblib'
save_dir = 'C:/Users/vip/Documents/speech_final'

savedX = joblib.dump(X, os.path.join(save_dir, X_name))
savedy = joblib.dump(y, os.path.join(save_dir, y_name))

# Loading saved arrays back (allows skipping the extraction loop entirely)
import joblib

X = joblib.load('C:/Users/vip/Documents/speech_final/X.joblib')
y = joblib.load('C:/Users/vip/Documents/speech_final/y.joblib')

# # Decision Tree Classifier
#
# To make a first attempt in accomplishing this classification task I chose a decision tree:

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.33, random_state=42)

from sklearn.tree import DecisionTreeClassifier

dtree = DecisionTreeClassifier()

dtree.fit(X_train, y_train)

# Baseline predictions on the held-out split.
predictions = dtree.predict(X_test)

from sklearn.metrics import classification_report,confusion_matrix

print(classification_report(y_test,predictions))

# # Neural network

import numpy as np

# Conv1D expects a channel axis: (n_samples, 40) -> (n_samples, 40, 1).
x_traincnn = np.expand_dims(X_train, axis=2)
x_testcnn = np.expand_dims(X_test, axis=2)

x_traincnn.shape, x_testcnn.shape

import keras
import numpy as np
from tensorflow.keras import optimizers
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from tensorflow.keras.utils import to_categorical
from keras.layers import Input, Flatten, Dropout, Activation
from keras.layers import Conv1D, MaxPooling1D
from keras.models import Model
from keras.callbacks import ModelCheckpoint

# 1-D CNN over the 40 MFCC coefficients, ending in a softmax over 8 emotions.
model = Sequential()
model.add(Conv1D(64, 5,padding='same', input_shape=(40,1)))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(MaxPooling1D(pool_size=(4)))
model.add(Conv1D(128, 5,padding='same',))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(MaxPooling1D(pool_size=(4)))
model.add(Conv1D(256, 5,padding='same',))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(8))
model.add(Activation('softmax'))

# NOTE(review): this RMSprop optimizer is never passed to model.compile in
# the training section (the string 'Adam' is used there instead) — confirm
# which optimizer is actually intended.
opt = tf.keras.optimizers.RMSprop(learning_rate=0.00005, rho=0.9, epsilon=1e-07, decay=0.0)

model.summary()

# +
# I used two callbacks: `early stopping` for avoiding overfitting training data
# and `ReduceLROnPlateau` for the learning rate.
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau

# Stop once validation accuracy has plateaued and keep the best weights seen.
early_stopping = EarlyStopping(
    monitor='val_accuracy',
    min_delta=0.00005,
    patience=11,
    verbose=1,
    restore_best_weights=True,
)

# Halve the learning rate whenever validation accuracy stalls.
lr_scheduler = ReduceLROnPlateau(
    monitor='val_accuracy',
    factor=0.5,
    patience=7,
    min_lr=1e-7,
    verbose=1,
)

callbacks = [
    early_stopping,
    lr_scheduler,
]

# sparse_categorical_crossentropy: integer labels 0-7 against the softmax output.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy']
              )

cnnhistory=model.fit(x_traincnn, y_train, batch_size=64, epochs=500,
                     validation_data=(x_testcnn, y_test), callbacks=callbacks)

# Training vs validation loss per epoch.
plt.plot(cnnhistory.history['loss'])
plt.plot(cnnhistory.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# Training vs validation accuracy per epoch.
plt.plot(cnnhistory.history['accuracy'])
plt.plot(cnnhistory.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# predict_classes() was removed from keras; argmax over the softmax output
# recovers the predicted class indices.
#predictions = model.predict_classes(x_testcnn)
predict_x=model.predict(x_testcnn)
classes_x=np.argmax(predict_x,axis=1)

classes_x

y_test

new_Ytest = y_test.astype(int)

new_Ytest

from sklearn.metrics import classification_report

# Bug fix: the original scored `predictions` here, which still held the
# DECISION TREE output from the earlier section, so the CNN was being
# evaluated with the tree's predictions. The CNN's predictions are classes_x.
report = classification_report(new_Ytest, classes_x)
print(report)

from sklearn.metrics import confusion_matrix

# Same fix as above: score the CNN's own predictions.
matrix = confusion_matrix(new_Ytest, classes_x)
print (matrix)
# 0 = neutral, 1 = calm, 2 = happy, 3 = sad, 4 = angry, 5 = fearful, 6 = disgust, 7 = surprised

model.save('C:/Users/vip/Documents/speech_final/testing10_model.h5')
print("MODEL SAVED")

# Round-trip the saved model to confirm it restores correctly.
new_model=keras.models.load_model('C:/Users/vip/Documents/speech_final/testing10_model.h5')
new_model.summary()

loss, acc = new_model.evaluate(x_testcnn, y_test)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
CNN_MODELS/SER_final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np from fbprophet import Prophet covid = pd.read_csv('https://raw.githubusercontent.com/sandeco/CanalSandeco/master/covid-19/covid_19_data.csv') covid.tail() covid.rename(columns={'ObservationDate': 'Date','Country/Region': 'Country'},inplace=True) covid.tail() # # **agrupando por data** # mortes =covid.groupby('Date').sum()['Deaths'].reset_index() mortes.tail() mortes.plot() # renomeando e organizando para aplicar no prophet mortes.columns=['ds','y'] mortes.tail() mortes['ds']=pd.to_datetime(mortes['ds']) mortes.tail() # Previsão de mortes da covid-19 no mundo m = Prophet(interval_width=0.95) m.fit(mortes) futuro = m.make_future_dataframe(periods=7) futuro.tail() previsao = m.predict(futuro) previsao.tail() previsao[['ds','yhat_lower','yhat','yhat_upper']].tail(7) confirmed_forecast_plot = m.plot(previsao)
corona virus worldwide with propher.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 04-Spam-Classifier # It's time to make our first real Machine Learning application of NLP: a spam classifier! # A spam classifier is a Machine Learning model that classifier texts (email or SMS) into two categories: Spam (1) or legitimate (0). # # To do that, we will reuse our knowledge: we will apply preprocessing and BOW (Bag Of Words) on a dataset of texts. # Then we will use a classifier to predict to which class belong a new email/SMS, based on the BOW. # First things first: import the needed libraries. # Import NLTK and all the needed libraries import nltk import numpy as np import pandas as pd from sklearn.feature_extraction.text import CountVectorizer # Load now the dataset in *spam.csv* using pandas. Use the 'latin-1' encoding as loading option. # TODO: Load the dataset df = pd.read_csv("spam.csv", encoding="latin-1") df.head() # As usual, I suggest you to explore a bit this dataset. # TODO: explore the dataset df.info() # So as you see we have a column containing the labels, and a column containing the text to classify. # We will begin by doing the usual preprocessing: tokenization, punctuation removal and lemmatization. # + # TODO: Perform preprocessing over all the text from nltk.stem import WordNetLemmatizer from nltk.tokenize import word_tokenize wnl = WordNetLemmatizer() def lemmize_text(text): return [wnl.lemmatize(word) for word in text] # Remove punctuation df["Message"] = df["Message"].str.replace('[^a-zA-Z0-9 ]', '') df["tokens"] = df["Message"].apply(word_tokenize) df["tokens"] = df["tokens"].apply(lemmize_text) print(df["tokens"]) # - # Ok now we have our preprocessed data. Next step is to do a BOW. 
# TODO: compute the BOW
# CountVectorizer consumes raw strings, so re-join each token list into one
# string first; the token_pattern keeps only purely alphabetic words of
# length >= 2.
vectorizer = CountVectorizer(stop_words="english",
                             token_pattern=r"(?u)\b[a-zA-Z][a-zA-Z]+\b")
df["joined_tokens"] = df["tokens"].str.join(' ')
BOW = vectorizer.fit_transform(df["joined_tokens"]).toarray()
BOW.shape

# Then make a new dataframe as usual to have a visual idea of the words used and
# their frequencies.

# TODO: Make a new dataframe with the BOW
tokens = vectorizer.get_feature_names_out()
bow_df = pd.DataFrame(data=BOW, columns=tokens)
bow_df.head(5)

# Let's check what is the most used word in the spam category and the non spam category.
#
# Two steps: first add the class to the BOW dataframe; second, filter on a class,
# sum all the values and print the most frequent one.

# TODO: print the most used word in the spam and non spam category

# You should find that the most frequent spam word is 'free', not so surprising, right?

# Now we can make a classifier based on our BOW. We will use a simple logistic
# regression here for the example.
#
# You're an expert, you know what to do, right? Split the data, train your model,
# predict and see the performance.

# TODO: Perform a classification to predict whether a message is a spam or not

# What precision do you get? Check by hand on some samples where it did predict
# well to check what could go wrong...
#
# Try to use other models and try to improve your results.
Lab 5/Spam-Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 ('hand-gesture')
#     language: python
#     name: python3
# ---

# +
import json
from collections import Counter

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from config import class_names
# -

sns.set_theme()

with open('../data/dataset.json') as file:
    dataset = json.load(file)

# Tally every label once (O(n)) instead of calling list.count per class,
# which rescans both label lists for every class (O(n * n_classes)).
label_counts = Counter(dataset['train']['labels'])
label_counts.update(dataset['test']['labels'])

# One row per class: combined train + test sample count.
# Counter returns 0 for classes absent from the data, matching list.count.
data_count = [
    {
        'class_name': class_name,
        'count': label_counts[class_name]
    }
    for class_name in class_names
]

data_count = pd.DataFrame(data_count)
data_count

# Bar chart of samples per gesture class.
plt.figure(figsize=(8, 6), dpi=80)
sns.barplot(x='class_name', y='count', data=data_count)
plt.xticks(rotation=45)
plt.show()
hand_gesture/data-exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# https://www.hackerrank.com/challenges/python-lists/problem


def apply_command(items, command):
    """Apply one HackerRank list command to *items* in place.

    *command* is the split input line: the operation name followed by its
    integer arguments, e.g. ['insert', '0', '5'] or ['print'].

    'print' is handled explicitly rather than via the original
    exec('{}({})'.format(...)) fallback — exec on user-derived text is
    unsafe and obscures intent; 'print' is the only non-list-method
    command in this problem.
    """
    op, *args = command
    if op == "print":
        print(items)
    else:
        # Every other command ('insert', 'remove', 'append', 'sort',
        # 'pop', 'reverse') is a list method taking int arguments.
        getattr(items, op)(*map(int, args))


if __name__ == "__main__":
    li = []
    for _ in range(int(input())):
        apply_command(li, input().split())
02 - Basic Data Types/05 - Lists.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# A hands-on tour of Python's `re` module. Use [regex101](https://regex101.com)
# to experiment interactively!

# A few words about ```match``` & ```search```:
# - ***search*** scans anywhere: it finds <u>hell</u> inside \*<u>hell</u>o world\*
# - ***match*** only succeeds from position 0: <u>hell</u>o world ***match*** \*<u>hell</u>\*
# The functionality is overlapped — that's normal. Take it easy.

from re import *
from re import compile as recompile  # pre-compiling patterns is recommended

# - 0x01 - alternation: either branch is fine!
search(r'at|home', "at")[0]
search(r'at|home', "home")[0]

# - 0x02 - the sole '.'

# +
# '.' matches any character except '\n';
# with flags=DOTALL it matches '\n' too.

# => match the next character (one)
search(r"(hello)(.)", "helloXY").groups()

# => match the `\n` (for multiline)
search(r"(hello)(.)", """
hello world
""", flags=DOTALL).groups()
# -

# - 0x03 - start/end anchors

# +
# with ^ (start of string)
search(r'(^From)(.*)', 'From.. Swiss!').groups()

# with $ (end of string)
search(r'(.*)(it$)', 'Kill it').groups()
search(r'(.*)(it$)', 'Hate it').groups()

# with ^ and $
search(r'(^hello)(.*)(!$)', 'helloooo world!').groups()
search(r'(^hello)(.*)(!$)', 'hello world!').groups()
# -

# - 0x04 - \b

# +
# --- It was called 'word boundary' ---
# What is it?  Any transition to/from [a-zA-Z0-9_] — it consumes NOTHING.

search(r'(the)', 'the').groups()                  # itself, literally
search(r'(\bthe)(.*)', 'x thehell').groups()      # [BOUNDARY]the
search(r'(.*)(the\b)(.*)', 'whatthe f').groups()  # the[BOUNDARY]

# --- As a boundary, but not consuming anythin'. ---

# +
# --- The '\B' kinda reverses that (match -> NOT match) ---
search(r'(.*)(\Bthe)(.*)', 'xthe-x').groups()   # [NO-BOUNDARY]the
search(r'(.*)(\bthe)(.*)', 'x-the-x').groups()  # [BOUNDARY]the
# -

# - 0x05 - \[xyz\] character classes

# +
ptn = recompile(r'(^b[aoi](.*))')
ptn.search('baby').groups()
ptn.search('box').groups()

# +
ptn2 = recompile(r'([ax])([by])([cz])')
ptn2.search('abc').groups()
ptn2.search('xyz').groups()
ptn2.search('ayc').groups()

# +
# classes may mix literals and ranges
ptn3 = recompile(r'([abx-z])')
ptn3.search('a').groups(), \
ptn3.search('b').groups(), \
ptn3.search('y').groups(),

# +
# a leading '^' inside [] negates the class
ptn4 = recompile(r'([^aeiou])')
ptn4.search('c').groups(), \
ptn4.search('d').groups(),

# +
ptn5 = recompile(r'(.*)([^\t\n])(.*)', flags=DOTALL)
ptn5.search("""
X
Y
""").groups()  # '\nX\n' -> 'Y' -> '\n'
# -

# - 0x06 - repetition

# +
# * / +
# ra* --- _, ra, raaa   (zero or more)
# ra+ --- ra, raaa      (one or more)

# +
# ? — zero or one
ptn_qm = recompile(r'([dn]ot?)')
ptn_qm.match('no').groups()   # 0 t
ptn_qm.match('not').groups()  # 1 t

# +
# {M, N} — bounded repetition
match(r'(\d{3})-(\d{3})-(\d{4})', '555-555-5555').groups()

match(r'(\d{0,3})', '').groups()
match(r'(\d{0,3})', '1').groups()
match(r'(\d{0,3})', '111').groups()
# -

# - 0x07 - shorthand charsets

# +
# \d === [0-9]
match(r'[0-9]*.(\d+)', '.005').groups()
match(r'\d*.(\d+)', '.005').groups()

# +
# \w === [a-zA-Z0-9_]
ptn_valid_varname = recompile(r'([a-zA-Z_]+)(\w*)')
ptn_valid_varname.match('_register').groups()
ptn_valid_varname.match('_').groups()
ptn_valid_varname.match('a1').groups()

# +
# \s — whitespace
ptn_space_stuff = recompile(r'(\w*)\s+\+\s+(\w*)')  # W_+_W
ptn_space_stuff.match('x + y').groups()
ptn_space_stuff.match('x + y').groups()
ptn_space_stuff.match('x + y').groups()

# +
# Reversed versions: {\W, \D, \S}.
# Like this:
#   \d == [0-9]
#   \D == [^0-9]
ptn_trim_sortof = recompile(r'\W*(\w*)\W*')
ptn_trim_sortof.match('x ').groups()
ptn_trim_sortof.match('yyy').groups()
ptn_trim_sortof.match(' z').groups()
# -

# - 0x08 - ```?``` gangster (extension notation)

# +
# ?: --- grouped in () but not captured

# words only after 'hello'
match(r'(?:hello)(.*)', 'helloXD').groups()

# words only after 'xxx :)'
ptn_afterxxx_smile = recompile(r"(?:\w+\s\:\)\s)(.*)")
ptn_afterxxx_smile.match(r"yoooo :) babe").groups()  # (?: ) \w+ \s
ptn_afterxxx_smile.match(r"heyyy :) girl").groups()  # \: \) \s

# +
# ?# --- comment, LITERALLY! (ignored by the engine)
match(r'(?#im-comment)(.*)', 'really').groups()

# +
# ?= --- lookahead: match only if XX follows (consumes nothing)
# core differences are explained down below
match(r'(.*)(?:\.)(?=google)', 'drive.google.com').groups()
match(r'(.*)(?:\.)(google)', 'drive.google.com').groups()
# -

# Why do we need stuff like ```?X``` at all?
#
# These are **assertions**: they test what comes next (or before)
# WITHOUT consuming it.
# - '(google\.com)' matches AND returns 'google.com'
# - '(?=google\.com)' only checks that it is there; nothing is returned
#   unless you add an inner group: (?=***(***google\.com***)***)

# +
# ?! --- negative lookahead: match only if XX does NOT follow
# kinda like the diff between '\b' and '\B'
match(r'(.*)(?!google)', 'drive.google.com').groups()  # -> nothin for 2nd, missed
match(r'(.*)(?=google)', 'drive.google.com').groups()  # -> matched (not consumed!)

# +
# ?<= --- lookbehind

# diff between '?:' and '?=' (kinda similar):
match(r'(.*)(?:(google))(.*)', 'drive.google.com').groups()  # shown AND consumed
match(r'(.*)(?=(google))(.*)', 'drive.google.com').groups()  # not consumed

# and diff between "lookahead" and "lookbehind":
match(r'(.*)(?=800)(.*)', '400-800-1111').groups()   # stops before 800
match(r'(.*)(?<=800)(.*)', '400-800-1111').groups()  # before 800 plus 800

# its usage is already contained above, so that's all.

# +
# ?!= ?<!
# name relations (?= -- ?<= -- ?<!) (?! -- ?!=)
# NOTE(review): '(?!=USD)' parses as a negative lookahead for the literal
# '=USD', not a distinct operator — '(?!USD)' was probably intended;
# confirm before changing the pattern.
match(r'(\d{3})(?=USD)(.*)', '123USD4567').groups()   # the middle should be 'USD'
match(r'(\d{3})(?!=USD)(.*)', '123xxx4567').groups()  # this one shouldn't
Part 01 - Regex/getting-started.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # This notebook is intended to plot maps of O2 and CO2 fluxes, their ratio, and biotic and abiotic CO2 fluxes and their ratio # # ## Steps: # - use intake-esm to read available model output into xarray datasets # - loop through models, using groupby seas to get DFJ average fluxes and write/read to nc to speed up plotting # - create multipanel plot # # ## Future to dos: # - look at fgco2abio, and fgco2-fgco2abio, and their ratio # - estimate N2 fluxes from SST, SSS, and heat flux to adjust O2 fluxes to match flux impact on atmospheric d(O2/N2) # - more models as they become available # - allow models missing r1 and just select the lowest ensemble of the ones available # - use OMIP1 and OMIP2 instead of historical to get common (reanalysis) forcing across models, and e.g. SO winds in the right place # ### Setup: # + # %matplotlib inline import xarray as xr import intake import util import os import cartopy import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import cartopy.feature as cfeature # - # ### Get collection of models using Intake-ESM: if util.is_ncar_host(): col = intake.open_esm_datastore("../catalogs/glade-cmip6.json") else: col = intake.open_esm_datastore("../catalogs/pangeo-cmip6.json") #col # ### Check collection columns: col.df.columns # ### Check unique models ("source_id"), experiments ("experiment_id") and temporal frequencies ("table_id"): import pprint uni_dict = col.unique(['source_id', 'experiment_id', 'table_id']) #pprint.pprint(uni_dict, compact=True) # ### Find all the O2 and CO2 flux data at monthly frequency from the ocean for the `historical` experiment. 
cat = col.search(experiment_id='historical', table_id='Omon', grid_label='gn', variable_id=['fgo2','fgco2']) #cat.df #Note: NorESM2 files are 10 years each, so there are many of these: # ### Select only the models from historical simulation that have *both* `fgo2` and `fgco2` data. # #### Selecting only the r1 ensemble member, which is excluding 2 models that have multiple ensemble members but no r1 (show up eventually?) # + models = set(uni_dict['source_id']['values']) # all the models #for variable_id in ['fgo2', 'fgco2', 'fgco2abio', 'fgco2nat', 'tos', 'sos', 'hfds']: for variable_id in ['fgo2', 'fgco2']: query = dict(variable_id=variable_id, experiment_id='historical', table_id='Omon', grid_label='gn', member_id='r1i1p1f1') ## we would get more models if we did not restrict to r1 (some models missing the first ensemble member) cat = col.search(**query) models = models.intersection({model for model in cat.df.source_id.unique().tolist()}) # ensure the CESM2 models are not included (oxygen was erroneously submitted to the archive) models = models - {'CESM2-WACCM', 'CESM2'} # tested just to look at without , 'NorESM2-LM'} models = list(models) models # - # ### Make a catalog of files corresponding to these models and variables: cat = col.search(experiment_id='historical', variable_id=['fgo2', 'fgco2'], table_id='Omon', grid_label='gn', source_id=models, member_id='r1i1p1f1') #cat.df #cat.df.path.tolist() # ### Create a dictionary of output using intake-esm: dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': True}, cdf_kwargs={'chunks': {'time': 36}, 'decode_times': True}) # `dset_dict` is a dictionary of `xarray.Dataset`'s; its keys are constructed to refer to compatible groups. 
dset_dict.keys() # ### Manually hack in coordinate variable names latitudevars={'CMIP.CCCma.CanESM5.historical.Omon.gn': 'latitude', 'CMIP.IPSL.IPSL-CM6A-LR.historical.Omon.gn': 'nav_lat','CMIP.NCC.NorESM2-LM.historical.Omon.gn' : 'latitude'} longitudevars={'CMIP.CCCma.CanESM5.historical.Omon.gn': 'longitude', 'CMIP.IPSL.IPSL-CM6A-LR.historical.Omon.gn': 'nav_lon','CMIP.NCC.NorESM2-LM.historical.Omon.gn' : 'longitude'} # ### Loop on models, calculate DJF average fluxes, and write out to small netcdf files and read back in to speed up plotting, and collect into a dictionary multimoddjf={} # dictionary for k in dset_dict.keys(): print(k) ds=dset_dict[k] da=ds['fgo2'] # make DJf average ### to do: select most recent decade ### to do: do co2 too djf=da.groupby('time.season').mean('time').sel(season='DJF') dsout = djf.to_dataset().squeeze() da=ds['fgco2'] #print(da) djf=da.groupby('time.season').mean('time').sel(season='DJF') dsout['fgco2'] = djf.squeeze() dsout['latitude'] = dset_dict[k][latitudevars[k]] dsout['longitude'] = dset_dict[k][longitudevars[k]] print(dset_dict[k].source_id) dsout.attrs['model'] = dset_dict[k].source_id ### add CO2 and others here #os.remove('so_o2_co2_flux_files/'+k+'.nc') # if crashes, have to manually remove these files at command line before rerunning dsout.to_netcdf('so_o2_co2_flux_files/'+k+'.nc') dsin = xr.open_dataset('so_o2_co2_flux_files/'+k+'.nc') multimoddjf[k]=dsin dsin.close() # ### Correct NorESM for it having flipped fluxes multimoddjf['CMIP.NCC.NorESM2-LM.historical.Omon.gn']['fgo2']*=-1 multimoddjf['CMIP.NCC.NorESM2-LM.historical.Omon.gn']['fgco2']*=-1 # Make plots # + #To Do: # get polar stereo working # only need color bar once per row # add axis values? 
# add units #f, axs = plt.subplots(ncols=3, nrows=2, subplot_kw=dict(projection=ccrs.SouthPolarStereo()),figsize=(15,10)) ## not working for some reason f, axs = plt.subplots(ncols=3, nrows=3, subplot_kw=dict(projection=ccrs.PlateCarree()),figsize=(15,10)) for ax, (k, v) in zip(axs.ravel()[0:3], multimoddjf.items()): ax.set_extent([-180, 180, -90, -30], ccrs.PlateCarree()) tmp=ax.pcolormesh(v.longitude, v.latitude, v.fgco2*1000/44*86400, transform=ccrs.PlateCarree(), vmin=-1E-3, vmax=1E-3) #molCO2/m2/day ax.set_aspect('auto') ax.add_feature(cfeature.LAND, color='k',zorder=4) ax.set_title(v.model+' DJF CO2 Flux') #ax.set_xlabel('longitude') #ax.set_ylabel('latitude') f.colorbar(tmp,ax=ax) for ax, (k, v) in zip(axs.ravel()[3:6], multimoddjf.items()): ax.set_extent([-180, 180, -90, -30], ccrs.PlateCarree()) tmp=ax.pcolormesh(v.longitude, v.latitude, v.fgo2*86400, transform=ccrs.PlateCarree(), vmin=-0.03, vmax=0.03) # mol/m2/day ax.set_aspect('auto') ax.add_feature(cfeature.LAND, color='k',zorder=4) ax.set_title(v.model+' DJF O2 Flux') #ax.set_xlabel('longitude') #ax.set_ylabel('latitude') f.colorbar(tmp,ax=ax) for ax, (k, v) in zip(axs.ravel()[6:9], multimoddjf.items()): ax.set_extent([-180, 180, -90, -30], ccrs.PlateCarree()) tmp=ax.pcolormesh(v.longitude, v.latitude, v.fgo2/(v.fgco2*1000/44), transform=ccrs.PlateCarree(), vmin=-20, vmax=20) ax.set_aspect('auto') ax.add_feature(cfeature.LAND, color='k',zorder=4) ax.set_title(v.model+' DJF O2:CO2 Ratio') f.colorbar(tmp,ax=ax)
notebooks/raw_notebooks/so_o2_co2_flux_ratios.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from sklearn.datasets import make_regression
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
import matplotlib.pyplot as plt

plt.rcParams['figure.dpi'] = 150


def run_regression_experiment(optimizer):
    """Train a small MLP on a synthetic regression task and plot its loss.

    The three cells below were identical copies differing only in the
    optimizer's gradient-clipping settings; the shared pipeline is factored
    out here so each experiment is a single call.

    Parameters
    ----------
    optimizer : keras optimizer used to compile the model (e.g. SGD with
        or without ``clipnorm``/``clipvalue``).

    Side effects: prints train/test MSE and shows a matplotlib figure of
    the training and validation loss curves.
    """
    # Build the regression dataset; fixed random_state keeps the data
    # identical across all three experiments.
    X, y = make_regression(n_samples=1000, n_features=20, noise=0.1, random_state=1)
    # Split into train and validation sets
    n_train = 500
    trainX, testX = X[:n_train, :], X[n_train:, :]
    trainy, testy = y[:n_train], y[n_train:]
    # Define the model
    model = Sequential()
    model.add(Dense(25, input_dim=20, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(1, activation='linear'))
    # Compile the model
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    # Train the model
    history = model.fit(trainX, trainy, validation_data=(testX, testy),
                        epochs=100, verbose=0)
    # Evaluate the model
    train_mse = model.evaluate(trainX, trainy, verbose=0)
    test_mse = model.evaluate(testX, testy, verbose=0)
    print('Train: %.3f, Test: %.3f' % (train_mse, test_mse))
    # Plot the loss curves
    plt.title('Mean Squared Error')
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    plt.show()
# -

# Baseline: plain SGD with momentum, no gradient clipping.
run_regression_experiment(SGD(lr=0.01, momentum=0.9))

# Clip the gradient by its L2 norm (rescale whenever the norm exceeds 1.0).
run_regression_experiment(SGD(lr=0.01, momentum=0.9, clipnorm=1.0))

# Clip each gradient element to the range [-5.0, 5.0].
run_regression_experiment(SGD(lr=0.01, momentum=0.9, clipvalue=5.0))
02 TensorFlow/tutorial/19 Gradient Clipping aviod Gradient Exploding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cn-altair # language: python # name: cn-altair # --- # + [markdown] iooxa={"id": {"block": "YGrB5GrCk19USvlFCTjF", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # # Simple Charts: Core Concepts # # The goal of this section is to teach you the core concepts required to create a basic Altair chart; namely: # # - **Data**, **Marks**, and **Encodings**: the three core pieces of an Altair chart # # - **Encoding Types**: ``Q`` (quantitative), ``N`` (nominal), ``O`` (ordinal), ``T`` (temporal), which drive the visual representation of the encodings # # - **Binning and Aggregation**: which let you control aspects of the data representation within Altair. # # With a good understanding of these core pieces, you will be well on your way to making a variety of charts in Altair. # + [markdown] iooxa={"id": {"block": "aA1sXE4XTIau7IBqgpzz", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # We'll start by importing Altair, and (if necessary) enabling the appropriate renderer: # + iooxa={"id": {"block": "tAZRS2japvFner3Xy9mz", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": null} import altair as alt # + [markdown] iooxa={"id": {"block": "T11UGxttnzSXfptdUOu4", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ## A Basic Altair Chart # # The essential elements of an Altair chart are the **data**, the **mark**, and the **encoding**. # # The format by which these are specified will look something like this: # # ```python # alt.Chart(data).mark_point().encode( # encoding_1='column_1', # encoding_2='column_2', # # etc. # ) # ``` # # Let's take a look at these pieces, one at a time. 
# + [markdown] iooxa={"id": {"block": "mbIyqMy8NNU1JLFmuVid", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ### The Data # # Data in Altair is built around the [Pandas Dataframe](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html). # For this section, we'll use the cars dataset that we saw before, which we can load using the [vega_datasets](https://github.com/altair-viz/vega_datasets) package: # + iooxa={"id": {"block": "YfFj3Vgi8dwBoqSBKiG9", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "JAgEFcni0BTSkWdEaqfc", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} from vega_datasets import data cars = data.cars() cars.head() # + [markdown] iooxa={"id": {"block": "wWGUQZP9RRga2YBrVJW7", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # Data in Altair is expected to be in a [tidy format](http://vita.had.co.nz/papers/tidy-data.html); in other words: # # - each **row** is an observation # - each **column** is a variable # # See [Altair's Data Documentation](https://altair-viz.github.io/user_guide/data.html) for more information. # + [markdown] iooxa={"id": {"block": "l2ukXVB6TtMD9zR72Wnt", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ### The *Chart* object # # With the data defined, you can instantiate Altair's fundamental object, the ``Chart``. Fundamentally, a ``Chart`` is an object which knows how to emit a JSON dictionary representing the data and visualization encodings, which can be sent to the notebook and rendered by the Vega-Lite JavaScript library. 
# Let's take a look at what this JSON representation looks like, using only the first row of the data: # + iooxa={"id": {"block": "yRYo081rQk06PdolmPiJ", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "t8au3lLKEedg3FfWIJY0", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} cars1 = cars.iloc[:1] alt.Chart(cars1).mark_point().to_dict() # + [markdown] iooxa={"id": {"block": "YmDX19JvgOfE3b09RENf", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # At this point the chart includes a JSON-formatted representation of the dataframe, what type of mark to use, along with some metadata that is included in every chart output. # + [markdown] iooxa={"id": {"block": "eFLhTA21YblKk9F26iEp", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ### The Mark # # We can decide what sort of *mark* we would like to use to represent our data. # In the previous example, we can choose the ``point`` mark to represent each data as a point on the plot: # + iooxa={"id": {"block": "OtYKvC8QLySjzwpJqL9B", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "GyJDBaihlHORUhCobfPC", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars).mark_point() # + [markdown] iooxa={"id": {"block": "mqJVwgjhly7iHeSVBraI", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # The result is a visualization with one point per row in the data, though it is not a particularly interesting: all the points are stacked right on top of each other! # # It is useful to again examine the JSON output here: # + iooxa={"id": {"block": "toUcaABR2GRKQaJ9dtiY", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "NSna0hqVPMXXW8yayo20", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars1).mark_point().to_dict() # + [markdown] iooxa={"id": {"block": "54IsKE14cxyPvKqAdcXp", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # Notice that now in addition to the data, the specification includes information about the mark type. 
# + [markdown] iooxa={"id": {"block": "bBILBQgbZ4yaSo1BYcpY", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # There are a number of available marks that you can use; some of the more common are the following: # # * ``mark_point()`` # * ``mark_circle()`` # * ``mark_square()`` # * ``mark_line()`` # * ``mark_area()`` # * ``mark_bar()`` # * ``mark_tick()`` # # You can get a complete list of ``mark_*`` methods using Jupyter's tab-completion feature: in any cell just type: # # alt.Chart.mark_ # # followed by the tab key to see the available options. # + [markdown] iooxa={"id": {"block": "IIkIKl5OtUvWN2EBZeqP", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ### Encodings # # The next step is to add *visual encoding channels* (or *encodings* for short) to the chart. An encoding channel specifies how a given data column should be mapped onto the visual properties of the visualization. # Some of the more frequenty used visual encodings are listed here: # # * ``x``: x-axis value # * ``y``: y-axis value # * ``color``: color of the mark # * ``opacity``: transparency/opacity of the mark # * ``shape``: shape of the mark # * ``size``: size of the mark # * ``row``: row within a grid of facet plots # * ``column``: column within a grid of facet plots # # For a complete list of these encodings, see the [Encodings](https://altair-viz.github.io/user_guide/encoding.html) section of the documentation. # # Visual encodings can be created with the `encode()` method of the `Chart` object. 
For example, we can start by mapping the `y` axis of the chart to the `Origin` column: # + iooxa={"id": {"block": "37cIW7VxeFgamriTo5ho", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "F3IzPwpXdxljMgxqZF2k", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars).mark_point().encode( y='Origin' ) # + [markdown] iooxa={"id": {"block": "k7r2CqmwlCZjG0TGl8Hg", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # The result is a one-dimensional visualization representing the values taken on by `Origin`, with the points in each category on top of each other. # As above, we can view the JSON data generated for this visualization: # + iooxa={"id": {"block": "gtPArfK5aINELGMgVmyZ", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "kaQaQTtULn4wFxj2Lvl6", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars1).mark_point().encode( x='Origin' ).to_dict() # + [markdown] iooxa={"id": {"block": "kGmWHEdMebkjKUC8esbl", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # The result is the same as above with the addition of the `'encoding'` key, which specifies the visualization channel (`y`), the name of the field (`Origin`), and the type of the variable (`nominal`). # We'll discuss these data types in a moment. 
# + [markdown] iooxa={"id": {"block": "f6Y8QPO53IuxBMz2mN9v", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # The visualization can be made more interesting by adding another channel to the encoding: let's encode the `Miles_per_Gallon` as the `x` position: # + iooxa={"id": {"block": "Bvrw28Lkl2IReIKUwWgq", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "fv6H1lhD1FUh3321Udjq", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars).mark_point().encode( y='Origin', x='Miles_per_Gallon' ) # + [markdown] iooxa={"id": {"block": "EoxSyU8wEa7vVMUOoCz2", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # You can add as many encodings as you wish, with each encoding mapped to a column in the data. # For example, here we will color the points by *Origin*, and plot *Miles_per_gallon* vs *Year*: # + iooxa={"id": {"block": "dzW5xJwJsudyipuqGUU5", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "0qPBdqODvkFxVJ1ljXfr", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars).mark_point().encode( color='Origin', y='Miles_per_Gallon', x='Year' ) # + [markdown] iooxa={"id": {"block": "qfzrSNTQunsMy4mNbkNC", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ### Excercise: Exploring Data # # Now that you know the basics (Data, encodings, marks) take some time and try making a few plots! # # In particular, I'd suggest trying various combinations of the following: # # - Marks: ``mark_point()``, ``mark_line()``, ``mark_bar()``, ``mark_text()``, ``mark_rect()``... # - Data Columns: ``'Acceleration'``, ``'Cylinders'``, ``'Displacement'``, ``'Horsepower'``, ``'Miles_per_Gallon'``, ``'Name'``, ``'Origin'``, ``'Weight_in_lbs'``, ``'Year'`` # - Encodings: ``x``, ``y``, ``color``, ``shape``, ``row``, ``column``, ``opacity``, ``text``, ``tooltip``... # # Work with a partner to use various combinations of these options, and see what you can learn from the data! 
In particular, think about the following: # # - Which encodings go well with continuous, quantitative values? # - Which encodings go well with discrete, categorical (i.e. nominal) values? # # After about 10 minutes, we'll ask for a couple volunteers to share their combination of marks, columns, and encodings. # + [markdown] iooxa={"id": {"block": "P8Nxs2G1S3bHr3R6ZiQ1", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # --- # + [markdown] iooxa={"id": {"block": "8CVQ4RvIfiYeaMN8Ykqw", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # ## Encoding Types # + [markdown] iooxa={"id": {"block": "CmmqslfliLJHXsJh4KrU", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # One of the central ideas of Altair is that the library will **choose good defaults for your data type**. # # The basic data types supported by Altair are as follows: # # <table> # <tr> # <th>Data Type</th> # <th>Code</th> # <th>Description</th> # </tr> # <tr> # <td>quantitative</td> # <td>Q</td> # <td>Numerical quantity (real-valued)</td> # </tr> # <tr> # <td>nominal</td> # <td>N</td> # <td>Name / Unordered categorical</td> # </tr> # <tr> # <td>ordinal</td> # <td>O</td> # <td>Ordered categorial</td> # </tr> # <tr> # <td>temporal</td> # <td>T</td> # <td>Date/time</td> # </tr> # </table> # # When you specify data as a pandas dataframe, these types are **automatically determined** by Altair. # # When you specify data as a URL, you must **manually specify** data types for each of your columns. 
# + [markdown] iooxa={"id": {"block": "ECzScKOIoFskcVdKxH3y", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # Let's look at a simple plot containing three of the columns from the cars data: # + iooxa={"id": {"block": "xqAOCXteGz2a1n6205IA", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "cPucFqhTWUA20S1O2QHc", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars).mark_tick().encode( x='Miles_per_Gallon', y='Origin', color='Cylinders' ) # + [markdown] iooxa={"id": {"block": "akWYIdXHXuDgQlo1yfPe", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # Questions: # # - what data type best goes with ``Miles_per_Gallon``? # - what data type best goes with ``Origin``? # - what data type best goes with ``Cylinders``? # + [markdown] iooxa={"id": {"block": "fXLA1YifyWkeyW9Xt6KZ", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # Let's add the shorthands for each of these data types to our specification, using the one-letter codes above # (for example, change ``"Miles_per_Gallon"`` to ``"Miles_per_Gallon:Q"`` to explicitly specify that it is a quantitative type): # + iooxa={"id": {"block": "ATkaUGTKebHQikZxqFmr", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}, "outputId": {"block": "37bUbydvYnTkspHqEs3U", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} alt.Chart(cars).mark_tick().encode( x='Miles_per_Gallon:Q', y='Origin:N', color='Cylinders:O' ) # + [markdown] iooxa={"id": {"block": "MiGb0GRles9CUP8F4VIJ", "project": "ySdPfWr0YQDiIwvYxD9u", "version": 1}} # Notice how if we change the data type for ``'Cylinders'`` to ordinal the plot changes. # # As you use Altair, it is useful to get into the habit of always specifying these types explicitly, because this is *mandatory* when working with data loaded from a file or a URL. 
# Following are a few simple charts made with the cars dataset. For each one, try to add explicit types to the encodings (i.e. change ``"Horsepower"`` to ``"Horsepower:Q"``) so that the plot doesn't change.
notebooks/02-Simple-Charts.ipynb
def plot_reconstructions(session):
    """Display a random batch of validation images alongside their VAE reconstructions.

    Draws 11 random validation images, feeds them through the encoder/decoder in
    the given session, and plots originals followed by reconstructions labelled
    with the classifier's predicted digit.
    """
    sample_count = 11
    # NOTE: the two np.random calls stay in this order so the RNG stream matches.
    chosen = np.random.randint(0, data.validation.num_examples, sample_count)
    eps = np.random.randn(sample_count, z_dim)
    originals = data.validation.images[chosen]
    reconstructions, class_probs = session.run(
        [x_gen, y], {x: originals, epsilon: eps})
    plot_images(originals)
    plot_images(reconstructions, np.argmax(class_probs, 1))
def test_acc(session):
    """Return the classifier's accuracy (in %) on the MNIST test set.

    Only the forward pass runs through TensorFlow; the comparison and mean are
    done in NumPy. The original built fresh tf.equal/tf.cast/tf.reduce_mean ops
    on every call, which silently grows the default graph (a memory leak when
    called repeatedly in TF1).
    """
    labels = session.run(y, {x: data.test.images})
    predicted = np.argmax(labels, 1)
    # presumably data.test.cls holds integer class labels — matches original usage
    return float(np.mean(predicted == data.test.cls) * 100)
Exploration/VAE/Notebooks/VAE-Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### How to Handle an unbalanced Dataset # ###### 1.Up Sampling # ###### 2.Down Sampling import pandas as pd import numpy as np import matplotlib.pyplot as plt df = pd.read_csv('../Datasets/titanicKaggle.csv') df.head() df.tail() df.shape df.Survived.value_counts() df.Survived.value_counts(normalize=True) df.Survived.value_counts().plot(kind='bar') # ### Upsampling pos = df[df.Survived == 1].sample(200) updf = pd.concat([df,pos]) updf.shape updf.Survived.value_counts() updf.Survived.value_counts(normalize=True) # ### Down Sampling df.Survived.value_counts() neg = df[df.Survived == 0].sample(342) pos = df[df.Survived == 1] neg.shape pos.shape downdf = pd.concat([pos,neg]) downdf.shape downdf.Survived.value_counts() downdf.Survived.value_counts(normalize=True)
Pandas - Crash Course/JP Nan/PythonL10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # hide # #%load_ext lab_black # - # # altair_morberg # # A simple personal theme for Altair. # ## Install # # `pip install altair_morberg` # ## How to use # + #hide_output import altair as alt import altair_morberg.core as morberg alt.themes.register("morberg_theme", morberg.theme) alt.themes.enable("morberg_theme") # - # [Examples](https://morberg.github.io/altair_morberg/examples.html) using this theme are available in [the documentation](https://morberg.github.io/altair_morberg/). # + # hide from nbdev.export import notebook2script notebook2script() # -
index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Tristan-Brown1096/DS-Unit-2-Applied-Modeling/blob/master/module1-define-ml-problems/LS_DS_231_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="nCc3XZEyG3XV" # Lambda School Data Science # # *Unit 2, Sprint 3, Module 1* # # --- # # # # Define ML problems # # You will use your portfolio project dataset for all assignments this sprint. # # ## Assignment # # Complete these tasks for your project, and document your decisions. # # - [ ] Choose your target. Which column in your tabular dataset will you predict? # - [ ] Is your problem regression or classification? # - [ ] How is your target distributed? # - Classification: How many classes? Are the classes imbalanced? # - Regression: Is the target right-skewed? If so, you may want to log transform the target. # - [ ] Choose your evaluation metric(s). # - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy? # - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics? # - [ ] Choose which observations you will use to train, validate, and test your model. # - Are some observations outliers? Will you exclude them? # - Will you do a random split or a time-based split? # - [ ] Begin to clean and explore your data. # - [ ] Begin to choose which features, if any, to exclude. Would some features "leak" future information? 
# # If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset. # # Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393) # + [markdown] id="whssprmFPqH6" colab_type="text" # #Answers # + [markdown] id="hvrFBsPGPsJR" colab_type="text" # Target: POSTSEASON # # Problem type: classification # # Target distribution: 9 categories, skewed toward no tourney # # Evaluation metrics: accuracy, f1 score # # Split type: time-based, train on 2010-2016, validate on 2017-2018, and test on 2019 # # Features to exclude: SEED, combining TEAM and YEAR as ID
module1-define-ml-problems/LS_DS_231_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="RsV70a0f7nPH" # # chapter 5 loops and iterations # + [markdown] id="eB_numXb7rV1" # # write a program to say hello 5 times # + colab={"base_uri": "https://localhost:8080/", "height": 122} id="JdMGqd5c7ie3" outputId="8d8713a2-9122-4fdb-ac30-602545ab9c60" n = 5 while n > 0: print('hello') n = n - 1 print('done!') # + colab={"base_uri": "https://localhost:8080/", "height": 122} id="UBImPJ7b8Ab7" outputId="76c1fa17-954d-4ffd-bb01-14b7342536f0" n = 0 while n < 5: print('hello') n = n + 1 print('done!') # + [markdown] id="BE9LpRlu8E9G" # * write a program to count-up 1-5 # * write a program to count-down 5-0 # + colab={"base_uri": "https://localhost:8080/", "height": 122} id="v_mtmasy8Ooo" outputId="1b24d8a3-7192-44a5-f8ab-93232ebb040c" n = 1 while n <= 5 : print(n) n +=1 # n = n -1 print('done') # + colab={"base_uri": "https://localhost:8080/", "height": 140} id="UpKOchAp8rnk" outputId="987a216a-24bd-46f4-ebcd-1899150f0861" n = 5 while n >= 0: print(n) n -=1 print('done') # + [markdown] id="A6fyyOKW9JQ7" # # write a program to say hello 5 times each second # + colab={"base_uri": "https://localhost:8080/", "height": 122} id="0zmx47Az9IQK" outputId="c3db6d14-39ad-45c7-a8b2-b6199ae6d7c1" from time import sleep n = 0 while n < 5: print('hello') n +=1 sleep(2) print('done') # + colab={"base_uri": "https://localhost:8080/", "height": 122} id="_eUh_Hez-P3s" outputId="d6639608-02fd-4045-8bd0-943db6544f4c" while True: line = input('> ') if line == 'exit': break # go to line 6 print(line) print('done') # + colab={"base_uri": "https://localhost:8080/", "height": 158} id="WyeQ3q2F_U76" outputId="99eb58cb-b93c-411f-f4e0-05914ceddafb" while True: line = input('> ') if line[0] == "#": continue # go to line 1 if line == 'exit': break # go to line 8 
print(line) print('done') # + colab={"base_uri": "https://localhost:8080/", "height": 140} id="nT8HBBsL_9cq" outputId="91a38ea5-e74f-4bb9-886a-a59fb6024fa7" # indefinite loop while True: line = input('> ') if line[0] == "#": continue # go to line 1 if line == 'exit': break # go to line 8 print(line) print('done') # + colab={"base_uri": "https://localhost:8080/", "height": 70} id="x67o0sadAvTQ" outputId="2b512729-2c8a-4723-da3a-c80a9a2cf17f" names = ['Ahmed', 'Adham', 'George', 'Ali', 'Majdi', 'Khitam'] # write code to find names that start with A for name in names: if name[0] != 'A': continue print(name) # + colab={"base_uri": "https://localhost:8080/", "height": 70} id="pDS91FM-BSWP" outputId="918aa40d-2986-405c-ccb6-fa9a6d37fba7" names = ['Ahmed', 'Adham', 'George', 'Ali', 'Majdi', 'Khitam'] # write code to find names that do not start with A for name in names: if name[0] == 'A': continue print(name) # + colab={"base_uri": "https://localhost:8080/", "height": 87} id="pyeGqEwKBnoP" outputId="6ecc6f0e-0a07-4645-a525-8e07f8468bfc" # loop using iteration variable numbers = [2, 5, 1, 8] for n in numbers: print(n) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="11cIe-x9B0rx" outputId="bbc8b0b6-2a76-4937-c04f-01629e06de6c" # write a program to sum a list numbers = [2, 5, 1, 8] total = 0 for n in numbers: total = total + n print(total) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="ZrU7gumrCr4H" outputId="f4946e10-44c3-4706-f586-72b94cb4e437" numbers[0] # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="rMdfvdHQCus1" outputId="ea89893f-f2e5-4eee-d273-2f311ebb6e28" numbers[3] # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="UAqIN7iwCyL3" outputId="57991477-f1be-486a-d952-47d394a8a4b2" range(4) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Jvva2ApEC1Sa" outputId="451f2381-8f81-4867-ddf6-3be69ed1c4fb" list(range(4)) # + colab={"base_uri": "https://localhost:8080/", "height": 
87} id="CmWz9QoYCDmB" outputId="466f68e0-1db6-458b-80f6-32314db826fd" # loop using index for i in range(4): print(numbers[i]) # + colab={"base_uri": "https://localhost:8080/", "height": 87} id="r7ikAqJkDFYe" outputId="09583cbd-236e-4142-ba4c-e0f1440c7a0d" # loop using index for i in range(len(numbers)): print(numbers[i])
notebooks/ch5_loops_iterations_part1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Importing data from the Internet # **The web is a rich source of data from which you can extract various types of insights and findings. In this chapter, you will learn how to get data from the web, whether it is stored in files or in HTML. You'll also learn the basics of scraping and parsing web data.** # ## Importing flat files from the web # ### Is it possible to import web data? # - You can: go to URL and click to download files # - BUT: not reproducible, not scalable # # ### The urllib package # - Provides interface for fetching data across the web # - `urlopen()` - accepts URLs instead of file names # # ### How to automate file download in Python # ```python # from urllib.request import urlretrieve # url = 'http://www.website.com/example-link/example_file.csv' # urlretrieve(url, 'example_file.csv') # ``` # ``` # ('example_file.csv', <http.client.HTTPMessage at 0x103cf1128>) # ``` # ## Importing flat files from the web: your turn # The flat file you will import will be `'winequality-red.csv'` from the University of California, Irvine's [Machine Learning repository](http://archive.ics.uci.edu/ml/index.html). The flat file contains tabular data of physiochemical properties of red wine, such as pH, alcohol content and citric acid content, along with wine quality rating. # # The URL of the file is # ``` # 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv' # ``` # After you import it, you'll check your working directory to confirm that it is there and then you'll load it into a `pandas` DataFrame. # - Import the function `urlretrieve` from the subpackage `urllib.request`. # - Assign the URL of the file to the variable `url`. 
# - Use the function `urlretrieve()` to save the file locally as `'winequality-red.csv'`. # - Execute the remaining code to load `'winequality-red.csv'` in a pandas DataFrame and to view its head to the shell. # + # Import package from urllib.request import urlretrieve # Import pandas import pandas as pd # Assign url of file: url url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv' # Save file locally urlretrieve(url, 'winequality-red.csv') # Read file into a DataFrame and view its head df = pd.read_csv('winequality-red.csv', sep=';') display(df.head()) # - # ## Opening and reading flat files from the web # You have just imported a file from the web, saved it locally and loaded it into a DataFrame. If you just wanted to load a file from the web into a DataFrame without first saving it locally, you can do that easily using `pandas`. In particular, you can use the function `pd.read_csv()` with the URL as the first argument and the separator `sep` as the second argument. # # The URL of the file, once again, is # ``` # 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv' # ``` # - Assign the URL of the file to the variable `url`. # - Read file into a DataFrame `df` using `pd.read_csv()`, recalling that the separator in the file is `';'`. # - View the head of the DataFrame `df`. # - Execute the rest of the code to plot histogram of the first feature in the DataFrame `df`. 
# + # Import packages import matplotlib.pyplot as plt import pandas as pd # Assign url of file: url url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv' # Read file into a DataFrame: df df = pd.read_csv(url, sep=';') # View the head of the DataFrame display(df.head()) # Plot first column of df pd.DataFrame.hist(df.iloc[:, 0:1]) plt.xlabel('fixed acidity (g(tartaric acid)/dm$^3$)') plt.ylabel('count') plt.show() # - # ## Importing non-flat files from the web # Congrats! You've just loaded a flat file from the web into a DataFrame without first saving it locally using the `pandas` function `pd.read_csv()`. This function is super cool because it has close relatives that allow you to load all types of files, not only flat ones. In this interactive exercise, you'll use `pd.read_excel()` to import an Excel spreadsheet. # # The URL of the spreadsheet is # ``` # 'http://s3.amazonaws.com/assets.datacamp.com/course/importing_data_into_r/latitude.xls' # ``` # Your job is to use `pd.read_excel()` to read in all of its sheets, print the sheet names and then print the head of the first sheet *using its name, not its index*. # # Note that the output of `pd.read_excel()` is a Python dictionary with sheet names as keys and corresponding DataFrames as corresponding values. # - Assign the URL of the file to the variable `url`. # - Read the file in `url` into a dictionary `xls` using `pd.read_excel()` recalling that, in order to import all sheets you need to pass `None` to the argument `sheet_name`. # - Print the names of the sheets in the Excel spreadsheet; these will be the keys of the dictionary `xls`. # - View the head of the first sheet using the sheet name, not the index of the sheet! 
The sheet name is `'1700'` # + # Assign url of file: url url = 'http://s3.amazonaws.com/assets.datacamp.com/course/importing_data_into_r/latitude.xls' # Read in all sheets of Excel file: xls xls = pd.read_excel(url, sheet_name=None) # Print the sheetnames print(xls.keys()) # View the head of the first sheet (using its name, NOT its index) display(xls['1700'].head()) # - # --- # ## HTTP requests to import files from the web # ### URL # - Uniform/Universal Resource Locator # - References to web resources # - Focus: web addresses # - Ingredients: # - Protocol identifier - http: # - Resource name - webpage.com # - These specify web addresses uniquely # # ### HTTP # - HyperText Transfer Protocol # - Foundation of data communication for the web # - HTTPS - more secure form of HTTP # - Going to a website = sending HTTP request # - GET request # - `urlretrieve()` performs a GET request # - HTML - HyperText Markup Language # # ### GET requests using urllib # ```python # from urllib.request import urlopen, Request # url = "https://www.website.com/" # request = Request(url) # response = urlopen(request) # html = response.read() # response.close() # ``` # # ### GET requests using requests # According to the requests package website. *"Requests allows you to send organic, grass-fed HTTP/1 dot 1 requests, without the need for manual labor."* and the following organizations claim to use requests internally: *"Her Majesty's Government, Amazon, Google, Twilio, NPR, Obama for America, Twitter, Sony, and Federal U.S. Institutions that prefer to be unnamed."* # # - One of the most downloaded Python packages # # ```python # import requests # url = "https://www.website.com/" # r = requests.get(url) # text = r.text # ``` # ## Performing HTTP requests in Python using urllib # Now that you know the basics behind HTTP GET requests, it's time to perform some of your own. 
In this interactive exercise, you will ping our very own DataCamp servers to perform a GET request to extract information from `"https://campus.datacamp.com/courses/1606/4135?ex=2".` # # In the next exercise, you'll extract the HTML itself. Right now, however, you are going to package and send the request and then catch the response. # - Import the functions `urlopen` and `Request` from the subpackage `urllib.request`. # - Package the request to the url `"https://campus.datacamp.com/courses/1606/4135?ex=2"` using the function `Request()` and assign it to `request`. # - Send the request and catch the response in the variable `response` with the function `urlopen()`. # - Run the rest of the code to see the datatype of `response` and to close the connection! # + # Import packages from urllib.request import urlopen, Request # Specify the url url = "https://campus.datacamp.com/courses/1606/4135?ex=2" # This packages the request: request request = Request(url) # Sends the request and catches the response: response response = urlopen(request) # Print the datatype of response print(type(response)) # Close the response response.close() # - # ## Printing HTTP request results in Python using urllib # You have just packaged and sent a GET request to `"https://campus.datacamp.com/courses/1606/4135?ex=2"` and then caught the response. You saw that such a response is a `http.client.HTTPResponse` object. The question remains: what can you do with this response? # # Well, as it came from an HTML page, you could *read* it to extract the HTML and, in fact, such a `http.client.HTTPResponse` object has an associated `read()` method. In this exercise, you'll build on your previous great work to extract the response and print the HTML. # - Send the request and catch the response in the variable `response` with the function `urlopen()`, as in the previous exercise. # - Extract the response using the `read()` method and store the result in the variable `html`. # - Print the string `html`. 
# + # Import packages from urllib.request import urlopen, Request # Specify the url url = "https://campus.datacamp.com/courses/1606/4135?ex=2" # This packages the request request = Request(url) # Sends the request and catches the response: response response = urlopen(request) # Extract the response: html html = response.read() # Print the html print(html[:1000]) # Close the response response.close() # - # ## Performing HTTP requests in Python using requests # Now that you've got your head and hands around making HTTP requests using the urllib package, you're going to figure out how to do the same using the higher-level requests library. You'll once again be pinging DataCamp servers for their `"http://www.datacamp.com/teach/documentation"` page. # # Note that unlike in the previous exercises using urllib, you don't have to close the connection when using requests. # - Import the package `requests`. # - Assign the URL of interest to the variable `url`. # - Package the request to the URL, send the request and catch the response with a single function `requests.get()`, assigning the response to the variable `r`. # - Use the `text` attribute of the object `r` to return the HTML of the webpage as a string; store the result in a variable `text`. # - Print the HTML of the webpage. 
# + # Import package import requests # Specify the url: url url = "http://www.datacamp.com/teach/documentation" # Packages the request, send the request and catch the response: r r = requests.get(url) # Extract the response: text text = r.text # Print the html print(text[:2035]) # - # --- # ## Scraping the web in Python # ### HTML # - Mix of unstructured and structured data # - Structured data: # - Has pre-defined data model, or # - Organized in a defined manner # - Unstructured data: neither of these properties # # ### BeautifulSoup # - Parse and extract structured data from HTML # - Make tag soup beautiful and extract information from bs4 import BeautifulSoup import requests url = 'https://www.crummy.com/software/BeautifulSoup/' r = requests.get(url) html_doc = r.text soup = BeautifulSoup(html_doc) # ### Prettified Soup # ```python # print(soup.prettify()) # ``` # ### Exploring BeautifulSoup # - Many methods such as: print(soup.title) # + tags=[] print(soup.get_text()) # - # - `find_all()` # + tags=[] for link in soup.find_all('a'): print(link.get('href')) # + [markdown] tags=[] # ## Parsing HTML with BeautifulSoup # In this interactive exercise, you'll learn how to use the BeautifulSoup package to *parse*, *prettify* and *extract* information from HTML. You'll scrape the data from the webpage of <NAME>, Python's very own [Benevolent Dictator for Life](https://en.wikipedia.org/wiki/Benevolent_dictator_for_life). In the following exercises, you'll prettify the HTML and then extract the text and the hyperlinks. # # The URL of interest is `url = 'https://www.python.org/~guido/'`. # + [markdown] tags=[] # - Import the function `BeautifulSoup` from the package `bs4`. # - Assign the URL of interest to the variable `url`. # - Package the request to the URL, send the request and catch the response with a single function `requests.get()`, assigning the response to the variable `r`. 
# - Use the `text` attribute of the object `r` to return the HTML of the webpage as a string; store the result in a variable `html_doc`. # - Create a BeautifulSoup object `soup` from the resulting HTML using the function `BeautifulSoup()`. # - Use the method `prettify()` on `soup` and assign the result to `pretty_soup`. # - Print to prettified HTML to your shell! # + tags=[] # Import packages import requests from bs4 import BeautifulSoup # Specify url: url url = 'https://www.python.org/~guido/' # Package the request, send the request and catch the response: r r = requests.get(url) # Extracts the response as html: html_doc html_doc = r.text # Create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # Prettify the BeautifulSoup object: pretty_soup pretty_soup = soup.prettify() # Print the response print(pretty_soup) # - # ## Turning a webpage into data using BeautifulSoup: getting the text # In the following exercises, you'll learn the basics of extracting information from HTML soup. In this exercise, you'll figure out how to extract the text from the BDFL's webpage, along with printing the webpage's title. # - In the sample code, the HTML response object `html_doc` has already been created: your first task is to Soupify it using the function `BeautifulSoup()` and to assign the resulting soup to the variable soup. # - Extract the title from the HTML soup `soup` using the attribute `title` and assign the result to `guido_title`. # - Print the title of Guido's webpage. 
# + # Import packages import requests from bs4 import BeautifulSoup # Specify url: url url = 'https://www.python.org/~guido/' # Package the request, send the request and catch the response: r r = requests.get(url) # Extract the response as html: html_doc html_doc = r.text # Create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # Get the title of Guido's webpage: guido_title guido_title = soup.title # Print the title of Guido's webpage print(guido_title) # - # - Extract the text from the HTML soup `soup` using the method `get_text()` and assign to `guido_text`. # - Print the text from Guido's webpage. # + # Get Guido's text: guido_text guido_text = soup.text # Print Guido's text to the shell print(guido_text) # - # ## Turning a webpage into data using BeautifulSoup: getting the hyperlinks # In this exercise, you'll figure out how to extract the URLs of the hyperlinks from the BDFL's webpage. In the process, you'll become close friends with the soup method `find_all()`. # + # Import packages import requests from bs4 import BeautifulSoup # Specify url url = 'https://www.python.org/~guido/' # Package the request, send the request and catch the response: r r = requests.get(url) # Extracts the response as html: html_doc html_doc = r.text # create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # Print the title of Guido's webpage print(soup.title) # - # - Use the method `find_all()` to find all hyperlinks in `soup`, remembering that hyperlinks are defined by the HTML tag `<a>` but passed to `find_all()` without angle brackets; store the result in the variable `a_tags`. # - The variable `a_tags` is a results set: your job now is to enumerate over it, using a `for` loop and to print the actual URLs of the hyperlinks; to do this, for every element `link` in `a_tags`, you want to `print()` `link.get('href')`. 
# + # Find all 'a' tags (which define hyperlinks): a_tags a_tags = soup.find_all('a') # Print the URLs to the shell for link in a_tags: print(link.get('href')) # -
Data Analyst with Python/10_Intermediate_Importing_Data_in_Python/10_1_Importing data from the Internet.ipynb
def my_guess(n):
    """Guess the next term: the difference between consecutive cubes (n+1)^3 - n^3.

    Works element-wise on NumPy arrays as well as plain ints.
    """
    cube_above = (n + 1) ** 3
    cube_here = n ** 3
    return cube_above - cube_here
numpy-data-science-essential-training/Ex_Files_NumPy_Data_EssT/Exercise Files/Ch 6/06_04/Finish/.ipynb_checkpoints/Finding Pattern-checkpoint.ipynb
# For a linearly separable class, the margin and the frontier don't depend on $C$ for a linear kernel. For the Gaussian kernel, the smaller $\gamma$, the more the decision resembles a linear kernel.
#
# For a non-linearly separable class, the margin grows when $C$ shrinks for a linear kernel. For a Gaussian kernel, if $\gamma$ is too large, the model overfits. If $\gamma$ is too small, the decision is similar to a linear kernel.
#
# The values of these two parameters are indeed decisive for obtaining good classification rates.
# # >Determine three particular areas for the classifier. # How does the classifier react when adding a point in each area? # # >What can we say about the dual variable $\alpha_i$ (associated to each point $X_i$) for each area? # **Answer:** # # The three areas are: # + on the "good side" of the classifier beyond the margin. The classifier does not change when adding a point. $\alpha_i=0$ # + on the "good side" of the classifier, between the margin and the frontier: the classifier changes, and the margin slightly decreases. $0 \leq \alpha_i \leq C$ # + on the "bad side" of the classifier: the classifier changes, and the margin increases. $\alpha_i=C$ # >Design a very unbalanced dataset (really more points in a class than in the other). # With a linear kernel, decrease gradually the value of $C$. What do you observe? # # >This penomenon can be avoided with a class-dependent weighting of the data-fitting term (parameter `class_weight` of [sklearn.svm.SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)). # **Answer:** # # When $C$ decreases, the classifier classifies all points in the largest class. # # Classification <a id="part2"></a> # >Let us consider the dataset defined below. # + # Classification dataset from sklearn.datasets import make_classification X, y = make_classification(n_samples=500, n_classes=2, n_features=2, n_redundant=0, n_clusters_per_class=1) plotXY(X, y) # - # >For $C \in \{10^{-3}, \dots, 10^{2}\}$, fit a [linear support vector classifier](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) and store its classification accuray (also known as score). # # >Plot the score vs the values of $C$. 
# + from sklearn.svm import SVC scores = [] Cs = [1e-3, 1e-2, 1e-1, 1, 1e1, 1e2] n_train = 250 for C in Cs: clf = SVC(C=C) clf.fit(X[:n_train], y[:n_train]) scores.append(clf.score(X[n_train:], y[n_train:])) plt.xscale('log') plt.plot(Cs, scores) plt.title('Score de classification en entrainement en fct de C') plt.show() # - # >Determine the value of $C$ leading to the best score. print('Best C: ' +str(Cs[np.argmax(np.array(scores))])) # # Regression <a id="part3"></a> # >What about for this dataset? # + # Regression dataset n = 100 X_train = np.sort(5 * np.random.rand(n)) y_train = np.sin(X_train) y_train[::5] += 1 * (0.5 - np.random.rand(n//5)) X_test = np.arange(0, 5, step=1e-2) y_test = np.sin(X_test) # Make 2d-arrays X_train = X_train[:, np.newaxis] X_test = X_test[:, np.newaxis] plt.scatter(X_train, y_train) # + from sklearn.svm import SVR scores = [] Cs = [1e-3, 1e-2, 1e-1, 1, 1e1, 1e2] for C in Cs: reg = SVR(C=C) reg.fit(X_train, y_train) scores.append(reg.score(X_test, y_test)) best_C = Cs[np.argmax(np.array(scores))] print('Best C: ' +str(best_C)) plt.xscale('log') plt.plot(Cs, scores) plt.title('Score de régression en entrainement en fct de C') plt.show() # - # **Answer:** # … # >Plot on the same figure the training points and the prediction for the test set. plt.scatter(X_train, y_train) reg = SVR(C=best_C) reg.fit(X_train, y_train) plt.scatter(X_test, reg.predict(X_test)) plt.show() # # Regression with precomputed kernel <a id="part4"></a> # # >We focus here on molecule activity prediction. # For this purpose, we consider a molecule to be a graph, represented by a set of relations between its nodes (the atoms of the molecule). # Following these relations in a molecule $x$, we go over a path $p$ in the corresponding graph. # Let $\mathcal P_d$ be the set of all possible paths of length less than $d$ for the family of graph considered. # Let also $I(x, p)$ being $1$ if the path $p$ is in the molecule $x$ and $0$ otherwise. 
# Then, for two molecules $x$ and $x'$, we can define the similarity measure # $$ # u(x, x') = \sum_{p \in \mathcal P_d} I(x, p) I(x', p). # $$ # The *Tanimoto kernel* is then defined by # $$ # k(x, x') = \frac{u(x, x')}{u(x, x) + u(x', x') - u(x, x')}. # $$ # # >Please, download the [dataset](https://drive.google.com/file/d/0B6VyRTVgbDFeWFFUUVRidUR3MFE/view?usp=sharing) in the directory `./data/` and run the following script to build the train and test kernel matrices. # + path = "data_ncicancer/" # Load the data K = np.loadtxt(path + "/ncicancer_kernel_hf_ex0.txt") # Load the kernel y = np.loadtxt(path + "/ncicancer_targets_ex0.txt")[:, 0] # Load the targets y = (y-np.min(y)) / (np.max(y)-np.min(y)) # Scale the targets # Split train/test sets indices = np.random.permutation(K.shape[0]) train_idx, test_idx = indices[:K.shape[0]//4], indices[K.shape[0]//4:] K_train = K[train_idx][:, train_idx] y_train = y[train_idx] K_test = K[test_idx][:, train_idx] y_test = y[test_idx] print("Number of training examples:", K_train.shape[0]) print("Number of test examples:", K_test.shape[0]) # - # >We would like to apply support vector regression. # Plot the training and test accuracies for $C=10^{-1}$ and different values of $\epsilon$ in $[10^{-3}, 10^{-1}]$. # + from sklearn.svm import SVR scores_train = [] scores_test = [] epsilons = np.logspace(-3, -1, 20, base=10) for eps in epsilons: reg = SVR(kernel='precomputed', C=0.1, epsilon=eps) reg.fit(K_train, y_train) scores_train.append(reg.score(K_train, y_train)) scores_test.append(reg.score(K_test, y_test)) best_eps = epsilons[np.argmax(np.array(scores_test))] print('Best eps: ' +str(best_eps)) print('Best accuracy: ' + str(np.max(scores_test))) plt.xscale('log') plt.plot(epsilons, scores_train, label='Score train') plt.plot(epsilons, scores_test, label='Score test') plt.title('Score du SVR en fct de eps') plt.legend() plt.show() # - # >Do the same with kernel regularized regression. 
# +
from sklearn.kernel_ridge import KernelRidge

# Kernel ridge regression on the precomputed Tanimoto kernel:
# sweep the regularisation strength alpha and score on train and test.
scores_train = []
scores_test = []
alphas = np.logspace(-2, 1, 20, base=10)
for alpha in alphas:
    reg = KernelRidge(kernel='precomputed', alpha=alpha)
    reg.fit(K_train, y_train)
    scores_train.append(reg.score(K_train, y_train))
    scores_test.append(reg.score(K_test, y_test))

best_alpha = alphas[np.argmax(np.array(scores_test))]
# BUG FIX: the printed label said 'Best eps', but the hyperparameter
# swept in this cell is the ridge regularisation strength alpha.
print('Best alpha: ' + str(best_alpha))
print('Best accuracy: ' + str(np.max(scores_test)))

plt.xscale('log')
plt.plot(alphas, scores_train, label='Score train')
plt.plot(alphas, scores_test, label='Score test')
plt.title('Score du KernelRidge en fct de alpha')
plt.legend()
plt.show()
# -

# >Given the kernel matrices for training and testing, give the best possible accuracy on the test sample with a kernel machine.
# The testing data should not intervene in fitting the model.

# +
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV

# Cross-validated grid search over (C, epsilon) using the training kernel
# only; the held-out test kernel is used exclusively for the final score.
epsilons = np.logspace(-3, -1, 20, base=10)
Cs = np.logspace(-5, 5, 20, base=10)
param_grid = dict(C=Cs, epsilon=epsilons)
clf = GridSearchCV(SVR(kernel='precomputed'), param_grid=param_grid, refit=True, cv=5)
clf.fit(K_train, y_train)
print('Best parameters: %s' % clf.best_params_)
print('Training score: %0.2f' % clf.score(K_train, y_train))
print('Test score: %0.2f' % clf.score(K_test, y_test))
# -

# >Compare to regularized regression with kernels.
# + scores_train = [] scores_test = [] alphas = np.logspace(-2, 1, 20, base=10) param_grid = dict(alpha=alphas) clf = GridSearchCV(KernelRidge(kernel='precomputed'), param_grid=param_grid, refit=True, cv=5) clf.fit(K_train, y_train) print('Best parameters: %s' %clf.best_params_) print('Training score: %0.2f' % clf.score(K_train, y_train)) print('Test score: %0.2f' % clf.score(K_test, y_test)) # - # # Duality gap <a id="part5"></a> # # + # Dataset import numpy as np from sklearn.datasets import load_digits X, Y = load_digits(return_X_y=True) Y[Y<5] = 1 # Class 1: digits 1, …, 4 Y[Y>4] = -1 # Class -1: digits 5, …, 9 ind = np.random.permutation(X.shape[0])[:X.shape[0]//4] X = X[ind] Y = Y[ind] print("Digits dataset:") print("X shape:", X.shape) print("Labels:", Y.min(), Y.max()) # - # >After being fitted, the object [SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) has many interesting attributes: # - `coef_` (1 x #features): is the vector defining the Riesz representation (primal coefficients); # - `intercept_` (1): is the model intercept; # - `support_` (#support vectors): is the array of indexes of the support vectors; # - `dual_coef_` (1 x #support vectors): is the array of non-zero signed dual variables (that is $y_i \alpha_i$). # # >Write a function, called `primal_dual(clf, X_train, y_train)`, that given a classifier object, a data matrix, and a label array, fits the classifier and returns the tuple `(primal, dual)` of primal and dual objective values. # Check, on the dataset previously loaded, that the primal and the dual objectives are close to each other. 
# +
from sklearn.svm import SVC

def primal_dual(clf, X_train, y_train):
    """Fit `clf` on (X_train, y_train) and return its objective values.

    Returns
    -------
    (primal, dual) : tuple of floats
        primal = 0.5 * ||w||^2 + C * sum of hinge losses,
        dual   = sum_i alpha_i
                 - 0.5 * sum_ij alpha_i alpha_j y_i y_j <x_i, x_j>.
    """
    # BUG FIX: the original fitted and evaluated on the notebook globals
    # X, Y instead of the function's own arguments, so the data passed in
    # was silently ignored.
    clf.fit(X_train, y_train)
    y_pred = clf.decision_function(X_train)
    hinge = 1 - y_train * y_pred
    primal = 0.5 * np.linalg.norm(clf.coef_)**2 + clf.C * np.sum(hinge[hinge > 0])
    # dual_coef_ stores y_i * alpha_i, restricted to the support vectors,
    # hence sum(y_sv * dual_coef_) recovers sum_i alpha_i.
    sv = clf.support_
    gram = np.matmul(X_train[sv], X_train[sv].T)
    dual = np.sum(y_train[sv] * clf.dual_coef_[0]) \
        - 0.5 * np.dot(clf.dual_coef_, np.dot(gram, clf.dual_coef_.T))[0][0]
    return (primal, dual)

print(primal_dual(SVC(kernel='linear'), X, Y))
# -

# >How does the duality gap (difference between primal and dual objectives) behave with respect to the optimization tolerance (parameter `tol` of [SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html))?
# To answer, plot the gap with respect to the tolerance in x-log-scale.

# +
from sklearn.svm import SVC

primals = []
duals = []
tols = np.logspace(-5, -1, 10, base=10)
for tol in tols:
    primal, dual = primal_dual(SVC(kernel='linear', C=0.01, tol=tol), X, Y)
    primals.append(primal)
    duals.append(dual)

plt.xscale('log')
plt.yscale('log')
plt.plot(tols, np.array(primals)-np.array(duals), label='Duality gap')
plt.legend()
plt.show()
# -
intro_ML/2-SVM_student.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sklearn.datasets as datasets import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression import sklearn.metrics as sm import pickle import pandas as pd import numpy as np #读取原始数据 pd_data = pd.read_csv('./btc-eth.csv') np_array = np.array(pd_data) x = np_array[:,:4] y = np_array[:,4].reshape(-1,1) #测试数据集切分 x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) #模型训练 clf = LinearRegression() clf.fit(x_train, y_train) # 模型预测 y_feature = clf.predict(x_test) # 模型评分 print('R2得分:', sm.r2_score(y_test, y_feature)) print('平均平方误差:',sm.mean_squared_error(y_test, y_feature)) fig1= plt.figure(figsize=[10,10]) plt.scatter(y_test, y_feature) plt.title('y_test vs y_feature') plt.xlabel('y_test shep') plt.ylabel('y_feature shep') plt.show() #保存模型 # 模型持久化 fw = open('./lr-eth.pkl', 'wb') pickle.dump(clf, fw) # + # 模型加载 fr = open('./lr-eth.pkl', 'rb') model = pickle.load(fr) #模型评分 y = model.predict(x_test) print('R2得分:', sm.r2_score(y_test, y)) print('平均平方误差:',sm.mean_squared_error(y_test, y)) fig2= plt.figure(figsize=[10,10]) plt.scatter(y_test, y) plt.title('y_test vs y') plt.xlabel('y_test shep') plt.ylabel('y shep') plt.show() # -
jupyter/btc-eth-lr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import Dataset, DataLoader import numpy as np import json import fastai from random import randint from fastai.text import SortishSampler from fastai.basic_data import DataBunch ROOT = "../../data/protein/structure/secondary_structure/" DATA_PATH = ROOT+"cullpdb" EMBEDDING_PATH = "../../data/protein/classification/data_sources/protVec_100d_3grams.csv" MODEL_PATH = "../../weights/protein/structure/secondary_structure/1_kmers/" SEQUENCE_LENGTH=729 NUM_CLASSES = 9 features = np.array([16, 32, 64, 128, 256])*2 num_workers = 8 # On cloud 8 batch_size = 256 features class EnzymeDataSet(Dataset): """Face Landmarks dataset.""" def __init__(self, data, transform=None): self.data = data def __len__(self): return len(self.data) def __getitem__(self, idx): row = self.data[idx] return np.int64(row[0]), np.int64(row[1]) embeddings = np.eye(22,22) #embeddings = np.loadtxt(open(EMBEDDING_PATH, "rb"), delimiter="\t", skiprows=1, usecols=[i for i in range(1,101)]) train = np.load(DATA_PATH+"/train/data.npy") val = np.load(DATA_PATH+"/val/data.npy") test = np.concatenate([np.load(DATA_PATH+"/test/data.npy"),np.load(DATA_PATH+"/test/data.npy")],axis=0) #test = np.load(DATA_PATH+"/test/data.npy") len(train) np.unique(np.concatenate(train[:,1]).ravel(), return_counts=True) train_sampler = SortishSampler(train, key=lambda x: len(train[x][0]), bs=batch_size//2) val_sampler = SortishSampler(val, key=lambda x: len(val[x][0]), bs=batch_size//2) test_sampler = SortishSampler(test, key=lambda x: len(test[x][0]), bs=batch_size//2) def pad_tensor(seq, label, length): seq_len = len(seq) to_pad = length - seq_len end_padding = randint(0, to_pad) begin_padding = 
to_pad - end_padding seq = np.pad(seq, mode="constant", pad_width=(begin_padding,end_padding)) label = np.pad(label, mode="constant", pad_width=(begin_padding,end_padding)) return seq,label def pad_collate(samples, random=True): "Function that collect samples and adds padding." max_len = max(64 ,max(map(lambda x: len(x[0]), samples))) batch = list(map(lambda x: pad_tensor(x[0], x[1], max_len), samples)) # stack all x = torch.stack([torch.from_numpy(b[0]) for b in batch], 0) y = torch.stack([torch.from_numpy(b[1]) for b in batch], 0) return x, y train_ds = EnzymeDataSet(train) val_ds = EnzymeDataSet(val) test_ds = EnzymeDataSet(test) dls = [DataLoader(*o, num_workers=num_workers) for o in zip((train_ds, val_ds, test_ds), (batch_size,batch_size*2,batch_size*2), (False,False,False), (train_sampler, val_sampler, test_sampler))] data = DataBunch(*dls, collate_fn=pad_collate) class UnetDown(nn.Module): def __init__(self, num_inputs, num_outputs, kernel_size, strides, dilation_rate=1, dropout=0.2, act=F.selu, pool=True): super(UnetDown, self).__init__() self.act = act self.conv1 = nn.Conv1d(num_inputs, num_inputs, kernel_size, stride=1, dilation = 1) self.conv2 = nn.Conv1d(num_inputs, num_inputs, kernel_size, stride=1, dilation = dilation_rate) self.pool = nn.Conv1d(num_inputs, num_outputs, kernel_size, stride=strides, dilation = 1) self.bn1 = nn.BatchNorm1d(num_inputs) self.bn2 = nn.BatchNorm1d(num_inputs) self.to_pool = pool before = (kernel_size -1) // 2 after = (kernel_size -1) - before self.padding = nn.ConstantPad1d((before,after), 0) stride_padding = (strides - 1) // 2 self.padding_stride = nn.ConstantPad1d((before-stride_padding, after -((strides - 1)-stride_padding)), 0) def forward(self, x): out = self.conv1(self.padding(x)) before_pooling = self.act(self.bn1(out)) # out = self.conv2(self.padding(out)) # before_pooling = self.act(self.bn2(out)) if self.to_pool: out = self.pool(self.padding_stride(before_pooling)) return out, before_pooling else: return 
before_pooling class UnetUp(nn.Module): def __init__(self, num_inputs, num_outputs, kernel_size, strides, dilation_rate=1, dropout=0.2, act=F.relu): super(UnetUp, self).__init__() self.act = act self.up = nn.ConvTranspose1d(num_inputs, num_outputs, kernel_size, stride=strides, dilation = 1, padding=kernel_size-1) self.conv1 = nn.Conv1d(num_outputs*2, num_outputs, kernel_size, stride=1, dilation = dilation_rate) self.conv2 = nn.Conv1d(num_inputs, num_outputs, kernel_size, stride=1, dilation = 1) self.bn1 = nn.BatchNorm1d(num_outputs) self.bn2 = nn.BatchNorm1d(num_outputs) self.before = (kernel_size -1) // 2 self.after = (kernel_size -1) - self.before self.padding = nn.ConstantPad1d((self.before,self.after), 0) self.strides = strides def forward(self, x, across): # stride_padding = self.strides//2 # extra = 1- across.shape[2]%2 # self.padding_stride = nn.ConstantPad1d((self.before-(stride_padding+extra), # self.after -((self.strides+1)-stride_padding)), 0) out = self.act(self.bn2(self.conv2((self.padding(x))))) # print(x.shape, out.shape, across.shape) # if out.shape[2] != across.shape[2]: # out = nn.ReflectionPad1d((0, 1))(out) out = torch.cat([out, across], 1) out = self.conv1(self.padding(out)) out = self.act(self.bn1(out)) # out = self.conv2(self.padding(out)) # out = self.act(self.bn2(out)) return out class Unet(nn.Module): def __init__(self): super(Unet, self).__init__() #Encoder self.embedding = nn.Embedding(embeddings.shape[0], embeddings.shape[1]) self.embedding.from_pretrained(torch.from_numpy(embeddings)) self.emb_cov = nn.Conv1d(embeddings.shape[1], features[0], 1) self.unet_down1 = UnetDown(features[0], features[1], 5, 1) self.unet_down2 = UnetDown(features[1], features[2], 5, 1) self.unet_down3 = UnetDown(features[2], features[3], 7, 1) self.unet_down4 = UnetDown(features[3], features[4], 9, 1) self.center = UnetDown(features[3], features[3], 9, 1, pool=False) #Decoder self.unet_block1 = UnetUp(features[4], features[3], 9, 1) self.unet_block2 = 
UnetUp(features[3], features[2], 7, 1) self.unet_block3 = UnetUp(features[2], features[1], 5, 1) self.unet_block4 = UnetUp(features[1], features[0], 5, 1) self.classification = nn.Conv1d(features[0], NUM_CLASSES, 1) def forward(self, x): embedded_seq = self.embedding(x) embedded_seq.transpose_(1, 2) embedded_seq = self.emb_cov(embedded_seq) d1, before_pooling1 = self.unet_down1(embedded_seq) d1 = nn.Dropout(p=0.3)(d1) d2, before_pooling2 = self.unet_down2(d1) d2 = nn.Dropout(p=0.4)(d2) d3, before_pooling3 = self.unet_down3(d2) d3 = nn.Dropout(p=0.5)(d3) #d4, before_pooling4 = self.unet_down4(d3) c = self.center(d3) #u1 = self.unet_block1(c, before_pooling4) c = nn.Dropout(p=0.5)(c) u2 = self.unet_block2(c, before_pooling3) u2 = nn.Dropout(p=0.4)(u2) u3 = self.unet_block3(u2, before_pooling2) u3 = nn.Dropout(p=0.3)(u3) u4 = self.unet_block4(u3, before_pooling1) u4 = nn.Dropout(p=0.2)(u4) output = self.classification(u4) return output def accuracy(input, targs): "Compute accuracy with `targs` when `input` is bs * n_classes." 
mask = (targs > 0) targs = targs.masked_select(mask) input = input.argmax(dim=1) input = input.masked_select(mask) return (input==targs).float().mean() unet = Unet().cuda() #criterion = nn.CrossEntropyLoss(weight=torch.tensor([0.1, 1, 1, 1, 1, 1, 1, 1, 1]).cuda()) criterion = nn.CrossEntropyLoss(weight=torch.tensor([0.1, 4.4, 1.2, 1.4, 3.1, 1, 8.5, 2.4, 2]).cuda()) optimizer = optim.Adam(unet.parameters(), lr=0.001) learner = fastai.basic_train.Learner(data, unet, loss_func=criterion, metrics=accuracy, path=None, model_dir='models') learner.fit(5, lr=0.001) learner.fit_one_cycle(30) learner.recorder.plot() preds, truth = learner.get_preds(is_test=True) accuracy(preds, truth) # + acc = [] min_element = 0 min_score = 1 s = [0, 0, 0, 0, 0, 0, 0, 0, 0] s_t = [0, 0, 0, 0, 0, 0, 0, 0, 0] total = [0, 0, 0, 0, 0, 0, 0, 0, 0] for row in range(len(truth)): a = preds.argmax(dim=1)[row] correct = 0 actual = 0 for i, t in enumerate(truth[row]): s[t] = s[t]+1 total[a[i]] = total[a[i]]+1 if t != 0: # and t != 1: actual += 1 if a[i] == t: correct += 1 s_t[t] = s_t[t]+1 score = correct/actual if min_score > score: min_score = score min_element = row acc.append(score) print(np.mean(acc)) # - np.array(s_t)/np.array(s) import pandas as pd print("|".join(["{0: <6}".format(x) for x in s_t])) print("|".join(["{0: <6}".format(x) for x in s])) print("|".join(["{0: <6}".format(x) for x in total])) print(min_score) preds.argmax(dim=1)[min_element], truth[min_element] t_s = "" p_s = "" element = 4 p = preds.argmax(dim=1)[element] for i, t in enumerate(truth[element]): if t != 0: t_s += str(int(t)) p_s += str(int(p[i])) print(t_s) print(p_s)
embedding_test_structure_pytorch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from taxcrunch import multi_cruncher as mc

# create an instance of the Batch class with the file path to the CSV input file
batch = mc.Batch('../docs/WP_example/input.csv')

# +
# each row in output file represents a filing unit's liabilities in 2026 under the TCJA extension

# write output file to CSV
# BUG FIX: the output filename was misspelled 'TJCA_ext_output.csv';
# use the same 'TCJA' spelling as the diff file written below.
batch.write_output_file(output_filename='TCJA_ext_output.csv',
                        reform_file='../docs/WP_example/TCJA_ext.json')

# create the same table as a Pandas dataframe
output_df = batch.create_table(reform_file='../docs/WP_example/TCJA_ext.json')
output_df.head()

# +
# each row in output file represents the difference in a filing unit's liabilities in 2026
# between the TCJA extension and current law

# write output diff file to CSV
batch.write_diff_file(output_filename='TCJA_ext_diff_output.csv',
                      reform_file='../docs/WP_example/TCJA_ext.json')

# create the same diff table as a Pandas dataframe
diff_df = batch.create_diff_table(reform_file='../docs/WP_example/TCJA_ext.json')
diff_df.head()
docs/WP_example/TCJA_ext_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !nvidia-smi # + from pygaggle.rerank.base import Query, Text from pygaggle.rerank.transformer import MonoT5, MonoBERT from trectools import TrecRun import ir_datasets monoT5Reranker = MonoT5() import pandas as pd monoBert = MonoBERT() DIR='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-ecir22/' DIR_v2='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-marco-v2-ecir22/' # + def load_topics(version, file): import pandas as pd return pd.read_csv('../../Data/navigational-topics-and-qrels-ms-marco-v' + str(version) + '/' + file, sep='\t', names=['num', 'query']) df_popular_queries = load_topics(1, 'topics.msmarco-entrypage-popular.tsv') df_random_queries = load_topics(1, 'topics.msmarco-entrypage-random.tsv') df_popular_run = TrecRun(DIR + 'entrypage-popular/run.ms-marco-content.bm25-default.txt') df_random_run = TrecRun(DIR + 'entrypage-random/run.ms-marco-content.bm25-default.txt') df_dl_queries = pd.read_csv('../../Data/dl19+20/dl19+20-queries.tsv', sep='\t', names=['num', 'query']) df_dl_run = TrecRun(DIR + 'dl19+20/run.ms-marco-content.bm25-default.txt') df_popular_queries_v2 = load_topics(2, 'topics.msmarco-v2-entrypage-popular.tsv') df_random_queries_v2 = load_topics(2, 'topics.msmarco-v2-entrypage-random.tsv') df_popular_run_v2 = TrecRun(DIR_v2 + 'entrypage-popular/run.msmarco-doc-v2.bm25-default.txt') df_random_run_v2 = TrecRun(DIR_v2 + 'entrypage-random/run.msmarco-doc-v2.bm25-default.txt') # + ALL_DOC_IDS = [] for i in [df_popular_run, df_random_run, df_popular_run_v2, df_random_run_v2, df_dl_run]: ALL_DOC_IDS += [j for j in i.run_data.docid.unique()] ALL_DOC_IDS = set(ALL_DOC_IDS) # + def parse_passages(i): import json i = 
json.loads(i) passages = [] for t in range(1, len(i['passages'])): if t == 1: passages += [i['passages'][0][1] + i['passages'][1][1]] else: passages += [i['passages'][t][1]] return (i['id'], passages) def load_documents(file_name): from tqdm import tqdm ret = {} with open('/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/deep-ct/' + file_name) as f: for i in tqdm(f): doc_id, passages = parse_passages(i) assert doc_id not in ret if doc_id in ALL_DOC_IDS: ret[doc_id] = passages return ret DOC_ID_TO_PASSAGES = {} DOC_ID_TO_PASSAGES.update(load_documents('msmarco-document-passages.jsonl')) DOC_ID_TO_PASSAGES.update(load_documents('msmarco-document-passages-v2.jsonl')) # - len(DOC_ID_TO_PASSAGES) # + def get_query_or_fail(df_queries, topic_number): ret = df_queries[df_queries['num'] == int(topic_number)] if len(ret) != 1: raise ValueError('Could not handle ' + str(topic_number)) return ret.iloc[0]['query'] def docs_for_topic(df_run, topic_number): return df_run.run_data[df_run.run_data['query'] == int(topic_number)].docid def rerank_with_model(topic, df_queries, df_run, model): query = get_query_or_fail(df_queries, topic) documents = [] for i in docs_for_topic(df_run, topic)[:100]: for passage in DOC_ID_TO_PASSAGES[i]: documents += [Text(passage, {'docid': i}, 0)] ret = sorted(model.rerank(Query(query), documents), key=lambda i: i.score, reverse=True) returned_ids = set() for i in ret: if i.metadata['docid'] not in returned_ids: returned_ids.add(i.metadata['docid']) yield {'score': i.score, 'id': i.metadata['docid']} def rerank(file_name, df_run, df_queries, model, tag): from tqdm import tqdm with open(file_name, 'w') as out_file: for topic in tqdm(df_queries.num): for i in zip(range(100), rerank_with_model(topic, df_queries, df_run, model)): out_file.write(str(topic) + ' Q0 ' + i[1]['id'] + ' ' + str(i[0] + 1) + ' ' + str(i[1]['score']) + ' ' + tag + '\n') # - # # Marco V1 rerank(DIR + 
'entrypage-random/run.ms-marco-content.bm25-mono-t5-maxp.txt', df_random_run, df_random_queries.copy(), monoT5Reranker, 'mono-t5-maxp-at-bm25') rerank(DIR + 'entrypage-popular/run.ms-marco-content.bm25-mono-t5-maxp.txt', df_popular_run, df_popular_queries.copy(), monoT5Reranker, 'mono-t5-maxp-at-bm25') # # Marco V2 rerank(DIR_v2 + 'entrypage-random/run.ms-marco-content.bm25-mono-t5-maxp.txt', df_random_run_v2, df_random_queries_v2.copy(), monoT5Reranker, 'mono-t5-maxp-at-bm25') rerank(DIR_v2 + 'entrypage-popular/run.ms-marco-content.bm25-mono-t5-maxp.txt', df_popular_run_v2, df_popular_queries_v2.copy(), monoT5Reranker, 'mono-t5-maxp-at-bm25') # # Rerank with MonoBERT rerank(DIR + 'entrypage-random/run.ms-marco-content.bm25-mono-bert-maxp.txt', df_random_run, df_random_queries.copy(), monoBert, 'mono-bert-maxp-at-bm25') rerank(DIR + 'entrypage-popular/run.ms-marco-content.bm25-mono-bert-maxp.txt', df_popular_run, df_popular_queries.copy(), monoBert, 'mono-bert-maxp-at-bm25') rerank(DIR_v2 + 'entrypage-random/run.ms-marco-content.bm25-mono-bert-maxp.txt', df_random_run_v2, df_random_queries_v2.copy(), monoBert, 'mono-bert-maxp-at-bm25') rerank(DIR_v2 + 'entrypage-popular/run.ms-marco-content.bm25-mono-bert-maxp.txt', df_popular_run_v2, df_popular_queries_v2.copy(), monoBert, 'mono-bert-maxp-at-bm25') # # Rerank remaining topics for DL 19 + 20 # + import pandas as pd df_dl_queries = pd.read_csv('../../Data/dl19+20/dl19+20-queries.tsv', sep='\t', names=['num', 'query']) df_dl_run = TrecRun(DIR + 'dl19+20/run.ms-marco-content.bm25-default.txt') # + # delete ALL_DOC_IDS = [] for i in [df_popular_run, df_random_run, df_popular_run_v2, df_random_run_v2, df_dl_run]: ALL_DOC_IDS += [j for j in i.run_data.docid.unique()] ALL_DOC_IDS = set(ALL_DOC_IDS) # + # delete def parse_passages(i): import json i = json.loads(i) passages = [] for t in range(1, len(i['passages'])): if t == 1: passages += [i['passages'][0][1] + i['passages'][1][1]] else: passages += [i['passages'][t][1]] 
return (i['id'], passages) def load_documents(file_name): from tqdm import tqdm ret = {} with open('/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/deep-ct/' + file_name) as f: for i in tqdm(f): doc_id, passages = parse_passages(i) assert doc_id not in ret if doc_id in ALL_DOC_IDS: ret[doc_id] = passages return ret DOC_ID_TO_PASSAGES = {} DOC_ID_TO_PASSAGES.update(load_documents('msmarco-document-passages.jsonl')) DOC_ID_TO_PASSAGES.update(load_documents('msmarco-document-passages-v2.jsonl')) # - rerank(DIR + 'dl19+20/run.ms-marco-content.bm25-mono-t5-maxp.txt', df_dl_run, df_dl_queries.copy(), monoT5Reranker, 'mono-t5-maxp-at-bm25') rerank(DIR + 'dl19+20/run.ms-marco-content.bm25-mono-bert-maxp.txt', df_dl_run, df_dl_queries.copy(), monoBert, 'mono-bert-maxp-at-bm25')
src/jupyter/rerank-with-mono-t5-maxp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CH. 7 - TOPIC MODELS # ## Activities # #### Activity 7.01 # + # not necessary # added to suppress warnings coming from pyLDAvis import warnings warnings.filterwarnings('ignore') # - import langdetect # language detection import matplotlib.pyplot # plotting import nltk # natural language processing import numpy # arrays and matrices import pandas # dataframes import pyLDAvis # plotting import pyLDAvis.sklearn # plotting import regex # regular expressions import sklearn # machine learning # + # define path path = 'latimeshealth.txt' # + # load data df = pandas.read_csv(path, sep="|", header=None) df.columns = ["id", "datetime", "tweettext"] # + # define quick look function for data frame def dataframe_quick_look(df, nrows): print("SHAPE:\n{shape}\n".format(shape=df.shape)) print("COLUMN NAMES:\n{names}\n".format(names=df.columns)) print("HEAD:\n{head}\n".format(head=df.head(nrows))) # - dataframe_quick_look(df, nrows=2) # + # view final data that will be carried forward raw = df['tweettext'].tolist() print("HEADLINES:\n{lines}\n".format(lines=raw[:5])) print("LENGTH:\n{length}\n".format(length=len(raw))) # + # define function for checking language of tweets # filter to english only def do_language_identifying(txt): try: the_language = langdetect.detect(txt) except: the_language = 'none' return the_language # + # define function to perform lemmatization def do_lemmatizing(wrd): out = nltk.corpus.wordnet.morphy(wrd) return (wrd if out is None else out) # + # define function to cleaning tweet data def do_tweet_cleaning(txt): # identify language of tweet # return null if language not english lg = do_language_identifying(txt) if lg != 'en': return None # split the string on whitespace out = txt.split(' ') # identify screen names # replace with 
SCREENNAME out = ['SCREENNAME' if i.startswith('@') else i for i in out] # identify urls # replace with URL out = [ 'URL' if bool(regex.search('http[s]?://', i)) else i for i in out ] # remove all punctuation out = [regex.sub('[^\\w\\s]|\n', '', i) for i in out] # make all non-keywords lowercase keys = ['SCREENNAME', 'URL'] out = [i.lower() if i not in keys else i for i in out] # remove keywords out = [i for i in out if i not in keys] # remove stopwords list_stop_words = nltk.corpus.stopwords.words('english') list_stop_words = [regex.sub('[^\\w\\s]', '', i) for i in list_stop_words] out = [i for i in out if i not in list_stop_words] # lemmatizing out = [do_lemmatizing(i) for i in out] # keep words 4 or more characters long out = [i for i in out if len(i) >= 5] return out # + # apply cleaning function to every tweet clean = list(map(do_tweet_cleaning, raw)) # + # remove none types clean = list(filter(None.__ne__, clean)) print("HEADLINES:\n{lines}\n".format(lines=clean[:5])) print("LENGTH:\n{length}\n".format(length=len(clean))) # + # turn tokens back into strings # concatenate using whitespaces clean_sentences = [" ".join(i) for i in clean] # - print(clean_sentences[0:10]) # #### Activity 7.02 # + # define global variables number_words = 10 number_docs = 10 number_features = 1000 # + # bag of words conversion # count vectorizer (raw counts) vectorizer1 = sklearn.feature_extraction.text.CountVectorizer( analyzer="word", max_df=0.95, min_df=10, max_features=number_features ) clean_vec1 = vectorizer1.fit_transform(clean_sentences) print(clean_vec1[0]) feature_names_vec1 = vectorizer1.get_feature_names() # + # define function to calculate perplexity based on number of topics def perplexity_by_ntopic(data, ntopics): output_dict = { "Number Of Topics": [], "Perplexity Score": [] } for t in ntopics: lda = sklearn.decomposition.LatentDirichletAllocation( n_components=t, learning_method="online", random_state=0 ) lda.fit(data) output_dict["Number Of Topics"].append(t) 
output_dict["Perplexity Score"].append(lda.perplexity(data)) output_df = pandas.DataFrame(output_dict) index_min_perplexity = output_df["Perplexity Score"].idxmin() output_num_topics = output_df.loc[ index_min_perplexity, # index "Number Of Topics" # column ] return (output_df, output_num_topics) # + # execute function on vector of numbers of topics # takes several minutes df_perplexity, optimal_num_topics = perplexity_by_ntopic( clean_vec1, ntopics=[i for i in range(1, 21) if i % 2 == 0] ) # - print(df_perplexity) # + # define and fit lda model lda = sklearn.decomposition.LatentDirichletAllocation( n_components=optimal_num_topics, learning_method="online", random_state=0 ) lda.fit(clean_vec1) # + # define function to format raw output into nice tables def get_topics(mod, vec, names, docs, ndocs, nwords): # word to topic matrix W = mod.components_ W_norm = W / W.sum(axis=1)[:, numpy.newaxis] # topic to document matrix H = mod.transform(vec) W_dict = {} H_dict = {} for tpc_idx, tpc_val in enumerate(W_norm): topic = "Topic{}".format(tpc_idx) # formatting w W_indices = tpc_val.argsort()[::-1][:nwords] W_names_values = [ (round(tpc_val[j], 4), names[j]) for j in W_indices ] W_dict[topic] = W_names_values # formatting h H_indices = H[:, tpc_idx].argsort()[::-1][:ndocs] H_names_values = [ (round(H[:, tpc_idx][j], 4), docs[j]) for j in H_indices ] H_dict[topic] = H_names_values W_df = pandas.DataFrame( W_dict, index=["Word" + str(i) for i in range(nwords)] ) H_df = pandas.DataFrame( H_dict, index=["Doc" + str(i) for i in range(ndocs)] ) return (W_df, H_df) # + # get nice tables W_df, H_df = get_topics( mod=lda, vec=clean_vec1, names=feature_names_vec1, docs=raw, ndocs=number_docs, nwords=number_words ) # + # word-topic table print(W_df) # + # document-topic table print(H_df) # + # iteractive plot # pca biplot and histogram lda_plot = pyLDAvis.sklearn.prepare(lda, clean_vec1, vectorizer1, R=10) pyLDAvis.display(lda_plot) # - # #### Activity 7.03 # + # bag of words 
conversion # tf-idf method vectorizer2 = sklearn.feature_extraction.text.TfidfVectorizer( analyzer="word", max_df=0.5, min_df=20, max_features=number_features, smooth_idf=False ) clean_vec2 = vectorizer2.fit_transform(clean_sentences) print(clean_vec2[0]) feature_names_vec2 = vectorizer2.get_feature_names() # + # define and fit nmf model nmf = sklearn.decomposition.NMF( n_components=optimal_num_topics, init="nndsvda", solver="mu", beta_loss="frobenius", random_state=0, alpha=0.1, l1_ratio=0.5 ) nmf.fit(clean_vec2) # + # get nicely formatted result tables W_df, H_df = get_topics( mod=nmf, vec=clean_vec2, names=feature_names_vec2, docs=raw, ndocs=number_docs, nwords=number_words ) # + # word-topic table print(W_df) # + # document-topic table print(H_df)
Activity01-Activity03/Activity01-Activity03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="a6391a8d"
# # Lab 3 - Distance Metrics and Clustering
#

# + [markdown] id="9c089c25"
# ### Non-Euclidean Distance Metrics
#
# We are most familiar with the typical Euclidean distance metric, ie: given two vectors $\overline{v_1} = [x_1, y_1]$ and $\overline{v_2} = [x_2, y_2]$, the distance $D$ between them is $\sqrt{(x_2 - x_1)^2 + (y_2 - y_1)^2}$. This is generalized for $N$-sized vectors $\overline{a} = [a_1, ..., a_N]$ and $\overline{b} = [b_1, ..., b_N]$ with the following formula:
# $$
# D = \sqrt{\sum_{i = 1}^{N}(a_i - b_i)^2}
# $$
#
# There are other metrics which may be useful, depending on the type of analysis you wish to do. The *Hamming Distance*, $D_H$ is useful when analyzing categorical data:
# $$
# D_H = \frac{\sum_{i = 1}^{N}|a_i - b_i|}{N}
# $$
#
# The *Block Distance*, $D_B$, is useful for calculating distance when only straight paths parallel to your axes are allowed, eg: while navigating through blocks in a large city:
# $$
# D_B = \sum_{i = 1}^{N}|a_i - b_i|
# $$
#
# The *Minkowski Distance*, $D_M$, is a generalized metric that reduces to the block and euclidean distances for $p=1$ and $p=2$, respectively. The value of $p$ can vary by application and is generally tuned by experiment - it can be used for applications ranging from analysing the quality of mobile networks to calculating distances in curved space:
# $$
# D_M = \big(\sum_{i = 1}^{N}|a_i - b_i|^p\big)^{1/p}
# $$

# + [markdown] id="1b68a946"
# #### Example 1: Distance calculation with varying metrics
#
# Use the **Euclidean**, **Block**, and **Minkowski** (p=3 and p=4) distance formulas to calculate the distance between the two vectors given below. 
You may change the values contained in the vectors to see how their distances respond relative to each other.

# + id="64d5dc03"
import numpy as np

vec1 = np.array([1.2, 5.7, 6.1, 2.8, 1.1, 14.8])
vec2 = np.array([5.1, 6.0, 1.3, 28.3, 14.4, -11.9])

# + [markdown] id="348e8c70"
# ### Clustering with K-Means
#
# "Clustering" is a method by which a set of *N* vectors can be subdivided into groups, based on the relative "closeness" of one vector to the surrounding vectors. This is done with the "*k*-means algorithm", and can be separated into three main parts:
# - Check if the data can be clustered. If so, how many clusters are there?
# - Determine a point's association with surrounding clusters.
# - Repeat until clustering is optimized as much as possible.
#
# Consider *N* vectors of length *n*, $\overline{x_1} = [x_{1, 1}, ..., x_{1, n}], ..., \overline{x_N} = [x_{N, 1}, ..., x_{N, n}]$. Let there be *k* clusters, labeled $1, ..., k$. We specify the cluster assignment of a given vector by defining an *N*-sized vector, $\overline{c} = [c_1, ..., c_N]$, where $c_i = 1, ..., k$ is the cluster assignment for vector $\overline{x_i}$. The contents of a given cluster can then be defined as $G_j = \{i : c_i = j\}$, with $j$ the number of the cluster.
#
# For each group $G_i$, define a "group-representative $n$-vector" $\overline{z_i}$. We want this representative to be as close to other points in the cluster as possible, so we seek to minimize the distance between representative and cluster points (ie: minimize the quantity $||\overline{x_i} - \overline{z_{c_i}}||$).
#
# To evaluate a given choice of clustering, we use the mean square distance from the vectors to their associated representatives:
# $$
# J^{clust} = \frac{(||\overline{x_1} - \overline{z_{c_1}}||^2 + ... + ||\overline{x_N} - \overline{z_{c_N}}||^2)}{N}
# $$
# For optimal clustering, we seek to make $J^{clust}$ as small as possible. 
We can select $c_i$ to minimize each term: for each data vector $x_i$, select $c_i$ such that $||x_i - z_j||$ is minimized over all $j$. ie: assign each vector $x_i$ to its nearest neighbor amongst the group representatives. # # The best group assignment is then the one that minimizes $J^{clust}$. By the above logic, we have # $$ # ||x_i - z_{c_i}|| = min_{j = 1, ..., k}||x_i - z_j|| # $$ # so then # $$ # J^{clust} = \frac{(min_{j = 1, ..., k}||x_1 - z_j||^2 + ... + min_{j = 1, ..., k}||x_N - z_j||^2)}{N} # $$ # + [markdown] id="4cce6753" # ### SciKit-Learn `KMeans()` # # The `KMeans()` function in the `sklearn.cluster` package can help to easily identify the cluster centers. The documentation for this function can be found [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html). # # Given a `numpy` array, you can run the `KMeans()` function to produce an initial cluster grouping. You will need to specify a guess at a number of clusters (`n_clusters`) and a random state (`random_state`). An example of how to do this is shown in the documentation. # # Once the *k*-means operation is run, you can determine cluster membership using the `.labels_` attribute (ie: if you set `kmeans = KMeans(...).fit(X)` with point-array `X`, `kmeans.labels_` returns an array with the group numbers for each point). The `.cluster_centers_` attribute returns the cluster center location for each cluster. The `.predict()` function will predict the cluster membership of any additional points you want to add in or test. # + [markdown] id="042464d4" # #### Example 2: Find the number of darts players. # Several darts players are throwing darts at a $10\times10$ meter board. The points they hit are stored in the `darts` array below as *x*-*y* coordinate pairs. Guess the number of clusters, *k* - this corresponds to the number of darts players there are. You may want to plot the data to confirm your guess visually. 
# + id="e7780935" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1634074009347, "user_tz": 420, "elapsed": 520, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "00734435821983708962"}} outputId="458b5915-855a-4a83-921e-0d2442a677f4" # %matplotlib inline import numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot as plt darts = np.array([[1.369, 1.764], [1.250, 2.148], [1.369, 2.046], [1.408, 2.276], [1.547, 2.199], [1.805, 2.480], [1.765, 3.094], [1.289, 3.120], [0.833, 2.736], [1.686, 1.585], [2.023, 2.046], [1.289, 2.685], [1.706, 1.841], [0.952, 1.227], [3.631, 8.030], [3.769, 7.519], [3.571, 7.749], [3.968, 8.056], [3.769, 6.879], [4.404, 7.059], [4.365, 7.621], [2.579, 7.340], [3.333, 6.061], [3.333, 7.391], [3.412, 8.286], [3.948, 8.439], [5.694, 2.659], [5.813, 3.069], [5.932, 2.634], [5.000, 2.890], [5.436, 2.148], [5.456, 2.762], [5.218, 2.506], [5.654, 2.097], [6.111, 2.327], [5.575, 2.480], [5.555, 1.687], [6.726, 1.739], [6.428, 2.915], [5.714, 3.887], [5.753, 4.322], [5.992, 3.810], [5.436, 3.554], [5.218, 4.884], [5.019, 4.578], [5.337, 4.373], [5.694, 4.833], [2.420, 2.659], [1.785, 3.478], [2.202, 2.838], [2.162, 2.455], [1.448, 3.938], [0.813, 5.063], [0.793, 4.629], [0.992, 3.836], [3.789, 6.010], [4.146, 6.138], [3.809, 6.342], [1.329, 9.053], [1.825, 8.746], [1.964, 9.053], [2.341, 8.388], [2.956, 9.130], [5.138, 4.169], [7.341, 9.028], [7.400, 8.695], [7.480, 8.874], [6.924, 8.746], [7.261, 8.388], [7.559, 8.516], [8.015, 8.005], [7.876, 8.491], [7.460, 8.056], [5.615, 3.094], [5.654, 3.478], [5.952, 3.120], [5.337, 2.915], [5.635, 2.736], [3.274, 8.030], [3.452, 8.107], [3.392, 7.774], [3.115, 8.132], [7.083, 8.235], [7.400, 7.749], [7.837, 8.849], [6.686, 9.207], [7.202, 8.670], [7.361, 9.258], [7.619, 8.363], [7.599, 9.130], [8.829, 8.542], [8.849, 9.232], [8.551, 8.849], [8.392, 7.877], [8.888, 7.595], 
[8.869, 6.700], [8.174, 7.109], [8.452, 7.263], [8.214, 6.828], [7.698, 7.544]]) plt.scatter(darts[:, 0], darts[:, 1]) # + [markdown] id="MAsqYuwPZ1Rk" # Run the *k*-means algorithm to calculate and minimize the mean square distance from the vectors to their associated representatives, $J^{clust}$. The goal is to get $J^{clust}$ as small as possible, so that each point is in its most optimal grouping. After optimizing the groups, plot the points and use colors to show which cluster a given point is a member of. # # Try this for your chosen *k*-value and other nearby *k*-values to confirm your choice of *k*. # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="wPMuMIZ-ZmPa" executionInfo={"status": "ok", "timestamp": 1634074013873, "user_tz": 420, "elapsed": 507, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "00734435821983708962"}} outputId="66759c41-936d-42c0-8c86-196b4c078035" kmeans = KMeans(n_clusters=4, random_state=0, max_iter=3).fit(darts) clusters = kmeans.labels_ k=0 plt_color = ['r','b','k','y','g','c'] for i in np.unique(clusters): indx = np.where(clusters==i)[0] plt.scatter(darts[indx,0],darts[indx,1],c=plt_color[k]) k = k + 1 plt.title('Clustering dart positions') plt.show() # + [markdown] id="7dc9cfe0" # ### Regression with K-Nearest Neighbors # # Now that we know how to determine clusters, let's develop a method by which additional points can be classified as belonging to one of the groups. The "K-Nearest Neighbors" algorithm (or KNN for short) can be used for both classification and regression - it can be used to guess the value of a new point based on its similarity to surrounding points. The algorithm is as follows: # - First, find the distance between the new point and all surrounding points. # - Select the closest *k* data points - this is a different *k*-value than the one used to number groups in the previous section! 
# - Use the average of these *k* datapoints to calculate the predicted value of the new point. # # The *k*-value is key in determining an accurate prediction based on the data. This value can be optimized using the training data - by plugging in various given points to the KNN algorithm, we can calculate the error for our chosen *k*. This is simply the difference between the actual and predicted value. # + [markdown] id="c0133ed2" # #### Example 3: Different Classes of Irises # Consider the following dataset containing data about irises. The attributes contained in the dataset are as follows: # 1. sepal length in cm # 2. sepal width in cm # 3. petal length in cm # 4. petal width in cm # 5. class: # - Iris Setosa # - Iris Versicolour # - Iris Virginica # # The data has been imported for your convenience below, as well as several steps to walk you through how the classification is done. Try changing the *k*-value (the number of nearest neighbors evaluated, preset as `n_neighbors=5`) to see how the class predictions change. Predict the class of a new iris with sepal length of $5.01cm$, sepal width of $3.72cm$, petal length of $1.23cm$, and petal width of $0.31cm$. This should be the only bit of wholly new code you need to add at the bottom. # # # The data was used as a courtesy of the UCI Machine Learning Repository, from the [Irises Dataset](https://archive.ics.uci.edu/ml/datasets/Iris). # # # **References**: # # <NAME>. and <NAME>. 1998. “UCI repository of machine learning databases”. University of California. Available online at: http://www.ics.uci.edu/∼mlearn/MLRepository.html # # <NAME>. "K-Nearest Neighbors Algorithm in Python and Scikit-Learn". StackAbuse.com. 
Available online at: https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/ # # + id="ec043b65" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1634266528664, "user_tz": 420, "elapsed": 631, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GioINv2YRmEhwfngXXbtoiIksMG-WuCXQmau-8D=s64", "userId": "11521119731528460253"}} outputId="74b89734-b45f-4361-9c7f-78feaac54316" import numpy as np import matplotlib.pyplot as plt import pandas as pd url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data" # Assign column names to the dataset names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class'] # Read dataset to pandas dataframe dataset = pd.read_csv(url, names=names) # Shows the first 5 entries in the dataset (only works if code below the following line is commented out) print(dataset.head()) # Split dataset into values and labels my_data = np.array(dataset.iloc[:, :-1].values) classifications = np.array(dataset.iloc[:, 4].values) # Split the values into train and test groups - 80% train and 20% test from sklearn.model_selection import train_test_split data_train, data_test, class_train, class_test = train_test_split(my_data, classifications, test_size=0.20) # Use Sci-Kit to create a classification of the training data # here, we set k=5 to classify points based on 5 nearest neighbors from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors=5) classifier.fit(data_train, class_train) # Now lets make predictions of the classifications of our test data predicted_class = classifier.predict(data_test) # Predicted versus test classes print(predicted_class == class_test) print(class_test) ### Now predict the class of a new iris with the following attributes: # - sepal length of 5.01cm # - sepal width of 3.72cm # - petal length of 1.23cm # - petal width of 0.31cm # You will need to input the 
values into a numpy array, then use the classifier.predict() function as above

''' Insert Code Here '''
# Wrap the measurements in a 2D array (one row = one sample):
# sklearn predictors expect (n_samples, n_features) shaped input.
new = np.array([[5.01, 3.72, 1.23, 0.31]])
# Predicted class label for the new iris, from its k nearest neighbors
prediction_new = classifier.predict(new)
print(prediction_new)
lab_notebook_solution/lab3_solved.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D2_LinearSystems/W2D2_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Tutorial 1: Linear dynamical systems # **Week 2, Day 2: Linear Systems** # # **By Neuromatch Academy** # # **Content Creators**: <NAME>, <NAME> # # **Content Reviewers**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # --- # # Tutorial Objectives # # *Estimated timing of tutorial: 1 hour* # # In this tutorial, we will be learning about behavior of dynamical systems -- systems that evolve in time -- where the rules by which they evolve in time are described precisely by a differential equation. # # Differential equations are equations that express the **rate of change** of the state variable $x$. One typically describes this rate of change using the derivative of $x$ with respect to time ($dx/dt$) on the left hand side of the differential equation: # # $$\frac{dx}{dt} = f(x)$$ # # A common notational short-hand is to write $\dot{x}$ for $\frac{dx}{dt}$. The dot means "the derivative with respect to time". # # Today, the focus will be on **linear dynamics**, where $f(x)$ is a linear function of $x$. In Tutorial 1, we will: # # * Explore and understand the behavior of such systems where $x$ is a single variable # * Consider cases where $\mathbf{x}$ is a state vector representing two variables. 
# # + cellView="form" # @title Tutorial slides # @markdown These are the slides for the videos in all tutorials today from IPython.display import IFrame IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/snv4m/?direct%26mode=render%26action=download%26mode=render", width=854, height=480) # - # --- # # Setup # + # Imports import numpy as np import matplotlib.pyplot as plt from scipy.integrate import solve_ivp # numerical integration solver # + cellView="form" #@title Figure settings import ipywidgets as widgets # interactive display # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # + cellView="form" #@title Plotting Functions def plot_trajectory(system, params, initial_condition, dt=0.1, T=6, figtitle=None): """ Shows the solution of a linear system with two variables in 3 plots. The first plot shows x1 over time. The second plot shows x2 over time. The third plot shows x1 and x2 in a phase portrait. 
Args: system (function): a function f(x) that computes a derivative from inputs (t, [x1, x2], *params) params (list or tuple): list of parameters for function "system" initial_condition (list or array): initial condition x0 dt (float): time step of simulation T (float): end time of simulation figtitlte (string): title for the figure Returns: nothing, but it shows a figure """ # time points for which we want to evaluate solutions t = np.arange(0, T, dt) # Integrate # use built-in ode solver solution = solve_ivp(system, t_span=(0, T), y0=initial_condition, t_eval=t, args=(params), dense_output=True) x = solution.y # make a color map to visualize time timecolors = np.array([(1 , 0 , 0, i) for i in t / t[-1]]) # make a large figure fig, (ah1, ah2, ah3) = plt.subplots(1, 3) fig.set_size_inches(10, 3) # plot x1 as a function of time ah1.scatter(t, x[0,], color=timecolors) ah1.set_xlabel('time') ah1.set_ylabel('x1', labelpad=-5) # plot x2 as a function of time ah2.scatter(t, x[1], color=timecolors) ah2.set_xlabel('time') ah2.set_ylabel('x2', labelpad=-5) # plot x1 and x2 in a phase portrait ah3.scatter(x[0,], x[1,], color=timecolors) ah3.set_xlabel('x1') ah3.set_ylabel('x2', labelpad=-5) #include initial condition is a blue cross ah3.plot(x[0,0], x[1,0], 'bx') # adjust spacing between subplots plt.subplots_adjust(wspace=0.5) # add figure title if figtitle is not None: fig.suptitle(figtitle, size=16) def plot_streamplot(A, ax, figtitle=None): """ Show a stream plot for a linear ordinary differential equation with state vector x=[x1,x2] in axis ax. 
Args: A (numpy array): 2x2 matrix specifying the dynamical system figtitle (string): title for the figure Returns: nothing, but shows a figure """ # sample 20 x 20 grid uniformly to get x1 and x2 grid = np.arange(-20, 21, 1) x1, x2 = np.meshgrid(grid, grid) # calculate x1dot and x2dot at each grid point x1dot = A[0,0] * x1 + A[0,1] * x2 x2dot = A[1,0] * x1 + A[1,1] * x2 # make a colormap magnitude = np.sqrt(x1dot ** 2 + x2dot ** 2) color = 2 * np.log1p(magnitude) #Avoid taking log of zero # plot plt.sca(ax) plt.streamplot(x1, x2, x1dot, x2dot, color=color, linewidth=1, cmap=plt.cm.cividis, density=2, arrowstyle='->', arrowsize=1.5) plt.xlabel(r'$x1$') plt.ylabel(r'$x2$') # figure title if figtitle is not None: plt.title(figtitle, size=16) # include eigenvectors if True: # get eigenvalues and eigenvectors of A lam, v = np.linalg.eig(A) # get eigenvectors of A eigenvector1 = v[:,0].real eigenvector2 = v[:,1].real # plot eigenvectors plt.arrow(0, 0, 20*eigenvector1[0], 20*eigenvector1[1], width=0.5, color='r', head_width=2, length_includes_head=True) plt.arrow(0, 0, 20*eigenvector2[0], 20*eigenvector2[1], width=0.5, color='b', head_width=2, length_includes_head=True) def plot_specific_example_stream_plots(A_options): """ Show a stream plot for each A in A_options Args: A (list): a list of numpy arrays (each element is A) Returns: nothing, but shows a figure """ # get stream plots for the four different systems plt.figure(figsize=(10,10)) for i, A in enumerate(A_options): ax = plt.subplot(2, 2, 1+i) # get eigenvalues and eigenvectors lam, v = np.linalg.eig(A) # plot eigenvalues as title # (two spaces looks better than one) eigstr = ", ".join([f"{x:.2f}" for x in lam]) figtitle =f"A with eigenvalues\n"+ '[' + eigstr + ']' plot_streamplot(A, ax, figtitle=figtitle) # Remove y_labels on righthand plots if i%2: ax.set_ylabel(None) if i<2: ax.set_xlabel(None) plt.subplots_adjust(wspace=0.3, hspace=0.3) # - # --- # # Section 1: One-dimensional Differential Equations # + 
cellView="form" # @title Video 1: Linear Dynamical Systems from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1up4y1S7wj", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="87z6OR7-DBI", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # This video serves as an introduction to dynamical systems as the mathematics of things that change in time, including examples of relevant timescales relevant for neuroscience. It covers the definition of alinear system and why we are spending a whole day on linear dynamical systems, and walks through solutions to one-dimensional, deterministic dynamical systems, their behaviors, and stability criteria. # # Note that this section is a recap of [Tutorials 2](https://compneuro.neuromatch.io/tutorials/W0D4_Calculus/student/W0D4_Tutorial2.html) and [3](https://compneuro.neuromatch.io/tutorials/W0D4_Calculus/student/W0D4_Tutorial3.html) of our pre-course calculus day. # # <details> # <summary> <font color='blue'>Click here for text recap of video </font></summary> # # Let's start by reminding ourselves of a one-dimensional differential equation in $x$ of the form # # $$\dot{x} = a x$$ # # where $a$ is a scalar. 
# # Solutions for how $x$ evolves in time when its dynamics are governed by such a differential equation take the form # # $$x(t) = x_0\exp(a t)$$ # # where $x_0$ is the **initial condition** of the equation -- that is, the value of $x$ at time $0$. # </details> # # To gain further intuition, let's explore the behavior of such systems with a simple simulation. We can simulate an ordinary differential equation by approximating or modelling time as a discrete list of time steps $t_0, t_1, t_2, \dots$, such that $t_{i+1}=t_i+dt$. We can get the small change $dx$ over a small duration $dt$ of time from the definition of the differential: # # $$ \ # \begin{eqnarray} # \dot x &=& \frac{dx}{dt} \\ # dx &=& \dot x\, dt \\ # \end{eqnarray} # $$ # # So, at each time step $t_i$, we compute a value of $x$, $x(t_i)$, as the sum of the value of $x$ at the previous time step, $x(t_{i-1})$ and a small change $dx=\dot x\,dt$: # # $$x(t_i)=x(t_{i-1})+\dot x(t_{i-1}) dt$$ # # This very simple integration scheme, known as **forward Euler integration**, works well if $dt$ is small and the ordinary differential equation is simple. It can run into issues when the ordinary differential equation is very noisy or when the dynamics include sudden big changes of $x$. Such big jumps can occur, for example, in models of excitable neurons. In such cases, one needs to choose an integration scheme carefully. However, for our simple system, the simple integration scheme should work just fine! # # # ## Coding Exercise 1: Forward Euler Integration # # *Referred to as Exercise 1B in video* # # In this exercise, we will complete a function, ``integrate_exponential``, to compute the solution of the differential equation $\dot{x} = a x$ using forward Euler integration. We will then plot this solution over time. # # # # # + cellView="code" def integrate_exponential(a, x0, dt, T): """Compute solution of the differential equation xdot=a*x with initial condition x0 for a duration T. 
Use time step dt for numerical solution. Args: a (scalar): parameter of xdot (xdot=a*x) x0 (scalar): initial condition (x at time 0) dt (scalar): timestep of the simulation T (scalar): total duration of the simulation Returns: ndarray, ndarray: `x` for all simulation steps and the time `t` at each step """ # Initialize variables t = np.arange(0, T, dt) x = np.zeros_like(t, dtype=complex) x[0] = x0 # This is x at time t_0 # Step through system and integrate in time for k in range(1, len(t)): ################################################################### ## Fill out the following then remove raise NotImplementedError("Student exercise: need to implement simulation") ################################################################### # for each point in time, compute xdot from x[k-1] xdot = ... # Update x based on x[k-1] and xdot x[k] = ... return x, t # Choose parameters a = -0.5 # parameter in f(x) T = 10 # total Time duration dt = 0.001 # timestep of our simulation x0 = 1. # initial condition of x at time 0 # Use Euler's method x, t = integrate_exponential(a, x0, dt, T) # Visualize plt.plot(t, x.real) plt.xlabel('Time (s)') plt.ylabel('x') # + # to_remove solution def integrate_exponential(a, x0, dt, T): """Compute solution of the differential equation xdot=a*x with initial condition x0 for a duration T. Use time step dt for numerical solution. 
Args: a (scalar): parameter of xdot (xdot=a*x) x0 (scalar): initial condition (x at time 0) dt (scalar): timestep of the simulation T (scalar): total duration of the simulation Returns: ndarray, ndarray: `x` for all simulation steps and the time `t` at each step """ # Initialize variables t = np.arange(0, T, dt) x = np.zeros_like(t, dtype=complex) x[0] = x0 # This is x at time t_0 # Step through system and integrate in time for k in range(1, len(t)): # for each point in time, compute xdot from x[k-1] xdot = (a*x[k-1]) # Update x based on x[k-1] and xdot x[k] = x[k-1] + xdot * dt return x, t # Choose parameters a = -0.5 # parameter in f(x) T = 10 # total Time duration dt = 0.001 # timestep of our simulation x0 = 1. # initial condition of x at time 0 # Use Euler's method x, t = integrate_exponential(a, x0, dt, T) # Visualize with plt.xkcd(): plt.plot(t, x.real) plt.xlabel('Time (s)') plt.ylabel('x') # - # ## Interactive Demo 1: Forward Euler Integration # # 1. What happens when you change $a$? Try values where $a<0$ and $a>0$. # # 2. The $dt$ is the step size of the forward Euler integration. Try $a = -1.5$ and increase $dt$. What happens to the numerical solution when you increase $dt$? # + cellView="form" #@title #@markdown Make sure you execute this cell to enable the widget! T = 10 # total Time duration x0 = 1. # initial condition of x at time 0 @widgets.interact def plot_euler_integration(a=(-2.5, 1.5, .25), dt = widgets.SelectionSlider(options=[("%g"%i,i) for i in np.arange(0.001, 1.001, 0.01)])): # Have to do this clunky word around to show small values in slider accurately # (from https://github.com/jupyter-widgets/ipywidgets/issues/259) x, t = integrate_exponential(a, x0, dt, T) plt.plot(t, x.real) # integrate_exponential returns complex plt.xlabel('Time (s)') plt.ylabel('x') # + # to_remove explanation """ 1) For a<0, the solution decays in time. For a>0, the solution grows in time. For a=0, the solution stays at 1 (and is stable). 
2) For small-ish dt, the solution still looks the same. As dt gets bigger, the solution starts to look choppier and is no longer smooth, but still has mostly the right trends. For a = 0.15, as dt gets above 0.7 or so, we start to see the forward Euler integration start to actually break down. Specifically, the solution is no longer decreasing monotonically and has developed an erroneous dip below zero. The more general lesson is that, for each system, there is a dt threshold above which the simulation introduces numerical artifacts and no longer behaves as an accurate estimate of the true underlying system. We may tolerate some choppiness in the solution, but eventually qualitatively wrong things creep in! """; # - # --- # # Section 2: Oscillatory Dynamics # # *Estimated timing to here from start of tutorial: 20 min* # # # + cellView="form" # @title Video 2: Oscillatory Solutions from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1gZ4y1u7PK", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="vPYQPI4nKT8", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # We will now explore what happens when $a$ is a complex number and has a non-zero imaginary component. 
# ## Interactive Demo 2: Oscillatory Dynamics # # *Referred to as exercise 1B in video* # # In the following demo, you can change the real part and imaginary part of $a$ (so a = real + imaginary i) # # 1. What values of $a$ produce dynamics that both ***oscillate*** and ***grow***? # 2. What value of $a$ is needed to produce a stable oscillation of 0.5 Hertz (cycles/time units)? # # + cellView="form" #@title #@markdown Make sure you execute this cell to enable the widget! # parameters T = 5 # total Time duration dt = 0.0001 # timestep of our simulation x0 = 1. # initial condition of x at time 0 @widgets.interact def plot_euler_integration(real=(-2, 2, .2), imaginary=(-4, 7, .1)): a = complex(real, imaginary) x, t = integrate_exponential(a, x0, dt, T) plt.plot(t, x.real) #integrate exponential returns complex plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('x') # - # to_remove explanation """ 1) To make the system both oscillate and grow, real has to be positive, and imaginary has to be not zero. 2) Stable oscillation of 0.5 Hz (half a cycle per unit time, or one cycle per two unit time) is achieved with real = 0 and imagineary = +/- pi (approximately 3.1 or -3.1). Note: For really large values of the imaginary component, the numerical integration scheme breaks down a bit, and we see non-stable oscillations even when real=0. This is a numerical artifact of the forward Euler scheme. Some of the students may discover this if they push the parameters, but I've tried to constrain the widget so that it is not obvious, as it is not the point of this exercise. 
"""; # --- # # Section 3: Deterministic Linear Dynamics in Two Dimensions # # *Estimated timing to here from start of tutorial: 33 min* # + cellView="form" # @title Video 3: Multi-Dimensional Dynamics from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1pf4y1R7uy", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="c_GdNS3YH_M", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # This video serves as an introduction to two-dimensional, deterministic dynamical systems written as a vector-matrix equation. It covers stream plots and how to connect phase portraits with the eigenvalues and eigenvectors of the transition matrix A. # # # <details> # <summary> <font color='blue'>Click here for text recap of relevant part of video </font></summary> # # Adding one additional variable (or _dimension_) adds more variety of behaviors. Additional variables are useful in modeling the dynamics of more complex systems with richer behaviors, such as systems of multiple neurons. We can write such a system using two linear ordinary differential equations: # $$ # \begin{eqnarray} # \dot{x}_1 &=& {a}_{11} x_1 \\ # \dot{x}_2 &=& {a}_{22} x_2 \\ # \end{eqnarray} # $$ # So far, this system consists of two variables (e.g. neurons) in isolation. 
To make things interesting, we can add interaction terms: # $$ # \begin{eqnarray} # \dot{x}_1 &=& {a}_{11} x_1 + {a}_{12} x_2 \\ # \dot{x}_2 &=& {a}_{21} x_1 + {a}_{22} x_2 \\ # \end{eqnarray} # $$ # We can write the two equations that describe our system as one (vector-valued) linear ordinary differential equation: # # $$\dot{\mathbf{x}} = \mathbf{A} \mathbf{x}$$ # # For two-dimensional systems, $\mathbf{x}$ is a vector with 2 elements ($x_1$ and $x_2$) and $\mathbf{A}$ is a $2 \times 2$ matrix with $\mathbf{A}=\bigg[\begin{array} & a_{11} & a_{12} \\ a_{21} & a_{22} \end{array} \bigg]$. # # </details> # ## Coding Exercise 3: Sample trajectories in 2 dimensions # # *Referred to in video as step 1 and 2 of exercise 1C* # # We want to simulate some **trajectories** of a given system and plot how 𝑥1 and 𝑥2 evolve in time. We will begin with this example system: # # $$\dot{\mathbf{x}} = \bigg[\begin{array} &2 & -5 \\ 1 & -2 \end{array} \bigg] \mathbf{x}$$ # # We will use an integrator from scipy, so we won't have to solve the system ourselves. We have a helper function, ``plot_trajectory``, that plots these trajectories given a system function. In this exercise, we will write the system function for a linear system with two variables. # # # # + def system(t, x, a00, a01, a10, a11): ''' Compute the derivative of the state x at time t for a linear differential equation with A matrix [[a00, a01], [a10, a11]]. Args: t (float): time x (ndarray): state variable a00, a01, a10, a11 (float): parameters of the system Returns: ndarray: derivative xdot of state variable x at time t ''' ################################################# ## TODO for students: Compute xdot1 and xdot2 ## ## Fill out the following then remove raise NotImplementedError("Student exercise: say what they should have done") ################################################# # compute x1dot and x2dot x1dot = ... x2dot = ... 
def system(t, x, a00, a01, a10, a11):
    '''
    Right-hand side of the 2-D linear ODE  x' = A x  with
    A = [[a00, a01], [a10, a11]].

    Args:
        t (float): time (unused; kept so the scipy integrator can call us)
        x (ndarray): current state [x1, x2]
        a00, a01, a10, a11 (float): entries of the 2x2 system matrix

    Returns:
        ndarray: derivative [x1dot, x2dot] of the state at time t
    '''
    # Assemble A and take one matrix-vector product; this is exactly
    # x1dot = a00*x[0] + a01*x[1] and x2dot = a10*x[0] + a11*x[1].
    A = np.array([[a00, a01], [a10, a11]])
    return A @ np.asarray(x)
# parameters T = 6 # total Time duration dt = 0.1 # timestep of our simulation x0 = np.asarray([-0.1, 0.2]) # initial condition of x at time 0 A_option_1 = [[2, -5],[1, -2]] A_option_2 = [[3,4], [1, 2]] A_option_3 = [[-1, -1], [0, -0.25]] A_option_4 = [[3, -2],[2, -2]] @widgets.interact def plot_euler_integration(A = widgets.Dropdown( options=[A_option_1, A_option_2, A_option_3, A_option_4, None], value=A_option_1 )): if A: plot_trajectory(system, [A[0][0],A[0][1],A[1][0],A[1][1]], x0, dt=dt, T=T) # - # to_remove explanation """ You should observe exponential growth to positive values, exponential growth to negative values, stable oscillations, and decay to the origin. """; # ## Interactive Demo 3B: Varying Initial Conditions # # We will now vary the initial conditions for a given $\mathbf{A}$: # # $$\dot{\mathbf{x}} = \bigg[\begin{array} &2 & -5 \\ 1 & -2 \end{array} \bigg] \mathbf{x}$$ # # What kinds of qualitatively different dynamics do you observe? Hint: Keep an eye on the x-axis and y-axis! # + cellView="form" #@title #@markdown Make sure you execute this cell to enable the widget! # parameters T = 6 # total Time duration dt = 0.1 # timestep of our simulation x0 = np.asarray([-0.1, 0.2]) # initial condition of x at time 0 A = [[2, -5],[1, -2]] x0_option_1 = [-.1, 0.2] x0_option_2 = [10, 10] x0_option_3 = [-4, 3] @widgets.interact def plot_euler_integration(x0 = widgets.Dropdown( options=[x0_option_1, x0_option_2, x0_option_3, None], value=x0_option_1 )): if x0: plot_trajectory(system, [A[0][0],A[0][1],A[1][0],A[1][1]], x0, dt=dt, T=T) # - # to_remove explanation """ Changing the initial conditions for this A always produces oscillatory dynamics. The only difference is the radii of the resulting elliptical trajectories. """; # --- # # Section 4: Stream Plots # # *Estimated timing to here from start of tutorial: 45 min* # # It's a bit tedious to plot trajectories one initial condition at a time! 
# # Fortunately, to get an overview of how a grid of initial conditions affect trajectories of a system, we can use a _stream plot_. # # We can think of a initial condition ${\bf x}_0=(x_{1_0},x_{2_0})$ as coordinates for a position in a space. For a 2x2 matrix $\bf A$, a stream plot computes at each position $\bf x$ a small arrow that indicates $\bf Ax$ and then connects the small arrows to form _stream lines_. Remember from the beginning of this tutorial that $\dot {\bf x} = \bf Ax$ is the rate of change of $\bf x$. So the stream lines indicate how a system changes. If you are interested in a particular initial condition ${\bf x}_0$, just find the correponding position in the stream plot. The stream line that goes through that point in the stream plot indicates ${\bf x}(t)$. # ## Think! 4: Interpreting Eigenvalues and Eigenvectors # # Using some helper functions, we show the stream plots for each option of A that you examined in the earlier interactive demo. We included the eigenvectors of $\bf A$ as a red line (1st eigenvalue) and a blue line (2nd eigenvalue) in the stream plots. # # What is special about the direction in which the principal eigenvector points? And how does the stability of the system relate to the corresponding eigenvalues? (Hint: Remember from your [introduction to linear algebra](https://www.youtube.com/watch?v=PFDu9oVAE-g&list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab&index=15&t=0s) that, for matrices with real eigenvalues, the eigenvectors indicate the lines on which $\bf Ax$ is parallel to $\bf x$ and real eigenvalues indicate the factor by which $\bf Ax$ is streched or shrunk compared to $\bf x$.) 
# + cellView="form" # @markdown Execute this cell to see stream plots A_option_1 = np.array([[2, -5], [1, -2]]) A_option_2 = np.array([[3,4], [1, 2]]) A_option_3 = np.array([[-1, -1], [0, -0.25]]) A_option_4 = np.array([[3, -2], [2, -2]]) A_options = [A_option_1, A_option_2, A_option_3, A_option_4] plot_specific_example_stream_plots(A_options) # + # to_remove explanation """ In top-left A, both eigenvalues are imaginary (no real component, the two eigenvalues are complex conjugate pairs), so the solutions are all stable oscillations. The eigenvectors are also complex conjugate pairs (that's why we see them plotted on top of each other). They point in the direction of the major axis of the elliptical trajectories. In the top-right A, both eigenvalues are positive, so they are growing. The larger eigenvalue direction (red) grows faster than the other direction (blue), so trajectories all eventually follow the red eigenvector direction. Those that start close to the blue direction follow blue for a bit initially. In the bottom-left A, both eigenvalues are negative, so they are both decaying. All solutions decay towards the origin [0, 0]. The red eigenvalue is larger in magnitude, so decay is faster along the red eigenvector. In the bottm-right A, one eigenvalue is positive (red) and one eigenvalue is negative (blue). This makes the shape of the landscape the shape of a saddle (named after the saddle that one puts on a horse for a rider). Trajectories decay along the blue eigenvector but grow along the red eigenvector. """; # - # --- # # Summary # # *Estimated timing of tutorial: 1 hour* # # In this tutorial, we learned: # # * How to simulate the trajectory of a dynamical system specified by a differential equation $\dot{x} = f(x)$ using a forward Euler integration scheme. # * The behavior of a one-dimensional linear dynamical system $\dot{x} = a x$ is determined by $a$, which may be a complex valued number. 
Knowing $a$, we know about the stability and oscillatory dynamics of the system. # * The dynamics of high-dimensional linear dynamical systems $\dot{\mathbf{x}} = \mathbf{A} \mathbf{x}$ can be understood using the same intuitions, where we can summarize the behavior of the trajectories using the eigenvalues and eigenvectors of $\mathbf{A}$.
tutorials/W2D2_LinearSystems/W2D2_Tutorial1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from splinter import Browser import time browser = Browser('chrome','chromedriver') # ### NASA Mars News # # * Scrape the [NASA Mars News Site](https://mars.nasa.gov/news/) and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later. browser.visit('https://mars.nasa.gov/news/') title = browser.find_by_css('div.content_title a').text paragraph = browser.find_by_css('div.article_teaser_body').text title, paragraph # ### JPL Mars Space Images - Featured Image # # * Visit the url for JPL Featured Space Image [here](https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html). # # * Use splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called `featured_image_url`. # # * Make sure to find the image url to the full size `.jpg` image. # # * Make sure to save a complete url string for this image. browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html') browser.links.find_by_partial_text('FULL IMAGE').click() browser.find_by_css('img.fancybox-image')['src'] # ### Mars Facts # # * Visit the Mars Facts webpage [here](https://space-facts.com/mars/) and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc. # # * Use Pandas to convert the data to a HTML table string. pd.read_html('https://space-facts.com/mars/')[0].to_html(classes='table table-stripped') # ### Mars Hemispheres # # * Visit the USGS Astrogeology site [here](https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars) to obtain high resolution images for each of Mar's hemispheres. 
# # * You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image. # # * Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys `img_url` and `title`. # # * Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere. browser.visit('https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars') links = browser.find_by_css('a.itemLink h3') hemispheres = [] for i in range(len(links)): hemisphere = {} hemisphere['title'] = browser.find_by_css('a.itemLink h3')[i].text browser.find_by_css('a.itemLink h3')[i].click() hemisphere['url'] = browser.find_by_text('Sample')['href'] hemispheres.append(hemisphere) browser.back() browser.quit() hemispheres
Missions_to-Mars/mission_to_mars.ipynb
def compute_cost(graph):
    '''
    Build the edge-cost table for a grid graph.

    Arguments:
    graph-- dict mapping each node to the set of its neighbour nodes

    Return:
    cost-- dict of dicts: cost[node][neighbour] is the Euclidean distance
           between the two cells, rounded to 3 decimal places
    '''
    cost = {}
    for node, neighbours in graph.items():
        cost[node] = {
            other: np.round(math.sqrt((node[0] - other[0]) ** 2 + (node[1] - other[1]) ** 2), 3)
            for other in neighbours
        }
    return cost

def manhattan_distance(pt1, pt2):
    '''
    Manhattan (L1) distance between two grid points; used as the
    'optimistic cost to go' heuristic in the A* search.

    Arguments:
    pt1, pt2-- (x, y) tuples

    Return:
    distance-- sum of the absolute coordinate differences
    '''
    dx = abs(pt1[0] - pt2[0])
    dy = abs(pt1[1] - pt2[1])
    return dx + dy
def astar_algorithm(graph,start,goal):
    '''
    A* search over a weighted grid graph.

    Arguments:
    graph-- dict mapping node -> {neighbour: edge_cost} (output of compute_cost)
    start-- tuple containing the start coordinates
    goal-- tuple containing the goal coordinates

    Returns:
    backtrack-- dict mapping node -> {g_cost: parent}, used by backtracking_func
    CLOSED-- list of expanded nodes, in expansion order
    (implicitly returns None when the goal is unreachable, as before)

    Fixes vs the original:
    * past_cost used to store g + h, so heuristic terms accumulated along the
      path and the search was not a valid A*. past_cost now holds the pure
      path cost g; the heuristic is only added to the priority-queue key.
    * The Manhattan heuristic overestimates sqrt(2)-cost diagonal moves
      (inadmissible); a Euclidean heuristic is used instead, which is
      admissible for this move set, so returned paths are optimal.
    * CLOSED membership is now an O(1) set lookup instead of a list scan.
    '''
    OPEN = []
    CLOSED = []
    closed_set = set()          # fast membership; CLOSED list kept for the caller
    backtrack = {}
    # g-cost: cheapest known path cost from start to each node
    past_cost = {node: math.inf for node in graph}
    past_cost[start] = 0
    heapq.heappush(OPEN, (0, start))
    while OPEN:
        _, current_node = heapq.heappop(OPEN)
        if current_node in closed_set:
            continue            # stale queue entry; node already expanded
        CLOSED.append(current_node)
        closed_set.add(current_node)
        if current_node == goal:
            print('SUCCESS: GOAL REACHED')
            return backtrack, CLOSED
        for nbr, edge_cost in graph[current_node].items():
            if nbr in closed_set:
                continue
            tentative_g = past_cost[current_node] + edge_cost
            if tentative_g < past_cost[nbr]:
                past_cost[nbr] = tentative_g
                backtrack[nbr] = {tentative_g: current_node}
                # Euclidean straight-line distance: admissible heuristic for
                # 8-connected moves whose diagonal edges cost sqrt(2).
                h = math.hypot(goal[0] - nbr[0], goal[1] - nbr[1])
                heapq.heappush(OPEN, (tentative_g + h, nbr))
    print("GOAL NOT REACHED")
def backtracking_func(backtrack,goal,start):
    '''
    Recover the path from goal back to start out of the backtrack dictionary.

    Arguments:
    backtrack-- dict node -> {cost: parent} produced by astar_algorithm
    goal-- tuple containing the goal coordinates
    start-- tuple containing the start coordinates

    Return:
    backtrack_nodes-- list of nodes on the path, ordered goal -> start
    '''
    # NOTE: the scan order (reversed insertion order, cursor reuse of `goal`,
    # sentinel 0) is intricate; logic intentionally kept byte-identical.
    backtrack_nodes=[]
    # goal node is added to the list
    backtrack_nodes.append(goal)
    while goal!=0:
        # walk the predecessor records most-recent-first looking for `goal`
        for nbr,val in reversed(list(backtrack.items())):
            for ct,node in val.items():
                if nbr==goal:
                    if node not in backtrack_nodes:
                        backtrack_nodes.append(node)
                    # move the cursor one step towards the start
                    goal=node
                    if node==start:
                        goal=0   # sentinel: terminate the outer while loop
                        break
    return backtrack_nodes

def astar_pointR(start,goal):
    '''
    Build the grid C-space, carve out the obstacles (inflated by robot radius
    plus clearance), and run A* from start to goal.

    Relies on the module-level max_x, max_y, radius and clearance.

    Arguments:
    start-- tuple containing the start coordinates
    goal-- tuple containing the goal coordinates

    Returns:
    backtrack-- predecessor dict from astar_algorithm
    visited-- nodes expanded by the search
    obstacle_points-- raw obstacle cells (for plotting)
    map_points-- obstacle cells inflated by radius+clearance (kept out of the graph)
    '''
    print("Creating The C Space")
    # Every integer grid cell in [0, max_x] x [0, max_y]
    all_points=[(x,y) for x in range(max_x+1) for y in range(max_y+1)]
    print("Successfully Created The C Space")
    print("Creating the Free C Space")
    # Raw obstacle cells (drawn) and inflated cells (removed from the graph)
    obstacle_points=[]
    map_points=[]
    clr=radius+clearance   # inflation distance; loop-invariant, hoisted
    for pt in all_points:
        x=pt[0]
        y=pt[1]
        # Triangular Shaped Obstacle with (20,150), (45,175) and (30,80) as its vertex
        if(130+math.sqrt(2)*clr+x>=y) and (290-math.sqrt(50)*clr-7*x<=y) and ((17/3)*x-90-math.sqrt(298/9)*clr<=y):
            map_points.append((x,y))
        if(130+x>=y) and (290-7*x<=y) and ((17/3)*x-90<=y):
            obstacle_points.append((x,y))
        # Complex shaped Obstacle
        if (x>=90 and 5*x-360<=y and y<=155) or (x>=90 and(x+530>=4*y) and ((5/6)*x+(170/3)<=y) and x<=130):
            obstacle_points.append((x,y))
        if (x>=90-clr and 5*x-360-math.sqrt(26)*clr<=y and y<=155+clr) or (x>=90-clr and(x+530+math.sqrt(17)*clr>=4*y) and ((5/6)*x+(170/3-math.sqrt(61/36)*clr)<=y) and x<=130+clr):
            map_points.append((x,y))
        # Complex shaped Obstacle
        if x>=120 and x<=160 and y>=35 and y<=130:
            if (x-10)>=y:
                if x-400<=-2*y:
                    if 3*x-360<=y:
                        if x-60<=y or (-7/3)*x+(1120/3)>=y:
                            if (-2/5)*x +93<=y:
                                obstacle_points.append((x,y))
        if x>=120-clr and x<=160+clr and y>=35-clr and y<=130+clr:
            if (x-10+math.sqrt(2)*clr)>=y:
                if x-400-math.sqrt(5)*clr<=-2*y:
                    if 3*x-360-math.sqrt(10)*clr<=y:
                        if x-60-math.sqrt(2)*clr<=y or (-7/3)*x+(1120/3+math.sqrt(58/9)*clr)>=y:
                            if (-2/5)*x +93-math.sqrt(29/25)*clr<=y:
                                map_points.append((x,y))
        # Triangular Shaped Obstacle
        if (2*x-340>=y) and ((-5/2)*x+605>=y) and (x-350>=-4*y):
            obstacle_points.append((x,y))
        if (2*x-340+math.sqrt(5)*clr>=y) and ((-5/2)*x+605+math.sqrt(29/4)*clr>=y) and (x-350+math.sqrt(17)*clr>=-4*y):
            map_points.append((x,y))
        # Trapezoidal Shaped Obstacle
        if (-3*x+960>=y) and ((2/11)*x+(1460/11)>=y) and ((7/2)*x-(565)>=y) and (x+580<=5*y):
            obstacle_points.append((x,y))
        if (-3*x+960+math.sqrt(10)*clr>=y) and ((2/11)*x+(1460/11+math.sqrt(125/121)*clr)>=y) and ((7/2)*x-(565-math.sqrt(53/4)*clr)>=y) and (x+580-math.sqrt(26)*clr<=5*y):
            map_points.append((x,y))
    # O(1) membership tests for the checks below (map_points is ~10^4 cells)
    map_set=set(map_points)
    # Checks if the goal node is in the (inflated) obstacle region
    if goal in map_set:
        print("Goal Cant be Reached")
        sys.exit()
    # C space is generated
    base_graph={}
    for pt in all_points:
        base_graph[pt]=base_graph_generate(pt,max_x,max_y)[pt]
    # Remove every edge that leads into an inflated obstacle cell
    for parent,neighbours in base_graph.items():
        for coordinates in neighbours.copy():
            if coordinates in map_set:
                neighbours.remove(coordinates)
    # Remove the obstacle nodes themselves.
    # BUG FIX: the original iterated `for parent,_ in base_graph_copy:`, which
    # unpacked each (x, y) key tuple into parent=x, _=y, so the membership test
    # never matched and no obstacle node was ever deleted from the graph.
    for parent in list(base_graph):
        if parent in map_set:
            del base_graph[parent]
    print("Successfully Created The Free C Space")
    print("Calculating Cost")
    # Costs are calculated
    cost_nodes=compute_cost(base_graph)
    print("Cost Calculated")
    print("Computing A* Algorithm")
    backtrack,visited=astar_algorithm(cost_nodes,start,goal)
    return backtrack,visited,obstacle_points,map_points
Motion_Planning_RigidRobot_Astar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from private.hypergraph import Hypergraph, hg_to_mol from grammar_generation import random_produce from rdkit import Chem from rdkit.Chem import Draw import numpy as np from copy import deepcopy import pickle5 as pickle import torch from os import listdir # + expr_name_dict = dict() expr_name_dict['polymer_117motif'] = 'grammar-log/log_117motifs' expr_name_dict['iso'] = 'grammar-log/log_iso' expr_name_dict['acrylates'] = 'grammar-log/log_acy' expr_name_dict['chain_extender'] = 'grammar-log/log_ce' expr_names = list(expr_name_dict.keys()) generated_mols = dict() for expr_name in expr_names: print('dealing with {}'.format(expr_name)) ckpt_list = listdir(expr_name_dict[expr_name]) max_R = 0 max_R_ckpt = None for ckpt in ckpt_list: if 'grammar' in ckpt: curr_R = float(ckpt.split('_')[4][:-4]) if curr_R > max_R: max_R = curr_R max_R_ckpt = ckpt print('loading {}'.format(max_R_ckpt)) with open('{}/{}'.format(expr_name_dict[expr_name], max_R_ckpt), 'rb') as fr: grammar = pickle.load(fr) for i in range(8): mol, _ = random_produce(grammar) if expr_name not in generated_mols.keys(): generated_mols[expr_name] = [mol] else: generated_mols[expr_name].append(mol) # - Chem.Draw.MolsToGridImage(generated_mols['polymer_117motif'], molsPerRow=4, subImgSize=(200,200)) Chem.Draw.MolsToGridImage(generated_mols['iso'], molsPerRow=4, subImgSize=(200,200)) Chem.Draw.MolsToGridImage(generated_mols['acrylates'], molsPerRow=4, subImgSize=(200,200)) Chem.Draw.MolsToGridImage(generated_mols['chain_extender'], molsPerRow=4, subImgSize=(200,200))
visualization.ipynb
def visualize_grid(g, north_offset, east_offset):
    """
    Visualize the provided grid `g`, marking the map origin with a red cross.
    (Unchanged; kept here for documentation only.)
    """
    fig = plt.figure(figsize=(20,10))
    plt.imshow(g, origin='lower')
    plt.plot(-east_offset, -north_offset, 'r+')
    plt.xlabel('EAST')
    plt.ylabel('NORTH')
    plt.show()

def create_voxmap(data, voxel_size=5):
    """
    Return a 3D boolean voxel grid of the configuration space.

    Args:
        data (ndarray): obstacle rows [north, east, alt, d_north, d_east, d_alt]
            (centers and half-widths)
        voxel_size (int): edge length of one voxel in map units

    Returns:
        ndarray of bool: True where a voxel lies inside an obstacle's
        bounding box.

    Fixes vs the original:
    * dtype=np.bool -> bool (the `np.bool` alias was removed in NumPy 1.24,
      so the original raises AttributeError on current NumPy).
    * Obstacle indices are now offset by the grid minimum (north_min /
      east_min) instead of the minimum obstacle *center*; the old offset
      produced negative indices (silent wrap-around) for the obstacle
      closest to the map edge. Indices are also clamped at 0.
    """
    # minimum and maximum map extents (centers +/- half-widths)
    north_min = np.floor(np.amin(data[:, 0] - data[:, 3]))
    north_max = np.ceil(np.amax(data[:, 0] + data[:, 3]))
    east_min = np.floor(np.amin(data[:, 1] - data[:, 4]))
    east_max = np.ceil(np.amax(data[:, 1] + data[:, 4]))
    alt_max = np.ceil(np.amax(data[:, 2] + data[:, 5]))

    # number of voxels along each axis
    north_size = int(np.ceil(north_max - north_min)) // voxel_size
    east_size = int(np.ceil(east_max - east_min)) // voxel_size
    alt_size = int(alt_max) // voxel_size

    voxmap = np.zeros((north_size, east_size, alt_size), dtype=bool)

    for i in range(data.shape[0]):
        north, east, alt, d_north, d_east, d_alt = data[i, :]
        # voxel-index ranges of the obstacle's bounding box, clamped to the grid
        n0 = max(0, int(north - d_north - north_min) // voxel_size)
        n1 = int(north + d_north - north_min) // voxel_size
        e0 = max(0, int(east - d_east - east_min) // voxel_size)
        e1 = int(east + d_east - east_min) // voxel_size
        height = int(alt + d_alt) // voxel_size
        voxmap[n0:n1, e0:e1, 0:height] = True

    return voxmap
Visualizations/Colliders.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <img src="img/full-colour-logo-UoB.png" alt="Drawing" style="width: 200px;"/> # # # Introduction to Programming for Engineers # # ## Python 3 # # # # + [markdown] slideshow={"slide_type": "slide"} # # 01a Operators and Data Types # ## SUPPLEMENTARY MATERIAL # # # # # <br> <a href='#AlgebraicOperators'>1. Algebraic Operators</a> # <br> <a href='#Comments'>2. Comments</a> # <br> <a href='#OperatorPrecedence'>3. Operator Precedence</a> # <br> <a href='#Readability'>4. Readability</a> # <br> <a href='#VariableAssignment'>5. Variable Assignment</a> # <br> <a href='#print'>6. `print`</a> # <br> <a href='#AugmentedAssignment'>7. Augmented Assignment</a> # <br> <a href='#NamingVariables'>8. Naming Variables</a> # <a href='#ComparisonOperators'>9. Comparison Operators</a> # <br> <a href='#LogicalOperators'>10. Logical Operators</a> # <br> <a href='#Types'>11. Types</a> # <br> <a href='#NumericTypes'>12. Numeric Types</a> # <br> <a href='#TypeConversion'>13. Type Conversion</a> # # + [markdown] slideshow={"slide_type": "slide"} # <a id='AlgebraicOperators'></a> # # 1. Algebraic Operators # # # # We can use Python like a calculator. 
# SOLUTION
# The exercise asks to express 3 + 8 (the original solution computed 1 + 3).
a = 3 + 8
print(a)
# # It is possible to write __code__ that is correct, but might be difficult for someone (including you!) to check. # + [markdown] slideshow={"slide_type": "subslide"} # #### Correct Evaluation # # A common example: # # $$ # \frac{10}{2 \times 50} = 0.1 # $$ # + # Incorrect solution 10 / 2 * 50 # + # Correct solution 10 / (2 * 50) # + [markdown] slideshow={"slide_type": "subslide"} # Multiplication and division have the same precedence. # # The expression is evaluated 'left-to-right'. # # The correct result is acheived by using brackets &nbsp; $()$, as you would when using a calculator. # # # + [markdown] slideshow={"slide_type": "subslide"} # <a id='Readability'></a> # # 4. Readability # # # An example that __evaluates__ the following expression correctly: # # $$ # 2^{3} \cdot 4 = 32 # $$ # # but is __not easily readable__: # # + # Less easily readable 2**3*5 # More easily readable (2**3)*4 # + [markdown] slideshow={"slide_type": "subslide"} # It is best practise to use spaces between characters to make your code more readable. # # You will be marked on readbility in your assessment. # # Start developing good habits now! # + [markdown] slideshow={"slide_type": "subslide"} # # <a id='VariableAssignment'></a> # # 5. Variable Assignment # # When we compute something, we usually want to __store__ the result. # # This allows us to use it in subsequent computations. # # We *assign* a value to a *variables* to store the value. # # Example : The variable `c` is used to 'store' the value `10`. # + slideshow={"slide_type": "subslide"} c = 10 print(c) # - # <a id='print'></a> # ## 6. `print ` # # The variable `c` is used to 'store' the value `10`. # # The function `print` is used to display the value of a variable. # # (We will learn what functions are and how we use them later). # # # + [markdown] slideshow={"slide_type": "slide"} # We can add some text to the printed inforation. # # Text data is known as __string data__. 
# # __String data__ is surrounded by quotation marks `"...."` # # In the example below, a string and an integer value are seperated by a comma when using the `print` function: # - c = 10 print("the value of c is", c) # Another way is to embed numerical values within a string using f-strings: print(f"the value of c is {c}") # + [markdown] slideshow={"slide_type": "subslide"} # Example: We want to use the value of `A`, found using the first expression, in a subsequent computation to find `d`. # # # >$a = b + c$ # # >$d = a + b$ # + b = 11 c = 10 a = b + c d = a + b print(d) # + [markdown] slideshow={"slide_type": "subslide"} # Variable assigment allows us to quickly and easily update the value of every expression in which the variable appears by a single line of code to re-assign the variable: # # Compute $c, d, e$ , # 1. where $a = 2 , b = 10$ # 1. where $a = 5 , b = 1$ # + a = 2 b = 10 c = a + b e = a - b f = a / b print(c, e, f) # - a = 5 b = 1 c = a + b e = a - b f = a / b print(c, e, f) # + [markdown] slideshow={"slide_type": "subslide"} # If we want to change the value of $a$ to $4$ and recompute the sum, we change `a = 2` to as `a = 4` and run the cell again to execute the code. # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 6A : Variable Assignment # # Change the value of `a` or `b`. # # Re-run the cell to update the value. # # (Click on the cell to type in it. <br> # Press "Shift" + "Enter" to run the cell.) # # Then run the cell containing `print(c, e, f)` to view the new value. # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 6B : Variable Assignment # # <br>$y=ax^2+bx+c$ # # In the cell below find $y$ when $a=1$, $b=1$, $c=-6$, $x=-2$ # # Now change the value of $x$ so that $x = 0$ and re-run the cell to update the value. # # What value did you get for y this time? # + # create variables a, b, c and x # e.g. 
a = 1 #type: print(y) to reveal the answer # + [markdown] slideshow={"slide_type": "subslide"} # <a id='AugmentedAssignment'></a> # # 7. Augmented Assignment # # The case where the assigned value depends on a previous value of the variable. # # # + # Example: a = 2 b = 11 a = a + b print(a) # + [markdown] slideshow={"slide_type": "-"} # This type of expression is not a valid algebraic statement since '`a`' appears on both sides of '`=`'. # # However, is very common in computer programming. # # # # + [markdown] slideshow={"slide_type": "subslide"} # __How it works:__ # # > `a = a + b` # # The final value of `a` (left-hand side) is equal to the sum of the initial value of `a` and `b` # + [markdown] slideshow={"slide_type": "subslide"} # <a id='Shortcuts'></a> # ### Shortcuts # - # Augmented assignments can be written in short form. # # For __addition__: # # `a = a + b` &nbsp;&nbsp; &nbsp; can be written &nbsp;&nbsp;&nbsp; `a += b` # + # Addition : long version a = 2 b = 11 a = a + b print(a) # Augmented assignment a = 2 b = 11 a += b print(a) # + [markdown] slideshow={"slide_type": "subslide"} # For __subtraction__: # # `a = a - b` &nbsp;&nbsp; &nbsp; can be written &nbsp;&nbsp;&nbsp; `a -= b` # # + # subtraction : long version a = 1 b = 4 a = a - b print(a) # Augmented assignment a = 1 b = 4 a -= b print(a) # + [markdown] slideshow={"slide_type": "subslide"} # The <a href='#AlgebraicOperators'>basic algebraic operators</a> can all be manipulated in the same way to produce a short form of augmented assigment. # # ### Practise Exercise 7A : Augmented Assignment # In the cells below, use augmented assigment to write a shorter form of the expression shown in the cell. # Type `print(a)` to check your answers match. 
# + [markdown] slideshow={"slide_type": "subslide"} # #### Exercise 7Aa : Multiplication # + # Multiplication : long version a = 10 c = 2 a = c*a print(a) # Write code using Augmented Assigment here: # + [markdown] slideshow={"slide_type": "subslide"} # #### Exercise 7Ab : Division # + # Division : long version a = 1 a = a/4 print(a) # Write code using Augmented Assigment here: # + [markdown] slideshow={"slide_type": "subslide"} # #### Exercise 7Ac : Floor Division # + # Floor division : long version a = 12 a = a//5 print(a) # Write code using Augmented Assigment here: # + [markdown] slideshow={"slide_type": "subslide"} # #### Exercise 7Ad : Floor Division (of negative nunbers) # + slideshow={"slide_type": "-"} # Floor division : long version a = -12 a = a//5 print(a) # Write code using Augmented Assigment here: # - # __NOTE:__ Floor division always rounds DOWN. # # $$\frac{-12}{5} = -2.4$$ # # The closest integer __less than__ -2.4 is -3. # + [markdown] slideshow={"slide_type": "slide"} # <a id='NamingVariables'></a> # # 8. Naming Variables # __It is good practice to use meaningful variable names. __ # # e.g. using '`x`' for time, and '`t`' for position is likely to cause confusion. # # You will be marked on readbility in your assessment. # # Start developing good habits now! # + [markdown] slideshow={"slide_type": "slide"} # Problems with poorly considered variable names: # # 1. You're much more likely to make errors. # 1. It can be difficult to remember what the program does. # 1. It can be difficult for others to understand and use your program. # # + [markdown] slideshow={"slide_type": "slide"} # __Different languages have different rules__ for what characters can be used in variable names. # # In Python variable names can use letters and digits, but cannot start with a digit. # # e.g. 
# # `data5 = 3` &nbsp; &nbsp; &nbsp; $\checkmark$ # # `5data = 3` &nbsp; &nbsp; &nbsp; $\times$ # + [markdown] slideshow={"slide_type": "slide"} # __Python is a case-sensitive language__ # # e.g. the variables '`A`' and '`a`' are different. # # # + [markdown] slideshow={"slide_type": "slide"} # __Languages have *reserved keywords*__ that cannot be used as variable names as they are used for other purposes. # # The reserved keywords in Python are: # # `['False', 'None', 'True', 'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']` # + [markdown] slideshow={"slide_type": "slide"} # A reserved keyword already has a use assigned to it. # + [markdown] slideshow={"slide_type": "slide"} # Reserved kewords are colored bold green when you type them in the Notebook so you can see if one is being used. # # # + [markdown] slideshow={"slide_type": "slide"} # If you try to assign something to a reserved keyword, you will get an error e.g. it is not possible to create a variable with the name __`for`__: # + slideshow={"slide_type": "-"} for = 12 # + [markdown] slideshow={"slide_type": "slide"} # <a id='ExampleUseKeywordsprint'></a> # ## Example Use of Reserved Keywords: `print` # We can print output from our program using the word `print`. # A print call is ended by a newline by default. # This is shown below: # - c = 10 print(c) print(c) # + [markdown] slideshow={"slide_type": "slide"} # However, we can choose a different line ending using the __keyword__ `end`. # + print(c, end=" ") print(c) print(c, end="") print(c) # + [markdown] slideshow={"slide_type": "slide"} # The print function can take mutiple inputs, seperated by commas. # <br>The default output seperator is a space. # <br>However, we can choose a different seperator using the keyword `sep`. 
# - print(c, c, c) print(c, c, c, sep=':') # + [markdown] slideshow={"slide_type": "slide"} # If you try to assign something to a reserved keyword, you will get an error e.g. it is not possible to create a variable with the name __`for`__: # + slideshow={"slide_type": "-"} for = 12 # + [markdown] slideshow={"slide_type": "subslide"} # __Sometimes it is useful to have variable names that are made up of two words.__ # # A convention is to separate the words in the variable name using an underscore '`_`'. # # e.g. a variable name for storing the number of days: # ```python # num_days = 10 # ``` # + [markdown] slideshow={"slide_type": "subslide"} # Can you think of a suitable variable name for each of the following quantities? # # __temperature__ # # __minimum height__ # # __side length__ # # __class__ # # - # + [markdown] slideshow={"slide_type": "slide"} # <a id='ComparisonOperators'></a> # # 9. Comparison Operators # # __Boolean:__ A type of variable that can take on one of two values: # - true # - false # # # + [markdown] slideshow={"slide_type": "slide"} # # __Comparison Operator:__ An operator that is used to compare the values of two variables. # # __Commonly used comparison operators:__ # # $==$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Equality <br> # $!=$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Inequality <br> # $>$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Greater than <br> # $<$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Less than <br> # $>=$ &nbsp; &nbsp; &nbsp; &nbsp; Greater than or equal to <br> # $<=$ &nbsp; &nbsp; &nbsp; &nbsp; Less than or equal to <br> # # # # + [markdown] slideshow={"slide_type": "slide"} # One way to visualise how a Boolean works is consider the answer when we make a verbal comparison... 
# + [markdown] slideshow={"slide_type": "slide"} # __Example:__ Comparing variables a and b using comparison operators returns a boolean variable: # + a = 10.0 b = 9.9 # Check if a is equal to b print("Is a equal to b?") print(a==b) # Check if a is more than b. print("Is a greater than b?") print(a > b) # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 5 : # Complete the cell below by writing the __correct comparison operator__ in each eampty `print()` statement: # # + a = 14 b = -9 c = 14 # EXAMPLE : Check if a is less than b. print("Is a less than b?") print(a < c) # Check if a is equal to c print("Is a equal to c?") print() # Check if a is not equal to c print("Is a not equal to c?") print() # Check if a is less than or equal to b print("Is a less than or equal to b?") print() # Check if a is less than or equal to c print("Is a less than or equal to c?") print() # Check if two colours are the same colour0 = 'blue' colour1 = 'green' print("Is colour0 the same as colour1?") print() # + [markdown] slideshow={"slide_type": "slide"} # <a id='LogicalOperators'></a> # # 10. Logical Operators # # The comparisons we have looked at so far consider two variables. # # *Logical operators*: # # > ```python # and # or # not # ``` # # allow us to make multiple comparisons at the same time. # # # # # + [markdown] slideshow={"slide_type": "slide"} # The code # ```python # X and Y # ``` # will evaluate to `True` if statement `X` *and* statement `Y` are both true. # # Otherwise will evaluate to `False`. # # # + [markdown] slideshow={"slide_type": "slide"} # The code # ```python # X or Y # ``` # will evaluate to `True` if statement `X` *or* statement `Y` is true. # # Otherwise will evaluate to `False`. 
# + [markdown] slideshow={"slide_type": "slide"} # __Examples:__ # # $10 < 9$ &nbsp; &nbsp;&nbsp; &nbsp; is false # # $15 < 20$ &nbsp; &nbsp; is true # - print(10 < 9 and 15 < 20) print(10 < 9 or 15 < 20) # + [markdown] slideshow={"slide_type": "slide"} # In Python, the 'not' operator negates a statement, e.g.: # - a = 12 b = 7 print(a < b) print(not a < b) # + [markdown] slideshow={"slide_type": "slide"} # <a id='Types'></a> # # 11. Types # <br> &emsp;<a href='#Booleans'>__11.1 Booleans__</a> # <br> &emsp;<a href='#Strings'>__11.2 Strings__</a> # # All variables have a 'type', which indicates what the variable is, e.g. a number, a string of characters, etc. # # # + [markdown] slideshow={"slide_type": "slide"} # Type is important because it determines: # - how a variable is stored # - how it behaves when we perform operations on it # - how it interacts with other variables. # # e.g.multiplication of two real numbers is different from multiplication of two complex numbers. # + [markdown] slideshow={"slide_type": "slide"} # <a id='Introspection'></a> # ### Introspection # # We can check a variable's type using *introspection*. # # To check the type of a variable we use the function `type`. # + x = True print(type(x)) a = 1 print(type(a)) a = "1" print(type(a)) a = 1.0 print(type(a)) # - # __Note__ that `1`, `1.0` and `"1"` have different *types*. # + [markdown] slideshow={"slide_type": "subslide"} # Complete the cell in your interactive textbook to find the `type` of `a` when it is written as shown below: # + slideshow={"slide_type": "subslide"} a = 1 a = 1.0 # + [markdown] slideshow={"slide_type": "subslide"} # What is the first type? What is the second type? # + [markdown] slideshow={"slide_type": "slide"} # - __bool__ means __Boolean__ variable. # - __str__ means __string__ variable. # - __int__ means __integer__ variable. # - __float__ means __floating point__ variable. # # This distinction is very important for numerical computations. 
# # We will look at the meaning of these different types next... # + [markdown] slideshow={"slide_type": "slide"} # <a id='Booleans'></a> # ## 11.1 Booleans # A type of variable that can take on one of two values - true or false. This is the simplest type. # + slideshow={"slide_type": "slide"} a = True b = False # test will = True if a or b = True test = a or b print(test) print(type(test)) # + [markdown] slideshow={"slide_type": "subslide"} # ##### Note: We can use a single instance of the print function to display multiple pieces of information if we sperate them by commas. # # e.g. `print(item_1, item_2)` # # - print(test, type(test)) # + [markdown] slideshow={"slide_type": "slide"} # __Re-cap: what does a evaluate to? (`True` or `False`)__ # + slideshow={"slide_type": "-"} a = (5 < 6 or 7 > 8) print(a) # + [markdown] slideshow={"slide_type": "slide"} # <a id='Strings'></a> # ## 11.2 Strings # # A string is a collection of characters. # # + [markdown] slideshow={"slide_type": "slide"} # A string is created by placing the characters between quotation marks. # # You may use single or double quotation marks; either is fine e.g. # # my_string = 'This is a string.' # # or # # my_string = "This is a string." # + [markdown] slideshow={"slide_type": "slide"} # __Example:__ Assign a string to a variable, display the string, and then check its type: # + my_string = "This is a string." print(my_string) print(type(my_string)) # + [markdown] slideshow={"slide_type": "slide"} # We can perform many different operations on strings. # # __Example__: Extract a *single* character as a new string: # # > *__NOTE:__ Python counts from 0.* # + my_string = "This is a string." # Store the 3rd character of `my_string` as a new variable s = my_string[2] print(s) print(type(s)) # + [markdown] slideshow={"slide_type": "slide"} # The number that describes the position of a character is called the *index*. # # What is the character at index 4? 
# # What is the index of the *second* i character? # + my_string = "This is a string." my_string[4] # + [markdown] slideshow={"slide_type": "slide"} # This shows that we count spaces as characters. # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 11A: Strings # # `my_string = "This is a string."` # # In the cell provided: # - store the 4th character of `my_string` as a new variable # - print the new variable # - check that it is a string # + # Store the 4th character as a new variable # Print the new variable # Check the type of the new variable # + [markdown] slideshow={"slide_type": "slide"} # We can extract a *range of* characters as a new string by specifiying the index to __start__ at and the index to __stop__ at: # # $$ # \underbrace{ # \underbrace{t}_{\text{0}} \ # \underbrace{h}_{\text{1}}\ # \underbrace{i}_{\text{2}}\ # \underbrace{s}_{\text{3}}\ # \underbrace{}_{\text{4}}\ # \underbrace{i}_{\text{5}}\ # }_{\text{s}} # \underbrace{s}_{\text{6}}\ # \underbrace{}_{\text{7}}\ # \underbrace{a}_{\text{8}}\ # \underbrace{}_{\text{9}}\ # \underbrace{s}_{\text{10}}\ # \underbrace{t}_{\text{11}}\ # \underbrace{r}_{\text{12}}\ # \underbrace{i}_{\text{13}}\ # \underbrace{n}_{\text{14}} \ # \underbrace{g}_{\text{15}} \ # \underbrace{.}_{\text{16}} \ # $$ # # The following example stores characters 0 to 5 of `my_string` as new variable, `s`. # + slideshow={"slide_type": "slide"} my_string = "This is a string." # Store the first 6 characters of my_string as new string, s s = my_string[0:6] # print print(s) # check type print(type(s)) # - # __Note:__ # - The space between the first and second word is counted as the 5th character. # - The "stop" value is not included in the range. 
# + slideshow={"slide_type": "subslide"} # Store the last 4 characters and print s = my_string[-4:] print(s) # - # $$ # my\_string = # \underbrace{t}_{\text{-17}} \ # \underbrace{h}_{\text{-16}}\ # \underbrace{i}_{\text{-15}}\ # \underbrace{s}_{\text{-14}}\ # \underbrace{}_{\text{-13}}\ # \underbrace{i}_{\text{-12}}\ # \underbrace{s}_{\text{-11}}\ # \underbrace{}_{\text{-10}}\ # \underbrace{a}_{\text{-9}}\ # \underbrace{}_{\text{-8}}\ # \underbrace{s}_{\text{-7}}\ # \underbrace{t}_{\text{-6}}\ # \underbrace{r}_{\text{-5}}\ # \underbrace{ # \underbrace{i}_{\text{-4}}\ # \underbrace{n}_{\text{-3}} \ # \underbrace{g}_{\text{-2}} \ # \underbrace{.}_{\text{-1}} \ # }_{\text{s}} # $$ # __Note:__ # - This time we only specify a starting value for the range. # - The stopping value is not specified. # ```python # s = my_string[-4:] # ``` # - This means the range ends at the end of the string. # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 11B: Strings # # In the cell provided: # # - store the last 6 characters of `my_string` as a new variable. # - print your new variable # # # + slideshow={"slide_type": "-"} # Store the last 6 characters as a new variable. # Print the new varaible # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 11C: Strings # In the cell provided: # # - store 6 characters, starting with the 2nd character of `my_string` (i.e. store: "his is") as a new variable. # - print your new variable # - Find an alternative way of extracting the same string? # + # Store 6 characters, starting with "h" print(my_string[1:7]) print(my_string[-16:-10]) # Print the new varaible # + [markdown] slideshow={"slide_type": "subslide"} # __Example:__ Add strings together. 
# + start = "Py" end = "thon" word = start + end print(word) # + [markdown] slideshow={"slide_type": "subslide"} # __Example:__ Add a section of a string to a section of another string: # + start = "Pythagoras" end = "marathon" word = start[:2] + end[-4:] print(word) # - # __Note__: We can use a blank space __or__ a 0 to index the first character; either is OK. # + [markdown] slideshow={"slide_type": "subslide"} # ### Practise Exercise 11D : Strings # In the cell below add the variables `start` and `end` to make a sentence. # + start = "The sky is" end = "blue" # Add variables start and end to make a new variable and print it # + [markdown] slideshow={"slide_type": "subslide"} # Notice that we need to add a space to seperate the words "is" and "blue". # # We can do this using a pair of quotation marks, seperated by a space. # - print(start + " " + end) # + [markdown] slideshow={"slide_type": "subslide"} # <a id='NumericTypes'></a> # # 12. Numeric Types # # <br> &emsp; <a href='#Integers'>12.1 Integers</a> # <br> &emsp; <a href='#FloatingPoint'>12.2 Floating Point</a> # &emsp; <a href='#ScientificNotation'>12.3 ScientificNotation</a> # <br>&emsp; <a href='#ComplexNumbers'>12.4 Complex Numbers</a> # # Numeric types are particlarly important when solving scientific and engineering problems. # # # + [markdown] slideshow={"slide_type": "subslide"} # Python 3 has three numerical types: # # - integers (`int`) # - floating point numbers (`float`) # - complex numbers (`complex`) # # __Integers:__ Whole numbers. <br> # __Floating point:__ Numbers with a decimal place.<br> # __Complex numbers:__ Numbers with a real and imaginary part.<br> # + [markdown] slideshow={"slide_type": "slide"} # <a id='Integers'></a> # ## 12.1 Integers # - Integers (`int`) are whole numbers. # - They can be postive or negative. # - Integers should be used when a value can only take on a whole number <br> e.g. the year, or the number of students following this course. 
# + [markdown] slideshow={"slide_type": "slide"} # <a id='FloatingPoint'></a> # ## 12.2 Floating Point # # Numbers that have a decimal point are automatically stored using the `float` type. # # A number is automatically classed as a float: # - if it has a decimal point # - if it is written using scientific notation (i.e. using e or E)... # # # + [markdown] slideshow={"slide_type": "slide"} # <a id='Round'></a> # ### Rounding floating point numbers. # # You can round your answer to a defined number of digits after the decimal point using the `round` function: # <br>https://docs.python.org/3/library/functions.html#round # # + a = 0.768567 print(round(a,2)) print(round(a,3)) print(round(a,4)) # + [markdown] slideshow={"slide_type": "slide"} # <a id='ScientificNotation'></a> # ## 12.3 Scientific Notation # In scientific notation, the letter e (or E) symbolises power of ten in the exponent. # # For example: # # $$ # 10.45\textrm{e}2 = 10.45 \times 10^{2} = 1045 # $$ # # $$ # 1.045\textrm{e}3 = 1.045 \times 10^{3} = 1045 # $$ # + [markdown] slideshow={"slide_type": "slide"} # Examples using scientific notation. # + a = 2e0 print(a, type(a)) b = 2e3 print(b) c = 2.1E3 print(c) # + [markdown] slideshow={"slide_type": "subslide"} # ### Practical Exercise 12A # # In the cell provided write two alternative ways to express 35,000 using scientific notation. # + # Express 35,000 using scientific notation # + [markdown] slideshow={"slide_type": "subslide"} # <a id='ComplexNumbers'></a> # ## 12.4 Complex Numbers # Complex numbers have real and imaginary parts. 
# We can declare a complex number in Python by adding `j` or `J` after the imaginary part of the number:
# # + [markdown] slideshow={"slide_type": "subslide"} # ## Automatic Type Conversion # If we add two integers, the results will be an integer: # + a = 4 # int b = 15 # int c = a + b # print(c, type(c)) # + [markdown] slideshow={"slide_type": "subslide"} # However, if we add an int and a float, the result will be a float: # + a = 4 # int b = 15.0 # float c = a + b # print(c, type(c)) # + [markdown] slideshow={"slide_type": "subslide"} # If we divide two integers, the result will be a `float`: # + a = 16 # int b = 4 # int c = a / b # print(c, type(c)) # + [markdown] slideshow={"slide_type": "subslide"} # When dividing two integers with floor division (or 'integer division') using `//`, the result will be an `int` e.g. # + a = 16 # int b = 3 # int c = a // b # print(c, type(c)) # + [markdown] slideshow={"slide_type": "subslide"} # In general: # - operations that mix an `int` and `float` will generate a `float`. # - operations that mix an `int` or a `float` with `complex` will generate a `complex` type. # # # If in doubt, use `type` to check. # + [markdown] slideshow={"slide_type": "slide"} # ## Explicit Type Conversion # # We can explicitly change (or *cast*) the type. # # To cast variable a as a different type, write the name of the type, followed by the variable to convert in brackets. # # __Example: Cast from an int to a float:__ # + slideshow={"slide_type": "-"} a = 1 a = float(a) print(a, type(a)) # + slideshow={"slide_type": "subslide"} # If we use a new variable name the original value is unchanged. a = 1 b = float(a) # print(a, type(a)) # print(b, type(b)) # + # If we use the original name, the variable is updated. a = 1 a = float(a) # print(a, type(a)) # + [markdown] slideshow={"slide_type": "subslide"} # __Try it yourself.__ # # In the cell provided: # # - cast variable `a` from a float back to an int. 
# - print variable `a` and its type to check your answer # + # cast a as an int # print a and its type # + [markdown] slideshow={"slide_type": "subslide"} # ##### Note: Take care when casting as the value of the variable may change as well as the type. # # Here is an example to demmonstrate: # # # + [markdown] slideshow={"slide_type": "subslide"} # In the cell below: # 1. cast `i` as an `int` and print `i`. # 1. cast `i` back to a `float` and print `i`. # + i = 1.3 # float print(i, type(i)) # cast i as an int and print it i = int(i) print(i) i = float(i) print(i) # + [markdown] slideshow={"slide_type": "subslide"} # What has happened to the original value of `i`? # # # + [markdown] slideshow={"slide_type": "subslide"} # Note that rounding is applied when converting from a `float` to an `int`; the values after the decimal point are discarded. # # This type of rounding is called 'round towards zero' or 'truncation'. # + [markdown] slideshow={"slide_type": "subslide"} # A common task is converting numerical types to-and-from strings. # # Examples: # - Reading a number from a file where it appears as as a string # - User input might be given as a string. # # __Example: Cast from a float to a string:__ # # + a = 1.023 b = str(a) # print(b, type(b)) # - # We can use __format__ to change how a string is displayed... # + slideshow={"slide_type": "slide"} r = 0.0123456 s = 0.2345678 # cast as a string print(str(r)) print('%s' % r) print('{}'.format(r)) # cast as a string, scientific notation print('%E' % r) # specify number of significant figures displayed print('%.2E, %.1E' % (r, s)) # + [markdown] slideshow={"slide_type": "subslide"} # __Example: Cast from a string to a float:__ # # It is important to cast string numbers as either `int`s or `float`s for them to perform correctly in algebraic expressions. 
# # Consider the example below: # + a = "15.07" b = "18.07" print("As string numbers:") print("15.07 + 18.07 = ", a + b) print("When cast from string to float:") print("15.07 + 18.07 = ", float(a) + float(b)) # - # Note from the cell above that numbers expressed as strings can be cast as floats *within* algebraic expressions. # + [markdown] slideshow={"slide_type": "subslide"} # Only numerical values can be cast as numerical types. # e.g. Trying to cast the string `four` as an integer causes an error: # - f = float("four") # + [markdown] slideshow={"slide_type": "slide"} # <a id='Summary'></a> # # Summary # # - We can perform simple *arithmetic operations* in Python (+, -, $\times$, $\div$.....) # - We can *assign* values to variables. # - Expressions containing multiple operators obey precedence when executing operations. # # # # + [markdown] slideshow={"slide_type": "slide"} # - Every variable has a type (`int`, `float`, `string`....). # - A type is automatically assigned when a variable is created. # - Python's `type()` function can be used to determine the type of a variable. # - The data type of a variable can be converted by casting (`int()`, `float()`....) # + [markdown] slideshow={"slide_type": "slide"} # - *Comparison operators* (==, !=, <, >....) compare two variables. # - The outcome of a comparison is a *Boolean* (True or False) value. # - *Logical operators* (`and`, `or`) compares the outcomes of two comparison operations. # - The outcome of a logical operation is a *Boolean* (True or False) value. # - The logical `not` operator returns the inverse Boolean value of a comparison. # -
01a_Operators_DataTypes__SupplementaryMaterial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Robots-Vision/KNNExamples/blob/master/Mushrooms.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="zmUqPoIqKh9R" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 72} outputId="3db5168e-6ddd-462f-820f-726886c4a9c7" import pandas as pd import numpy as np import matplotlib.pyplot as plt import scipy as scp import warnings warnings.filterwarnings("ignore") from google.colab import files uploaded = files.upload() # + id="gWCVCMjkLNAW" colab_type="code" colab={} # Utilizando a biblioteca pandas para importação dos dados dataset = "../assets/mushrooms.csv" df = pd.read_csv(dataset, engine='python', sep=',') # + id="1CPmj3knNHrI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="0fda893c-0960-44fc-b942-cd42f5245728" # Verigicando dados iniciais do dataset importado df.head() # + id="c4xLYdlbOA1t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="ce0ebdef-c8da-4149-df3c-6193f2844704" # Entendendo o dataset, colunas, data types, quantidade de tegistros por coluna df.info() # + id="9Q8iqCpHOKaE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 244} outputId="b7388b2f-c830-4eb7-a4ef-c76c01fe0659" # Verificando detalhes estatísticos do dataset df.describe() # + id="yRC1czhDOaKb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 407} 
outputId="c046bcdb-1f10-4887-d54b-81af2ce3d29a" def distribuicao(data): ''' Esta função exibirá a quantidade de registros únicos para cada coluna existente no dataset dataframe -> Histogram ''' # Calculando valores unicos para cada label: num_unique_labels num_unique_labels = data.apply(pd.Series.nunique) # plotando valores num_unique_labels.plot(kind='bar') # Nomeando os eixos plt.xlabel('Campos') plt.ylabel('Número de registros únicos') plt.title('Distribuição de dados únicos do DataSet') # Exibindo gráfico plt.show() distribuicao(df) # + id="DnuM8KdIPs2U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 576} outputId="9caf7b1a-b52a-4094-b018-ccc055ad047a" # Contando o número de registros dos tipos 'e' e 'p' da coluna 'class'(Comestível = e, Venenoso = p) e = pd.value_counts(df['class']) [0] p = pd.value_counts(df['class']) [1] tam = len(df) print('Cogumelos Comestiveis: ', e) print('Cogumelos Venenosos: ', p) # Cria uma estrutura de dados comum no pandas. Em outras palavras uma tabela de dados pie = pd.DataFrame([['Comestivel', e], ['Venenoso', p]], columns=['Tipo', 'Quantidade']) def pie_chart(data, col1, col2, title): labels = { 'Comestível': 0, 'Venenoso': 1 } sizes = data[col2] colors = ['#e5ffcc', '#ffb266'] plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle= 140, labeldistance= 1.2) plt.title( title ) plt.axis('equal') plt.show() pie_chart(pie, 'Tipo', 'Quantidade', 'Distribuição percentual Classes de Cogumelos') plt.bar(pie.Tipo, pie.Quantidade, color = ['#e5ffcc', '#ffb266']) plt.title("Distribuição das Classes de Cogumelos") plt.xlabel('Tipo de Cogumelo') plt.ylabel('Quantidade de Registros') plt.show() # + id="ZOVg79jXVuL_" colab_type="code" colab={} # X = colunas de informação, variáveis independentes # X recebe todo o DataFrame menos a coluna 'class'. 
axis significa o tipo ou o eixo do dado a ser removido, se for 1 vai ser uma coluna e se for 0 vai ser uma label em específico X = df.drop('class', axis=1) # y = Variável dependente, a qual será utilizada para classificar os dados # Recebe só a coluna 'class' y = df['class'] # + id="2kLPJUjvXj1Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="a9faf614-663c-49b5-d1c3-b888aff8c626" # Verificando se C está com a coluna class(porque não pode estar) X.head() # + id="ajWTLI3DXstq" colab_type="code" colab={} # Como o KNN utiliza medidas de distância para analisar a similaridade dos registros de cada classe, precisamos transformar as labels em números. # OneHotEncoder gera novas colunas com valor 0 ou 1, aonde 1 representa a ocorrência daquela caracteristica e 0 a não ocorrência. #Ex.: A coluna cap-color com registros n,y,w e g, após passar pelo OneHotEncoder ficariam como cap-color-n, cap-color-y, cap-color-w, suas linhas serão os valores 1 (é desta cor e 0 não é desta cor). 
#Importando o encoder para transformar as labels em chaves numéricas from sklearn.preprocessing import OneHotEncoder Oht_enc = OneHotEncoder() X = pd.DataFrame(Oht_enc.fit_transform(X).A) # + id="McYe7XDwZR0b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="48b3c31d-c229-4590-b351-d2ea9a00ed52" X.shape # + id="UoWNEbtBZXjd" colab_type="code" colab={} # Nesta fase separamos o conjunto de dados em Treinamento e Teste, definindo o percentual que utilizaremos para teste e para treino do modelo from sklearn.model_selection import train_test_split # Retorna 70% de X(DataFrame sem o 'class') para treino e 30% de X para teste # Retorna 70% de y(coluna 'class' separada) para treino e 30% de y para teste X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.3) # + id="cEYKOQf7dEz4" colab_type="code" colab={} # Etapa importante que irá reduzir a escala numérica das colunas, para que todas estejam dentro de uma mesma escala de valor. # Se houverem medidas com escalas de valor muito diferentes a distância calculada pelo algorítimo será influênciada podendo gerar resultados errôneos. 
from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) # + id="irq7FZPxefWb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="6e3acd78-8ab9-453c-b100-e1bc1d4c4d19" # Agora iremos aplicar nossos dados ao algoritmo KNN # Importando o modelo KNN from sklearn.neighbors import KNeighborsClassifier # Definindo o valor de visinhos classifier = KNeighborsClassifier(n_neighbors=5) # Treinando o modelo, com dados de treinamento classifier.fit(X_train, y_train) # + id="KPx3CeCzfebi" colab_type="code" colab={} #### Prevendo os valores de Y para os dados de teste (X_test) y_pred = classifier.predict(X_test) # + id="CgmzH704f8gh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="db96e32e-175a-46a6-f958-987c9cabd88e" # Analisando e validando os resultados obtidos #Importnado métricas para validação do modelo from sklearn.metrics import classification_report, confusion_matrix, accuracy_score # Imprimindo matriz confusa print('Matriz Confusa: ') print(confusion_matrix(y_test, y_pred), "\n") # Impriminfo o relatório de classificação print("Relatório de classificação: \n", classification_report(y_test, y_pred)) # Importando o quão acurado(precisão) foi o modelo print('Acurácia do modelo: ', accuracy_score(y_test, y_pred)) # + id="pqChCh1Rh-Qs" colab_type="code" colab={} # Mas precisamos ter a maior precisão possivel para o nosso modelo # Para isso vamos gerar um loop para achar o melhor número de visinhos error = [] # Calculando error para o valor de K entre 1 e 40 for i in range(1, 10): knn = KNeighborsClassifier(n_neighbors=i) knn.fit(X_train, y_train) pred_i = knn.predict(X_test) # np.mean(Retorna a média dos elementos desse array) error.append(np.mean(pred_i != y_test)) # + id="b9vFEOmokA_Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} 
outputId="58d187e9-5184-4b58-8393-a33d5d037175" # Comparando o Error Rate gerado de valores K diferentes plt.figure(figsize=(12, 6)) plt.plot(range(1, 10), error, color='red', linestyle='dashed', marker='o', markerfacecolor='blue', markersize=10) plt.title('Error Rate K Value') plt.xlabel('K Value') plt.ylabel('Mean Error') # + id="lTlMmtNAk3BY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="3a703cad-d50f-4158-b96d-c0a647e3919b" # Aplicando melhor parâmetro para K encontrado from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 8) classifier.fit(X_train, y_train) # + id="VigD0gAclNLR" colab_type="code" colab={} # Aplicando os valores de teste novamente y_pred = classifier.predict(X_test) # + id="mSLpZDMUlZRl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="ddfdf495-d961-485d-eec0-422753b202f7" # Importando métricas para validação do modelo from sklearn.metrics import classification_report, confusion_matrix, accuracy_score # Imprimindo a matriz confusa print("Matriz Confusa> ") print(confusion_matrix(y_test, y_pred), "\n") # Imprimindo o relatório de classificação print("Relatório de classificação: \n", classification_report(y_test, y_pred)) # Imprimindo o quão acurado foi o modelo print('Acurácia do modelo: ', accuracy_score(y_test, y_pred))
src/Mushrooms.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .rs
//       format_name: light
//       format_version: '1.5'
//       jupytext_version: 1.14.4
//   kernelspec:
//     display_name: Rust
//     language: rust
//     name: rust
// ---

// # Tour of the EvCxR Jupyter Kernel
// For those not already familiar with Jupyter notebook, it lets you write code into "cells" like the box below. Cells can alternatively contain markdown, like this text here. Each code cell is compiled and executed separately, but variables, defined functions etc persist between cells.
//
// ## Printing to outputs and evaluating expressions
// Lets print something to stdout and stderr then return a final expression to see how that's presented. Note that stdout and stderr are separate streams, so may not appear in the same order is their respective print statements.

println!("Hello world");
eprintln!("Hello error");
format!("Hello {}", "world")

// ## Assigning and making use of variables
// We define a variable `message`, then in the subsequent cell, modify the string and finally print it out. We could also do all this in the one cell if we wanted.

let mut message = "Hello ".to_owned();

message.push_str("world!");

message

// ## Defining and redefining functions
// Next we'll define a function
// NOTE: this first version is intentionally wrong (base case returns 0) to
// demonstrate redefinition in the next cell.

pub fn fib(x: i32) -> i32 {
    if x <= 2 {0} else {fib(x - 2) + fib(x - 1)}
}

(1..13).map(fib).collect::<Vec<i32>>()

// Hmm, that doesn't look right. Lets redefine the function. In practice, we'd go back and edit the function above and reevaluate it, but here, lets redefine it in a separate cell.

pub fn fib(x: i32) -> i32 {
    if x <= 2 {2} else {fib(x - 2) + fib(x - 1)}
}

let values = (1..13).map(fib).collect::<Vec<i32>>();
values

// ## Spawning a separate thread and communicating with it
// We can spawn a thread to do stuff in the background, then continue executing code in other cells.

use std::sync::{Mutex, Arc};
let counter = Arc::new(Mutex::new(0i32));
// The closure clones the Arc so the spawned thread shares the same counter.
std::thread::spawn({
    let counter = Arc::clone(&counter);
    move || {
        for i in 1..300 {
            *counter.lock().unwrap() += 1;
            std::thread::sleep(std::time::Duration::from_millis(100));
        }
    }});

// Evaluated at different times, so the two reads below show different counts.
*counter.lock().unwrap()

*counter.lock().unwrap()

// ## Loading external crates
// We can load external crates. This one takes a while to compile, but once it's compiled, subsequent cells shouldn't need to recompile it, so it should be much quicker.

// :dep base64 = "0.10.1"
base64::encode(&vec![1, 2, 3, 4])

// ## Customizing how types are displayed
// We can also customize how our types are displayed, including presenting them as HTML. Here's an example where we define a custom display function for a type `Matrix`.

use std::fmt::Debug;
pub struct Matrix<T> {pub values: Vec<T>, pub row_size: usize}
impl<T: Debug> Matrix<T> {
    // evcxr_display is the hook the EvCxR kernel calls to render a value;
    // it emits an HTML table between the EVCXR content markers.
    pub fn evcxr_display(&self) {
        let mut html = String::new();
        html.push_str("<table>");
        for r in 0..(self.values.len() / self.row_size) {
            html.push_str("<tr>");
            for c in 0..self.row_size {
                html.push_str("<td>");
                html.push_str(&format!("{:?}", self.values[r * self.row_size + c]));
                html.push_str("</td>");
            }
            html.push_str("</tr>");
        }
        html.push_str("</table>");
        println!("EVCXR_BEGIN_CONTENT text/html\n{}\nEVCXR_END_CONTENT", html);
    }
}
let m = Matrix {values: vec![1,2,3,4,5,6,7,8,9], row_size: 3};
m

// We can also return images, we just need to base64 encode them. First, we set up code for displaying RGB and grayscale images.
// NOTE(review): image::png::PNGEncoder / ColorType::RGB(8) match the old 0.x
// `image` crate API pinned by this notebook — confirm crate version before reuse.

extern crate image;
extern crate base64;
pub trait EvcxrResult {fn evcxr_display(&self);}
impl EvcxrResult for image::RgbImage {
    fn evcxr_display(&self) {
        let mut buffer = Vec::new();
        image::png::PNGEncoder::new(&mut buffer).encode(&**self, self.width(), self.height(),
            image::ColorType::RGB(8)).unwrap();
        let img = base64::encode(&buffer);
        println!("EVCXR_BEGIN_CONTENT image/png\n{}\nEVCXR_END_CONTENT", img);
    }
}
impl EvcxrResult for image::GrayImage {
    fn evcxr_display(&self) {
        let mut buffer = Vec::new();
        image::png::PNGEncoder::new(&mut buffer).encode(&**self, self.width(), self.height(),
            image::ColorType::Gray(8)).unwrap();
        let img = base64::encode(&buffer);
        println!("EVCXR_BEGIN_CONTENT image/png\n{}\nEVCXR_END_CONTENT", img);
    }
}

// Draws a blue diagonal band (pixels within 3 of the x == y line) on black.
image::ImageBuffer::from_fn(256, 256, |x, y| {
    if (x as i32 - y as i32).abs() < 3 {
        image::Rgb([0, 0, 255])
    } else {
        image::Rgb([0, 0, 0])
    }
})

// ## Display of compilation errors
// Here's how compilation errors are presented. Here we forgot an & and passed a String instead of an &str.
// NOTE: this cell is intentionally broken to demonstrate error display.

let mut s = String::new();
s.push_str(format!("foo {}", 42));

// ## Seeing what variables have been defined
// We can print a table of defined variables and their types with the :vars command.

// :vars

// Other built-in commands can be found via :help

// :help
tests/notebooks/ipynb_rust/evcxr_jupyter_tour.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: dcapy
#     language: python
#     name: dcapy
# ---

# +
import os
from dcapy import dca
from dcapy.models import CashFlow, ChgPts, CashFlowModel, Period, Scenario
import numpy as np
import pandas as pd
from datetime import date
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import seaborn as sns
# -

# Cashflow parameter definitions for the Period/Scenario models below.
# 'target' routes the item into opex/income/capex; 'multiply' scales a
# per-unit value by the named forecast column (here, oil volume).
cashflow_params = [
    {
        'name':'fix_opex',
        'const_value':-5.000,   # fixed operating cost per period
        'target':'opex',
    },
    {
        'name':'var_opex',
        'const_value':-0.005,   # variable operating cost per unit of oil volume
        'target':'opex',
        'multiply':'oil_volume'
    },
    {
        'name':'income',
        'const_value':0.045,    # revenue per unit of oil volume
        'target':'income',
        'multiply':'oil_volume'
    },
    {
        'name':'capex',
        # single capital expenditure at timepoint 1
        'array_values':{'date':[1],'value':[-70000]},
        'target':'capex'
    }
]

# First period: Arps decline forecast for currently-producing wells ("pdp").
# qi is given as a list, so the forecast is iterated over both initial rates.
p1_dict = {
    'name':'pdp',
    'dca': {
        'ti':0,
        'di':0.15,
        'freq_di':'A',
        'qi':[2000,1300],#{'dist':'norm', 'kw':{'loc':2500,'scale':200}}, #[800,1000],
        'b':0,
        'fluid_rate':5000
    },
    'start':0,
    'end':20,
    'freq_input':'A',
    'freq_output':'A',
    'rate_limit': 700,   # stop forecasting once the rate falls below this
    'iter':10,
    'cashflow_params':cashflow_params
}

p1 = Period(**p1_dict)
p1

# Direct forecast call for comparison with the Period-driven forecast below.
dca.arps_forecast([0., 1., 2., 3., 4., 5., 6., 7.,8.,9.],800,0.15,0,[0,2])

fore1 = p1.generate_forecast()
fore1

sns.lineplot(data=fore1, x=fore1.index, y='oil_rate', hue='iteration')

# Build the cashflow model(s) and inspect the free cash flow of the first one.
c1 = p1.generate_cashflow()

c1[0].fcf()

# Net present value at a 10% discount rate, and internal rate of return.
p1.npv([0.10])

p1.irr(freq_output='A')

# # Add another period

p1.get_end_dates()

# Second period ("pdnp") that depends on the end of the 'pdp' period.
p2_dict = {
    'name':'pdnp',
    'dca': {
        'ti':7,
        'di':0.2,
        'freq_di':'A',
        'qi':1000,#{'dist':'norm', 'kw':{'loc':3500,'scale':200}}, #[800,1000],
        'b':0,
        'fluid_rate':5000
    },
    'start':0,
    'end':20,
    'freq_input':'A',
    'freq_output':'A',
    'rate_limit': 80,
    'iter':14,
    'cashflow_params':cashflow_params,
    'depends':{'period':'pdp'}   # chained after the 'pdp' period
}

p2 = Period(**p2_dict)
p2

# Combine both periods into a scenario (dict form; the commented line shows the
# equivalent construction from already-built Period objects).
#s1 = Scenario(name='base', periods=[p1,p2])
s1 = Scenario(**{
    'name':'base',
    'periods':[
        p1_dict,
        p2_dict
    ]
})
s1

sf1 = s1.generate_forecast(iter=3)
sf1

sns.lineplot(data=sf1, x=sf1.index, y='oil_rate', hue='iteration', style='period')

s1.forecast.df()
docs/examples/4-schedules/4-DCACashFlow_int.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] _cell_guid="39bd55c8-59de-4990-8ea7-87bbda59e545" _uuid="dc45f93b8957b09e3e6009f03781082b5b463055"
# ## Thanks to [<NAME>](https://www.kaggle.com/arsenalist/bitcoin-prices-prediction) for the great notebook! I just added more classifiers and the output from all classifiers stacked into Extreme Gradient Boosting

# + [markdown] _cell_guid="cbfd67c6-00b2-4677-b961-9354f5b8d577" _uuid="17652e3882da6e380046f9ef493f3c97c0c9997e"
# ## I will use Adaptive Boosting, Bagging, Extra Trees, Gradient Boosting and Random Forest for the base models

# + _cell_guid="a969d2a3-9a6c-40cd-ada4-486430111237" _uuid="e25749272525ae99fd739594c6a1886bf126e9dc"
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import time
from datetime import datetime
from datetime import timedelta
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# on modern environments use sklearn.model_selection instead.
from sklearn import cross_validation
from sklearn.ensemble import *
sns.set()

# + _cell_guid="3167d14d-8bdb-4740-8176-35927ba6cdd0" _uuid="9fa65bfa2d816241a7f62f261002b19dd30937b1"
# Load the all-cryptocurrencies dataset and keep only Bitcoin rows, dropping
# columns not used by the models.
df = pd.read_csv('../input/all-crypto-currencies/crypto-markets.csv', parse_dates=['date'], index_col='date')
df = df[df['symbol']=='BTC']
df.drop(['volume','symbol','name','ranknow','market'],axis=1,inplace=True)
df.head()

# + _cell_guid="2567699a-cc44-46b7-bd2d-0f374f286361" _uuid="76f8ed0ea6d7781b28a0101cb70ad5ac77258382"
df['close'].plot(figsize=(12,6),label='Close')
df['close'].rolling(window=30).mean().plot(label='30 Day Avg')
plt.legend()

# + [markdown] _cell_guid="d3c00264-f759-4171-9c1f-207f275c73d6" _uuid="91075201b06efca1542d825ecf196dcdda1f452c"
# For me, I prefer to use MinMaxScaler (0, 1) from sklearn
#
# I set the period is 30, that is mean, today value is going to look 30 days ahead, you can change into any value. but do not too low or else the fitting will become saturated.

# + _cell_guid="cfe4f46a-b8dc-448d-ad58-cbac147f8b01" _uuid="3ab6453e39ff539c1e12695236e02f08061df5a0"
period = 30
# Normalize the close price (column index 3) to [0, 1]; the target is the
# close price shifted `period` days into the future.
minmax = MinMaxScaler().fit(df.iloc[:, 3].values.reshape((-1,1)))
close_normalize = minmax.transform(df.iloc[:, 3].values.reshape((-1,1)))
normalized = pd.DataFrame(close_normalize)
normalized['Price_After_period']=normalized[0].shift(-period)
normalized.dropna(inplace=True)
X=normalized.drop('Price_After_period',axis=1)
print(normalized.head())
y=normalized['Price_After_period']
print(X.head())
y.head()

# + _cell_guid="c761e588-eaa8-41c0-87e0-afa9b5a08524" _uuid="f447cb10331bbda8226eeaed4cf01f3c8ddc4c9a"
# 80/20 train/test split with a fixed random seed for reproducibility.
train_X,test_X,train_Y,test_Y=cross_validation.train_test_split(X, y, test_size=0.2,random_state=101)

# + _cell_guid="c54a8f4e-7d5b-4545-aa2d-2a4a7bbfaf56" _uuid="bf1106626b0c76b9aec6a6007753502892fcfd32"
# The five base regressors of the stacking ensemble.
from sklearn.ensemble import *
ada = AdaBoostRegressor(n_estimators=500, learning_rate=0.1)
bagging = BaggingRegressor(n_estimators=500)
et = ExtraTreesRegressor(n_estimators=500)
gb = GradientBoostingRegressor(n_estimators=500, learning_rate=0.1)
rf = RandomForestRegressor(n_estimators=500,random_state=101)

# + _cell_guid="99f4100c-d664-49bd-a72b-2e54ac2abe1f" _uuid="f7fa503c6ee2ce3ffd5118b5fae9a62e64a096b3"
ada.fit(train_X, train_Y)
bagging.fit(train_X, train_Y)
et.fit(train_X, train_Y)
gb.fit(train_X, train_Y)
rf.fit(train_X, train_Y)

# + _cell_guid="3caae918-0dce-4dd3-9faf-0aa402e75c64" _uuid="307220b04e57d263644297be1696197a3032c46e"
# Score each base model on the held-out data (score() is R^2 for regressors).
accuracy=ada.score(test_X, test_Y)
accuracy=accuracy*100
accuracy = float("{0:.4f}".format(accuracy))
print('Adaptive Accuracy is:',accuracy,'%')

# + _cell_guid="b4de2653-5197-4abd-854a-2762b5dfefdc" _uuid="dcb993773fd179e99ef4f0a846f463b9fa5a4371"
accuracy=bagging.score(test_X, test_Y)
accuracy=accuracy*100
accuracy = float("{0:.4f}".format(accuracy))
print('Bagging Accuracy is:',accuracy,'%')

# + _cell_guid="9749969d-bb3a-4001-9368-8d891e233e34" _uuid="689666d495e768c9c061e464ff0d1b35a3d70195"
accuracy=et.score(test_X, test_Y)
accuracy=accuracy*100
accuracy = float("{0:.4f}".format(accuracy))
print('Extra Trees Accuracy is:',accuracy,'%')

# + _cell_guid="48a57f50-8237-4b57-b6a0-569fac114f74" _uuid="293d62aeecc53ae1f9acf1cfa1a95c0d4d41dc15"
accuracy=gb.score(test_X, test_Y)
accuracy=accuracy*100
accuracy = float("{0:.4f}".format(accuracy))
print('Gradient Boosting Accuracy is:',accuracy,'%')

# + _cell_guid="6db70103-8674-4be1-9a63-9943082ab6c7" _uuid="210af80b1d15f5e6bf6992e8adeafe6876d8bd0b"
accuracy=rf.score(test_X, test_Y)
accuracy=accuracy*100
accuracy = float("{0:.4f}".format(accuracy))
print('Random Forest Accuracy is:',accuracy,'%')

# + [markdown] _cell_guid="3737083e-3dbe-49bd-9101-d1d7cb3a6ebe" _uuid="790149b865e91e64f0e11b2f85acf8bc0d23d7b2"
# Now, which model predict almost near to our test value?
#
#
# Pearson please!

# + _cell_guid="0fa2291a-7143-4214-be9c-663e0420180d" _uuid="fc6d04f103a2047243f4db6c0d03739f152d5512"
# Stack each model's test predictions next to the true values and inspect the
# Pearson correlation between them.
ada_out = ada.predict(test_X)
bagging_out = bagging.predict(test_X)
et_out = et.predict(test_X)
gb_out = gb.predict(test_X)
rf_out = rf.predict(test_X)
stack_predict = np.vstack([ada_out,bagging_out,et_out,gb_out,rf_out,test_Y]).T
corr_df = pd.DataFrame(stack_predict, columns=['ada','bagging','et','gb','rf','test'])
plt.figure(figsize=(10,5))
sns.heatmap(corr_df.corr(), annot=True)
plt.show()

# + [markdown] _cell_guid="c105fe12-7df6-44d0-8267-4d44845cb356" _uuid="8f5d4aac224deff10770fa75ff77032e2f25c5d9"
# Seaborn round up the numbers, plus the value is still normalized

# + _cell_guid="e43ad5c5-0578-4e53-9de8-7fb924345f32" _uuid="30fcca12fb62bfaf66c8dcb23741e75beff5cd14"
corr_df.head()

# + _cell_guid="82114383-c8e8-4326-9112-11588122555f" _uuid="3197a2efed6444cba379075b8c19c249be559304"
# Undo the MinMax scaling so the predictions are back in price units.
corr_df.ada = minmax.inverse_transform(corr_df.ada.values.reshape((-1,1))).flatten()
corr_df.bagging = minmax.inverse_transform(corr_df.bagging.values.reshape((-1,1))).flatten()
corr_df.et = minmax.inverse_transform(corr_df.et.values.reshape((-1,1))).flatten()
corr_df.gb = minmax.inverse_transform(corr_df.gb.values.reshape((-1,1))).flatten()
corr_df.rf = minmax.inverse_transform(corr_df.rf.values.reshape((-1,1))).flatten()
corr_df.test = minmax.inverse_transform(corr_df.test.values.reshape((-1,1))).flatten()

# + _cell_guid="f9fd77fb-d9de-47df-a50a-9627098b2fd6" _uuid="8665ca1892f2e518d5641bd21d823eebb6641219"
corr_df.head()

# + [markdown] _cell_guid="e772c816-08a5-4269-bda8-a448f2300129" _uuid="67a0d59fed8b507bd3e767e37e81c88f0764b19f"
# ## Now we able to see the huge difference!

# + _cell_guid="033b6a22-6079-4df5-b137-8b8f54379bf7" _uuid="72a04738129b2906192eeac95b29a263b7b063a8"
# Meta-learner: XGBoost regressor trained on the stacked base-model outputs,
# with early stopping on the held-out stack.
import xgboost as xgb
params_xgd = {
    'max_depth': 7,
    'objective': 'reg:linear',
    'learning_rate': 0.033,
    'n_estimators': 10000
    }
clf = xgb.XGBRegressor(**params_xgd)
stack_train = np.vstack([ada.predict(train_X), bagging.predict(train_X),
                         et.predict(train_X), gb.predict(train_X),
                         rf.predict(train_X)]).T
stack_test = np.vstack([ada.predict(test_X), bagging.predict(test_X),
                        et.predict(test_X), gb.predict(test_X),
                        rf.predict(test_X)]).T
clf.fit(stack_train, train_Y, eval_set=[(stack_test, test_Y)], eval_metric='rmse',
        early_stopping_rounds=20, verbose=True)

# + _cell_guid="6092d0c1-4fe6-4bae-b862-3476617262f4" _uuid="cd54d3284a8d792427db232d6b0df37ca314a9fa"
fig, ax = plt.subplots(figsize=(10,10))
xgb.plot_importance(clf, ax=ax)
plt.show()

# + [markdown] _cell_guid="5cc2b733-1697-4d5d-82b6-ddbc1a98692e" _uuid="83fb67fb9739fbd8a00b1b8c99174b0a67dd4b3a"
# f2 = extra trees, wew!

# + [markdown] _cell_guid="bbd1378e-0556-4249-af92-6c089b659e75" _uuid="95c780d8d598f7cdce475506cbc6f7813d491568"
# ## Now it is time to predict, we will predict 10 days in the future, what happen to Bitcoin

# + _cell_guid="4fdf66af-2daf-4d21-aabb-392d531f5d3c" _uuid="a8768460f3ff9d0bdb2015c5fe88f54384caf1f5"
# Prepend the historical series (X[0] is the normalized close column) to each
# model's prediction over the last `period + 10` rows.
out_ada=X[0].tolist() + ada.predict(X[-period-10:]).tolist()
out_bagging=X[0].tolist() + bagging.predict(X[-period-10:]).tolist()
out_et=X[0].tolist() + et.predict(X[-period-10:]).tolist()
out_gb=X[0].tolist() + gb.predict(X[-period-10:]).tolist()
out_rf=X[0].tolist() + rf.predict(X[-period-10:]).tolist()

# + _cell_guid="ad073654-0a85-4522-9afa-9c6691aa815e" _uuid="07845e81bb356a2e32109d3f407fb43321a1c86c"
# Same, but through the XGBoost meta-learner fed with the stacked base outputs.
out_xgb=X[0].tolist()+clf.predict(np.vstack([ada.predict(X[-period-10:]),
                                             bagging.predict(X[-period-10:]),
                                             et.predict(X[-period-10:]),
                                             gb.predict(X[-period-10:]),
                                             rf.predict(X[-period-10:])]).T).tolist()

# + _cell_guid="76441eb0-d16b-437e-a4ec-e669f338df23" _uuid="ab2403e1f7b145042ddbde2b67473d07b2ad13fd"
# Build the future date axis starting the day after the last observed date.
last_date=pd.to_datetime(df.iloc[-1].name)
print(last_date)
modified_date = last_date + timedelta(days=1)
date=pd.date_range(modified_date,periods=period,freq='D')

# + _cell_guid="f0cb1ee6-cf1d-4650-85c7-2a6f996ee8a0" _uuid="96528a656155dc238354d4a88498f4ef3c06ab1e"
# Map every normalized series back into price units for plotting.
out_ada = minmax.inverse_transform(np.array(out_ada).reshape((-1,1))).flatten()
out_bagging = minmax.inverse_transform(np.array(out_bagging).reshape((-1,1))).flatten()
out_et = minmax.inverse_transform(np.array(out_et).reshape((-1,1))).flatten()
out_gb = minmax.inverse_transform(np.array(out_gb).reshape((-1,1))).flatten()
out_rf = minmax.inverse_transform(np.array(out_rf).reshape((-1,1))).flatten()
out_xgb = minmax.inverse_transform(np.array(out_xgb).reshape((-1,1))).flatten()

# + _cell_guid="a02b5264-f73f-487f-8c28-a46e62b65b61" _uuid="d01c44fc446e0d3a9c301089e1444b9bc3a66347"
# Combined label axis: historical dates followed by the future date range.
date_ori=pd.to_datetime(df.index.date[:-period+10]).strftime(date_format='%Y-%m-%d').tolist()+pd.Series(date).dt.strftime(date_format='%Y-%m-%d').tolist()

# + _cell_guid="cfbb1110-7ae1-44c7-a0fe-c0100e286c33" _uuid="ad29a921793ef26e5e4421c3f00d426625bf3ccb"
len(date_ori)

# + _cell_guid="fd798f60-14cf-4a0f-90f5-4f28b4a21b53" _uuid="b23ee9ed64a6560287fd714b1a7ab3186b3a58d7"
len(out_ada)

# + _cell_guid="7aadf703-11b7-480d-8c11-7fd993790c5e" _uuid="f3ae4296ee5e53a3580b898d5d9d2de548fd44f1"
# Overlay the true close series and every model's forecast.
fig = plt.figure(figsize = (15,10))
ax = plt.subplot(111)
x_range = np.arange(df.shape[0])
x_range_future = np.arange(len(out_ada))
ax.plot(x_range, df.close, label = 'true Close')
ax.plot(x_range_future, out_ada, label = 'ADA predict Close')
ax.plot(x_range_future, out_bagging, label = 'BAGGING predict Close')
ax.plot(x_range_future, out_et, label = 'ET predict Close')
ax.plot(x_range_future, out_gb, label = 'GB predict Close')
ax.plot(x_range_future, out_rf, label = 'RF predict Close')
ax.plot(x_range_future, out_xgb, label = 'STACK XGB predict Close')
# Shrink the axes so the legend fits below the plot.
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
ax.legend(loc = 'upper center', bbox_to_anchor= (0.5, -0.05), fancybox = True, shadow = True, ncol = 5)
plt.title('overlap stock market')
plt.xticks(x_range_future[::180], date_ori[::180])
plt.show()

# + [markdown] _cell_guid="7e0ec897-7201-4794-94bf-34f9c0e8267b" _uuid="bdf8922a94cca5a4772c205ce45510724f70644d"
# Wait..

# + _cell_guid="0714e5f9-740f-4aea-bf64-c654326f2edf" _uuid="1bf2d685fc69d3d7d389be0a34c0fa8cccc70926"
from PIL import Image
bitcoin_im = Image.open('../input/bitcoinpic/Bitcoin-Logo-640x480.png')
# -

# Same overlay plot, with the Bitcoin logo watermarked on the figure.
fig = plt.figure(figsize = (15,10))
ax = plt.subplot(111)
x_range = np.arange(df.shape[0])
x_range_future = np.arange(len(out_ada))
ax.plot(x_range, df.close, label = 'true Close')
ax.plot(x_range_future, out_ada, label = 'ADA predict Close')
ax.plot(x_range_future, out_bagging, label = 'BAGGING predict Close')
ax.plot(x_range_future, out_et, label = 'ET predict Close')
ax.plot(x_range_future, out_gb, label = 'GB predict Close')
ax.plot(x_range_future, out_rf, label = 'RF predict Close')
ax.plot(x_range_future, out_xgb, label = 'STACK XGB predict Close')
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
ax.legend(loc = 'upper center', bbox_to_anchor= (0.5, -0.05), fancybox = True, shadow = True, ncol = 5)
plt.title('overlap stock market')
plt.xticks(x_range_future[::180], date_ori[::180])
fig.figimage(bitcoin_im, 100, 120, zorder=3,alpha=.5)
plt.show()

# # Way more cooler, Biatch!

# # why font for single hash not very big.

# Zoomed view: last 30 observed days plus the 40 most recent forecast points.
fig = plt.figure(figsize = (15,10))
ax = plt.subplot(111)
x_range = np.arange(df.shape[0])
x_range_future = np.arange(len(out_ada))
ax.plot(x_range[-30:], df.close[-30:], label = 'true Close')
ax.plot(x_range_future[-40:], out_ada[-40:], label = 'ADA predict Close')
ax.plot(x_range_future[-40:], out_bagging[-40:], label = 'BAGGING predict Close')
ax.plot(x_range_future[-40:], out_et[-40:], label = 'ET predict Close')
ax.plot(x_range_future[-40:], out_gb[-40:], label = 'GB predict Close')
ax.plot(x_range_future[-40:], out_rf[-40:], label = 'RF predict Close')
ax.plot(x_range_future[-40:], out_xgb[-40:], label = 'STACK XGB predict Close')
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
ax.legend(loc = 'upper center', bbox_to_anchor= (0.5, -0.05), fancybox = True, shadow = True, ncol = 5)
plt.title('overlap stock market')
plt.xticks(x_range_future[-40:][::5], date_ori[-40:][::5])
fig.figimage(bitcoin_im, 100, 120, zorder=3,alpha=.5)
plt.show()
stochastic-study/2.cryptocurrency-stack-prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tf1.2]
#     language: python
#     name: conda-env-tf1.2-py
# ---

# +
# coding: utf-8

# In[ ]:


"""
Copyright (c) 2017, by the Authors: <NAME>
This software is freely available under the MIT Public License.
Please see the License file in the root for details.

The following code snippet will convert the keras model file,
which is saved using model.save('kerasmodel_weight_file'),
to the freezed .pb tensorflow weight file which holds both the
network architecture and its associated weights.
""";

# In[ ]:


'''
Input arguments:

num_output: this value has nothing to do with the number of classes, batch_size, etc.,
and it is mostly equal to 1. If the network is a **multi-stream network**
(forked network with multiple outputs), set the value to the number of outputs.

quantize: if set to True, use the quantize feature of Tensorflow
(https://www.tensorflow.org/performance/quantization) [default: False]

use_theano: Thaeno and Tensorflow implement convolution in different ways.
When using Keras with Theano backend, the order is set to 'channels_first'.
This feature is not fully tested, and doesn't work with quantizization [default: False]

input_fld: directory holding the keras weights file [default: .]

output_fld: destination directory to save the tensorflow files [default: .]

input_model_file: name of the input weight file [default: 'model.h5']

output_model_file: name of the output weight file [default: args.input_model_file + '.pb']

graph_def: if set to True, will write the graph definition as an ascii file [default: False]

output_graphdef_file: if graph_def is set to True, the file name of the graph definition [default: model.ascii]

output_node_prefix: the prefix to use for output nodes. [default: output_node]

'''


# Parse input arguments

# In[ ]:


import argparse
parser = argparse.ArgumentParser(description='set input arguments')
parser.add_argument('-input_fld', action="store",
                    dest='input_fld', type=str, default='.')
parser.add_argument('-output_fld', action="store",
                    dest='output_fld', type=str, default='')
# NOTE(review): the default value carries a trailing space ('dcf.h5 ') —
# looks unintentional; confirm before relying on the default path.
parser.add_argument('-input_model_file', action="store",
                    dest='input_model_file', type=str, default='weight/dcf.h5 ')
parser.add_argument('-output_model_file', action="store",
                    dest='output_model_file', type=str, default='weight/dcf.h5.pb')
parser.add_argument('-output_graphdef_file', action="store",
                    dest='output_graphdef_file', type=str, default='model.ascii')
parser.add_argument('-num_outputs', action="store",
                    dest='num_outputs', type=int, default=1)
parser.add_argument('-graph_def', action="store",
                    dest='graph_def', type=bool, default=False)
parser.add_argument('-output_node_prefix', action="store",
                    dest='output_node_prefix', type=str, default='output_node')
parser.add_argument('-quantize', action="store",
                    dest='quantize', type=bool, default=False)
parser.add_argument('-theano_backend', action="store",
                    dest='theano_backend', type=bool, default=False)
# -f absorbs the kernel-connection-file flag Jupyter passes to the process.
parser.add_argument('-f')
args = parser.parse_args()
parser.print_help()
print('input args: ', args)

if args.theano_backend is True and args.quantize is True:
    raise ValueError("Quantize feature does not work with theano backend.")


# initialize

# In[ ]:


import tensorflow as tf
from tensorflow.keras.models import load_model
from pathlib import Path
from tensorflow.keras import backend as K

output_fld = args.input_fld if args.output_fld == '' else args.output_fld
if args.output_model_file == '':
    args.output_model_file = str(Path(args.input_model_file).name) + '.pb'
Path(output_fld).mkdir(parents=True, exist_ok=True)
weight_file_path = str(Path(args.input_fld) / args.input_model_file)


# Load keras model and rename output

# In[ ]:


# Learning phase 0 = inference mode; must be set before loading the model so
# layers like Dropout/BatchNorm are frozen in the exported graph.
K.set_learning_phase(0)
if args.theano_backend:
    K.set_image_data_format('channels_first')
else:
    K.set_image_data_format('channels_last')

try:
    net_model = load_model(weight_file_path)
except ValueError as err:
    print('''Input file specified ({}) only holds the weights, and not the model defenition.
    Save the model using mode.save(filename.h5) which will contain the network architecture
    as well as its weights.
    If the model is saved using model.save_weights(filename.h5), the model architecture is
    expected to be saved separately in a json format and loaded prior to loading the weights.
    Check the keras documentation for more details (https://keras.io/getting-started/faq/)'''
          .format(weight_file_path))
    raise err

# Earlier approach kept for reference: renaming outputs with a configurable
# prefix via tf.identity.  The active code below reads node names directly.
# num_output = args.num_outputs
# pred = [None]*num_output
# pred_node_names = [None]*num_output
# for i in range(num_output):
#     pred_node_names[i] = args.output_node_prefix+str(i)
#     pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])

# num_output = len(net_model.output_names)
# pred_node_names = [None]*num_output
# pred = [None]*num_output
# # pred_node_names = net_model.output_names
# for i in range(num_output):
#     pred_node_names[i] = args.output_node_prefix+str(i)
#     pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])

input_node_names = [node.op.name for node in net_model.inputs]
print('Input nodes names are: ', input_node_names)

pred_node_names = [node.op.name for node in net_model.outputs]
print('Output nodes names are: ', pred_node_names)

# print("net_model.input.op.name:", net_model.input.op.name)
# print("net_model.output.op.name:", net_model.output.op.name)
# print("net_model.input_names:", net_model.input_names)
# print("net_model.output_names:", net_model.output_names)

# [optional] write graph definition in ascii

# In[ ]:


sess = K.get_session()

if args.graph_def:
    f = args.output_graphdef_file
    tf.train.write_graph(sess.graph.as_graph_def(), output_fld, f, as_text=True)
    print('saved the graph definition in ascii format at: ', str(Path(output_fld) / f))


# convert variables to constants and save

# In[ ]:


from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
if args.quantize:
    from tensorflow.tools.graph_transforms import TransformGraph
    transforms = ["quantize_weights", "quantize_nodes"]
    transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [], pred_node_names, transforms)
    constant_graph = graph_util.convert_variables_to_constants(sess, transformed_graph_def, pred_node_names)
else:
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph, output_fld, args.output_model_file, as_text=False)
print('saved the freezed graph (ready for inference) at: ', str(Path(output_fld) / args.output_model_file))
# -

# ### Predict

# +
# #!/usr/bin/env python

# Inference demo: load a frozen .pb graph and classify one Fashion-MNIST image.
import tensorflow as tf
import numpy as np
from tensorflow.python.platform import gfile

# OpenCV
import cv2

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Read image and normalize to [0, 1], shaped as (batch, 28, 28, 1).
img = cv2.imread('fashion_0.png', cv2.IMREAD_GRAYSCALE)
print('img.shape = ', img.shape)
img = img.astype('float32')
img /= 255.0
img = img.reshape(1, 28, 28, 1)

# Initialize a tensorflow session
with tf.Session() as sess:
    # Load the protobuf graph
    with gfile.FastGFile("models/fashion_mnist.h5.pb",'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Add the graph to the session
    tf.import_graph_def(graph_def, name='')

    # Get graph
    graph = tf.get_default_graph()

    # Get tensor from graph
    # NOTE(review): tensor/feed names are hard-coded to this particular model —
    # they must match the frozen graph's node names.
    pred = graph.get_tensor_by_name("output_class/Softmax:0")

    # Run the session, evaluating our "c" operation from the graph
    res = sess.run(pred, feed_dict={'input_image_input:0': img})

    # Pick the most probable class.
    pred_index = np.argmax(res[0])

    # Print the prediction.
    print('Predict:', pred_index, ' Label:', class_names[pred_index])
# -
convertFunc/Convert_H52PB_ok.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #importing libraries import numpy as np import pandas as pd import math import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as stats from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics # + tags=[] dataset = pd.read_csv('Crimes_2001_to_2022.csv',low_memory=False) dataset.head(10) # dataset.dtypes # + # droping the features that are not usefull dataset=dataset.drop(columns=['ID','Case Number','Description','Updated On','Block']) # X.shape print('Columns in dataset: ', dataset.columns) dataset.shape # + active="" # ID: Unique identifier for the record. # # Case Number: The Chicago Police Department RD Number (Records Division Number), which is unique to the incident. # # Date: Date when the incident occurred. this is sometimes a best estimate. # # Block: The partially redacted address where the incident occurred, placing it on the same block as the actual address. # # IUCR: The Illinois Unifrom Crime Reporting code. This is directly linked to the Primary Type and Description. # # Primary Type: The primary description of the IUCR code. # # Description: The secondary description of the IUCR code, a subcategory of the primary description. # # Location Description: Description of the location where the incident occurred. # # Arrest: Indicates whether an arrest was made. # # Domestic: Indicates whether the incident was domestic-related as defined by the Illinois Domestic Violence Act. # # Beat: Indicates the beat where the incident occurred. A beat is the smallest police geographic area – each beat has a dedicated police beat car. Three to five beats make up a police sector, and three sectors make up a police district. 
# # District: Indicates the police district where the incident occurred. # # Ward: The ward (City Council district) where the incident occurred. # # Community Area: Indicates the community area where the incident occurred. Chicago has 77 community areas. # # FBI Code: Indicates the crime classification as outlined in the FBI's National Incident-Based Reporting System (NIBRS). # # X Coordinate: The x coordinate of the location where the incident occurred in State Plane Illinois East NAD 1983 projection. # # Y Coordinate: The y coordinate of the location where the incident occurred in State Plane Illinois East NAD 1983 projection. # # Year: Year the incident occurred. # # Updated On: Date and time the record was last updated. # # Latitude: The latitude of the location where the incident occurred. # # Longitude: The longitude of the location where the incident occurred. # # Location: The location where the incident occurred in a format that allows for creation of maps and other geographic operations on data portal. 
# - # droping the null value enteries drop null dataset.dropna(inplace=True) dataset # + # Before removing Null values 1048575 # After removing Null value 1015247 # Total Null values removed 33328 # - # ignore latitude and logitude outside of the chicago dataset=dataset[(dataset["Latitude"] < 45) & (dataset["Latitude"] > 40) & (dataset["Longitude"] < -85) & (dataset["Longitude"] > -90)] dataset # # listing the crimes category wise with their counts types=dataset['Primary Type'].value_counts().sort_values(ascending=False) types # + # crime types according to their counts in dataframe # 15 classes # major_crimes=['THEFT','BATTERY','CRIMINAL DAMAGE','ASSAULT','OTHER OFFENSE','DECEPTIVE PRACTICE','NARCOTICS','BURGLARY','MOTOR VEHICLE THEFT' # ,'ROBBERY','CRIMINAL TRESPASS','WEAPONS VIOLATION','OFFENSE INVOLVING CHILDREN','PUBLIC PEACE VIOLATION','CRIM SEXUAL ASSAULT'] # 8 classes # storing major crime types according to their counts in dataframe # major_crimes=['THEFT','BATTERY','CRIMINAL DAMAGE','ASSAULT','OTHER OFFENSE','DECEPTIVE PRACTICE','NARCOTICS','BURGLARY'] # - # major crime time #---> Storing Major Crimes major_crimes=['THEFT','BATTERY','CRIMINAL DAMAGE','ASSAULT'] crime_df = dataset.loc[dataset['Primary Type'] .isin(major_crimes)] crime_df # + data = crime_df.pivot_table(index='Year', columns='Primary Type', aggfunc='count') print(data) # since we dont have different crimes in early years so we drop data of these years # - # selecting the dataset which starts from 2015 crime_df=crime_df[crime_df['Year']>=2015] crime_df temp=crime_df.copy() temp # getting the half of our data set for random data selection nrows= temp.shape[0] portion=math.floor(nrows/3) portion first=temp.iloc[0:portion,:] first.shape nextp=portion+portion+1 scnd=temp.iloc[(portion+1):nextp,:] scnd.shape finalp=nextp+portion+1 third=temp.iloc[(nextp+1):finalp,:] third.shape # + # picking random 5k enteries from the first part index=np.random.choice(portion,replace=False,size = 2000) 
# Random sample of 2000 incidents from the first third (positions drawn in the
# previous cell), plus a quick outlier check on the Ward column.
df_frst = first.iloc[index]
df_frst.shape
sns.set_theme(style="whitegrid")
ax = sns.boxplot(x=df_frst["Ward"])

# +
# Random sample of 2000 incidents from the second third.
index = np.random.choice(portion, replace=False, size=2000)
df_scnd = scnd.iloc[index]
df_scnd

# +
# Random sample of 2000 incidents from the third third.
index = np.random.choice(portion, replace=False, size=2000)
df_third = third.iloc[index]
df_third

# +
# Combine the three samples into one working DataFrame (6000 rows).
temp_df = pd.concat([df_frst, df_scnd], ignore_index=True)
final_df = pd.concat([temp_df, df_third], ignore_index=True)
final_df
# -

df = final_df.copy()

# +
# Use PCA to collapse Latitude/Longitude into a single 'Location' feature.
from sklearn.decomposition import PCA

location = df[['Latitude', 'Longitude']]
pca = PCA(n_components=1, random_state=123)
locat = pca.fit_transform(location)
df['Location'] = locat
df

# +
# Convert the Date column from string to an actual datetime dtype.
df.Date = pd.to_datetime(df.Date)
df.head(10)
# -

# Extract month and weekday features from the date, then drop the raw column.
df['month'] = df.Date.dt.month
df['weekday'] = df.Date.dt.day_of_week
df = df.drop(columns='Date')
df

# +
# assigning crimetype: class labels for the four retained major crime types.
# Earlier experiments used up to 15 classes ('OTHER OFFENSE': '5' ...
# 'CRIM SEXUAL ASSAULT': '15'); only the top four are kept here.
_CRIME_LABELS = {
    'THEFT': '1',
    'BATTERY': '2',
    'CRIMINAL DAMAGE': '3',
    'ASSAULT': '4',
}


def crime_type(t):
    """Return the class label ('1'-'4') for a major crime type, '0' otherwise."""
    return _CRIME_LABELS.get(t, '0')


cp_crime = df.copy()
cp_crime['crimeType'] = cp_crime['Primary Type'].map(crime_type)
cp_crime

# +
# Integer-encode the categorical 'Location Description' column.
labelEncoder = LabelEncoder()
locDes_enc = labelEncoder.fit_transform(cp_crime['Location Description'])
cp_crime['Location Description'] = locDes_enc
cp_crime.head()
# +
# Integer-encode the boolean 'Arrest' flag.
labelEncoder2 = LabelEncoder()
arrest_enc = labelEncoder2.fit_transform(cp_crime['Arrest'])
cp_crime['Arrest'] = arrest_enc
cp_crime.head()

# +
# Integer-encode the boolean 'Domestic' flag.
labelEncoder3 = LabelEncoder()
domestic_enc = labelEncoder3.fit_transform(cp_crime['Domestic'])
cp_crime['Domestic'] = domestic_enc
cp_crime.head()
# -

# feature scaling: min-max scale 'Beat' and the two map coordinates into [0, 1]
scaler = preprocessing.MinMaxScaler()
cp_crime[['Beat']] = scaler.fit_transform(cp_crime[['Beat']])
cp_crime[['X Coordinate', 'Y Coordinate']] = scaler.fit_transform(cp_crime[['X Coordinate', 'Y Coordinate']])
cp_crime

# using correlation for the feature selection
# NOTE(review): cp_crime still contains string columns (e.g. 'Primary Type');
# DataFrame.corr() skips them on pandas<2 but raises on pandas>=2 because
# numeric_only now defaults to False -- TODO confirm the pandas version used.
corelation = cp_crime.corr()
corelation

plt.figure(figsize=(10,7))
sns.heatmap(corelation,annot=True)

# +
# month/weekday have low correlation with the target, so we drop them;
# beat correlates strongly with district, and the X/Y coordinates with
# longitude/latitude/location, so the redundant columns are excluded below
selected_cols=['Location Description','Arrest','Domestic','Beat','Ward','Community Area','Year','X Coordinate','Y Coordinate','Location']
# -

# Feature matrix X and integer class labels Y ('1'-'4' produced by crime_type).
X=cp_crime[selected_cols]
Y=cp_crime['crimeType']
Y=Y.astype(int)
Y.dtype

# Cardinality of each selected feature.
for c in selected_cols:
    print(f'{c}:{len(cp_crime[c].unique())}')

# Distribution / outlier check for each selected numeric feature.
sns.set_theme(style="whitegrid")
selected_cols=['Location Description','Arrest','Domestic','Beat','Ward','Community Area','Year','X Coordinate','Y Coordinate','Location']
sns.boxplot(x=cp_crime['Location Description'])
plt.show()
sns.boxplot(x=cp_crime['Beat'])
plt.show()
sns.boxplot(x=cp_crime['Ward'])
plt.show()
sns.boxplot(x=cp_crime['Community Area'])
plt.show()
sns.boxplot(x=cp_crime['Year'])
plt.show()
sns.boxplot(x=cp_crime['Location'])
plt.show()

# +
# for xg boost: XGBoost expects labels 0..n_classes-1, so remap 1-4 to 0-3
Y=Y.map({1:0,2:1,3:2,4:3})

# +
# Tarining and testing: hold out 20% of the data for testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,Y,test_size=0.2, random_state=0)

# +
# Models used
# 1- Logistic Regression
# 2- Naive Bayes
# 3- XG Boost
# 4- Random Forest
# 5- Knn
# 6- SVM
# 7- Ada Boost
# 8- Decision Tree Classifier (J48)

# +
# Logistic Regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver="saga", multi_class='ovr',max_iter=12000)
lr.fit(X_train, y_train)
print('Accuracy of Logistic Regression', lr.score(X_test, y_test))

# +
# Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train,y_train)
print('Accuracy of Naive Bayes', gnb.score(X_test, y_test))

# +
# # Categoric Naivee Bayes
# from sklearn.naive_bayes import CategoricalNB
# cnb = CategoricalNB()
# cnb.fit(X_train,y_train)
# print('Accuracy of Categoric Naive Byaes', cnb.score(X_test, y_test))

# +
# KNN
knn = KNeighborsClassifier(n_neighbors = 10)
knn.fit(X_train, y_train)
predictions = knn.predict(X_test)
print('Accuracy of KNN', knn.score(X_test, y_test))
pred_train = knn.predict(X_train)
pred_i = knn.predict(X_test)
# NOTE(review): the first print below reports TRAIN accuracy despite its
# 'Test accuracy' label; the second print is the actual test accuracy.
print('Test accuracy ', metrics.accuracy_score(y_train, pred_train))
print('Accuracy ', metrics.accuracy_score(y_test, pred_i))
# -

## Hyperparameter optimization using RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV

# +
## Hyper Parameter Optimization -- search space for the XGBoost classifier
params={
 "learning_rate" : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] ,
 "max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
 "min_child_weight" : [ 1, 3, 5, 7 ],
 "gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ],
 "colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ]
}

# +
# Calculate the accuracy
import xgboost as xgb
# NOTE(review): the next line rebinds the module name 'xgb' to an estimator
# instance, so 'import xgboost as xgb' cannot be reused after this cell.
xgb = xgb.XGBClassifier()
#xgb.set_params(n_estimators=10)
# NOTE(review): scoring='roc_auc' is a binary-only scorer while this target has
# 4 classes -- 'roc_auc_ovr' (or plain accuracy) is presumably intended; TODO confirm.
random_search=RandomizedSearchCV(xgb,param_distributions=params,n_iter=5,scoring='roc_auc',n_jobs=-1,cv=5,verbose=3)
random_search.fit(X_train, y_train) # Fit it to the training set
# -

print(random_search.best_estimator_)

# + tags=[]
random_search.best_params_

# +
# Refit XGBoost with the hyperparameters found by the randomized search.
xgb=xgb.set_params(base_score=0.5, booster='gbtree', callbacks=None,
              colsample_bylevel=1, colsample_bynode=1, colsample_bytree=0.5,
              early_stopping_rounds=None, enable_categorical=False,
              eval_metric=None, gamma=0.1, gpu_id=-1, grow_policy='depthwise',
              importance_type=None, interaction_constraints='',
              learning_rate=0.15, max_bin=256, max_cat_to_onehot=4,
              max_delta_step=0, max_depth=12, max_leaves=0, min_child_weight=5,
              monotone_constraints='()', n_estimators=100, n_jobs=0,
              num_parallel_tree=1, objective='multi:softprob', predictor='auto',
              random_state=0, reg_alpha=0)
xgb.fit(X_train, y_train)

# Predict the labels of the test set
preds = xgb.predict(X_test)
accuracy = float(np.sum(preds==y_test))/y_test.shape[0]

# Print the baseline accuracy
print("Baseline accuracy:", accuracy)
# -

print(xgb)

y_train.unique()

# +
# importing random forest classifier from assemble module
from sklearn.ensemble import RandomForestClassifier

# creating a RF classifier
clf = RandomForestClassifier(n_estimators = 300)

# Training the model on the training dataset
# fit function is used to train the model using the training sets as parameters
clf.fit(X_train, y_train)

# performing predictions on the test dataset
y_pred = clf.predict(X_test)

# metrics are used to find accuracy or error
from sklearn import metrics
print()

# using metrics module for accuracy calculation
print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred))

# +
# # SVM
# from sklearn.svm import SVC
# svm = SVC(gamma='auto')
# svm.fit(X_train, y_train)
# print('Accuracy of SVM', svm.score(X_test, y_test))

# +
# Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
y_pred = tree.fit(X_train, y_train).predict(X_test)
total_datapoints = X_test.shape[0]
mislabeled_datapoints = (y_test != y_pred).sum()
correct_datapoints = total_datapoints-mislabeled_datapoints
percent_correct = (correct_datapoints / total_datapoints) * 100
# NOTE(review): the 'NSL-KDD' label in the message below looks copied from
# another project; this notebook uses Chicago crime data.
print("DecisionTreeClassifier results for NSL-KDD:\n")
print("Total datapoints: %d\nCorrect datapoints: %d\nMislabeled datapoints: %d\nPercent correct: %.2f%%"
      % (total_datapoints, correct_datapoints, mislabeled_datapoints, percent_correct))

# +
# Decision Tree Classifier (J48)
from sklearn.metrics import accuracy_score
# NOTE: this re-splits the data with random_state=1000, so the J48 results are
# not computed on the same train/test split as the models above.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 1000)
j48 = DecisionTreeClassifier(criterion = "gini",random_state = 1000,max_depth=500, min_samples_leaf=600)
j48.fit(X_train, y_train)
print(j48)
clf_entropy = DecisionTreeClassifier(criterion = "entropy", random_state = 1000,max_depth = 500, min_samples_leaf = 600)
clf_entropy.fit(X_train, y_train)
print(clf_entropy)
y_pred = j48.predict(X_test)
# print("Predicted values:")
# print(y_pred)
# print("Confusion Matrix: ",confusion_matrix(y_test, y_pred))
print ("Accuracy : ",accuracy_score(y_test,y_pred))
# print("Report : ",classification_report(y_test, y_pred))

# +
from sklearn.ensemble import AdaBoostClassifier

# Create adaboost classifer object
adb = AdaBoostClassifier(n_estimators=300, learning_rate=1)

# Train Adaboost Classifer
model = adb.fit(X_train, y_train)

#Predict the response for test dataset
y_pred = model.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))

# +
# Using Cross Validation
# Models used
# 1- Logistic Regression
# 2- Naive Bayes
# 3- XG Boost
# 4- Random Forest
# 5- Knn
# 6- SVM
# 7- Ada Boost
# 8- Decision Tree Classifier (J48)

# +
# # XG Boost
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(xgb,X_train, y_train,cv=10)
# score
# print('XG boost Using Cross Validation: ',score.mean())

# +
# # Logistic Regression
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(lr,X_train, y_train,cv=10)
# score
# print('Logistic Regression boost Using Cross Validation: ',score.mean())

# +
# # Naive Bayes
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(gnb,X_train, y_train,cv=10)
# score
# print('Naive Bayes Using Cross Validation: ',score.mean())

# +
# # Categoric Naive Bayes
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(cnb,X_train, y_train,cv=10)
# score
# print('Categoric Naive Bayes Using Cross Validation: ',score.mean())

# +
# # KNN
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(knn,X_train, y_train,cv=10)
# score
# print('KNN Using Cross Validation: ',score.mean())

# +
# # Random Forest
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(clf,X_train, y_train,cv=10)
# score
# print('Random Forest Using Cross Validation: ',score.mean())

# +
# # SVM
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(svm,X_train, y_train,cv=10)
# score
# print('Random Forest Using Cross Validation: ',score.mean())

# +
# # Decision Tree
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(tree,X_train, y_train,cv=10)
# score
# print('Decision Using Cross Validation: ',score.mean())

# +
# # Decision Tree Classifier (J48)
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(j48,X_train, y_train,cv=10)
# score
# print('J46 Using Cross Validation: ',score.mean())

# +
# # Ada Boost
# from sklearn.model_selection import cross_val_score
# score=cross_val_score(adb,X_train, y_train,cv=10)
# score
# print('Ada BoostUsing Cross Validation: ',score.mean())
# -
Notebooks/Prediction_using_ML.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np

# +
# Order is Siess, Pisa, Dartmouth, Baraffe
# -

# Age estimates and their uncertainties from the four model grids.
ages = np.array([19., 18., 17., 19.])
age_err = np.array([3, 2, 2., 2])

# Earlier mass estimates -- superseded by the reassignment just below.
masses = np.array([2.7, 2.71, 2.60, 2.68])
mass_err = np.array([0.06, 0.07, 0.07, 0.05])

# Updated mass estimates; these overwrite the arrays above and are the ones used.
masses = np.array([2.74, 2.72, 2.66, 2.70])
mass_err = np.array([0.07, 0.07, 0.07, 0.06])


def weighted_average(x, err):
    """Inverse-variance weighted mean of ``x`` and its standard error.

    Parameters
    ----------
    x : array-like
        Measurements.
    err : array-like
        1-sigma uncertainty of each measurement (same shape as ``x``).

    Returns
    -------
    tuple
        ``(mean, std)`` -- the weighted mean and its standard error.
    """
    weights = 1 / err ** 2
    norm = np.sum(weights)
    return (np.sum(x * weights) / norm, np.sqrt(1 / norm))


print(weighted_average(masses, mass_err))
print(weighted_average(ages, age_err))
notebooks/Weighted Means.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mirianbr/happiness-report/blob/main/world_happiness_report.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="l0_nwQObPBYf"
# # Exploratory data analysis - World Happiness Report
#
# This notebook was created following an [AI inclusive YouTube live](https://www.youtube.com/watch?v=9irM_mwe7T0).
#
# The dataset chosen by the event organizers was the [World Happiness Report](https://www.kaggle.com/ajaypalsinghlo/world-happiness-report-2021) - [detailed description](https://worldhappiness.report/ed/2021/). This year's edition of the report focuses on the impact of COVID-19 on happiness and mental health.
#
# ## Data dictionary
#
# ```
# Country name
# Regional indicator: World region the country belongs to (North America and ANZ, Western Europe, Central and Eastern Europe, Latin America and Caribbean, East Asia, Commonwealth of Independent States, Southeast Asia, Middle East and North Africa, Sub-Saharan Africa, South Asia)
# Ladder score: Based entirely on surveys with participants from the listed countries. According to the FAQ, this is called the Cantril ladder: it asks respondents to think of a ladder, with the best possible life for them being a 10, and the worst possible life being a 0.
# Standard error of the ladder score
# upperwhisker
# lowerwhisker
# Logged GDP per capita
# Social support
# Healthy life expectancy
# Freedom to make life choices
# Generosity
# Perceptions of corruption
# Ladder score in Dystopia
# ```

# + [markdown] id="HxYABDIEbxLV"
# ## Importing modules

# + colab={"base_uri": "https://localhost:8080/"} id="GI9q4YhIO-jD" outputId="bfb567dc-6c55-4b45-e172-44e3263770ab"
# !pip install opendatasets
# !pip install plotly

# + id="zh6-TlX8SjW9"
import opendatasets as od
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.express as px

sns.set_style('darkgrid')

# + [markdown] id="dojxiyFqbpPe"
# ## Importing and reading data

# + id="-chT9smNS6Cg"
# Downloading dataset, using the kaggle.json API credentials
od.download('https://www.kaggle.com/ajaypalsinghlo/world-happiness-report-2021')

# + id="jjES8bMoWxNB"
# Reading data
df = pd.read_csv('world-happiness-report-2021/world-happiness-report-2021.csv')

# + [markdown] id="9EVAPaWKbSlb"
# ## Preliminary data exploration
#
# Number of lines and columns, null values. Also removing columns not to be used.

# + colab={"base_uri": "https://localhost:8080/", "height": 400} id="_-nUJfYjXA3P" outputId="09d0c45c-fee1-4663-9077-64464084e042"
# First lines
df.head()

# + colab={"base_uri": "https://localhost:8080/"} id="FNFCqFZCXcWF" outputId="9c3f15c3-5d7e-406e-fb17-5fb00df5bc01"
# Dimensions
print('Dimensions:', df.shape[0], 'lines x', df.shape[1], 'columns')

# + colab={"base_uri": "https://localhost:8080/"} id="Jr-zykGIXyjO" outputId="ecb2ee2f-3468-420e-c713-809c074876ca"
# Data frame info
df.info()

# + colab={"base_uri": "https://localhost:8080/"} id="sGn29tdmYg12" outputId="66ccc404-09e6-4c55-8cb3-effb493bc16f"
# Data types count
df.dtypes.value_counts()

# + id="lRO4B7wTYvLD"
# Discarding some of the columns, to focus on the others
drop_list = ['Standard error of ladder score', 'upperwhisker', 'lowerwhisker',
             'Ladder score in Dystopia', 'Explained by: Log GDP per capita',
             'Explained by: Social support', 'Explained by: Healthy life expectancy',
             'Explained by: Freedom to make life choices', 'Explained by: Generosity',
             'Explained by: Perceptions of corruption', 'Dystopia + residual']
df.drop(columns=drop_list, inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="HfLd6wDQZ2vi" outputId="7487847d-e4a5-476f-8157-ab46d2422bfb"
# Basic stats for the remaining columns
df.describe()

# + colab={"base_uri": "https://localhost:8080/"} id="y5xLsJHPaxGy" outputId="e8285cf5-e03a-4473-fb29-3a7da3711352"
# Are there null values in the dataset?
df.isna().sum()

# + [markdown] id="tDiiV0MBcA3-"
# ## Data exploration
#
# Asking questions about what the data means - countries, all the faces of happiness per country.

# + colab={"base_uri": "https://localhost:8080/"} id="4rl2Tx6zcOKH" outputId="1df84529-4ef3-4e64-ff10-baec828aee50"
# Number of countries
df['Country name'].nunique()

# + colab={"base_uri": "https://localhost:8080/"} id="fZQk3ffEcgEI" outputId="c58a76f7-1df3-4a5a-d000-c3430b02256b"
# Which countries are they?
df['Country name'].unique()

# + colab={"base_uri": "https://localhost:8080/", "height": 115} id="I1zbwxMJcumR" outputId="2ec51dcd-d70a-4f06-d844-d631a88e0d8d"
# Finding Brazil
df[df['Country name'] == 'Brazil']

# + colab={"base_uri": "https://localhost:8080/"} id="MVSfzt6ac-F3" outputId="f51a0d30-8ae4-4064-a916-a039f07d3807"
# Listing regions
df['Regional indicator'].unique()

# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="VQ3fYclKdGlT" outputId="7d387c80-7ccc-4076-e777-52a29625d113"
# Top 10 in ladder score
df[['Country name', 'Ladder score']].sort_values(by='Ladder score', ascending=False).head(10)

# + colab={"base_uri": "https://localhost:8080/", "height": 394} id="BEirhqJIdpFr" outputId="9ba90d88-8f5b-4e23-ca2f-a8408aa8fac2"
# Regions ordered by ladder score average
df.groupby(['Regional indicator']).agg({'Ladder score': 'mean'}).sort_values(by='Ladder score', ascending=False)

# + colab={"base_uri": "https://localhost:8080/", "height": 517} id="08IO1qs6IGYB" outputId="be820d06-a602-4ea6-f0ea-00d96632ee17"
# Plotting bar chart for all regions (ladder score average)
ranking = df.groupby(['Regional indicator']).agg({'Ladder score': 'mean'}).reset_index()

plt.figure(figsize = (12, 8))
ax = sns.barplot(x = 'Ladder score', y = 'Regional indicator', data=ranking, orient='h',
                 color='blueviolet', order=ranking.sort_values('Ladder score', ascending=False)['Regional indicator'])
ax.set_title('Ladder score average by region', fontsize=17);

# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="TWHi5_1z2Dbt" outputId="7951bdc1-40eb-4b79-9cbf-29a37a352353"
# Top 10 countries by social support index
df[['Country name', 'Social support']].sort_values(by='Social support', ascending=False).head(10)

# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="eud_1Irs26LL" outputId="b3ac8cbd-5b26-40f9-83c2-6323b0928aa0"
# Countries with the lowest social support index
# We could have used tail instead of head with ascending=False, but the order would be backwards
df[['Country name', 'Social support']].sort_values(by='Social support', ascending=True).head(10)

# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="KU04s_J33q0b" outputId="ae41a448-08de-4998-b2d2-35864203c8aa"
# Top 10 countries using the generosity index
df[['Country name', 'Generosity']].sort_values(by='Generosity', ascending=False).head(10)

# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="8wQ-_DTH4CCR" outputId="d3174e30-9beb-4ae5-f22d-f50782178be1"
# Generosity ranking
ranking = df[['Country name', 'Generosity']].sort_values(by='Generosity').reset_index(drop=True)
ranking

# + colab={"base_uri": "https://localhost:8080/", "height": 81} id="iBDKouTV4Tya" outputId="11b125e6-a755-41c8-94ac-1f73971f5ae9"
# Finding Brazil in the generosity ranking
ranking[ranking['Country name'] == 'Brazil']

# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="soIQCVBA4euS" outputId="932d3da6-6294-4faf-ebef-d2d1db47d4fd"
# Countries in LATAM and Caribe
latam_caribe = df[df['Regional indicator'] == 'Latin America and Caribbean']
latam_caribe.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 517} id="nykcVdJD46c5" outputId="31eb82c5-c67f-4c25-ea86-8b87125b16c1"
# Plotting bar chart for LATAM and Caribe only, for the ladder score
plt.figure(figsize = (12, 8))
ax = sns.barplot(x = 'Ladder score', y = 'Country name', data=latam_caribe, orient='h',
                 color='blueviolet', order=latam_caribe.sort_values('Ladder score', ascending=False)['Country name'])
ax.set_title('Ladder score by country name (LATAM and Caribe only)', fontsize=17);

# + colab={"base_uri": "https://localhost:8080/", "height": 517} id="I37Qi6Nd6rcD" outputId="f1a07a0c-d2ed-49bb-f15c-c07f4d04a108"
# Number of countries per region
plt.figure(figsize=(15,8))
ax = sns.countplot(y = 'Regional indicator', data = df, color = 'blueviolet',
                   order = df['Regional indicator'].value_counts().index)
ax.set_title('Countries by region', fontsize=17);

# + colab={"base_uri": "https://localhost:8080/", "height": 503} id="4wa8cfxY79Ab" outputId="779e1543-40b6-4356-c14b-4b8b786e7e73"
# Boxplotting all happiness metrics by region
# NOTE(review): the chart title below contains a typo ('Hapiness'); left as-is
# here since it is a runtime string.
happiness_metrics = ['Social support', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption']
plt.figure(figsize = (12, 8))
ax = sns.boxplot(data = df.loc[:, happiness_metrics], orient = 'v', color = 'blueviolet')
ax.set_title('Hapiness metrics minimum and maximum values (and quartiles) compared', fontsize = 17);

# + colab={"base_uri": "https://localhost:8080/", "height": 747} id="x13Y5CY59OvY" outputId="8854685e-e7ab-42f5-e482-889db86d7104"
# Happiness metrics correlations using a heatmap
# NOTE(review): df still holds the string columns 'Country name' and
# 'Regional indicator'; df.corr() skips them on pandas<2 but raises on
# pandas>=2 (numeric_only default changed) -- TODO confirm pandas version.
plt.figure(figsize = (15, 10))
ax = sns.heatmap(df.corr(), annot=True)
ax.set_title('Happiness metrics correlations', fontsize = 17);

# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="wt9rf80I-B9Y" outputId="950218d6-5ceb-41b9-9fae-27256e01de09"
# Choropleth world map colored by ladder score.
fig = px.choropleth(df,
                    locations='Country name',
                    locationmode='country names',
                    color='Ladder score',
                    title='Happiness across the World')
fig.show()
world_happiness_report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Lane Boundary Segmentation

# ## Setting up Colab

# You can delete this "Setting up Colab" section if you work locally and do not want to use Google Colab

# True when running inside Google Colab (get_ipython() is provided by the
# notebook kernel, so this file only runs inside a notebook environment).
colab_nb = 'google.colab' in str(get_ipython())

if colab_nb:
    from google.colab import drive
    drive.mount('/content/drive')

if colab_nb:
    # %cd drive/My\ Drive/aad/code/exercises/lane_detection

if colab_nb:
    # !pip install segmentation-models-pytorch
    # !pip install albumentations --upgrade

# ## Loading data

# +
import os
# Restrict training to the first GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import numpy as np
import cv2
import matplotlib.pyplot as plt
import re
import sys
# Make the shared utility modules (seg_data_util) importable.
sys.path.append("../../util")
# -

# If you have collected data yourself in a folder "data" using `collect_data.py` and you want to use it for training, set the boolean in the next cell to `True`

own_data = False

if own_data:
    from seg_data_util import sort_collected_data
    # copy and sort content of 'data' into 'data_lane_segmentation' folder:
    sort_collected_data()
    # Since data was copied, you can remove files in 'data' directory afterwards
else:
    # if you stopped the download before completion, please delete the 'data_lane_segmentation' folder and run this cell again
    from seg_data_util import download_segmentation_data
    download_segmentation_data()

# Independent of what you chose, you will have a directory 'data_lane_segmentation' now

# +
# Train/validation image and label directories inside the downloaded dataset.
DATA_DIR = "data_lane_segmentation"

x_train_dir = os.path.join(DATA_DIR, 'train')
y_train_dir = os.path.join(DATA_DIR, 'train_label')

x_valid_dir = os.path.join(DATA_DIR, 'val')
y_valid_dir = os.path.join(DATA_DIR, 'val_label')
# -

# Note that the labels are regular png images with 3 color channels. The content of those color channels is identical, so when you load the png you should just load the first color channel.

# Your code starts here: Train a deep learning segmentation model and evaluate its dice loss on the validation set. You should aim for a dice loss of 0.2 or less!
code/exercises/lane_detection/lane_segmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Inductive Causation Algorithm
#
# The inductive causation (IC) algorithm is a method of surfacing possible causal relationships in observational data (Verma and Pearl, 1990). In Pearl's framework, causal relationships between variables are expressed in terms of a _structural causal model_, which is a directed acyclic graph (DAG). The output of the IC algorithm is, in general, only a partially directed graph, because observational data may be compatible with more than one DAG. The undirected edges in the output graph represent ambiguity with respect to the direction of causality between two variables. More information is needed in this case to surface the true causal relationship between all variables (for example, human reasoning or further experiments).
#
# The family of DAGs that is compatible with a set of observational data are referred to as _observationally equivalent_ or as the _equivalence class_. Thus, the output of the IC algorithm is the _equivalence class_.
#
# The IC algorithm leverages Pearl's concept of _d-separation_, short for "directional separation". As far as I can see, d-separation means that two variables are not adjacent on the causal graph (the graphical intuition), which is equivalent to saying that they are conditionally independent on some set of variables (including the empty set).
#
# The IC algorithm takes as input the joint distribution of the variables and has three steps.
#
# 1. The first step creates the so-called _skeleton_, which is the undirected graph, by checking for d-separation. Observational data allows for the construction of the skeleton because conditional independence can be investigated using the joint distribution: if $U$ is the set of _all_ relevant variables of the system, and there is no (possibly empty) subset of variables $S\subset U\setminus\{X,Y\}$ so that $P(X,Y|S) = P(X|S)P(Y|S)$, then $X$ and $Y$ are causally connected (not d-separated; are adjacent on the causal graph). Put more simply: if there is no way to make the dependence between $X$ and $Y$ disappear by correcting with some subset of the other system variables, then the algorithm assumes that $X$ and $Y$ have some connection, and draws in an edge: $X-Y$. (An extension of the IC algorithm, IC*, is necessary if $U$ does not contain all relevant variables, i.e. there are latent variables).
#
# 2. The second step orients some of the edges of the skeleton by identifying a special type of subgraph called _collider_, which looks like $X\rightarrow Z \leftarrow Y$ ($X$ and $Y$ are conditionally independent, while $Z$ is dependent on both of them). Colliders describe a situation in which two variables $X$ and $Y$ are conditionally independent from each other (i.e., they are non-adjacent on the causal graph), but they have a shared neighbor $Z$ which is _not_ among the variables making them conditionally independent. The collider is special because it is the only causal graph that is compatible with $X$ and $Y$ having a connection through $Z$ without introducing dependence between them. In all the other possible causal graphs that connect the three variables, $Z$ acts either as a mediator ($X\rightarrow Z \rightarrow Y$, $X\leftarrow Z \leftarrow Y$) or a confounder ($X\leftarrow Z \rightarrow Y$), which would cause $X$ and $Y$ to show up as dependent variables in the observational data. Colliders give rise to a so-called _v-structure_ in the graph.
#
# 3. The third step orients some more edges in the graph by the principle of exclusion: if any alternative orientation of an edge would result in a new v-structure or would cause the causal graph to become cyclic, then the remaining orientation must be the right one. A new v-structure would imply a collider, and those should have already been spotted in step 2., and a cyclic graph does not make sense as a causal model.
#
# Good Papers:
# - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4078745/
# - https://ftp.cs.ucla.edu/pub/stat_ser/r509.pdf

# ## Toy Data
#
# Consider some fictitious system in which there is rainy weather $r\in\{0,1\}$, temperature $t\in\mathcal{R}^+$, ice cream sales $s\in\mathcal{Z}$, crime $c\in\mathcal{Z}$ and police $p\in\mathcal{Z}$.
#
# Rain and low temperatures discourage crime and ice cream sales. Crime and ice cream sales have no causal relationship. Rain causes lower temperatures. Crime causes more police in the street.
#
# The pairplot matrix shows that in this setup, crime and ice cream sales are correlated even though they do not have a causal relationship.
#
# Now, I am using the `causality` library as an implementation of the IC* algorithm, which is different from the IC algorithm in that it allows for the existence of latent variables. Now, I find that even with the causal relationship with the variables fixed, the graph that is discovered by this implementation of the algorithm will completely change as a function of the parameters. This is discouraging. My suspicions are:
#
# - the conditional independence test is not working. The current version uses a robust linear regression, i.e. a parametric model that will also have trouble with multicollinearity.
# - the difference is in the IC vs IC* nature of the algorithm, and the node between "sales" and "crime" is meant to show up as a spurious node, and I ought to do more reading :) # + import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import networkx as nx N = 1000 RAIN_RATE = 118/365 # nyc precip. days TEMP_LOW = 18 # nyc average June low TEMP_HIGH = 27 # nyc average June high TEMP_CHANGE_RAIN = -5 AV_SALES_RATE = 100000 SALES_CHANGE_TEMP = 100 SALES_CHANGE_RAIN = 150 AV_CRIME_RATE = 1205 # nyc robberies May 2020 CRIME_CHANGE_TEMP = 10 CRIME_CHANGE_RAIN = 20 # rain r = np.random.binomial(1,RAIN_RATE,N) # temperature t_change = np.random.normal(loc=TEMP_CHANGE_RAIN,size=N)*r t = np.random.uniform(TEMP_LOW,TEMP_HIGH,size=N) + t_change # sales s_rates = np.abs(AV_SALES_RATE+SALES_CHANGE_TEMP*t+SALES_CHANGE_RAIN*r) s = np.array([np.random.poisson(lam=s_rate,size=1) for s_rate in s_rates]).reshape(*t.shape) # crime c_rates = np.abs(AV_CRIME_RATE+CRIME_CHANGE_TEMP*t+CRIME_CHANGE_RAIN*r) c = np.array([np.random.poisson(lam=c_rate,size=1) for c_rate in c_rates]).reshape(*t.shape) # police p = np.round(c/100) + np.random.poisson(lam=3,size=N) # combine data = pd.DataFrame(np.vstack([r,t,s,c,p]).T,columns='rain,temp,sales,crime,police'.split(',')) sns.pairplot(data) # + import numpy import pandas as pd from causality.inference.search import IC from causality.inference.independence_tests import RobustRegressionTest, MutualInformationTest variable_types = {'rain' : 'd', 'temp' : 'c', 'sales' : 'd', 'crime' : 'd', 'police': 'd'} ic_algorithm = IC(RobustRegressionTest) graph = ic_algorithm.search(data, variable_types) pos = nx.spring_layout(graph) nx.draw(graph,pos,with_labels=True) # - # The toy example provided by the author of the library, however, works -- including when some of the variables are latent variables: # + import numpy import pandas as pd from causality.inference.search import IC from 
causality.inference.independence_tests import RobustRegressionTest # generate some toy data: SIZE = 2000 x1 = numpy.random.normal(size=SIZE) x2 = x1 + numpy.random.normal(size=SIZE) x3 = x1 + numpy.random.normal(size=SIZE) x4 = x2 + x3 + numpy.random.normal(size=SIZE) x5 = x4 + numpy.random.normal(size=SIZE) # load the data into a dataframe: X = pd.DataFrame({'x1' : x1, 'x2' : x2, 'x3' : x3, 'x4' : x4, 'x5' : x5}) # define the variable types: 'c' is 'continuous'. The variables defined here # are the ones the search is performed over -- NOT all the variables defined # in the data frame. variable_types = {'x1' : 'c', 'x2' : 'c', 'x3' : 'c', 'x4' : 'c', 'x5' : 'c'} # run the search ic_algorithm = IC(RobustRegressionTest) graph = ic_algorithm.search(X, variable_types) nx.draw(graph,with_labels=True) # - sns.pairplot(X) # + import numpy import pandas as pd from causality.inference.search import IC from causality.inference.independence_tests import RobustRegressionTest # generate some toy data: SIZE = 2000 x1 = numpy.random.normal(size=SIZE) x2 = x1 + numpy.random.normal(size=SIZE) x3 = x1 + numpy.random.normal(size=SIZE) x4 = x2 + x3 + numpy.random.normal(size=SIZE) x5 = x4 + numpy.random.normal(size=SIZE) # load the data into a dataframe: X = pd.DataFrame({'x1' : x1, 'x2' : x2, 'x3' : x3, #'x4' : x4, #'x5' : x5 }) # define the variable types: 'c' is 'continuous'. The variables defined here # are the ones the search is performed over -- NOT all the variables defined # in the data frame. variable_types = {'x1' : 'c', 'x2' : 'c', 'x3' : 'c', #'x4' : 'c', #'x5' : 'c' } # run the search ic_algorithm = IC(RobustRegressionTest) graph = ic_algorithm.search(X, variable_types) nx.draw(graph,with_labels=True) # -
Causality - Inductive Causation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import lightgbm as lgb import pickle from time import time import matplotlib.pyplot as plt # %matplotlib inline # + with open('data.pkl', 'rb') as f: df_train, labels, df_test, test_orders = pickle.load(f) print(df_train.shape) print(labels.shape) print(df_test.shape) # - df_train.head() print(labels[:5]) df_test.head() # + f_to_use = ['user_total_orders', 'user_total_items', 'total_distinct_items', 'user_average_days_between_orders', 'user_average_basket', 'order_hour_of_day', 'days_since_prior_order', 'days_since_ratio', 'aisle_id', 'department_id', 'product_orders', 'product_reorders', 'product_reorder_rate', 'UP_orders', 'UP_orders_ratio', 'UP_average_pos_in_cart', 'UP_reorder_rate', 'UP_orders_since_last', 'UP_delta_hour_vs_last'] print('formating for lgb') d_train = lgb.Dataset(df_train[f_to_use], label=labels, categorical_feature=['aisle_id', 'department_id']) train_cv = df_train[f_to_use] # train_cv = df_train[f_to_use].as_matrix() # labels = np.array(labels) del df_train # - print(len(train_cv)) print(len(labels)) '''from sklearn.model_selection import RandomizedSearchCV import scipy.stats as st clf = lgb.LGBMClassifier( task='train', boosting_type='gbdt', objective='binary', metric='binary_logloss', num_threads=4) param_dist = { 'num_leaves': st.randint(2, 31), 'max_depth': st.randint(-1, 10), 'min_data_in_leaf': st.randint(1, 20), 'min_sum_hessian_in_leaf': st.uniform(1e-3, 1), 'max_bin': st.randint(10, 255) } n_iter_search = 20 random_search = RandomizedSearchCV( clf, param_distributions=param_dist, n_iter=n_iter_search, n_jobs=-1) start = time() random_search.fit(train_cv, labels) print("RandomizedSearchCV took %.2f seconds for %d candidates" " parameter settings." 
% ((time() - start), n_iter_search)) report(random_search.cv_results_)''' # + params = { 'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': {'binary_logloss'}, 'num_leaves': 96, 'max_depth': 10, 'feature_fraction': 0.9, 'bagging_fraction': 0.95, 'bagging_freq': 5 } ROUNDS = 100 print('light GBM train :-)') t0 = time() bst = lgb.train(params, d_train, ROUNDS) tt = time() - t0 print("Training complete in {} seconds".format(round(tt, 1))) del d_train # - lgb.plot_importance(bst, figsize=(9,20)) # + print('light GBM predict') t0 = time() preds = bst.predict(df_test[f_to_use]) tt = time() - t0 print("Prediction complete in {} seconds".format(round(tt, 1))) df_test['pred'] = preds TRESHOLD = 0.22 # guess, should be tuned with crossval on a subset of train data # - print(preds[:5]) df_test.head() # + # Submission d = dict() for row in df_test.itertuples(): # Append all products with predictions greater than the threshold if row.pred > TRESHOLD: try: d[row.order_id] += ' ' + str(row.product_id) except: d[row.order_id] = str(row.product_id) # Append an explicit 'None' value for those orders not found in dict for order in test_orders.order_id: if order not in d: d[order] = 'None' sub = pd.DataFrame.from_dict(d, orient='index') sub.reset_index(inplace=True) sub.columns = ['order_id', 'products'] sub.to_csv('sub.csv', index=False) # - sub.head()
model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# #%cd -q ..

import graphviz

graphviz.__version__, graphviz.version()
# -

def make_graph(node_label=None, graph_name=None, **node_kwargs):
    """Create a single-node Digraph, print its DOT source, and return it.

    Used by the cells below to demonstrate how graphviz quotes/escapes
    *node_label* (and any extra node attributes) in the generated DOT.
    """
    dot = graphviz.Digraph(name=graph_name)
    dot.node('A', label=node_label, **node_kwargs)
    # Echo the DOT source so the escaping behaviour is visible in the cell output.
    print(dot)
    return dot

make_graph(node_label='"')

make_graph(node_label=r'\\')

make_graph(node_label=r'\\"')

# https://www.graphviz.org/doc/info/attrs.html#k:escString

make_graph(node_label=r'node: \N')

make_graph(node_label=r'graph: \G', graph_name='spam')

make_graph(node_label='spam', URL=r'https://example.org/\L')

make_graph(node_label=r'centered\nleft\lright\r')
examples/graphviz-escapes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h2 align="center">Automatic Machine Learning with H2O AutoML</h2> # ### Task 2: Importing Packages import pandas as pd pd.options.display.max_rows = 999 import numpy as np import matplotlib.pyplot as plt # # ### Task 3: Loading and Exploring the Data xls = pd.ExcelFile('data/bank_term_deposit_marketing_analysis.xlsx') xls.sheet_names client_info = pd.read_excel(xls, 'CLIENT_INFO') loan_history = pd.read_excel(xls, 'LOAN_HISTORY') marketing_history = pd.read_excel(xls, 'MARKETING HISTORY') subscription_history = pd.read_excel(xls, 'SUBSCRIPTION HISTORY') client_info.head() loan_history.head() marketing_history.head() subscription_history.head() df = pd.merge(client_info, loan_history, on=['ID']) df = pd.merge(df, marketing_history, on=['ID']) df = pd.merge(df, subscription_history, on=['ID']) df.head() df = df.drop(['ID'], axis=1) # # ### Task 4: Data Prep & Start H2O import h2o h2o.init() h2o_df = h2o.H2OFrame(df) h2o_df.describe() # + train, test = h2o_df.split_frame(ratios=[.75]) # Identify predictors and response x = train.columns y = "TERM_DEPOSIT" x.remove(y) # - # # ### Task 5: Run H2O AutoML from h2o.automl import H2OAutoML aml = H2OAutoML(max_runtime_secs=600, #exclude_algos=['DeepLearning'], seed=1, #stopping_metric='logloss', #sort_metric='logloss', balance_classes=False, project_name='Completed' ) # %time aml.train(x=x, y=y, training_frame=train) # # ### Task 6: AutoML Leaderboard and Ensemble Exploration # View the AutoML Leaderboard lb = aml.leaderboard lb.head(rows=lb.nrows) # Print all rows instead of default (10 rows) # + # Get the "All Models" Stacked Ensemble model se = aml.leader # Get the Stacked Ensemble metalearner model metalearner = h2o.get_model(se.metalearner()['name']) # - metalearner.varimp() # # ### Task 7: 
Baselearner Model Exploration model = h2o.get_model('XGBoost_grid__1_AutoML_20200608_075205_model_2') model.model_performance(test) model.varimp_plot(num_of_features=20) model.partial_plot(train, cols=["DURATION"], figsize=(5,5)); # save the model model_path = h2o.save_model(model=model, force=True) print(model_path) # # ### (Optional) Correlation Analysis copy = df.copy() copy.head() bin_labels = ['bin1', 'bin2', 'bin3', 'bin4'] copy[['AGE','BALANCE','DAY','DURATION']] = copy[['AGE','BALANCE','DAY','DURATION']].transform(lambda x: pd.qcut(x, q=4, labels=bin_labels)) copy['CAMPAIGN'] = pd.qcut(copy.CAMPAIGN, q=4, duplicates='drop', labels=bin_labels[:3]) copy['PDAYS'] = pd.qcut(copy.PDAYS, q=4, duplicates='drop', labels=bin_labels[:1]) copy['PREVIOUS'] = pd.qcut(copy.PREVIOUS, q=4, duplicates='drop', labels=bin_labels[:1]) copy.head() copy_oh = pd.get_dummies(copy) copy_oh.head() copy_oh.corr()['TERM_DEPOSIT_yes'].sort_values(ascending=False)[1:] results, bin_edges = pd.qcut(df.DURATION, q=4, labels=bin_labels, retbins=True) bin_edges # + #Strategy: Reach out to customers whose last contact duration was >= 319 or POUTCOME = success
h2o-automl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Process DATE Drug-Gene relations Data
#
# Jupyter Notebook to download and preprocess files to transform to BioLink RDF.
#
# ### Download files
#
# The download can be defined:
# * in this Jupyter Notebook using Python
# * as a Bash script in the `download/download.sh` file, and executed using `d2s download date`
#
#

# +
import os

# Variables and path for the dataset
dataset_id = 'date'
input_folder = '/notebooks/workspace/input/' + dataset_id
mapping_folder = '/notebooks/datasets/' + dataset_id + '/mapping'
os.makedirs(input_folder, exist_ok=True)

# +
# Use input folder as working folder
os.chdir(input_folder)

files_to_download = [
    'http://tatonettilab.org/resources/DATE/date_resource.zip'
]

# Download each file and uncompress them if needed
# Use Bash because faster and more reliable than Python for download
for download_url in files_to_download:
    os.system('wget -N ' + download_url)
    # Fix: use raw strings — '\;' and '\*' are invalid escape sequences in
    # ordinary string literals (SyntaxWarning on recent CPython, slated to
    # become an error); the shell must receive the backslash verbatim.
    os.system(r'find . -name "*.tar.gz" -exec tar -xzvf {} \;')
    os.system(r'unzip -o \*.zip')
# -

# ## Process and load concepts
#
# We will use CWL workflows to integrate data with SPARQL queries. The structured data is first converted to a generic RDF based on the data structure, then mapped to BioLink using SPARQL. The SPARQL queries are defined in `.rq` files and can be [accessed on GitHub](https://github.com/MaastrichtU-IDS/d2s-project-template/tree/master/datasets/date/mapping). 
# # Start the required services (here on our server, defined by the `-d trek` arg): # # ```bash # d2s start tmp-virtuoso drill -d trek # ``` # # Run the following d2s command in the d2s-project folder: # # ```bash # d2s run csv-virtuoso.cwl date # ``` # # [HCLS metadata](https://www.w3.org/TR/hcls-dataset/) can be computed for the date graph: # # ```bash # d2s run compute-hcls-metadata.cwl date # ``` # # ## Load the BioLink model # # Load the [BioLink model ontology as Turtle](https://github.com/biolink/biolink-model/blob/master/biolink-model.ttl) in the graph `https://w3id.org/biolink/biolink-model` in the triplestore #
archived-datasets/date/process-date.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import scanpy as sc import numpy as np import scipy as sp import pandas as pd import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib import colors import seaborn as sb from gprofiler import GProfiler import rpy2.rinterface_lib.callbacks import logging from rpy2.robjects import pandas2ri import anndata2ri # + # Ignore R warning messages #Note: this can be commented out to get more verbose R output rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Automatically convert rpy2 outputs to pandas dataframes pandas2ri.activate() anndata2ri.activate() # %load_ext rpy2.ipython plt.rcParams['figure.figsize']=(8,8) #rescale figures sc.settings.verbosity = 3 #sc.set_figure_params(dpi=200, dpi_save=300) sc.logging.print_versions() # + language="R" # # Load libraries from correct lib Paths for my environment - ignore this! # .libPaths(.libPaths()[c(3,2,1)]) # # # Load all the R libraries we will be using in the notebook # library(scran) # library(RColorBrewer) # library(slingshot) # library(monocle) # library(gam) # library(clusterExperiment) # library(ggplot2) # library(plyr) # library(MAST) # - # Single-cell RNAseq data was collected on whole early mouse embryos. Each experiment consists of 4-10 replicates of developmental stages 6.5, 7.0, 7.5, 8.0, 8.5 for wild-type sequenced with 10X Genomics single-cell RNA sequencing system. 
# + # Set up data loading #Data files sample_strings = ['E8.0_1ab', 'E8.5_1ab'] file_base = '../data/mammalian-embryogenesis/GSE122187_WT_' data_file_end = '_matrix.mtx' barcode_file_end = '_barcodes.tsv' gene_file_end = '_genes.tsv' #cc_genes_file = '../Macosko_cell_cycle_genes.txt' # + # First data set load & annotation #Parse Filenames sample = sample_strings.pop(0) data_file = file_base+sample+data_file_end barcode_file = file_base+sample+barcode_file_end gene_file = file_base+sample+gene_file_end #Load data adata = sc.read(data_file, cache=True) adata = adata.transpose() adata.X = adata.X.toarray() barcodes = pd.read_csv(barcode_file, header=None, sep='\t') genes = pd.read_csv(gene_file, header=None, sep='\t') #Annotate data barcodes.rename(columns={0:'barcode'}, inplace=True) barcodes.set_index('barcode', inplace=True) adata.obs = barcodes adata.obs['sample'] = [sample]*adata.n_obs adata.obs['stage'] = [sample.split("_")[0]]*adata.n_obs adata.obs['exp_number'] = [sample.split("_")[1]]*adata.n_obs genes.rename(columns={0:'gene_id', 1:'gene_symbol'}, inplace=True) genes.set_index('gene_symbol', inplace=True) adata.var = genes print(adata) # - print(adata.obs['sample']) # # Loop to load rest of data sets # for i in range(len(sample_strings)): # #Parse Filenames # sample = sample_strings[i] # data_file = file_base+sample+data_file_end # barcode_file = file_base+sample+barcode_file_end # gene_file = file_base+sample+gene_file_end # # #Load data # adata_tmp = sc.read(data_file, cache=True) # adata_tmp = adata_tmp.transpose() # adata_tmp.X = adata_tmp.X.toarray() # # barcodes_tmp = pd.read_csv(barcode_file, header=None, sep='\t') # genes_tmp = pd.read_csv(gene_file, header=None, sep='\t') # # #Annotate data # barcodes_tmp.rename(columns={0:'barcode'}, inplace=True) # barcodes_tmp.set_index('barcode', inplace=True) # adata_tmp.obs = barcodes_tmp # adata_tmp.obs['sample'] = [sample]*adata_tmp.n_obs # adata_tmp.obs['stage'] = [sample.split("_")[0]]*adata_tmp.n_obs # 
adata_tmp.obs['exp_number'] = [sample.split("_")[1]]*adata_tmp.n_obs # # genes_tmp.rename(columns={0:'gene_id', 1:'gene_symbol'}, inplace=True) # genes_tmp.set_index('gene_symbol', inplace=True) # adata_tmp.var = genes_tmp # adata_tmp.var_names_make_unique() # # # Concatenate to main adata object # adata = adata.concatenate(adata_tmp, batch_key='sample') # #adata.obs.drop(columns=['sample'], inplace=True) # adata.obs_names = [c.split("-")[0] for c in adata.obs_names] # adata.obs_names_make_unique(join='_') # # # print(adata) # # Once the data is read in and annotated, we test whether we have the expected number of cells for each region, mouse (donor), and sample. # Annotate the data sets print(adata.obs) print(adata.obs['stage'].value_counts()) print('') print(adata.obs['exp_number'].value_counts()) print('') #print(adata.obs['sample'].value_counts()) # Checking the total size of the data set adata.shape # + # Quality control - calculate QC covariates adata.obs['n_counts'] = adata.X.sum(1) adata.obs['log_counts'] = np.log(adata.obs['n_counts']) adata.obs['n_genes'] = (adata.X > 0).sum(1) mt_gene_mask = [gene.startswith('mt-') for gene in adata.var_names] adata.obs['mt_frac'] = adata.X[:, mt_gene_mask].sum(1)/adata.obs['n_counts'] # - # Quality control - plot QC metrics #Sample quality plots t1 = sc.pl.violin(adata, 'n_counts', groupby='stage', size=2, log=True, cut=0) t2 = sc.pl.violin(adata, 'mt_frac', groupby='stage') # Sample size for stage E6.5 is much smaller than the others. #Data quality summary plots p1 = sc.pl.scatter(adata, 'n_counts', 'n_genes', color='mt_frac') p2 = sc.pl.scatter(adata[adata.obs['n_counts']<10000], 'n_counts', 'n_genes', color='mt_frac') # + #Thresholding decision: counts p3 = sb.distplot(adata.obs['n_counts'], kde=False) plt.show() p4 = sb.distplot(adata.obs['n_counts'][adata.obs['n_counts']<9000], kde=False, bins=60) plt.show() p5 = sb.distplot(adata.obs['n_counts'][adata.obs['n_counts']>20000], kde=False, bins=60) plt.show() # -
Studying-Mammalian-embryogenesis-III.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('../python_packages_static') import numpy as np import pandas as pd import matplotlib.pyplot as plt import fiona from shapely.geometry import shape from gisutils import project from pydrograph import Nwis import geopandas as gp import requests # # Notebook to extract NWIS data from model area and make obs tables # # ## 1. Get DV surface water obs from USGS StreamStats: https://streamstats.usgs.gov/ss/ # ### 1.1 Get streamflow daily values sites # **Create model bounding box and retrieve NWIS streamflow DV sites** # # uses the excellent `pydrogrpah`: https://github.com/aleaf/pydrograph extent_shp = '../source_data/Shapefiles/Extents/Model_Extent_HUC12.shp' epsg = 5070 extent_poly = shape(fiona.open(extent_shp).next()['geometry']) extent_poly_ll = project(extent_poly, "+init=epsg:{}".format(epsg), "+init=epsg:4269") extent_poly_ll.bounds bound = gp.read_file(extent_shp) nwis = Nwis(extent=extent_poly_ll) # **Get streamflow daily values sites using `pydrograph`** all_dvs = nwis.get_siteinfo('daily_values') all_dvs # + bound = bound.to_crs(epsg=4269) fig, ax = plt.subplots() bound.plot(ax=ax, facecolor='None', edgecolor='black') ax.scatter(all_dvs.dec_long_va, all_dvs.dec_lat_va) plt.show() # - # ### Only SANDBURG CREEK AT ELLENVILLE NY (01366650) and NEVERSINK RIVER AT WOODBOURNE NY (01436500) can be used for flux targets # * NEVERSINK RIVER AT NEVERSINK NY at edge of model, used for SFR inflow (see 0.7_make_SFR_inflow.ipynb) # * Data collection at GUMAER BROOK NEAR WURTSBORO NY only started on 2019-12-11, too little data avialable for annual average flow/BFI # ### 1.2 Extract Mean annual flow and BFI data from Streamstats: https://streamstats.usgs.gov/ss/ # **gage pages:** # * SANDBURG CREEK AT ELLENVILLE NY (01366650): 
https://streamstatsags.cr.usgs.gov/gagepages/html/01366650.htm # * NEVERSINK RIVER AT WOODBOURNE NY (01436500): https://streamstatsags.cr.usgs.gov/gagepages/html/01436500.htm # + gages = ['01366650', '01436500'] sites_dict = {} # + # read from gage streamstats pages for gage in gages: site_dict = {} url = f'https://streamstatsags.cr.usgs.gov/gagepages/html/{gage}.htm' info = ['Mean_Annual_Flow', 'Average_BFI_value', 'Latitude (degrees NAD83)', 'Longitude (degrees NAD83)'] r = requests.get(url, stream=True) lines = [l.decode('cp1252') for l in r.iter_lines()] line_nums = [] for i, line in enumerate(lines): for t in info: if t in line: line_nums.append(i) line_nums.append(i+1) for prop, val in zip(line_nums[::2], line_nums[1::2]): p = lines[prop].replace('<td>','').replace('</td>','').replace("<tr class='even'><td width='197'>",'').replace("<tr><td width='200'>",'') v = float(lines[val].replace('<td>','').replace('</td>','').replace("<td width='590'>",'').replace('</tr>','')) print(f' prop: {p}') print(f' val: {v}') site_dict[p] = v print('\n') sites_dict[gage] = site_dict # - df = pd.DataFrame.from_dict(sites_dict).T.reset_index(drop=False).rename(columns={'index':'site_id', 'Mean_Annual_Flow':'Mean_Annual_Flow_cfs'}) df # write out to processed data df.to_csv('../processed_data/NWIS_DV_STREAMSTATS_INFO.csv', index=False) # ## 2. Get groundwater daily values gw_dv = nwis.get_siteinfo('gwdv') gw_dv # ### Pull groundwater data for the lone GW DVs site in the model domain # uses `pydrograph`: https://github.com/aleaf/pydrograph # + gw_site = gw_dv.iloc[0]['site_no'] # pydrograh gw dv data retrieval not working at the momnet. 
do it manually for now -- see below: gw_data = nwis.get_all_dvs([gw_site], parameter_code='72019', start_date='2009-01-01', end_date='2016-01-01' ) # - gw_df = gw_data[gw_site] gw_df = gw_df.groupby('site_no').mean() gw_df = gw_df.rename(columns={'106190_72019_00003':'ave_dtw_ft'}) gw_df = gw_df.join(gw_dv[['alt_va']]) gw_df['gw_elev_ft'] = gw_df['alt_va'] - gw_df['ave_dtw_ft'] gw_df['gw_elev_m'] = gw_df['gw_elev_ft'] * 0.3048 gw_df.to_csv('../processed_data/NWIS_GW_DV_data.csv') gw_df # ### export NWIS gw sites table for obs section of `neversink_full.yml` gw_dv_gdf = gp.GeoDataFrame(gw_dv, crs="EPSG:4269", geometry=gw_dv.geometry) gw_dv_gdf_reproj = gw_dv_gdf.to_crs(epsg=epsg) gw_dv_gdf_reproj['x'] = gw_dv_gdf_reproj.geometry.x gw_dv_gdf_reproj['y'] = gw_dv_gdf_reproj.geometry.y gw_dv_gdf_reproj['obsprefix'] = gw_dv_gdf_reproj.index gw_dv_gdf_reproj gw_dv_gdf_reproj.to_csv('../processed_data/NWIS_GW_DV_sites.csv', index=False)
notebooks_preprocessing_blank/1.0_get_NWIS_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="a2WR-N-qWBll" colab_type="text" # # Introduction # + [markdown] id="E_j97k57ahA4" colab_type="text" # **Overview** # # The assessment is to apply a Naive Bayes Classifier to predict whether an email can be considered as a spam or not. # # **Metric of Success** # # The measure of success will lay in the ability of the model to predict whether an email can be considered as a spam or not. # # **Dataset** # # The dataset used here can be found here: [link text](https://archive.ics.uci.edu/ml/datasets/Spambase) # + id="wQ5XtTOFeKlX" colab_type="code" colab={} # import libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline # + id="6DJiNewEoq_z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="3c93763f-e0d1-4216-9ab4-d53a9f3d62d5" spam = pd.read_csv("spambase.csv") spam.head() # + id="KVHxbqycp1JT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="076bd488-3341-4f40-efa1-ae0d04c61756" # check info # --- # spam.info() # + id="f_3rnFODp483" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="fe7009ca-dba4-407b-dfb8-8de2e371156b" # check for null # --- # spam.isnull().sum() # + id="FRd5E36EqLKz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="707c4e89-908d-4f8b-96dd-8ad879240e96" # check for duplicates # --- # print(spam.duplicated().any()) print(spam.duplicated().sum()) # + id="xZ-3aEX5rLWQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="7cb1b0f7-0c02-4da2-b7f4-71c53c7b2189" # dropping duplicates # --- # spam.drop_duplicates(inplace=True) print(spam.duplicated().any()) 
print(spam.duplicated().sum()) # + id="IvbgRF-jrAzZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="ef724e9a-281b-4951-9211-4fa510a5c0fc" # check for outliers # --- # Q1 = spam.quantile(q=0.25) Q3 = spam.quantile(q=0.75) IQR = Q3 - Q1 print(IQR) # + id="t1BNQitDsD7W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 439} outputId="abc94631-4b5d-46b8-b94d-e7070924d028" # removing the outliers # --- # spam[~((spam < (Q1 - 1.5 * IQR)) |(spam > (Q3 + 1.5 * IQR))).any(axis=1)] # + id="05saGwInsYVd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="163596fa-ff87-4d17-c9d8-70577fe13451" # statistical description # --- # spam.describe() # + id="rMExJeIosfDi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5502f7fc-ce7f-45c6-fd44-180f84ccfdee" # shape # --- # spam.shape # + id="Qi-DLESksfSd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="838f85cd-d38d-45b4-9aaf-5060b49326b1" # size # --- # spam.size # + [markdown] id="5HHcEugisoKD" colab_type="text" # # Exploratory Data Analysis # + id="BuDUd2iCKSAU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="1741842d-55b0-4c45-8a18-a2df36508335" # Plotting the target variable # --- # ax = sns.countplot(x="class",data=spam) ax.set_title('Spam vs Non_Spam Emails', fontsize=18) # + id="DtO91YAssfXW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="94747839-63ae-4117-f20e-032c1f85804a" # correlation corr = spam.corr() corr # + id="N5sOMMUULeSD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="d8621220-d43d-4443-bd78-9fef88ae29ac" spam.head() # + [markdown] id="W4akoqVYLFo0" colab_type="text" # # Classifiers # # + id="CC38Il7sLFRL" colab_type="code" colab={} # Performing the Naive Bayes # --- # from sklearn.naive_bayes import 
GaussianNB, MultinomialNB, BernoulliNB from sklearn.metrics import confusion_matrix, classification_report # + id="7E43q0DHLX3I" colab_type="code" colab={} # Getting my features and labels # --- # X = spam.iloc[:,:-1].values y = spam.iloc[:,-1].values # + id="k1i_fZ06Lk6G" colab_type="code" colab={} # Split into train test splits -- 80/20 # --- # from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20, random_state=10) # + id="ndIUoit2LpQ3" colab_type="code" colab={} # Gaussian Classifier # --- # Base without any tuning # gnb = GaussianNB() gnb.fit(X_train, y_train) y_pred = gnb.predict(X_test) # + id="-2kdEPZTMChb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="fea46f26-0c30-4db4-9abf-375cde9f5ba2" # Testing the scores # --- # print(f'The train set score accuracy: {gnb.score(X_train, y_train)}') print(f'The test set score accuracy: {gnb.score(X_test, y_test)}') # + id="vt2UntjUMCVa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="e6ed8fb8-5b3a-458e-ef0b-080d11f539df" # Performance Metrics # --- # print('-------- Confusion Matrix --------') cm = confusion_matrix(y_test, y_pred) print(cm) print() print('-------- Classification Report --------') cl_rep = classification_report(y_test, y_pred) print(cl_rep) # + id="TfjYlRAbMCGk" colab_type="code" colab={} # Split into train test splits -- 70/30 # --- # from sklearn.model_selection import train_test_split X_train_70, X_test_70, y_train_70, y_test_70 = train_test_split(X,y,test_size=0.30, random_state=10) # + id="XzsQhv7HMTQ2" colab_type="code" colab={} # Gaussian Classifier # --- # Base without any tuning # gnb_70 = GaussianNB() gnb_70.fit(X_train_70, y_train_70) y_pred_70 = gnb.predict(X_test_70) # + id="RPRsGRKEMTMf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="be8b87da-1b3e-4c87-fd6c-38841f35cf3b" # Testing the scores # --- # 
print(f'The train set score accuracy: {gnb_70.score(X_train_70, y_train_70)}')
print(f'The test set score accuracy: {gnb_70.score(X_test_70, y_test_70)}')

# + id="x6x5N6LyMTJu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="074f6eaf-c4dc-4ea2-e3be-b28045442512"
# Performance Metrics
# ---
#
print('-------- Confusion Matrix --------')
cm = confusion_matrix(y_test_70, y_pred_70)
print(cm)
print()
print('-------- Classification Report --------')
cl_rep = classification_report(y_test_70, y_pred_70)
print(cl_rep)

# + id="_W5LgnwlMTF4" colab_type="code" colab={}
# Split into train test splits -- 60/40
# ---
#
from sklearn.model_selection import train_test_split
X_train_40, X_test_40, y_train_40, y_test_40 = train_test_split(X,y,test_size=0.40, random_state=10)

# + id="ZvKGvf3RMS8-" colab_type="code" colab={}
# Gaussian Classifier
# ---
# Base without any tuning
#
gnb_40 = GaussianNB()
gnb_40.fit(X_train_40, y_train_40)
# Bug fix: predict with the model trained on the 60/40 split (gnb_40),
# not the earlier 80/20 model (gnb) — otherwise the confusion matrix and
# classification report below evaluate the wrong classifier.
y_pred_40 = gnb_40.predict(X_test_40)

# + id="OGlz4iG8M6WT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="655f0010-ed18-4287-eebb-942936e3eaea"
# Testing the scores
# ---
#
print(f'The train set score accuracy: {gnb_40.score(X_train_40, y_train_40)}')
print(f'The test set score accuracy: {gnb_40.score(X_test_40, y_test_40)}')

# + id="BvA-dQT5M_ej" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="85fda9a6-ca31-4162-c1f7-b646efe19d09"
# Performance Metrics
# ---
#
print('-------- Confusion Matrix --------')
cm = confusion_matrix(y_test_40, y_pred_40)
print(cm)
print()
print('-------- Classification Report --------')
cl_rep = classification_report(y_test_40, y_pred_40)
print(cl_rep)

# + id="-6N7KZLyN5LL" colab_type="code" colab={}
# Multinomial Classifier 80/20
# ---
#
multi = MultinomialNB()
multi.fit(X_train, y_train)
y_pred = multi.predict(X_test)

# + id="1jTNXltSOQmU" colab_type="code" colab={"base_uri": "https://localhost:8080/", 
"height": 51} outputId="e9ad917e-3105-428a-fe5c-74696df38572" # Testing the scores # --- # print(f'The train set score accuracy: {multi.score(X_train, y_train)}') print(f'The test set score accuracy: {multi.score(X_test, y_test)}') # + id="YLCjmcabOXPF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="b1bbe8d6-c605-4090-b8ca-00f896a18bd1" # Performance Metrics # --- # print('-------- Confusion Matrix --------') cm = confusion_matrix(y_test, y_pred) print(cm) print() print('-------- Classification Report --------') cl_rep = classification_report(y_test, y_pred) print(cl_rep) # + id="4c48VQFJPAmy" colab_type="code" colab={} # + [markdown] id="ZdXm4g9xNGmn" colab_type="text" # # Implementing Optimization for Model Performance # + id="T1-x3X-eNJwl" colab_type="code" colab={} # Split into train test splits -- 80/20 # --- # from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20, random_state=10) # + id="30o-4a6OP0Ci" colab_type="code" colab={} # Feature Scaling # --- # from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer mm = MinMaxScaler() X_train = mm.fit_transform(X_train) X_test = mm.transform(X_test) # + id="r1TZ_QIwQLe1" colab_type="code" colab={} # Gaussian Classifier # --- # Base without any tuning # gnb = GaussianNB() gnb.fit(X_train, y_train) y_pred = gnb.predict(X_test) # + id="NCzhjV_nQbDX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c008985e-5f7f-4b5a-cde7-03e636ad49e3" # Testing the scores # --- # print(f'The train set score accuracy: {gnb.score(X_train, y_train)}') print(f'The test set score accuracy: {gnb.score(X_test, y_test)}') # + id="LwfQ4It6QdYz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="7a327d7c-558c-4c0a-e02d-248f3c1fcf79" # Performance Metrics # --- # print('-------- Confusion Matrix --------') cm = confusion_matrix(y_test, 
y_pred) print(cm) print() print('-------- Classification Report --------') cl_rep = classification_report(y_test, y_pred) print(cl_rep)
naive_bayes_dataset_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7-2019.10 [python/3.7-2019.10]
#     language: python
#     name: sys_python37_2019_10
# ---

# # Astronomy 8824 - Numerical and Statistical Methods in Astrophysics
#
# ## Statistical Methods Topic V. Hypothesis Testing
#
# These notes are for the course Astronomy 8824: Numerical and Statistical Methods in Astrophysics. It is based on notes from <NAME> with modifications and additions by <NAME>.
# David's original notes are available from his website: http://www.astronomy.ohio-state.edu/~dhw/A8824/index.html
#
# #### Background reading:
# - Statistics, Data Mining, and Machine Learning in Astronomy, $\S\S 3.3.7, 5.4$
# - Loredo, $\S 5.3$
# - Gould (2003), arXiv:astro-ph/0310577

# +
import math

import numpy as np
from numpy.polynomial import Polynomial

# %matplotlib inline
import matplotlib.pyplot as plt

from scipy.optimize import minimize
from scipy import stats

# matplotlib settings: one base size for default text and axes titles, a
# middle size for tick labels and legends, and a larger size for axis labels
# and figure titles.
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18

plt.rcParams.update({
    'font.size': SMALL_SIZE,
    'axes.titlesize': SMALL_SIZE,
    'axes.labelsize': BIGGER_SIZE,
    'lines.linewidth': 2,
    'axes.linewidth': 2,
    'xtick.labelsize': MEDIUM_SIZE,
    'ytick.labelsize': MEDIUM_SIZE,
    'legend.fontsize': MEDIUM_SIZE,
    'figure.titlesize': BIGGER_SIZE,
})
# -

# LaTeX macros hidden here --
# $\newcommand{\expect}[1]{{\left\langle #1 \right\rangle}}$
# $\newcommand{\intinf}{\int_{-\infty}^{\infty}}$
# $\newcommand{\xbar}{\overline{x}}$
# $\newcommand{\ybar}{\overline{y}}$
# $\newcommand{\like}{{\cal L}}$
# $\newcommand{\llike}{{\rm ln}{\cal L}}$
# $\newcommand{\xhat}{\hat{x}}$
# $\newcommand{\yhat}{\hat{y}}$
# $\newcommand{\xhati}{\hat{x}_i}$
# $\newcommand{\yhati}{\hat{y}_i}$
# $\newcommand{\sigxi}{\sigma_{x,i}}$
# $\newcommand{\sigyi}{\sigma_{y,i}}$
# $\newcommand{\cij}{C_{ij}}$
# $\newcommand{\cinvij}{C^{-1}_{ij}}$
# $\newcommand{\cinvkl}{C^{-1}_{kl}}$
# $\newcommand{\cinvmn}{C^{-1}_{mn}}$
# $\newcommand{\valpha}{\vec \alpha}$
# $\newcommand{\vth}{\vec \theta}$
# $\newcommand{\ymod}{y_{\rm mod}}$
# $\newcommand{\dy}{\Delta y}$

# ### Introduction
#
# We have focused so far on the task of estimating parameter values and their errors when fitting data.
#
# These results presume that the data are described by the model in question for _some_ value of the parameters.
#
# But once we have fit for parameters, how do we decide whether the model itself is viable? or how do we compare two models?
#
# We'll eventually take a Bayesian approach to this problem, but let's first look at a frequentist recipe that is often
# useful in practice.

# ### Expected value of $\chi^2$
#
# Suppose that we have fit a model to data by minimizing $\chi^2$.
#
# Gould (2003) proves (a standard result) that at the minimum
# $$
# \expect{\chi^2} = N - n + \expect{\Delta_k}\cinvkl\expect{\Delta_l},
# $$
# where $N$ is the number of data points and $n$ is the number of parameters that are fit.
#
# But if the model is a correct description of the data for some choice of parameters, then $\expect{\Delta_k} =
# \expect{y_{\rm mod}(x_k)}-\expect{y_k} = 0$. This is the _expectation value_ for the correct model.
#
# Thus, for a correct model, we expect $\chi^2$ to be approximately $N-n$, the number of data points minus the number of fitted parameters, usually referred to as the number of "degrees of freedom."
#
# Alternatively, the _reduced_ $\chi^2$, sometimes written $\chi^2/{\rm d.o.f.}$, is expected to be approximately one.
#
# This result _does not_ assume Gaussian errors on the data, and it _does not_ assume that the errors are uncorrelated.
# ### Distribution of $\chi^2$
#
# If the errors on the data $x_i$ are Gaussian and the model is correct, then value of $\chi^2$ follows a $\chi^2$
# distribution with $k = N-n$ degrees of freedom (see Ivezic $\S 3.3.7$).
#
# If we define $z_i = (x_i - \mu)/\sigma$, the sum of the squares $Q = \Sigma^N_{i=1} z_i^2$ follows a $\chi^2$ distribution with $k = N$ and the probability is:
#
# $$
# p(Q | k) = \chi^2(Q|k) = \frac{1}{2^{k/2} \Gamma(k/2)} Q^{k/2 - 1} \exp( -Q/2 )
# $$
# for Q > 0.
#
# The variance of this distribution is $\sigma^2 = 2k$.
#
# Alternatively, the standard deviation for $\chi^2/k$ (reduced $\chi^2$) is $\sqrt{2k}/k = \sqrt{2/k}$.
#
# If the number of degrees of freedom is large, then the distribution of $\chi^2/k$ approaches a Gaussian distribution
# with mean 1 and standard deviation $\sqrt{2/k}$.
#

# +
# Plot the chi^2 probability density for several small degrees of freedom.
q = np.linspace(0, 100, 1000)
kvals = [1, 2, 3, 4, 5]  # degrees of freedom

plt.figure(figsize=(8, 5))
for dof in kvals:
    density = stats.chi2.pdf(q, df=dof)
    plt.plot(q, density, label="k = {0}".format(dof))
plt.legend()
plt.xlabel("Q")
plt.ylabel(r"$\chi^2$(Q|k) = p(Q|k)")
plt.title(r"$\chi^2$ Distribution")
plt.ylim(0, .4)
plt.xlim(0, 8)

# +
# Same density plotted against the reduced variable Q/k; also print a crude
# grid-sum estimate of the mean and spread to compare with sqrt(2/k).
q = np.linspace(0, 1000, 1000)
kvals = [5, 10, 20, 40, 80]  # degrees of freedom

plt.figure(figsize=(8, 5))
for dof in kvals:
    density = stats.chi2.pdf(q, df=dof)
    plt.plot(q/dof, density, label="k = {0}".format(dof))
    mean = np.sum(density*q/dof)
    std = np.sqrt( np.sum(density*np.power(q/dof-mean, 2)) )
    print("k = {0}, mean = {1:.2f}, std = {2:.2f}, sqrt(2/k) = {3:.2f}".format(dof, mean, std, np.sqrt(2/dof)))
plt.xlabel("Q")
plt.ylabel("p(Q/k|k)")
plt.xlim(0, 5)
plt.legend()
# -

# #### Example
#
# Suppose we have 12 data points that we fit with a straight line, and we get $\chi^2=14.47$ for the best-fit slope and amplitude. In this case $k = N - n = 12 - 2 = 10$.
# # Then $\chi^2/{\rm d.o.f.} = 14.47/10 = 1.447 = 1+\sqrt{2/10}$, so this fit is only discrepant with the data at (exactly) the $1\sigma$ level. ($\sigma = \sqrt{2/k} = \sqrt{2/10} = 0.447$) # # However, if we have 120 data points and the same $\chi^2/{\rm d.o.f.}$, then the discrepancy is $0.447/\sqrt{2/118} = 3.4\sigma$. # # If the value of $\chi^2/k$ is much _larger_ than $1 + \sqrt{2/k}$, then it probably indicates that either (1) the model is incorrect, or (2) the errors have been underestimated, or (3) the errors are significantly non-Gaussian, so that "outliers" are giving anomalously large contributions to $\chi^2$. Or maybe it indicates all three! # # It will generally take thought and further inspection to determine which of these is going on. # # Note that these results apply unchanged for correlated (multi-variate Gaussian) errors, but the calculation of $\chi^2$ must correctly incorporate the error covariance matrix. # # Thus, a specific instance of "(2) the errors have been underestimated" is "the covariance matrix has significant off-diagonal terms that have not been accounted for when computing $\chi^2$." # # If the value of $\chi^2/k$ is much _smaller_ than $1-\sqrt{2/k}$ then it usually indicates that the errors have been underestimated. # ### Linear constraints # # The above results are consistent with our basic intuition. # # If a model is correct and the errors are correct, then data will typically scatter about the model at about the level of the $1\sigma$ error bars. # # For $N$ data points we therefore expect $\chi^2/N \approx 1$, not $\chi^2 = 0$. # # Each free parameter increases our ability to "fit the noise," so we expect a lower value of $\chi^2$. We could in principle use a free parameter to exactly fit one data point, reducing the expected $\chi^2$ by one. # # This turns out to be exactly right, as $\expect{\chi^2} = N-n$. 
#
# We may also have a linear constraint on the parameters, for example that they sum to one, or that the average
# of the distribution is zero, or even just knowing the value of one parameter.
#
# Gould (2003) gives formulae for the best-fit parameter values in this case.
#
# He further shows that (_if_ both the constraints and the model are correct) then imposing $m$ constraints changes the expected value of $\chi^2$ to $\expect{\chi^2} = N - n + m$.
#
# This again accords with intuition: imposing a constraint is equivalent to removing one degree of freedom.
#

# ### The $\chi^2$ hypothesis test
#
# The frequentist version of the $\chi^2$ test is simply this: a model should be rejected if its value of $\chi^2$ (for the best-fit parameters) is large enough to be highly improbable.
#
# Specifically, if the probability $P(>\chi^2)$ of obtaining a $\chi^2$ greater than the best-fit value is $q$, then the
# model is rejected at the $1-q$ confidence level. For example, if $P(>\chi^2) = 0.01$, then the model is rejected at 99\% confidence.
#
# The cumulative probability distribution $P(>\chi^2)$ can be found in tables or computed via python routines; it can be
# approximated by a complementary error function (integral of a Gaussian) if the number of degrees of freedom is large.
#
# One can make various complaints about this test --- Why integrate over values of $\chi^2$ larger than the observed one? Why reject a model for anomalously large $\chi^2$ values but not for anomalously small ones? --- but it basically makes sense. If a model has a very small $P(>\chi^2)$ it is probably wrong, or else the errors are wrong.
#
# ### An important note about $\chi^2$ parameter constraints
#
# The likelihood of a set of parameter values relative to the best-fit values is $\exp(-\Delta\chi^2/2)$, where $\Delta\chi^2$ is the change in $\chi^2$ relative to its minimum value.
# # The 68% confidence interval on a parameter (in a one-parameter fit) corresponds to $\Delta\chi^2 = 1$, _not_ to $\Delta\chi^2/{\rm d.o.f.}=1$. # # More than one astronomy paper has incorrectly used the latter. # ### Bayesian Hypothesis Comparison # # (See Ivezic $\S 5.4.$) # # Bayes' Theorem gives a straightforward expression for the relative probability of two hypotheses: # $$ # O_{12} = # {p(H_1|DI) \over p(H_2|DI)} = # {p(H_1|I) \over p(H_2|I)} \times # {p(D|H_1 I) \over p(D| H_2I)}. # $$ # We multiply our prior probabilities by the relative probabilities of obtaining the data under the two hypotheses. The global likelihood $p(D|I)$ cancels out of the comparison. # # This ratio $O_{12}$ is called the _odds ratio_. # # Recall that $p(H|D I)$ is the posterior probability, $p(D|H I)$ is the likelihood, and $p(H|I)$ is the prior. In this case, $p(D|H I)$ is the marginal likelihood of hypothesis $H$. # # The ratio of the global likelihoods $B_{12} = p(D|H_1 I)/p(D|H_2 I)$ is called the Bayes factor. # # If the hypotheses are simple, with no free parameters, then this comparison is straightforward. However, if the hypotheses are models with parameters, we must integrate over the possible parameter values. This can be complicated, but it also has interesting effects when comparing two models with different numbers of parameters, or even with the same number of parameters but different degrees of prior predictiveness. 
# +
# Sketch the two hypotheses: H1 fixes the mean at mu_1 (a spike),
# while H2 spreads the same prior probability uniformly over [mu_min, mu_max].
fig, ax = plt.subplots(1, 1, figsize=(8, 6))

mumin, mumax, muone = 3, 5, 4

# H1: a delta-function-like spike at mu_1
ax.plot([muone, muone], [0, 1], 'k-')
# H2: a flat top-hat prior between mu_min and mu_max
ax.plot([mumin, mumin], [0, 0.5], 'b:')
ax.plot([mumin, mumax], [0.5, 0.5], 'b:')
ax.plot([mumax, mumax], [0, 0.5], 'b:')

ax.text(muone, 1.05, s="$H_1$", ha="center")
ax.text(mumin+0.5, 0.6, s="$H_2$", color='blue')
for xpos, lab in [(muone, "$\mu_{1}$"), (mumin, "$\mu_{min}$"), (mumax, "$\mu_{max}$")]:
    ax.text(xpos, -0.05, s=lab, ha="center")

ax.set_xticks([])
ax.set_ylim(0, 1.2)
# -

# #### Example (From Loredo, $\S 5.3$)
#
# We previously gave
# $$
# p(D|\mu I) =
# (2\pi \sigma^2)^{-N/2} \exp\left[-{Ns^2 \over 2\sigma^2}\right]
# \exp\left[-{N\over 2\sigma^2}(\xbar-\mu)^2\right]
# $$
# as the probability of obtaining the data $D=\{x_i\}$ drawn from a Gaussian distribution with mean $\mu$ and dispersion $\sigma$.
#
# Consider the competing hypotheses
#
# $H_1$ = mean of distribution is a specified value $\mu_1$
#
# $H_2$ = mean of distribution is in range $\mu_{min} \leq \mu \leq \mu_{max}$, with a flat prior $p(\mu|I)=(\mu_{max}-\mu_{min})^{-1}$ in this range.
#
# $H_2$ will _always_ fit the data better, unless the mean happens to be exactly $\mu_1$, in which case it fits equally well.
#
# But does this mean $H_2$ is actually the preferred hypothesis?
#
# $$
# P(D|H_1 I) = K \times
# \exp\left[-{N\over 2\sigma^2}(\xbar-\mu_1)^2\right],
# $$
# where
# $$
# K = (2\pi \sigma^2)^{-N/2} \exp\left[-{Ns^2 \over 2\sigma^2}\right]
# $$
# is independent of $\mu_1$.
#
# $$\eqalign{
# p(D|H_2 I) &= \int_{\mu_{min}}^{\mu_{max}} p(D|\mu I) p(\mu |I) d\mu \cr
# &= K (\mu_{max}-\mu_{min})^{-1}
# \int_{\mu_{\rm min}}^{\mu_{\rm max}}
# d\mu\exp\left[-{N\over 2\sigma^2}(\xbar-\mu)^2\right].
# }
# $$
# If $\mu_{max}-\xbar$ and $\xbar-\mu_{min}$ are both $\gg \sigma/\sqrt{N}$, then the integral is just $(2\pi\sigma^2/N)^{1/2}$, since a Gaussian $(2\pi\sigma^2)^{-1/2} \exp(-x^2/2\sigma^2)$ integrates to one.
#
# In this case
# $$
# {p(D|H_1 I) \over p(D|H_2 I)} =
# {(\mu_{max}-\mu_{min}) \over
# \left(2\pi\sigma^2 / N\right)^{1/2}}
# \exp\left[-{N\over 2\sigma^2}(\xbar-\mu_1)^2\right].
# $$
#
# If we considered the two hypotheses equally probable beforehand, $p(H_1|I) = p(H_2 | I)$, then this ratio is also the ratio of posterior probabilities.
#
# Model 2 is "penalized" for having less predictive power than Model 1, and the amount of the penalty depends on the ratio of $(\mu_{max}-\mu_{min})$ to the actual uncertainty in the mean $\sigma/\sqrt{N}$.
#
# Model 1 is penalized because it doesn't fit the data as well as the best fit versions of Model 2. If it is nonetheless fairly close, then it may win out as the more probable hypothesis, otherwise it won't.

# +
N = 10
sigma = 0.5
mumin = 3
muone = 4
mumax = 5

def probratio(xmean, sigma, N):
    """Odds of H1 (mean fixed at muone) relative to H2 (flat prior on
    [mumin, mumax]) for a sample of size N with mean xmean and known
    dispersion sigma."""
    width_penalty = ( (mumax - mumin)/np.sqrt(2*np.pi*sigma*sigma/N) )
    return width_penalty * np.exp( -0.5*N*np.power( (xmean-muone)/sigma, 2 ) )

meanvals = np.linspace(3.5, 4.5, 50)

plt.figure(figsize=(8, 6))
# Draw one sample per trial mean and plot the odds ratio at the sample mean.
for meanval in meanvals:
    sample = np.random.normal(meanval, sigma, size=N)
    xbar = np.mean(sample)
    plt.plot(xbar, probratio(xbar, sigma, N), 'b.')
plt.ylim(-0.1, 6)
plt.xlim(3, 5)
plt.xlabel(r"$\mu$")
plt.ylabel(r"$p(D|H_1 I)/p(D|H_2 I)$")
# -

# For another example, see Ivezic $\S 5.4.2$.
#
#
# More generally, we can see from the structure of the integral $\int p(\theta|I)p(D|\theta I)d\theta$ that a model with a free parameter $\theta$ will gain to the extent that its best fit value $\hat\theta$
# yields a greater likelihood $p(D|\hat{\theta} I)$, but will lose to the extent
# that $p(\theta | I)$ is broad and "spreads out" the predictive power.
#
# The Bayesian expression for hypothesis comparison thus yields Occam's razor as a _result_: the preferred model is the one that fits the data adequately with the least freedom to be adjusted to do so.
# # In principle, this provides a well defined way to decide whether a more complicated model is "worth it." # # In general cases, the integrals over parameter values may be impossible to do analytically, though they can probably be done numerically. # # Note that while we have used a Gaussian example here, the analysis is not restricted to any particular probability distribution. # # Indeed, one could use these ratio tests to compare the hypothesis that the data have Gaussian errors with a fixed dispersion to the hypothesis that there is an additional "outlier" population drawn from a broader Gaussian, or that the error distribution is exponential instead of Gaussian. # ### Rules of thumb # # Leaving aside the Bayesian approach, there is another important $\Delta\chi^2$ rule of thumb: an additional parameter should reduce $\chi^2$ by $\Delta\chi^2 > 1$ to be considered significant. # # Roughly, you can think of this rule as saying that one parameter can be chosen to perfectly explain one data point, so it should typically reduce $\Delta\chi^2$ by one even if the more complicated model has no more explanatory power than the simpler model. # # This rule can be justified more rigorously in terms of the expected value of $\chi^2$ in linear model fits, where adding $n$ parameters reduces the expected value of $\chi^2$ by $n$. # # A $\Delta\chi^2 =1$ is enough to prefer one parameter value over another at $1\sigma$, but it would be an undemanding criterion for accepting a model that was actually more complicated. # # The Aikake information criterion (AIC, Ivezic $\S 4.3.2$) is a popular choice for frequentist comparison of models with different numbers of parameters. 
This is ${\mathrm AIC} \equiv -2\ln \left[L^0(M)\right] + 2 k$, or for small samples (Burnham & Anderson suggest $N/k < 40$): # $$ # {\rm AIC} \equiv -2\ln \left[L^0(M)\right] + 2 k + \frac{2k (k+1)}{N-k-1} # $$ # # In terms of the Bayesian odds ratio, a ratio $>10$ might be taken as interesting evidence for one hypothesis over another. # # For equal priors (so that the odds ratio equals the likelihood ratio) and Gaussian errors, an odds ratio of 10 corresponds to $\Delta\chi^2 = -2\ln 0.1 = 4.6$ or a $2.1\sigma$ difference. # # An odds ratio of 100 corresponds to $\Delta\chi^2=13.8$ or a $3.7\sigma$ difference, which might be taken as "decisive" evidence. # # The Bayesian Information Criterion (BIC, Ivezic $\S 5.4.3$) is an approximate method of estimating the odds ratio from the maximum values of the data likelihood, without marginalizing over the full parameter space. # # The preferred model is the one with the smaller value of # $$ # {\rm BIC} \equiv -2\ln \left[L^0(M)\right] + k\ln N # $$ # where $L^0(M)$ is the likelihood of the model with best-fit parameter values, $k$ is the number of model parameters, and $N$ is the number of data points. Note that the BIC penalizes models with lots of parameters. If two models are equally successful, the model with fewer parameters wins. # ### Absolute model assessment # # In a Bayesian approach, there is really no such thing as an absolute model assessment. # # If one has an exhaustive set of possible hypotheses, $H_1$, $H_2$, ... $H_N$, then one can ask about the probability that any one of those hypotheses is correct # $$ # p(H_i|DI) = p(H_i|I) {p(D|H_i I) \over p(D|I)}, # $$ # where # $$ # p(D|I) = \sum_{i=1}^N p(D|H_i I) # $$ # is computed by summing over all of the hypotheses. # # But there isn't a Bayesian way to assess a hypothesis in isolation without specifying alternatives. 
#
#
# The traditional way to do an absolute model assessment in the frequentist approach is to compute some statistic, say $\chi^2$, that increases for worse fits, then ask how often one would expect to get a value that large _or larger_ if the hypothesis were true.
#
# If this probability $\alpha$ is small, then the model is rejected at the $1-\alpha$ confidence level.
#
# There are some problems with this approach: the answer depends on what statistic you choose, it may depend on what you think the alternative "data sets" are, and there is sometimes ambiguity about what "tail" of the distribution one should consider. For example, low $\chi^2$ values can be as improbable as high $\chi^2$ values --- should a model be rejected because it fits the data too well?
#
# Despite these problems, these frequentist assessments seem to make good sense in some cases, and choices among seemingly ambiguous alternatives (e.g., whether to reject low $\chi^2$ values) can often be made sensibly in the context of a specific problem.
#

# ### Example calculation

def calc_lnlike(theta, x_i, yhat_i, sig_y_i):
    '''
    Return the natural log of the likelihood fit of an arbitrary order polynomial

    Parameters
    ----------
    observations: x_i, yhat_i
    uncertainties: sig_y_i
    model parameters: theta
        where theta are the coefficients of a polynomial such that
        theta[0] + theta[1]*x + theta[2]*x*x + ...
    '''
    f = Polynomial(theta)
    # Per-point term (data - model)^2 / (2 sigma^2); its sum is chi^2 / 2.
    like = np.power( yhat_i - f(x_i), 2)/(2*sig_y_i*sig_y_i)
    # BUG FIX: `like` already carries the factor of 1/2, so ln L = -sum(like).
    # The previous return of -0.5*sum(like) halved ln L, which in turn halved
    # the chi^2 = -2 ln L (and skewed AIC/BIC) reported below.
    return -np.sum(like)

# +
# Create data with heteroscedastic errors

def adderrors(y_i, sig_y_i):
    '''
    Return a copy of y_i with Gaussian scatter of per-point width sig_y_i.
    '''
    yhat_i = y_i.copy()
    # Apply the scatter point by point
    for i in range(Npts):
        yhat_i[i] += np.random.normal(0, sig_y_i[i])
    return yhat_i

np.random.seed(1216)
Npts = 200
x_i = np.linspace(0, 10, Npts)

# Define truth: a cubic polynomial
theta_true = np.array([5, 0, .5, .2])
f_true = Polynomial(theta_true)
y_i = f_true(x_i)

sig_y = .5
# Calculate heteroscedastic errors
sig_y_i = np.random.uniform(0.5*sig_y, sig_y, Npts)

# Calculate homoscedastic errors
# sig_y_i = 0.5*np.ones(Npts)  # np.random.uniform(0.25, 0.5, Npts)

# These are the data to fit
yhat_i = adderrors(y_i, sig_y_i)

# +
def calc_aic(l, n, k):
    '''
    Return AIC based on sample size n and number of parameters k.
    '''
    # Burnham & Anderson: apply the small-sample correction when n/k < 40
    if n/k < 40:
        return -2.*l + 2.*k + 2*k*(k+1)/(n-k-1)
    else:
        return -2.*l + 2.*k

# Polynomials from 0th to 5th order (num is the number of coefficients)
numparams = np.linspace(1, 6, 6, dtype=int)

# Multiply output by -1 to use with minimize()
func = lambda *args: -calc_lnlike(*args)

results = {}

# Fit polynomials of progressively higher order
for num in numparams:
    # BUG FIX: the guess must have as many entries as the largest fit (6);
    # the old 5-element guess silently truncated the num=6 fit to 5 params.
    theta_guess = np.array([1, 2, 1, 0, 0, 0])
    theta = theta_guess[:num]
    x0 = theta + np.random.randn(len(theta))
    print("Starting guess ", x0)
    results[num] = minimize(func, x0, args=(x_i, yhat_i, sig_y_i), method="BFGS")
    # The inverse of the Hessian matrix is the covariance matrix
    errs = np.sqrt(np.diag(results[num].hess_inv))
    print("Coefficients: ", results[num].x)
    print("Uncertainties on coefficients: ", errs)
    print("Input values: ", theta_true)
    llike = calc_lnlike(results[num].x, x_i, yhat_i, sig_y_i)
    results[num]['aic'] = calc_aic(llike, len(yhat_i), len(theta))
    results[num]['bic'] = -2*llike + num*np.log(Npts)
    results[num]['chi2'] = -2*llike
    print("Maximum Likelihood: ", llike, "AIC = ", results[num]['aic'], "BIC = ", results[num]['bic'])

# +
# Plot the data and fits
plt.figure(figsize=(8,6))
plt.errorbar(x_i, yhat_i, xerr=None, yerr=sig_y_i, fmt='bo')

x_plot = np.linspace(0, 10, 100)
for num in numparams:
    fplot = Polynomial(results[num].x)
    y_plot = fplot(x_plot)
    plt.plot(x_plot, y_plot, label="Order = {}".format(num-1))
plt.legend()

# +
# Plot AIC, BIC, and Chi2
plt.figure(figsize=(8,6))

for num in numparams:
    # (dead code removed: a Polynomial evaluation copied from the previous
    # cell whose result was never used here)
    plt.plot(num-1, results[num]['aic'], 'bo')
    plt.plot(num-1, results[num]['bic'], 'r^')
    plt.plot(num-1, results[num]['chi2'], 'g+')
    print("{0} AIC = {1} BIC = {2} Chi2 = {3}".format(num-1, results[num]['aic'], results[num]['bic'], results[num]['chi2']))

# Re-plot the final point with labels so the legend has one entry per statistic
plt.plot(num-1, results[num]['aic'], 'bo', label='AIC')
plt.plot(num-1, results[num]['bic'], 'r^', label='BIC')
plt.plot(num-1, results[num]['chi2'], 'g+', label=r'$\chi^2$')

plt.yscale('log')
plt.xlabel("Polynomial Order")
plt.ylabel(r"AIC, BIC, $\chi^2$")
plt.xticks(ticks=numparams-1, labels=numparams-1)
plt.legend()
# -
Lectures/HypothesisTesting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Python ≥3.5 is required
import sys

# Scikit-Learn ≥0.20 is required
import sklearn

# TensorFlow ≥2.0 is required
import tensorflow as tf

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
np.random.seed(42)

# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)

def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as fig_id.<ext> under IMAGES_PATH."""
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)

# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# -

from tensorflow import keras

# +
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()

X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)

# Fit the scaler on the training set only so no test statistics leak in
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
# -

np.random.seed(42)
tf.random.set_seed(42)

# Wide & Deep model: the input flows through two hidden layers (deep path)
# and is also concatenated directly onto the output layer (wide path).
input_ = keras.layers.Input(shape=X_train.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input_)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input_], outputs=[output])

model.summary()

model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
# BUG FIX: X_new must be defined before it is passed to model.predict();
# the previous version called predict(X_new) one statement too early,
# which raised a NameError.
X_new = X_test[:3]
y_pred = model.predict(X_new)

np.random.seed(42)
tf.random.set_seed(42)

# Two-input model: five features feed the wide path, six feed the deep path
# (features 2-4 are shared by both paths).
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="output")(concat)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])

# +
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))

X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:]
X_new_A, X_new_B = X_test_A[:3], X_test_B[:3]

history = model.fit((X_train_A, X_train_B), y_train, epochs=20,
                    validation_data=((X_valid_A, X_valid_B), y_valid))
mse_test = model.evaluate((X_test_A, X_test_B), y_test)
y_pred = model.predict((X_new_A, X_new_B))
# -

np.random.seed(42)
tf.random.set_seed(42)

# Same two-input model plus an auxiliary output attached to the deep path,
# used as a regularizer via a down-weighted auxiliary loss (0.1 vs 0.9).
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="main_output")(concat)
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output, aux_output])

model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit([X_train_A, X_train_B], [y_train, y_train], epochs=20,
                    validation_data=([X_valid_A, X_valid_B], [y_valid, y_valid]))
total_loss, main_loss, aux_loss = model.evaluate(
    [X_test_A, X_test_B], [y_test, y_test])
y_pred_main, y_pred_aux = model.predict([X_new_A, X_new_B])
complex_models_functional_api/multiple_inputs_outputs.ipynb