code
stringlengths
2.5k
150k
kind
stringclasses
1 value
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import operator df_Confirmed=pd.read_csv('/content/drive/My Drive/datasets/Tensorflow community challenge /Datasets /time_series_2019-ncov-Confirmed (1).csv') df_Confirmed.head() draft=df_Confirmed.copy() df_Confirmed.keys() df_Confirmed.describe() key=df_Confirmed.describe().keys() key df_Confirmed=df_Confirmed.drop(['1/22/20', '1/23/20', '1/24/20', '1/25/20', '1/26/20', '1/27/20', '1/28/20', '1/29/20', '1/30/20', '1/31/20', '2/1/20', '2/2/20', '2/3/20', '2/4/20', '2/5/20', '2/6/20', '2/7/20', '2/8/20', '2/9/20', '2/10/20', '2/11/20', '2/12/20', '2/13/20', '2/14/20', '2/15/20', '2/16/20', '2/17/20', '2/18/20', '2/19/20', '2/20/20', '2/21/20', '2/22/20', '2/23/20', '2/24/20', '2/25/20', '2/26/20', '2/27/20', '2/28/20', '2/29/20', '3/1/20'],axis=1) df_Confirmed.describe() #df_Confirmed.head() lastest_confirmed=df_Confirmed['3/22/20'] ``` # **confirmed cases throughout the world reported** ``` df_contoury_wise=df_Confirmed.sort_values(by=['Country/Region']) unique_country_list=list(df_contoury_wise['Country/Region'].unique()) confirmed_country_list=[] no_cases=[] for i in unique_country_list: cases = lastest_confirmed[df_Confirmed['Country/Region']==i].sum() if cases>0: confirmed_country_list.append(cases) else: no_cases.append(i) for i in no_cases: unique_country_list.remove(i) unique_countries = [k for k, v in sorted(zip(unique_country_list, confirmed_country_list), key=operator.itemgetter(1), reverse=True)] for i in range(len(unique_countries)): confirmed_country_list[i] = lastest_confirmed[df_Confirmed['Country/Region']==unique_countries[i]].sum() for i in range(len(unique_countries)): print(f'{unique_countries[i]}: {confirmed_country_list[i]} cases') unique_provinces = list(df_Confirmed['Province/State'].unique()) outliers = ['United Kingdom', 'Denmark', 'France'] for i in outliers: unique_provinces.remove(i) province_confirmed_cases = [] no_cases = [] for i in 
unique_provinces: cases = lastest_confirmed[df_Confirmed['Province/State']==i].sum() if cases > 0: province_confirmed_cases.append(cases) else: no_cases.append(i) for i in no_cases: unique_provinces.remove(i) unique_provinces = [k for k, v in sorted(zip(unique_provinces, province_confirmed_cases), key=operator.itemgetter(1), reverse=True)] for i in range(len(unique_provinces)): province_confirmed_cases[i] = lastest_confirmed[df_Confirmed['Province/State']==unique_provinces[i]].sum() #unique_provinces print('Confirmed Cases by Province/States (US, China, Australia, Canada):') for i in range(len(unique_provinces)): print(f'{unique_provinces[i]}: {province_confirmed_cases[i]} cases') nan_indices = [] for i in range(len(unique_provinces)): if type(unique_provinces[i]) == float: nan_indices.append(i) unique_provinces = list(unique_provinces) province_confirmed_cases = list(province_confirmed_cases) for i in nan_indices: unique_provinces.pop(i) province_confirmed_cases.pop(i) import random import matplotlib.colors as mcolors '''c = random.choices(list(mcolors.CSS4_COLORS.values()),k = len(unique_countries)) plt.figure(figsize=(15,15)) plt.pie(confirmed_country_list, colors=c) plt.legend(unique_countries, loc='best', bbox_to_anchor=(0.7, 0., 1, 1)) plt.title('confirmed cases which are repored from country till the last date',size=32) plt.show()''' ``` # **Date wise plot** ``` dates=df_Confirmed.keys() dates=dates[4:] dates=list(dates) df_Confirmed.head() cases_on_dates=[] for i in dates: sum_of_cases=df_Confirmed[i].sum() cases_on_dates.append(sum_of_cases) print(f'{i}: {sum_of_cases}') c = random.choices(list(mcolors.CSS4_COLORS.values()),k = len(unique_countries)) c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates)) plt.figure(figsize=(15,15)) plt.pie(cases_on_dates,colors=c) plt.title('confirmed cases on basis of dates',size=30) plt.legend(dates, loc='best') plt.show() ``` # **continent plot** # top 10 country in which most of cases reporeted ``` # 
confirmed_country_list,unique_countries ``` ``` visual_unique_countries = [] visual_confirmed_cases = [] others = np.sum(confirmed_country_list[10:]) for i in range(len(confirmed_country_list[:10])): visual_unique_countries.append(unique_countries[i]) visual_confirmed_cases.append(confirmed_country_list[i]) visual_unique_countries.append('Others') visual_confirmed_cases.append(others) c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates)) plt.figure(figsize=(10,10)) plt.pie(visual_confirmed_cases,colors=c) plt.legend(visual_unique_countries,loc='best') plt.title('Top 10 country in which highest no of cases has been reported',size=30) plt.show() for i,j in enumerate(unique_countries): print(i,j) for i in range(len(unique_countries)): print(f'{unique_countries[i]}: {confirmed_country_list[i]} ') ``` 102 ``` visual_unique_countries_all = [] visual_confirmed_cases_all = [] others = np.sum(confirmed_country_list[20:]) for i in range(len(confirmed_country_list[:20])): visual_unique_countries_all.append(unique_countries[i]) visual_confirmed_cases_all.append(confirmed_country_list[i]) visual_unique_countries_all.append('Others') visual_confirmed_cases_all.append(others) c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates)) plt.figure(figsize=(10,10)) plt.pie(visual_confirmed_cases_all,colors=c) plt.legend(visual_unique_countries_all,loc='best',bbox_to_anchor=(0.7, 0., 1, 1)) plt.title('All country in which cases has been reported',size=30) plt.show() visual_unique_provinces = [] visual_confirmed_cases2 = [] others = np.sum(province_confirmed_cases[10:]) for i in range(len(province_confirmed_cases[:10])): visual_unique_provinces.append(unique_provinces[i]) visual_confirmed_cases2.append(province_confirmed_cases[i]) visual_unique_provinces.append('Others') visual_confirmed_cases2.append(others) c = random.choices(list(mcolors.CSS4_COLORS.values()),k = len(unique_countries)) plt.figure(figsize=(10,10)) plt.title(' Confirmed Cases per Province',size=32) 
plt.pie(visual_confirmed_cases2, colors=c) plt.legend(visual_unique_provinces, loc='best') plt.show() ``` # so we are going to plot these top 10 countrys ``` c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates)) plt.figure(figsize=(10,10)) plt.pie(confirmed_country_list[:10],colors=c) plt.legend(unique_countries[:10],loc='best') plt.title('Top 10 country in which highest no of cases has been reported',size=30) plt.show() by_Continent=['Europe','Asia','Africa','North america','South america','Australia'] #s=list(map(str,input().split(" "))) #Australia=s '''Australia.append('Marshall Islands') Australia.append('Solomon Islands') Australia.append('New Zealand') Australia.append('Papua New Guinea')''' '''South_America=s South_America''' '''North_America=[] North_America=s North_America''' '''North_America.append('Antigua and Barbuda') North_America.append('El Salvador') North_America.append('Dominican Republic') North_America.append('Saint Kitts and Nevis') North_America.append('Saint Lucia') North_America.append('Saint Vincent and the Grenadines') North_America.append('United States of America') North_America.append('Trinidad and Tobago')''' #North_America '''Africa.append('Burkina Faso')''' '''Africa.append('Sierra Leone') Africa.append('South Africa') Africa.append("Cote d'Ivoire") Africa.append('Central African Republic') Africa.append('Equatorial Guinea')''' '''Africa''' '''Asia.append('United Arab Emirates') Asia''' '''europe_provigences=['Albania', 'Andorra', 'Armenia', 'Austria', 'Azerbaijan','Belarus', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Kazakhstan', 'Kosovo', 'Latvia', 'Liechtenstein', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova,' 'Monaco', 'Montenegro', 'Netherlands', 'North Macedonia', 'Norway', 'Poland', 'Portugal', 'Romania', 'Russia', 'San Marino', 'Serbia', 'Slovakia', 'Slovenia', 
'Spain', 'Sweden', 'Switzerland', 'Turkey', 'Ukraine', 'United Kingdom', 'Vatican City' , 'UK']''' #continent_df=pd.DataFrame() #continent_df['europe_provigences']=europe_provigences #continent_df['Asia']=pd.Series(Asia) #continent_df['Africa']=pd.Series(Africa) #continent_df['North_America']=pd.Series(North_America) #continent_df['South_America']=pd.Series(South_America) #continent_df['Australia']=pd.Series(Australia) #continent_df.to_csv('/content/continent_df.csv') continent_df=pd.read_csv('/content/drive/My Drive/datasets/Tensorflow community challenge /Datasets /continent_df.csv') continent_df.pop('Unnamed: 0') europe=continent_df['europe_provigences'].values Asia=continent_df['Asia'].values Africa=continent_df['Africa'].values North_America=continent_df['North_America'].values South_America=continent_df['South_America'].values Australia=continent_df['Australia'].values South_America confirmed_country_list,unique_countries europe_total_cases=[] South_America_total_cases=[] Asia_total_cases=[] Africa_total_cases=[] North_America_total_cases=[] Australia_total_cases=[] for i in range(len(unique_countries)): if unique_countries[i] in europe: europe_total_cases.append(confirmed_country_list[i]) if unique_countries[i] in Asia: Asia_total_cases.append(confirmed_country_list[i]) if unique_countries[i] in Africa: Africa_total_cases.append(confirmed_country_list[i]) if unique_countries[i] in North_America: North_America_total_cases.append(confirmed_country_list[i]) if unique_countries[i] in Australia: Australia_total_cases.append(confirmed_country_list[i]) if unique_countries[i] in South_America: South_America_total_cases.append(confirmed_country_list[i]) sum(europe_total_cases) South_America_total_cases sum(Asia_total_cases),sum(Africa_total_cases),sum(North_America_total_cases),sum(Australia_total_cases),sum(South_America_total_cases) total_continent_through_World=[] total_continent_through_World.append(sum(europe_total_cases)) 
total_continent_through_World.append(sum(Asia_total_cases)) total_continent_through_World.append(sum(Africa_total_cases)) total_continent_through_World.append(sum(North_America_total_cases)) total_continent_through_World.append(sum(Australia_total_cases)) total_continent_through_World.append(sum(South_America_total_cases)) total_continent_through_World_unique=[] total_continent_through_World_unique.append('Europe') total_continent_through_World_unique.append('Asia') total_continent_through_World_unique.append('Africa') total_continent_through_World_unique.append('North_America') total_continent_through_World_unique.append('Australia') total_continent_through_World_unique.append('South_America') total_continent_through_World y_pos = np.arange(len(total_continent_through_World_unique)) plt.figure(figsize=(10,10)) plt.bar(y_pos,total_continent_through_World) plt.title('continent wise plot for date 3/22/20') plt.xlabel('continent') plt.ylabel('no of peoples') plt.ylim(0,180000) plt.xticks(y_pos, total_continent_through_World_unique) plt.show() ```
github_jupyter
# Introduction A mass on a spring experiences a force described by Hookes law. For a displacment $x$, the force is $$F=-kx,$$ where $k$ is the spring constant with units of N/m. The equation of motion is $$ F = ma $$ or $$ -k x = m a .$$ Because acceleration is the second derivative of displacment, this is a differential equation, $$ \frac{d^2}{dt^2} = -\frac{k}{m} x.$$ The solution to this equation is harmonic motion, for example $$ x(t) = A\sin\omega t,$$ where $A$ is some amplitude and $\omega = \sqrt{k/m}$. This can be verified by plugging the solution into the differential equation. The angular frequency $\omega$ is related to the frequency $f$ and the period $T$ by $$f = \omega/2\pi$$ and $$T=2\pi/\omega$$ We can illustrate this rather trivial case with an interacive plot. ``` import matplotlib.pyplot as plt %matplotlib inline from IPython.html import widgets def make_plot(t): fig, ax = plt.subplots() x,y = 0,0 plt.plot(x, y, 'k.') plt.plot(x + 0.3 * t, y, 'bo') plt.xlim(-1,1) plt.ylim(-1,1) widgets.interact(make_plot, t=(-1,1,0.1)) ``` We want to generalize this result to several massess connected by several springs. # The spring constant as a second derivative of potential The force related to poential energy by $$ F = -\frac{d}{dx}V(x).$$ Ths equation comes directly from the definition that work is force times distance. Integrating this, we find the potential energy of a mass on a spring, $$ V(x) = \frac{1}{2}kx^2. $$ In fact, the spring contant can be defined to be the second derivative of the potential, $$ k = \frac{d^2}{dx^2} V(x).$$ We take the value of the second derivative at the minimum of the potential, which assumes that the oscillations are not very far from equilibrium. We see that Hooke's law is simply $$F = -\frac{d^2 V(x)}{dx^2} x, $$ where the second derivative is evaluated at the minimum of the potential. 
For a general potential, we can write the equation of motion as $$ \frac{d^2}{dt^2} x = -\frac{1}{m}\frac{d^2V(x)}{dx^2} x.$$ The expression on the right hand side is known as the dynamical matrix, though this is a trivial 1x1 matrix. # Two masses connected by a spring Now the potential depends on two corrdinates, $$ V(x_1, x_2) = \frac{1}{2} k (x_1 - x_2 - d),$$ where $d$ is the equilibrium separation of the particles. Now the force on each particle depends on the positions of both of the particles, $$ \begin{pmatrix}F_1 \\ F_2\end{pmatrix} = - \begin{pmatrix} \frac{\partial^2 V}{\partial x_1^2} & \frac{\partial^2 V}{\partial x_1\partial x_2} \\ \frac{\partial^2 V}{\partial x_1\partial x_2} & \frac{\partial^2 V}{\partial x_2^2} \\ \end{pmatrix} \begin{pmatrix}x_1 \\ x_2\end{pmatrix} $$ For performing the derivatives, we find $$ \begin{pmatrix}F_1 \\ F_2\end{pmatrix} = - \begin{pmatrix} k & -k \\ -k & k \\ \end{pmatrix} \begin{pmatrix}x_1 \\ x_2\end{pmatrix} $$ The equations of motion are coupled, $$ \begin{pmatrix} \frac{d^2x_1}{dt^2} \\ \frac{d^2x_2}{dt^2} \\ \end{pmatrix} = - \begin{pmatrix} k/m & -k/m \\ -k/m & k/m \\ \end{pmatrix} \begin{pmatrix}x_1 \\ x_2\end{pmatrix} $$ To decouple the equations, we find the eigenvalues and eigenvectors. ``` import numpy as np a = np.array([[1, -1], [-1, 1]]) freq, vectors = np.linalg.eig(a) vectors = vectors.transpose() ``` The frequencies of the two modes of vibration are (in multiples of $\sqrt{k/m}$) ``` freq ``` The first mode is a vibrational mode were the masses vibrate against each other (moving in opposite directions). This can be seen from the eigenvector. ``` vectors[0] ``` The second mode is a translation mode with zero frequency—both masses move in the same direction. ``` vectors[1] ``` We can interactively illustrate the vibrational mode. 
``` import matplotlib.pyplot as plt %matplotlib inline from IPython.html import widgets def make_plot(t): fig, ax = plt.subplots() x,y = np.array([-1,1]), np.array([0,0]) plt.plot(x, y, 'k.') plt.plot(x + 0.3 * vectors[0] * t, y, 'bo') plt.xlim(-1.5,1.5) plt.ylim(-1.5,1.5) widgets.interact(make_plot, t=(-1,1,0.1)) ``` # Finding the dynamical matrix with numerical derivatives We start from a function $V(x)$. If we want to calculate a derivative, we just use the difference formula but don't take the delta too small. Using $\delta x = 10^{-6}$ is safe. $$ F = -\frac{dV(x)}{dx} \approx \frac{V(x+\Delta x) - V(x-\Delta x)}{2\Delta x} $$ Note that it is more accurate to do this symmetric difference formula than it would be to use the usual forward derivative from calculus class. It's easy to see this formula is just calculating the slope of the function using points near $x$. ``` def V(x): return 0.5 * x**2 deltax = 1e-6 def F_approx(x): return ( V(x + deltax) - V(x - deltax) ) / (2 * deltax) [(x, F_approx(x)) for x in np.linspace(-2,2,9)] ``` Next, we can find the second derivative by using the difference formula twice. We find the nice expression, $$ \frac{d^2V}{dx^2} \approx \frac{V(x+\Delta x) - 2V(x) + V(x-\Delta x)}{(\Delta x)^2}. $$ This formula has the nice interpretation of comparing the value of $V(x)$ to the average of points on either side. If it is equal to the average, the line is straight and the second derivative is zero. If average of the outer values is larger than $V(x)$, then the ends curve upward, and the second derivative is positive. Likewise, if the average of the outer values is less than $V(x)$, then the ends curve downward, and the second derivative is negative. ``` def dV2dx2_approx(x): return ( V(x + deltax) - 2 * V(x) + V(x - deltax) ) / deltax**2 [(x, dV2dx2_approx(x)) for x in np.linspace(-2,2,9)] ``` Now we can use these derivative formulas to calcuate the dynamical matrix for the two masses on one spring. 
Well use $k=1$ and $m=1$ for simplicity. ``` def V2(x1, x2): return 0.5 * (x1 - x2)**2 x1, x2 = -1, 1 mat = np.array( [[(V2(x1+deltax, x2) - 2 * V2(x1,x2) + V2(x1-deltax, x2)) / deltax**2 , (V2(x1+deltax, x2+deltax) - V2(x1-deltax, x2+deltax) - V2(x1+deltax, x2-deltax) + V2(x1+deltax, x2+deltax)) / (2*deltax)**2], [(V2(x1+deltax, x2+deltax) - V2(x1-deltax, x2+deltax) - V2(x1+deltax, x2-deltax) + V2(x1+deltax, x2+deltax)) / (2*deltax)**2, (V2(x1, x2+deltax) - 2 * V2(x1,x2) + V2(x1, x2-deltax)) / deltax**2 ]] ) mat freq, vectors = np.linalg.eig(mat) vectors = vectors.transpose() for f,v in zip(freq, vectors): print("freqency", f, ", eigenvector", v) ``` For practical calcuations, we have to automate this matrix construction for an arbitrary potential.
github_jupyter
# Module 5 Lab - Data ``` % matplotlib inline ``` The special command above will make all the `matplotlib` images appear in the notebook. ## Directions **Failure to follow the directions will result in a "0"** The due dates for each are indicated in the Syllabus and the course calendar. If anything is unclear, please email EN605.448@gmail.com the official email for the course or ask questions in the Lab discussion area on Blackboard. The Labs also present technical material that augments the lectures and "book". You should read through the entire lab at the start of each module. ### General Instructions 1. You will be submitting your assignment to Blackboard. If there are no accompanying files, you should submit *only* your notebook and it should be named using *only* your JHED id: fsmith79.ipynb for example if your JHED id were "fsmith79". If the assignment requires additional files, you should name the *folder/directory* your JHED id and put all items in that folder/directory, ZIP it up (only ZIP...no other compression), and submit it to Blackboard. * do **not** use absolute paths in your notebooks. All resources should appear in the same directory as the rest of your assignments. * the directory **must** be named your JHED id and **only** your JHED id. This assignment has accompanying files. You should include in your zip file: 1. [jhed_id].ipynb 2. hurricanes.py - your preprocessing file. 3. hurricanes.html - the local copy of the Wikipedia page. 4. hurricanes.db - the SQLite database you create. 2. Data Science is as much about what you write (communicating) as the code you execute (researching). In many places, you will be required to execute code and discuss both the purpose and the result. Additionally, Data Science is about reproducibility and transparency. This includes good communication with your team and possibly with yourself. Therefore, you must show **all** work. 3. Avail yourself of the Markdown/Codecell nature of the notebook. 
If you don't know about Markdown, look it up. Your notebooks should not look like ransom notes. Don't make everything bold. Clearly indicate what question you are answering. 4. Submit a cleanly executed notebook. The first code cell should say `In [1]` and each successive code cell should increase by 1 throughout the notebook. ## Individual Submission ## Getting and Storing Data This Lab is about acquiring, cleaning and storing data as well as doing a little bit of analysis. ### Basic Outline 1. Using `curl` or `wget` obtain a local copy of the following web page: Atlantic Hurricane Season ( https://en.wikipedia.org/wiki/Atlantic_hurricane_season ). **include this in your submission as `hurricanes.html`**. This is important. In Spring 2016, the page was edited during the module and different people got different answers at different times. You only need to be correct with respect to your `hurricanes.html` file. 2. Using Beautiful Soup 4 and Python, parse the HTML file into a useable dataset. **your parsing code should be in a file `hurricanes.py` and included in your submission**. 3. Write this data set to a SQLite3 database called `hurricanes.db` **include this in your submission**. 4. Run the requested queries against the data set. **see below** The results should be **nicely formatted**. Although Wikipedia has an API, I do not what you to use it for this assignment. ### Details The data is contained in many separate HTML tables. The challenge is to write a general table parsing function and then locate each table and apply the function to it. You only need to get the data from the tables starting at 1850s. Not all years have the same data. You only need to save the following columns. The name is parentheses is the name the column should have in the database table. 
- Year (`year`) - Number of tropical storms (`tropical_storms`) - Number of hurricanes (`hurricanes`) - Number of Major Hurricanes (`major_hurricanes`) - Deaths (`deaths`) - Damage (`damage`) - Notes (`notes`) Note that "Damage" doesn't start until 1900s and "Notes" was added in 1880s. "Strongest Storm" should be added to the Notes column (even in years that didn't have Notes) as should "Retired Storms". The name of the database table should be atlantic_hurricanes. The name of the table file (SQLite3 uses a file) should be hurricanes.db (who knows...you might need to add Pacific storms someday). There are a number of parsing problems which will most likely require regular expressions. First, the Deaths column has numbers that include commas and entries that are not numbers (Unknown and None). How should you code Unknown and None so that answers are not misleading but queries are still fairly straightforward to write? Similarly, Damages has numbers with commas, currency signs and different amount words (millions, billions). How will you normalize all of these so that a query can compare them? You may need regular expressions. Additionally, the way that Tropical Storms are accounted for seems to change mysteriously. Looking over the data, it's not immediately apparent when the interpretation should change. 1850s, 1860s definitely but 1870s? Not sure. It could just be a coincidence that there were never more hurricanes than tropical storms which seems to be the norm but see, for example, 1975. Welcome to Data Science! You should put your parsing code in `hurricanes.py`. None of it should be in this file. Imagine this file is going to be the report you give to your boss. ## Documentation Any time you run into a problem where you have to decide what to do--how to solve the problem or treat the values--document it here. ## Hurricanes.db What is the *function* of `hurricanes.db` in this assignment? 
### Queries When you are done, you must write and execute the following queries against your database. Those queries should be run from this notebook. Find the documentation for using SQLite3 from Python (the library is already included). You should never output raw Python data structures instead, you need to output well-formatted tables. You may need to look around for a library to help you or write your own formatting code. `Pandas` is one possibility. However, I want you to use raw SQL for your queries so if you use `Pandas` use it only for the formatting of query results (don't load the data into Pandas and use Pandas/Python to query the data). **Write the most general query possible. Never assume that you are going to get only one result back (i.e, don't assume there won't be ties).** The query result should be in a nicely formatted table; don't show raw data structures to your boss or manager. Additionally, don't just run the query. Having gotten an answer, add a textual description of the result to the question. Data Science is not about running code; code is a means to an end. The end is the communication of results. We never just run code in this class. ``` ""# imports import pandas as pd import sqlite3 conn = sqlite3.connect("hurricanes.db") df = pd.read_sql_query("select * from Hurricanes;", conn) ##change strings to numbers values = {'tropical_storms': 0, 'hurricanes': 0, 'C': 2, 'major_hurricanes': 0} df.fillna(value=values) df[['tropical_storms','hurricanes','major_hurricanes']] = df[['tropical_storms','hurricanes','major_hurricanes']].apply(pd.to_numeric) values = {'tropical_storms': 0, 'hurricanes': 0, 'C': 2, 'major_hurricanes': 0} df.fillna(value=values) df.deaths = df.deaths.str.replace('+','') df.deaths = df.deaths.str.replace(',','') df.deaths = df.deaths.str.replace('~','') df.damage = df.damage.str.replace(">=","") df.damage = df.damage.str.replace(">","") df.damage = df.damage.str.replace("$","") ##ignore "Not Know".... 
df.deaths = df[['deaths']].apply(pd.to_numeric, errors = "coerce") ##remove invalid data df = df.loc[df.Year!=""] df ``` 1\. For the 1920s, list the years by number of tropical storms, then hurricanes. ``` df.loc[df.Year.str.startswith("192")][["Year","tropical_storms","hurricanes"]] ``` 2\. What year had the most tropical storms? ``` df.sort_values('tropical_storms', ascending=0).iloc[0,] ``` 3\. What year had the most major hurricanes? ``` df.sort_values('major_hurricanes', ascending=0).iloc[0,] ``` 4\. What year had the most deaths? ``` df.sort_values('deaths', ascending=0).iloc[0,] ``` 5\. What year had the most damage (not inflation adjusted)? ``` #I could not find a good way to transfer billions and millions to numbers #My method seems cubersome, but it works df.loc[df.damage.str.contains( "282.16")] max_damage = 0 max_damage_1 = "" for damage in df.damage: damage_1 = damage.replace(',','') damage_1 = damage_1.replace('+','') if " billion" in damage_1: damage_2 = float(damage_1.replace(" billion","")) if damage_2>max_damage: max_damage = damage_2 max_damage_1 = damage print("converted value:", max_damage, ", raw value:",max_damage_1) df.loc[df.damage == max_damage_1] ``` 6\. What year had the highest proportion of tropical storms turn into major hurricanes? ``` df["majorRatio"] = df.major_hurricanes/df.hurricanes df.loc[df.majorRatio>0].sort_values('majorRatio', ascending=0).iloc[0,] ``` ## Things to think about 1. What is the granularity of this data? (Are the rows the most specific observation possible?) 2. What if this data were contained in worksheets in an Excel file. Find a Python library or libraries that work with Excel spreadsheets. 3. Each section links to details about each hurrican season. Review each Season's page and discuss strategies for extracting the information for every hurricane. 4. Hurricane tracking maps were recently added. How would you get and store those images? 5. Damages are not inflation adjusted. 
How would you go about *enriching* your data with inflation adjusted dollars? Where should this additional data be stored and how would it be used? *notes here*
github_jupyter
A **Deep Q Network** implementation in tensorflow with target network & random experience replay. The code is tested with Gym's discrete action space environment, CartPole-v0 on Colab. --- ## Notations: Model network = $Q_{\theta}$ Model parameter = $\theta$ Model network Q value = $Q_{\theta}$ (s, a) Target network = $Q_{\phi}$ Target parameter = $\phi$ Target network Q value = $Q_{\phi}$ ($s^{'}$, $a^{'}$) --- ## Equations: TD target = r (s, a) $+$ $\gamma$ $max_{a}$ $Q_{\phi}$ $s^{'}$, $a^{'}$) TD error = (TD target) $-$ (Model network Q value) = [r (s, a) $+$ $\gamma$ $max_{a^{'}}$ $Q_{\phi}$ ($s^{'}$, $a^{'}$)] $-$ $Q_{\theta}$ (s, a) --- ## Key implementation details: Update target parameter $\phi$ with model parameter $\theta$. Copy $\theta$ to $\phi$ with *either* soft or hard parameter update. Hard parameter update: ``` with tf.variable_scope('hard_replace'): self.target_replace_hard = [t.assign(m) for t, m in zip(self.target_net_params, self.model_net_params)] ``` ``` # hard params replacement if self.learn_step % self.tau_step == 0: self.sess.run(self.target_replace_hard) self.learn_step += 1 ``` Soft parameter update: polyak $\cdot$ $\theta$ + (1 $-$ polyak) $\cdot$ $\phi$ ``` with tf.variable_scope('soft_replace'): self.target_replace_soft = [t.assign(self.polyak * m + (1 - self.polyak) * t) for t, m in zip(self.target_net_params, self.model_net_params)] ``` Stop TD target from contributing to gradient computation: ``` # exclude td_target in gradient computation td_target = tf.stop_gradient(td_target) ``` --- ## References: [Human-level control through deep reinforcement learning (Mnih et al., 2015)](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf) --- <br> ``` import tensorflow as tf import gym import numpy as np from matplotlib import pyplot as plt # random sampling for learning from experience replay class Exp(): def __init__(self, obs_size, max_size): self.obs_size = obs_size self.num_obs = 0 self.max_size = max_size 
self.mem_full = False # memory structure that stores samples from observations self.mem = {'s' : np.zeros(self.max_size * self.obs_size, dtype=np.float32).reshape(self.max_size,self.obs_size), 'a' : np.zeros(self.max_size * 1, dtype=np.int32).reshape(self.max_size,1), 'r' : np.zeros(self.max_size * 1).reshape(self.max_size,1), 'done' : np.zeros(self.max_size * 1, dtype=np.int32).reshape(self.max_size,1)} # stores sample obervation at each time step in experience memory def store(self, s, a, r, done): i = self.num_obs % self.max_size self.mem['s'][i,:] = s self.mem['a'][i,:] = a self.mem['r'][i,:] = r self.mem['done'][i,:] = done self.num_obs += 1 if self.num_obs == self.max_size: self.num_obs = 0 # reset number of observation self.mem_full = True # returns a minibatch of experience def minibatch(self, minibatch_size): if self.mem_full == False: max_i = min(self.num_obs, self.max_size) - 1 else: max_i = self.max_size - 1 # randomly sample a minibatch of indexes sampled_i = np.random.randint(max_i, size=minibatch_size) s = self.mem['s'][sampled_i,:].reshape(minibatch_size, self.obs_size) a = self.mem['a'][sampled_i].reshape(minibatch_size) r = self.mem['r'][sampled_i].reshape((minibatch_size,1)) s_next = self.mem['s'][sampled_i + 1,:].reshape(minibatch_size, self.obs_size) done = self.mem['done'][sampled_i].reshape((minibatch_size,1)) return (s, a, r, s_next, done) # Evaluates behavior policy while improving target policy class DQN_agent(): def __init__(self, num_actions, obs_size, nhidden, epoch, epsilon, gamma, learning_rate, replace, polyak, tau_step, mem_size, minibatch_size): super(DQN_agent, self).__init__() self.actions = range(num_actions) self.num_actions = num_actions self.obs_size = obs_size # number of features self.nhidden = nhidden # hidden nodes self.epoch = epoch # for epsilon decay & to decide when to start training self.epsilon = epsilon # for eploration self.gamma = gamma # discount factor self.learning_rate = learning_rate # learning rate alpha # 
for params replacement self.replace = replace # type of replacement self.polyak = polyak # for soft replacement self.tau_step = tau_step # for hard replacement self.learn_step = 0 # steps after learning # for Experience replay self.mem = Exp(self.obs_size, mem_size) # memory that holds experiences self.minibatch_size = minibatch_size self.step = 0 # each step in a episode # for tensorflow ops self.built_graph() self.sess = tf.Session() self.sess.run(tf.global_variables_initializer()) self.sess.run(self.target_replace_hard) self.cum_loss_per_episode = 0 # for charting display # decay epsilon after each epoch def epsilon_decay(self): if self.step % self.epoch == 0: self.epsilon = max(.01, self.epsilon * .95) # epsilon-greedy behaviour policy for action selection def act(self, state): if np.random.random() < self.epsilon: i = np.random.randint(0,len(self.actions)) else: # get Q(s,a) from model network Q_val = self.sess.run(self.model_Q_val, feed_dict={self.s: np.reshape(state, (1,state.shape[0]))}) # get index of largest Q(s,a) i = np.argmax(Q_val) action = self.actions[i] self.step += 1 self.epsilon_decay() return action def learn(self, s, a, r, done): # stores observation in memory as experience at each time step self.mem.store(s, a, r, done) # starts training a minibatch from experience after 1st epoch if self.step > self.epoch: self.replay() # start training with experience replay def td_target(self, r, done, target_Q_val): # select max Q values from target network (greedy policy) max_target_Q_val = tf.reduce_max(target_Q_val, axis=1, keepdims=True) # if state = done, td_target = r td_target = (1.0 - tf.cast(done, tf.float32)) * tf.math.multiply(self.gamma, max_target_Q_val) + r # exclude td_target in gradient computation td_target = tf.stop_gradient(td_target) return td_target # select Q(s,a) from actions using e-greedy as behaviour policy from model network def predicted_Q_val(self, a, model_Q_val): # create 1D tensor of length = number of rows in a arr = 
tf.range(tf.shape(a)[0], dtype=tf.int32) # stack by column to create indices for Q(s,a) selections based on a indices = tf.stack([arr, a], axis=1) # select Q(s,a) using indice from model_Q_val Q_val = tf.gather_nd(model_Q_val, indices) Q_val = tf.reshape(Q_val, (self.minibatch_size, 1)) return Q_val # contruct neural network def built_net(self, var_scope, w_init, b_init, features, num_hidden, num_output): with tf.variable_scope(var_scope): feature_layer = tf.contrib.layers.fully_connected(features, num_hidden, activation_fn = tf.nn.relu, weights_initializer = w_init, biases_initializer = b_init) Q_val = tf.contrib.layers.fully_connected(feature_layer, num_output, activation_fn = None, weights_initializer = w_init, biases_initializer = b_init) return Q_val # contruct tensorflow graph def built_graph(self): tf.reset_default_graph() self.s = tf.placeholder(tf.float32, [None,self.obs_size], name='s') self.a = tf.placeholder(tf.int32, [None,], name='a') self.r = tf.placeholder(tf.float32, [None,1], name='r') self.s_next = tf.placeholder(tf.float32, [None,self.obs_size], name='s_next') self.done = tf.placeholder(tf.int32, [None,1], name='done') # weight, bias initialization w_init = tf.initializers.lecun_uniform() b_init = tf.initializers.he_uniform(1e-4) self.model_Q_val = self.built_net('model_net', w_init, b_init, self.s, self.nhidden, self.num_actions) self.target_Q_val = self.built_net('target_net', w_init, b_init, self.s_next, self.nhidden, self.num_actions) with tf.variable_scope('td_target'): td_target = self.td_target(self.r, self.done, self.target_Q_val) with tf.variable_scope('predicted_Q_val'): predicted_Q_val = self.predicted_Q_val(self.a, self.model_Q_val) with tf.variable_scope('loss'): self.loss = tf.losses.huber_loss(td_target, predicted_Q_val) with tf.variable_scope('optimizer'): self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss) # get network params with tf.variable_scope('params'): self.target_net_params = 
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net') self.model_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model_net') # replace target net params with model net params with tf.variable_scope('hard_replace'): self.target_replace_hard = [t.assign(m) for t, m in zip(self.target_net_params, self.model_net_params)] with tf.variable_scope('soft_replace'): self.target_replace_soft = [t.assign(self.polyak * m + (1 - self.polyak) * t) for t, m in zip(self.target_net_params, self.model_net_params)] # decide soft or hard params replacement def replace_params(self): if self.replace == 'soft': # soft params replacement self.sess.run(self.target_replace_soft) else: # hard params replacement if self.learn_step % self.tau_step == 0: self.sess.run(self.target_replace_hard) self.learn_step += 1 def replay(self): # select minibatch of experiences from memory for training (s, a, r, s_next, done) = self.mem.minibatch(self.minibatch_size) # training _, loss = self.sess.run([self.optimizer, self.loss], feed_dict = {self.s: s, self.a: a, self.r: r, self.s_next: s_next, self.done: done}) self.cum_loss_per_episode += loss self.replace_params() # compute stats def stats(r_per_episode, R, cum_R, cum_R_episodes, cum_loss_per_episode, cum_loss, cum_loss_episodes): r_per_episode = np.append(r_per_episode, R) # store reward per episode cum_R_episodes += R cum_R = np.append(cum_R, cum_R_episodes) # store cumulative reward of all episodes cum_loss_episodes += cum_loss_per_episode cum_loss = np.append(cum_loss, cum_loss_episodes) # store cumulative loss of all episodes return (r_per_episode, cum_R_episodes, cum_R, cum_loss_episodes, cum_loss) # plot performance def plot_charts(values, y_label): fig = plt.figure(figsize=(10,5)) plt.title("DQN performance") plt.xlabel("Episode") plt.ylabel(y_label) plt.plot(values) plt.show(fig) def display(r_per_episode, cum_R, cum_loss): plot_charts(r_per_episode, "Reward") plot_charts(cum_R, "cumulative_reward") 
plot_charts(cum_loss, "cumulative_loss") avg_r = np.sum(r_per_episode) / max_episodes print("avg_r", avg_r) avg_loss = np.sum(cum_loss) / max_episodes print("avg_loss", avg_loss) def run_episodes(env, agent, max_episodes): r_per_episode = np.array([0]) cum_R = np.array([0]) cum_loss = np.array([0]) cum_R_episodes = 0 cum_loss_episodes = 0 # repeat each episode for episode_number in range(max_episodes): s = env.reset() # reset new episode done = False R = 0 # repeat each step while not done: # select action using behaviour policy(epsilon-greedy) from model network a = agent.act(s) # take action in environment next_s, r, done, _ = env.step(a) # agent learns agent.learn(s, a, r, done) s = next_s R += r (r_per_episode, cum_R_episodes, cum_R, cum_loss_episodes, cum_loss) = stats(r_per_episode, R, cum_R, cum_R_episodes, agent.cum_loss_per_episode, cum_loss, cum_loss_episodes) display(r_per_episode, cum_R, cum_loss) env.close() env = gym.make('CartPole-v0') # openai gym environment #env = gym.make('Pong-v0') # openai gym environment max_episodes = 500 epoch = 100 num_actions = env.action_space.n # number of possible actions obs_size = env.observation_space.shape[0] # dimension of state space nhidden = 128 # number of hidden nodes epsilon = .9 gamma = .9 learning_rate = .3 replace = 'soft' # params replacement type, 'soft' for soft replacement or empty string '' for hard replacement polyak = .001 tau_step = 300 mem_size = 30000 minibatch_size = 64 %matplotlib inline agent = DQN_agent(num_actions, obs_size, nhidden, epoch, epsilon, gamma, learning_rate, replace, polyak, tau_step, mem_size, minibatch_size) run_episodes(env, agent, max_episodes) ```
github_jupyter
# Using PS GPIO with PYNQ

## Goal

The aim of this notebook is to show how to use the Zynq PS GPIO from PYNQ. The PS GPIO are simple wires from the PS, and don't need a controller in the programmable logic.

Up to 96 input, output and tri-state PS GPIO are available via the EMIO in the Zynq Ultrascale+. They can be used to connect simple control and data signals to IP or external Inputs/Outputs in the PL.

## Hardware

This example uses a bitstream that connects PS GPIO to the PMod on the KV260.

![PS GPIO Design](./ps_gpio/ps_gpio_kv260_bd.png "PS GPIO Design")

### External Peripherals

An LED, a Slider switch and a Buzzer are connected via the Pmod connector and a Grove adapter. These will be used to demonstrate the PS GPIO are working.

![](./images/external_peripherals.jpg)

### Download the tutorial overlay

The `ps_gpio_kv260.bit` and `ps_gpio_kv260.hwh` files are in the `ps_gpio` directory local to this folder. The bitstream can be downloaded using the PYNQ `Overlay` class.

```
from pynq import Overlay

# Download the bitstream to the PL; PYNQ parses the matching .hwh file
# found next to it to describe the design.
ps_gpio_design = Overlay("./ps_gpio/ps_gpio_kv260.bit")
```

## PYNQ GPIO class

The PYNQ GPIO class will be used to access the PS GPIO.
``` from pynq import GPIO ``` ### GPIO help ### Create Python GPIO objects for the led, slider and buzzer and set the direction: ``` led = GPIO(GPIO.get_gpio_pin(6), 'out') buzzer = GPIO(GPIO.get_gpio_pin(0), 'out') slider = GPIO(GPIO.get_gpio_pin(1), 'in') slider_led = GPIO(GPIO.get_gpio_pin(5), 'out') # = GPIO(GPIO.get_gpio_pin(2), 'out') # = GPIO(GPIO.get_gpio_pin(3), 'out') # = GPIO(GPIO.get_gpio_pin(4), 'out') # = GPIO(GPIO.get_gpio_pin(7), 'out') ``` ### led.write() help ## Test LED Turn on the LED Turn off the LED ## Blinky ``` from time import sleep DELAY = 0.1 for i in range(20): led.write(0) sleep(DELAY) led.write(1) sleep(DELAY) ``` ### Slider Read from Slider ``` for i in range(50): sliver_value = slider.read() slider_led.write(sliver_value) led.write(sliver_value) sleep(DELAY) ``` ### Buzzer ``` buzzer.write(1) buzzer.write(0) def play_sound(frequency, duration=100): period = 1/frequency timeHigh = period/2 for i in range(0, int(duration)): #, int(timeHigh*1000)): buzzer.write(1) sleep(timeHigh) buzzer.write(0) sleep(timeHigh) ``` Alarm clock ``` for i in range(10): play_sound(5000) sleep(.1) ``` ### Use an IPython Widget to control the buzzer The following example uses an IPython *Integer Slider* to call the `play_sound()` method defined above ``` from ipywidgets import interact import ipywidgets as widgets interact(play_sound, frequency=widgets.IntSlider(min=500, max=10000, step=500, value=500), duration =100); ```
github_jupyter
### Basics of Tensorflow

```
import numpy as np
from tensorflow.python.layers import base
import tensorflow as tf
import tensorflow.contrib.slim as slim

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    """Clear the default TF graph and reseed both the TF and NumPy RNGs."""
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "tensorflow"

def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as images/tensorflow/<fig_id>.png."""
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
```

### Model summary for tensorflow

```
# model summary for tensorflow
x = np.zeros((1,4,4,3))
x_tf = tf.convert_to_tensor(x, np.float32)
z_tf = tf.layers.conv2d(x_tf, filters=32, kernel_size=(3,3))

def model_summary():
    """Print a per-variable breakdown of the trainable variables via tf-slim."""
    model_vars = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(model_vars, print_info=True)

model_summary()
```

### Inspired from https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/basic_operations.ipynb

```
# Basic constant operations
# The value returned by the constructor represents the output
# of the Constant op.
a = tf.constant(2)
b = tf.constant(3)

# Launch the default graph.
with tf.Session() as sess:
    print ("a: %i" % sess.run(a), "b: %i" % sess.run(b))
    print ("Addition with constants: %i" % sess.run(a+b))
    print ("Multiplication with constants: %i" % sess.run(a*b))
```

https://github.com/vahidk/EffectiveTensorflow

### Numpy version of below

import numpy as np
x = np.random.normal(size=[10, 10])
y = np.random.normal(size=[10, 10])
z = np.dot(x, y)
print(z)

```
import tensorflow as tf
x = tf.random_normal([10, 10])
y = tf.random_normal([10, 10])
z = tf.matmul(x, y)

# Running the graph materializes z as a NumPy array.
sess = tf.Session()
z_val = sess.run(z)
print(z_val)
tf.reset_default_graph()
```

##### Note the below doesn't explain the basics. hence skip the below for a while

To understand how powerful symbolic computation can be let's have a look at another example. Assume that we have samples from a curve (say f(x) = 5x^2 + 3) and we want to estimate f(x) based on these samples. We define a parametric function g(x, w) = w0 x^2 + w1 x + w2, which is a function of the input x and latent parameters w, our goal is then to find the latent parameters such that g(x, w) ≈ f(x). This can be done by minimizing the following loss function: L(w) = ∑ (f(x) - g(x, w))^2. Although there's a closed form solution for this simple problem, we opt to use a more general approach that can be applied to any arbitrary differentiable function, and that is using stochastic gradient descent. We simply compute the average gradient of L(w) with respect to w over a set of sample points and move in the opposite direction. Here's how it can be done in TensorFlow:

```
import numpy as np
import tensorflow as tf
tf.reset_default_graph()

# Placeholders are used to feed values from python to TensorFlow ops. We define
# two placeholders, one for input feature x, and one for output y.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)

# Assuming we know that the desired function is a polynomial of 2nd degree, we
# allocate a vector of size 3 to hold the coefficients. The variable will be
# automatically initialized with random noise.
w = tf.get_variable("w", shape=[3, 1])

# We define yhat to be our estimate of y.
f = tf.stack([tf.square(x), x, tf.ones_like(x)], 1)
yhat = tf.squeeze(tf.matmul(f, w), 1)

# The loss is defined to be the l2 distance between our estimate of y and its
# true value. We also added a shrinkage term, to ensure the resulting weights
# would be small.
loss = tf.nn.l2_loss(yhat - y) + 0.1 * tf.nn.l2_loss(w)

# We use the Adam optimizer with learning rate set to 0.1 to minimize the loss.
train_op = tf.train.AdamOptimizer(0.1).minimize(loss)

def generate_data():
    """Sample 100 points from the target curve f(x) = 5x^2 + 3."""
    x_val = np.random.uniform(-10.0, 10.0, size=100)
    y_val = 5 * np.square(x_val) + 3
    return x_val, y_val

sess = tf.Session()
# Since we are using variables we first need to initialize them.
sess.run(tf.global_variables_initializer())

for _ in range(1000):
    x_val, y_val = generate_data()
    _, loss_val = sess.run([train_op, loss], {x: x_val, y: y_val})
    #print(loss_val)

print(sess.run([w]))
```

# Normal Equation (θ = XT · X)–1 · XT · y for house market prediction

```
import numpy as np
from sklearn.datasets import fetch_california_housing

tf.reset_default_graph()

housing = fetch_california_housing()
m, n = housing.data.shape
# Prepend a bias column of ones so the first theta entry acts as the intercept.
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)

with tf.Session() as sess:
    theta_value = theta.eval()

print(theta_value)

# Pure numpy version
X = housing_data_plus_bias
y = housing.target.reshape(-1, 1)
theta_numpy = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
print(theta_numpy)
```

## Using Batch Gradient Descent

```
from sklearn.preprocessing import StandardScaler

# Standardize the features before gradient descent.
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

print(scaled_housing_data_plus_bias.mean(axis=0))
print(scaled_housing_data_plus_bias.mean(axis=1))
print(scaled_housing_data_plus_bias.mean())
print(scaled_housing_data_plus_bias.shape)

#Gradient Descent
reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
# Hand-derived gradient of the MSE with respect to theta.
gradients = 2/m * tf.matmul(tf.transpose(X), error)
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print(best_theta)

# Using autodiff
reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
# Same update as above, but the gradient now comes from TF autodiff.
gradients = tf.gradients(mse, [theta])[0]
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print("Best theta:")
print(best_theta)
```

## Linear Regression Example

https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/linear_regression.ipynb

```
rng = np.random

# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50

# Training Data
train_X = np.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
                      7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = np.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                      2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]

# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Set model weights and bias random
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model. y = XW + b
pred = tf.add(tf.multiply(X, W), b)

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        # One SGD step per individual sample (effectively batch size 1).
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        #Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                "W = ", sess.run(W), "b = ", sess.run(b))

    print ("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print ("Training cost=", training_cost, "W = ", sess.run(W), "b = ", sess.run(b), '\n')

    #Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

# Logistic Regression Example
# Import MINST data
#from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print ("W = ", sess.run(W), "b = ", sess.run(b))
    print ("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy for 3000 examples
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print ("Accuracy:", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]}))
```

## Simple neural network

A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) implementation with TensorFlow. This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/).
``` def simple_nn (layer1_n,layer2_n) : print("Starting simple Neural network " + str(layer1_n) +" : " + str(layer2_n) ) reset_graph() # Parameters learning_rate = 0.1 num_steps = 500 batch_size = 128 display_step = 100 # Network Parameters n_hidden_1 = layer1_n # 1st layer number of neurons n_hidden_2 = layer2_n # 2nd layer number of neurons num_input = 784 # MNIST data input (img shape: 28*28) num_classes = 10 # MNIST total classes (0-9 digits) # tf Graph input X = tf.placeholder("float", [None, num_input]) Y = tf.placeholder("float", [None, num_classes]) # Store layers weight & bias weights = { 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])), 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes])) } biases = { 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 'out': tf.Variable(tf.random_normal([num_classes])) } # Create model def neural_net(x): # Hidden fully connected layer with layer 1 neurons layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) # Hidden fully connected layer with layer 2 neurons layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) # Output fully connected layer with a neuron for each class out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] return out_layer # Construct model logits = neural_net(X) prediction = tf.nn.softmax(logits) # Define loss and optimizer loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2( logits=logits, labels=Y)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op) # Evaluate model correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initialize the variables (i.e. 
assign their default value) init = tf.global_variables_initializer() # display all the parameters vars = 0 for v in tf.all_variables(): vars += np.prod(v.get_shape().as_list()) print(vars) # Start training with tf.Session() as sess: # Run the initializer sess.run(init) for step in range(1, num_steps+1): batch_x, batch_y = mnist.train.next_batch(batch_size) # Run optimization op (backprop) sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) if step % display_step == 0 or step == 1: # Calculate batch loss and accuracy loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y}) print("Step " + str(step) + ", Minibatch Loss= " + \ "{:.4f}".format(loss) + ", Training Accuracy= " + \ "{:.3f}".format(acc)) print("Optimization Finished!") # Calculate accuracy for MNIST test images print("Testing Accuracy:", \ sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels})) simple_nn(2048,2048) simple_nn(1024,1024) simple_nn(512,512) simple_nn(256,256) simple_nn(128,128) simple_nn(24,24) # 256 : 256 > 807968.0 # 512 : 512 > 2009120.0 simple_nn(512,128) simple_nn(12,12) ``` ## Simple Neural Network (tf.layers/estimator api) A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) implementation with TensorFlow. This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/). This example is using TensorFlow layers, see 'neural_network_raw' example for a raw implementation with variables.
github_jupyter
# Example 1d: Spin-Bath model, fitting of spectrum and correlation functions ### Introduction The HEOM method solves the dynamics and steady state of a system and its environment, the latter of which is encoded in a set of auxiliary density matrices. In this example we show the evolution of a single two-level system in contact with a single Bosonic environment. The properties of the system are encoded in Hamiltonian, and a coupling operator which describes how it is coupled to the environment. The Bosonic environment is implicitly assumed to obey a particular Hamiltonian (see paper), the parameters of which are encoded in the spectral density, and subsequently the free-bath correlation functions. In the example below we show how model an Ohmic environment with exponential cut-off in two ways. First we fit the spectrum with a set of underdamped brownian oscillator functions. Second, we evaluate the correlation functions, and fit those with a certain choice of exponential functions. ``` %pylab inline from qutip import * %load_ext autoreload %autoreload 2 from bofin.heom import BosonicHEOMSolver def cot(x): return 1./np.tan(x) # Defining the system Hamiltonian eps = .0 # Energy of the 2-level system. Del = .2 # Tunnelling term Hsys = 0.5 * eps * sigmaz() + 0.5 * Del* sigmax() # Initial state of the system. 
rho0 = basis(2,0) * basis(2,0).dag() #Import mpmath functions for evaluation of correlation functions from mpmath import mp from mpmath import zeta from mpmath import gamma mp.dps = 15; mp.pretty = True Q = sigmaz() alpha = 3.25 T = 0.5 wc = 1 beta = 1/T s = 1 tlist = np.linspace(0, 10, 5000) tlist3 = linspace(0,15,50000) #note: the arguments to zeta should be in as high precision as possible, might need some adjustment # see http://mpmath.org/doc/current/basics.html#providing-correct-input ct = [complex((1/pi)*alpha * wc**(1-s) * beta**(-(s+1)) * (zeta(s+1,(1+beta*wc-1.0j*wc*t)/(beta*wc)) + zeta(s+1,(1+1.0j*wc*t)/(beta*wc)))) for t in tlist] #also check long timescales ctlong = [complex((1/pi)*alpha * wc**(1-s) * beta**(-(s+1)) * (zeta(s+1,(1+beta*wc-1.0j*wc*t)/(beta*wc)) + zeta(s+1,(1+1.0j*wc*t)/(beta*wc)))) for t in tlist3] corrRana = real(ctlong) corrIana = imag(ctlong) pref = 1. #lets try fitting the spectrurum #use underdamped case with meier tannor form wlist = np.linspace(0, 25, 20000) from scipy.optimize import curve_fit #seperate functions for plotting later: def fit_func_nocost(x, a, b, c, N): tot = 0 for i in range(N): tot+= 2 * a[i] * b[i] * (x)/(((x+c[i])**2 + (b[i]**2))*((x-c[i])**2 + (b[i]**2))) cost = 0. return tot def wrapper_fit_func_nocost(x, N, *args): a, b, c = list(args[0][:N]), list(args[0][N:2*N]),list(args[0][2*N:3*N]) # print("debug") return fit_func_nocost(x, a, b, c, N) # function that evaluates values with fitted params at # given inputs def checker(tlist, vals, N): y = [] for i in tlist: # print(i) y.append(wrapper_fit_func_nocost(i, N, vals)) return y ####### #Real part def wrapper_fit_func(x, N, *args): a, b, c = list(args[0][:N]), list(args[0][N:2*N]),list(args[0][2*N:3*N]) # print("debug") return fit_func(x, a, b, c, N) def fit_func(x, a, b, c, N): tot = 0 for i in range(N): tot+= 2 * a[i] * b[i] * (x)/(((x+c[i])**2 + (b[i]**2))*((x-c[i])**2 + (b[i]**2))) cost = 0. 
#for i in range(N): #print(i) # cost += ((corrRana[0]-a[i]*np.cos(d[i]))) tot+=0.0*cost return tot def fitterR(ans, tlist, k): # the actual computing of fit popt = [] pcov = [] # tries to fit for k exponents for i in range(k): #params_0 = [0]*(2*(i+1)) params_0 = [0.]*(3*(i+1)) upper_a = 100*abs(max(ans, key = abs)) #sets initial guess guess = [] #aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1) aguess = [abs(max(ans, key = abs))]*(i+1) bguess = [1*wc]*(i+1) cguess = [1*wc]*(i+1) guess.extend(aguess) guess.extend(bguess) guess.extend(cguess) # sets bounds # a's = anything , b's negative # sets lower bound b_lower = [] alower = [-upper_a]*(i+1) blower = [0.1*wc]*(i+1) clower = [0.1*wc]*(i+1) b_lower.extend(alower) b_lower.extend(blower) b_lower.extend(clower) # sets higher bound b_higher = [] ahigher = [upper_a]*(i+1) #bhigher = [np.inf]*(i+1) bhigher = [100*wc]*(i+1) chigher = [100*wc]*(i+1) b_higher.extend(ahigher) b_higher.extend(bhigher) b_higher.extend(chigher) param_bounds = (b_lower, b_higher) p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func(x, i+1, \ params_0), tlist, ans, p0=guess, bounds = param_bounds,sigma=[0.0001 for w in wlist], maxfev = 1000000000) popt.append(p1) pcov.append(p2) print(i+1) return popt # print(popt) J = [w * alpha * e**(-w/wc) for w in wlist] k = 4 popt1 = fitterR(J, wlist, k) for i in range(k): y = checker(wlist, popt1[i],i+1) print(popt1[i]) plt.plot(wlist, J, wlist, y) plt.show() lam = list(popt1[k-1])[:k] gamma = list(popt1[k-1])[k:2*k] #damping terms w0 = list(popt1[k-1])[2*k:3*k] #w0 termss print(lam) print(gamma) print(w0) lamT = [] print(lam) print(gamma) print(w0) fig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8)) axes.plot(wlist, J, 'r--', linewidth=2, label="original") for kk,ll in enumerate(lam): #axes.plot(wlist, [lam[kk] * gamma[kk] * (w)/(((w**2-w0[kk]**2)**2 + (gamma[kk]**2*w**2))) for w in wlist],linewidth=2) axes.plot(wlist, [2* lam[kk] * gamma[kk] * (w)/(((w+w0[kk])**2 + (gamma[kk]**2))*((w-w0[kk])**2 + 
(gamma[kk]**2))) for w in wlist],linewidth=2, label="fit") axes.set_xlabel(r'$w$', fontsize=28) axes.set_ylabel(r'J', fontsize=28) axes.legend() fig.savefig('noisepower.eps') wlist2 = np.linspace(-10,10 , 50000) s1 = [w * alpha * e**(-abs(w)/wc) * ((1/(e**(w/T)-1))+1) for w in wlist2] s2 = [sum([(2* lam[kk] * gamma[kk] * (w)/(((w+w0[kk])**2 + (gamma[kk]**2))*((w-w0[kk])**2 + (gamma[kk]**2)))) * ((1/(e**(w/T)-1))+1) for kk,lamkk in enumerate(lam)]) for w in wlist2] fig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8)) axes.plot(wlist2, s1, 'r', linewidth=2,label="original") axes.plot(wlist2, s2, 'b', linewidth=2,label="fit") axes.set_xlabel(r'$w$', fontsize=28) axes.set_ylabel(r'S(w)', fontsize=28) #axes.axvline(x=Del) print(min(s2)) axes.legend() #fig.savefig('powerspectrum.eps') #J(w>0) * (n(w>w)+1) def cot(x): return 1./np.tan(x) def coth(x): """ Calculates the coth function. Parameters ---------- x: np.ndarray Any numpy array or list like input. Returns ------- cothx: ndarray The coth function applied to the input. 
""" return 1/np.tanh(x) #underdamped meier tannior version with terminator TermMax = 1000 TermOps = 0.*spre(sigmaz()) Nk = 1 # number of exponentials in approximation of the Matsubara approximation pref = 1 ckAR = [] vkAR = [] ckAI = [] vkAI = [] for kk, ll in enumerate(lam): #print(kk) lamt = lam[kk] Om = w0[kk] Gamma = gamma[kk] print(T) print(coth(beta*(Om+1.0j*Gamma)/2)) ckAR_temp = [(lamt/(4*Om))*coth(beta*(Om+1.0j*Gamma)/2),(lamt/(4*Om))*coth(beta*(Om-1.0j*Gamma)/2)] for k in range(1,Nk+1): #print(k) ek = 2*pi*k/beta ckAR_temp.append((-2*lamt*2*Gamma/beta)*ek/(((Om+1.0j*Gamma)**2+ek**2)*((Om-1.0j*Gamma)**2+ek**2))) term = 0 for k in range(Nk+1,TermMax): #print(k) ek = 2*pi*k/beta ck = ((-2*lamt*2*Gamma/beta)*ek/(((Om+1.0j*Gamma)**2+ek**2)*((Om-1.0j*Gamma)**2+ek**2))) term += ck/ek ckAR.extend(ckAR_temp) vkAR_temp = [-1.0j*Om+Gamma,1.0j*Om+Gamma] vkAR_temp.extend([2 * np.pi * k * T + 0.j for k in range(1,Nk+1)]) vkAR.extend(vkAR_temp) factor=1./4. ckAI.extend([-factor*lamt*1.0j/(Om),factor*lamt*1.0j/(Om)]) vkAI.extend( [-(-1.0j*(Om) - Gamma),-(1.0j*(Om) - Gamma)]) TermOps += term * (2*spre(Q)*spost(Q.dag()) - spre(Q.dag()*Q) - spost(Q.dag()*Q)) print(ckAR) print(vkAR) Q2 = [] NR = len(ckAR) NI = len(ckAI) Q2.extend([ sigmaz() for kk in range(NR)]) Q2.extend([ sigmaz() for kk in range(NI)]) options = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14) #corrRana = real(ct) #corrIana = imag(ct) corrRana = real(ctlong) corrIana = imag(ctlong) def checker2(tlisttemp): y = [] for i in tlisttemp: # print(i) temp = [] for kkk,ck in enumerate(ckAR): temp.append(ck*exp(-vkAR[kkk]*i)) y.append(sum(temp)) return y yR = checker2(tlist3) # function that evaluates values with fitted params at # given inputs def checker2(tlisttemp): y = [] for i in tlisttemp: # print(i) temp = [] for kkk,ck in enumerate(ckAI): if i==0: print(vkAI[kkk]) temp.append(ck*exp(-vkAI[kkk]*i)) y.append(sum(temp)) return y yI = checker2(tlist3) matplotlib.rcParams['figure.figsize'] = (7, 
5) matplotlib.rcParams['axes.titlesize'] = 25 matplotlib.rcParams['axes.labelsize'] = 30 matplotlib.rcParams['xtick.labelsize'] = 28 matplotlib.rcParams['ytick.labelsize'] = 28 matplotlib.rcParams['legend.fontsize'] = 20 matplotlib.rcParams['axes.grid'] = False matplotlib.rcParams['savefig.bbox'] = 'tight' matplotlib.rcParams['lines.markersize'] = 5 matplotlib.rcParams['font.family'] = 'STIXgeneral' matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams["font.serif"] = "STIX" matplotlib.rcParams['text.usetex'] = False tlist2 = tlist3 from cycler import cycler wlist2 = np.linspace(-2*pi*4,2 * pi *4 , 50000) wlist2 = np.linspace(-7,7 , 50000) fig = plt.figure(figsize=(12,10)) grid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3) default_cycler = (cycler(color=['r', 'g', 'b', 'y','c','m','k']) + cycler(linestyle=['-', '--', ':', '-.',(0, (1, 10)), (0, (5, 10)),(0, (3, 10, 1, 10))])) plt.rc('axes',prop_cycle=default_cycler ) axes1 = fig.add_subplot(grid[0,0]) axes1.set_yticks([0.,1.]) axes1.set_yticklabels([0,1]) axes1.plot(tlist2, corrRana,"r",linewidth=3,label="Original") axes1.plot(tlist2, yR,"g",dashes=[3,3],linewidth=2,label="Reconstructed") axes1.legend(loc=0) axes1.set_ylabel(r'$C_R(t)$',fontsize=28) axes1.set_xlabel(r'$t\;\omega_c$',fontsize=28) axes1.locator_params(axis='y', nbins=4) axes1.locator_params(axis='x', nbins=4) axes1.text(2.,1.5,"(a)",fontsize=28) axes2 = fig.add_subplot(grid[0,1]) axes2.set_yticks([0.,-0.4]) axes2.set_yticklabels([0,-0.4]) axes2.plot(tlist2, corrIana,"r",linewidth=3,label="Original") axes2.plot(tlist2, yI,"g",dashes=[3,3], linewidth=2,label="Reconstructed") axes2.legend(loc=0) axes2.set_ylabel(r'$C_I(t)$',fontsize=28) axes2.set_xlabel(r'$t\;\omega_c$',fontsize=28) axes2.locator_params(axis='y', nbins=4) axes2.locator_params(axis='x', nbins=4) axes2.text(12.5,-0.2,"(b)",fontsize=28) axes3 = fig.add_subplot(grid[1,0]) axes3.set_yticks([0.,.5,1]) axes3.set_yticklabels([0,0.5,1]) axes3.plot(wlist, J, 
"r",linewidth=3,label="$J(\omega)$ original") y = checker(wlist, popt1[3],4) axes3.plot(wlist, y, "g", dashes=[3,3], linewidth=2, label="$J(\omega)$ Fit $k_J = 4$") axes3.set_ylabel(r'$J(\omega)$',fontsize=28) axes3.set_xlabel(r'$\omega/\omega_c$',fontsize=28) axes3.locator_params(axis='y', nbins=4) axes3.locator_params(axis='x', nbins=4) axes3.legend(loc=0) axes3.text(3,1.1,"(c)",fontsize=28) s1 = [w * alpha * e**(-abs(w)/wc) * ((1/(e**(w/T)-1))+1) for w in wlist2] s2 = [sum([(2* lam[kk] * gamma[kk] * (w)/(((w+w0[kk])**2 + (gamma[kk]**2))*((w-w0[kk])**2 + (gamma[kk]**2)))) * ((1/(e**(w/T)-1))+1) for kk,lamkk in enumerate(lam)]) for w in wlist2] axes4 = fig.add_subplot(grid[1,1]) axes4.set_yticks([0.,1]) axes4.set_yticklabels([0,1]) axes4.plot(wlist2, s1,"r",linewidth=3,label="Original") axes4.plot(wlist2, s2, "g", dashes=[3,3], linewidth=2,label="Reconstructed") axes4.set_xlabel(r'$\omega/\omega_c$', fontsize=28) axes4.set_ylabel(r'$S(\omega)$', fontsize=28) axes4.locator_params(axis='y', nbins=4) axes4.locator_params(axis='x', nbins=4) axes4.legend() axes4.text(4.,1.2,"(d)",fontsize=28) fig.savefig("figures/figFiJspec.pdf") NC = 11 NR = len(ckAR) NI = len(ckAI) print(NR) print(NI) Q2 = [] Q2.extend([ sigmaz() for kk in range(NR)]) Q2.extend([ sigmaz() for kk in range(NI)]) #Q2 = [Q for kk in range(NR+NI)] #print(Q2) options = Options(nsteps=1500, store_states=True, rtol=1e-12, atol=1e-12, method="bdf") import time start = time.time() print("start") Ltot = liouvillian(Hsys) + TermOps HEOMFit = BosonicHEOMSolver(Ltot, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options) print("end") end = time.time() print(end - start) #tlist4 = np.linspace(0, 50, 1000) tlist4 = np.linspace(0, 4*pi/Del, 600) tlist4 = np.linspace(0, 30*pi/Del, 600) rho0 = basis(2,0) * basis(2,0).dag() import time start = time.time() resultFit = HEOMFit.run(rho0, tlist4) end = time.time() print(end - start) # Define some operators with which we will measure the system # 1,1 element of density matrix - 
corresonding to groundstate P11p=basis(2,0) * basis(2,0).dag() P22p=basis(2,1) * basis(2,1).dag() # 1,2 element of density matrix - corresonding to coherence P12p=basis(2,0) * basis(2,1).dag() # Calculate expectation values in the bases P11exp11K4NK1TL = expect(resultFit.states, P11p) P22exp11K4NK1TL = expect(resultFit.states, P22p) P12exp11K4NK1TL = expect(resultFit.states, P12p) tlist3 = linspace(0,15,50000) #also check long timescales ctlong = [complex((1/pi)*alpha * wc**(1-s) * beta**(-(s+1)) * (zeta(s+1,(1+beta*wc-1.0j*wc*t)/(beta*wc)) + zeta(s+1,(1+1.0j*wc*t)/(beta*wc)))) for t in tlist3] corrRana = real(ctlong) corrIana = imag(ctlong) tlist2 = tlist3 from scipy.optimize import curve_fit #seperate functions for plotting later: def fit_func_nocost(x, a, b, c, N): tot = 0 for i in range(N): # print(i) tot += a[i]*np.exp(b[i]*x)*np.cos(c[i]*x) cost = 0. return tot def wrapper_fit_func_nocost(x, N, *args): a, b, c = list(args[0][:N]), list(args[0][N:2*N]), list(args[0][2*N:3*N]) # print("debug") return fit_func_nocost(x, a, b, c, N) # function that evaluates values with fitted params at # given inputs def checker(tlist_local, vals, N): y = [] for i in tlist_local: # print(i) y.append(wrapper_fit_func_nocost(i, N, vals)) return y ####### #Real part def wrapper_fit_func(x, N, *args): a, b, c = list(args[0][:N]), list(args[0][N:2*N]), list(args[0][2*N:3*N]) # print("debug") return fit_func(x, a, b, c, N) def fit_func(x, a, b, c, N): tot = 0 for i in range(N): # print(i) tot += a[i]*np.exp(b[i]*x)*np.cos(c[i]*x ) cost = 0. 
for i in range(N): #print(i) cost += ((corrRana[0]-a[i])) tot+=0.0*cost return tot def fitterR(ans, tlist_local, k): # the actual computing of fit popt = [] pcov = [] # tries to fit for k exponents for i in range(k): #params_0 = [0]*(2*(i+1)) params_0 = [0.]*(3*(i+1)) upper_a = 20*abs(max(ans, key = abs)) #sets initial guess guess = [] #aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1) aguess = [abs(max(ans, key = abs))]*(i+1) bguess = [-wc]*(i+1) cguess = [wc]*(i+1) guess.extend(aguess) guess.extend(bguess) guess.extend(cguess) #c # sets bounds # a's = anything , b's negative # sets lower bound b_lower = [] alower = [-upper_a]*(i+1) blower = [-np.inf]*(i+1) clower = [0]*(i+1) b_lower.extend(alower) b_lower.extend(blower) b_lower.extend(clower) # sets higher bound b_higher = [] ahigher = [upper_a]*(i+1) #bhigher = [np.inf]*(i+1) bhigher = [0.1]*(i+1) chigher = [np.inf]*(i+1) b_higher.extend(ahigher) b_higher.extend(bhigher) b_higher.extend(chigher) param_bounds = (b_lower, b_higher) p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func(x, i+1, \ params_0), tlist_local, ans, p0=guess, sigma=[0.1 for t in tlist_local], bounds = param_bounds, maxfev = 100000000) popt.append(p1) pcov.append(p2) print(i+1) return popt # print(popt) k = 3 popt1 = fitterR(corrRana, tlist2, k) for i in range(k): y = checker(tlist2, popt1[i],i+1) plt.plot(tlist2, corrRana, tlist2, y) plt.show() #y = checker(tlist3, popt1[k-1],k) #plt.plot(tlist3, real(ctlong), tlist3, y) #plt.show() ####### #Imag part def fit_func2(x, a, b, c, N): tot = 0 for i in range(N): # print(i) tot += a[i]*np.exp(b[i]*x)*np.sin(c[i]*x) cost = 0. 
for i in range(N): # print(i) cost += (corrIana[0]-a[i]) tot+=0*cost return tot # actual fitting function def wrapper_fit_func2(x, N, *args): a, b, c = list(args[0][:N]), list(args[0][N:2*N]), list(args[0][2*N:3*N]) # print("debug") return fit_func2(x, a, b, c, N) # function that evaluates values with fitted params at # given inputs def checker2(tlist_local, vals, N): y = [] for i in tlist_local: # print(i) y.append(wrapper_fit_func2(i, N, vals)) return y def fitterI(ans, tlist_local, k): # the actual computing of fit popt = [] pcov = [] # tries to fit for k exponents for i in range(k): #params_0 = [0]*(2*(i+1)) params_0 = [0.]*(3*(i+1)) upper_a = abs(max(ans, key = abs))*5 #sets initial guess guess = [] #aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1) aguess = [-abs(max(ans, key = abs))]*(i+1) bguess = [-2]*(i+1) cguess = [1]*(i+1) guess.extend(aguess) guess.extend(bguess) guess.extend(cguess) #c # sets bounds # a's = anything , b's negative # sets lower bound b_lower = [] alower = [-upper_a]*(i+1) blower = [-100]*(i+1) clower = [0]*(i+1) b_lower.extend(alower) b_lower.extend(blower) b_lower.extend(clower) # sets higher bound b_higher = [] ahigher = [upper_a]*(i+1) bhigher = [0.01]*(i+1) chigher = [100]*(i+1) b_higher.extend(ahigher) b_higher.extend(bhigher) b_higher.extend(chigher) param_bounds = (b_lower, b_higher) p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func2(x, i+1, \ params_0), tlist_local, ans, p0=guess, sigma=[0.0001 for t in tlist_local], bounds = param_bounds, maxfev = 100000000) popt.append(p1) pcov.append(p2) print(i+1) return popt # print(popt) k1 = 3 popt2 = fitterI(corrIana, tlist2, k1) for i in range(k1): y = checker2(tlist2, popt2[i], i+1) plt.plot(tlist2, corrIana, tlist2, y) plt.show() #tlist3 = linspace(0,1,1000) #y = checker(tlist3, popt2[k-1],k) #plt.plot(tlist3, imag(ctlong), tlist3, y) #plt.show() #ckAR1 = list(popt1[k-1])[:len(list(popt1[k-1]))//2] ckAR1 = list(popt1[k-1])[:k] #0.5 from cosine ckAR = [0.5*x+0j for x in ckAR1] #dress 
with exp(id) #for kk in range(k): # ckAR[kk] = ckAR[kk]*exp(1.0j*list(popt1[k-1])[3*k+kk]) ckAR.extend(conjugate(ckAR)) #just directly double # vkAR, vkAI vkAR1 = list(popt1[k-1])[k:2*k] #damping terms wkAR1 = list(popt1[k-1])[2*k:3*k] #oscillating term vkAR = [-x-1.0j*wkAR1[kk] for kk, x in enumerate(vkAR1)] #combine vkAR.extend([-x+1.0j*wkAR1[kk] for kk, x in enumerate(vkAR1)]) #double print(ckAR) print(vkAR) #ckAR1 = list(popt1[k-1])[:len(list(popt1[k-1]))//2] ckAI1 = list(popt2[k1-1])[:k1] #0.5 from cosine ckAI = [-1.0j*0.5*x for x in ckAI1] #dress with exp(id) #for kk in range(k1): # ckAI[kk] = ckAI[kk]*exp(1.0j*list(popt2[k1-1])[3*k1+kk]) ckAI.extend(conjugate(ckAI)) #just directly double # vkAR, vkAI vkAI1 = list(popt2[k1-1])[k1:2*k1] #damping terms wkAI1 = list(popt2[k1-1])[2*k1:3*k1] #oscillating term vkAI = [-x-1.0j*wkAI1[kk] for kk, x in enumerate(vkAI1)] #combine vkAI.extend([-x+1.0j*wkAI1[kk] for kk, x in enumerate(vkAI1)]) #double print(ckAI) print(vkAI) #check the spectrum of the fit def spectrum_matsubara_approx(w, ck, vk): """ Calculates the approximate Matsubara correlation spectrum from ck and vk. Parameters ========== w: np.ndarray A 1D numpy array of frequencies. ck: float The coefficient of the exponential function. vk: float The frequency of the exponential function. """ return ck*2*(vk)/(w**2 + vk**2) def spectrum_approx(w, ck,vk): """ Calculates the approximate non Matsubara correlation spectrum from the bath parameters. Parameters ========== w: np.ndarray A 1D numpy array of frequencies. coup_strength: float The coupling strength parameter. bath_broad: float A parameter characterizing the FWHM of the spectral density, i.e., the bath broadening. bath_freq: float The bath frequency. 
""" sw = [] for kk,ckk in enumerate(ck): #sw.append((ckk*(real(vk[kk]))/((w-imag(vk[kk]))**2+(real(vk[kk])**2)))) sw.append((ckk*(real(vk[kk]))/((w-imag(vk[kk]))**2+(real(vk[kk])**2)))) return sw from cycler import cycler wlist2 = np.linspace(-7,7 , 50000) s1 = [w * alpha * e**(-abs(w)/wc) * ((1/(e**(w/T)-1))+1) for w in wlist2] s2 = spectrum_approx(wlist2,ckAR,vkAR) s2.extend(spectrum_approx(wlist2,[1.0j*ckk for ckk in ckAI],vkAI)) #s2 = spectrum_approx(wlist2,ckAI,vkAI) print(len(s2)) s2sum = [0. for w in wlist2] for s22 in s2: for kk,ww in enumerate(wlist2): s2sum[kk] += s22[kk] fig = plt.figure(figsize=(12,10)) grid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3) default_cycler = (cycler(color=['r', 'g', 'b', 'y','c','m','k']) + cycler(linestyle=['-', '--', ':', '-.',(0, (1, 10)), (0, (5, 10)),(0, (3, 10, 1, 10))])) plt.rc('axes',prop_cycle=default_cycler ) axes1 = fig.add_subplot(grid[0,0]) axes1.set_yticks([0.,1.]) axes1.set_yticklabels([0,1]) y = checker(tlist2, popt1[2], 3) axes1.plot(tlist2, corrRana,'r',linewidth=3,label="Original") axes1.plot(tlist2, y,'g',dashes=[3,3],linewidth=3,label="Fit $k_R = 3$") axes1.legend(loc=0) axes1.set_ylabel(r'$C_R(t)$',fontsize=28) axes1.set_xlabel(r'$t\;\omega_c$',fontsize=28) axes1.locator_params(axis='y', nbins=3) axes1.locator_params(axis='x', nbins=3) axes1.text(2.5,0.5,"(a)",fontsize=28) axes2 = fig.add_subplot(grid[0,1]) y = checker2(tlist2, popt2[2], 3) axes2.plot(tlist2, corrIana,'r',linewidth=3,label="Original") axes2.plot(tlist2, y,'g',dashes=[3,3],linewidth=3,label="Fit $k_I = 3$") axes2.legend(loc=0) axes2.set_yticks([0.,-0.4]) axes2.set_yticklabels([0,-0.4]) axes2.set_ylabel(r'$C_I(t)$',fontsize=28) axes2.set_xlabel(r'$t\;\omega_c$',fontsize=28) axes2.locator_params(axis='y', nbins=3) axes2.locator_params(axis='x', nbins=3) axes2.text(12.5,-0.1,"(b)",fontsize=28) axes3 = fig.add_subplot(grid[1,0:]) axes3.plot(wlist2, s1, 'r',linewidth=3,label="$S(\omega)$ original") axes3.plot(wlist2, real(s2sum), 
'g',dashes=[3,3],linewidth=3, label="$S(\omega)$ reconstruction") axes3.set_yticks([0.,1.]) axes3.set_yticklabels([0,1]) axes3.set_xlim(-5,5) axes3.set_ylabel(r'$S(\omega)$',fontsize=28) axes3.set_xlabel(r'$\omega/\omega_c$',fontsize=28) axes3.locator_params(axis='y', nbins=3) axes3.locator_params(axis='x', nbins=3) axes3.legend(loc=1) axes3.text(-4,1.5,"(c)",fontsize=28) fig.savefig("figures/figFitCspec.pdf") Q2 = [] NR = len(ckAR) NI = len(ckAI) Q2.extend([ sigmaz() for kk in range(NR)]) Q2.extend([ sigmaz() for kk in range(NI)]) options = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14) NC = 11 #Q2 = [Q for kk in range(NR+NI)] #print(Q2) options = Options(nsteps=1500, store_states=True, rtol=1e-12, atol=1e-12, method="bdf") import time start = time.time() #HEOMFit = BosonicHEOMSolver(Hsys, Q2, ckAR2, ckAI2, vkAR2, vkAI2, NC, options=options) HEOMFitC = BosonicHEOMSolver(Hsys, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options) print("hello") end = time.time() print(end - start) tlist4 = np.linspace(0, 30*pi/Del, 600) rho0 = basis(2,0) * basis(2,0).dag() import time start = time.time() resultFit = HEOMFitC.run(rho0, tlist4) print("hello") end = time.time() print(end - start) # Define some operators with which we will measure the system # 1,1 element of density matrix - corresonding to groundstate P11p=basis(2,0) * basis(2,0).dag() P22p=basis(2,1) * basis(2,1).dag() # 1,2 element of density matrix - corresonding to coherence P12p=basis(2,0) * basis(2,1).dag() # Calculate expectation values in the bases P11expC11k33L = expect(resultFit.states, P11p) P22expC11k33L = expect(resultFit.states, P22p) P12expC11k33L = expect(resultFit.states, P12p) qsave(P11expC11k33L,'P11expC12k33L') qsave(P11exp11K4NK1TL,'P11exp11K4NK1TL') qsave(P11exp11K3NK1TL,'P11exp11K3NK1TL') qsave(P11exp11K3NK2TL,'P11exp11K3NK2TL') P11expC11k33L=qload('data/P11expC12k33L') P11exp11K4NK1TL=qload('data/P11exp11K4NK1TL') P11exp11K3NK1TL=qload('data/P11exp11K3NK1TL') 
P11exp11K3NK2TL=qload('data/P11exp11K3NK2TL') matplotlib.rcParams['figure.figsize'] = (7, 5) matplotlib.rcParams['axes.titlesize'] = 25 matplotlib.rcParams['axes.labelsize'] = 30 matplotlib.rcParams['xtick.labelsize'] = 28 matplotlib.rcParams['ytick.labelsize'] = 28 matplotlib.rcParams['legend.fontsize'] = 28 matplotlib.rcParams['axes.grid'] = False matplotlib.rcParams['savefig.bbox'] = 'tight' matplotlib.rcParams['lines.markersize'] = 5 matplotlib.rcParams['font.family'] = 'STIXgeneral' matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams["font.serif"] = "STIX" matplotlib.rcParams['text.usetex'] = False tlist4 = np.linspace(0, 4*pi/Del, 600) # Plot the results fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12,15)) axes[0].set_yticks([0.6,0.8,1]) axes[0].set_yticklabels([0.6,0.8,1]) axes[0].plot(tlist4, np.real(P11expC11k33L), 'y', linewidth=2, label="Correlation Function Fit $k_R=k_I=3$") axes[0].plot(tlist4, np.real(P11exp11K3NK1TL), 'b-.', linewidth=2, label="Spectral Density Fit $k_J=3$, $N_k=1$ & Terminator") axes[0].plot(tlist4, np.real(P11exp11K3NK2TL), 'r--', linewidth=2, label="Spectral Density Fit $k_J=3$, $N_k=2$ & Terminator") axes[0].plot(tlist4, np.real(P11exp11K4NK1TL), 'g--', linewidth=2, label="Spectral Density Fit $k_J=4$, $N_k=1$ & Terminator") axes[0].set_ylabel(r'$\rho_{11}$',fontsize=30) axes[0].set_xlabel(r'$t\;\omega_c$',fontsize=30) axes[0].locator_params(axis='y', nbins=3) axes[0].locator_params(axis='x', nbins=3) axes[0].legend(loc=0, fontsize=25) axes[1].set_yticks([0,0.01]) axes[1].set_yticklabels([0,0.01]) #axes[0].plot(tlist4, np.real(P11exp11K3NK1TL)-np.real(P11expC11k33L), 'b-.', linewidth=2, label="Correlation Function Fit $k_R=k_I=3$") axes[1].plot(tlist4, np.real(P11exp11K3NK1TL)-np.real(P11expC11k33L), 'b-.', linewidth=2, label="Spectral Density Fit $k_J=3$, $K=1$ & Terminator") axes[1].plot(tlist4, np.real(P11exp11K3NK2TL)-np.real(P11expC11k33L), 'r--', linewidth=2, label="Spectral Density Fit $k_J=3$, 
$K=2$ & Terminator") axes[1].plot(tlist4, np.real(P11exp11K4NK1TL)-np.real(P11expC11k33L), 'g--', linewidth=2, label="Spectral Density Fit $k_J=4$, $K=1$ & Terminator") axes[1].set_ylabel(r'$\rho_{11}$ difference',fontsize=30) axes[1].set_xlabel(r'$t\;\omega_c$',fontsize=30) axes[1].locator_params(axis='y', nbins=3) axes[1].locator_params(axis='x', nbins=3) #axes[1].legend(loc=0, fontsize=25) fig.savefig("figures/figFit.pdf") tlist4 = np.linspace(0, 4*pi/Del, 600) # Plot the results fig, axes = plt.subplots(1, 1, sharex=True, figsize=(12,5)) axes.plot(tlist4, np.real(P12expC11k33L), 'y', linewidth=2, label="Correlation Function Fit $k_R=k_I=3$") axes.plot(tlist4, np.real(P12exp11K3NK1TL), 'b-.', linewidth=2, label="Spectral Density Fit $k_J=3$, $K=1$ & Terminator") axes.plot(tlist4, np.real(P12exp11K3NK2TL), 'r--', linewidth=2, label="Spectral Density Fit $k_J=3$, $K=1$ & Terminator") axes.plot(tlist4, np.real(P12exp11K4NK1TL), 'g--', linewidth=2, label="Spectral Density Fit $k_J=4$, $K=1$ & Terminator") axes.set_ylabel(r'$\rho_{12}$',fontsize=28) axes.set_xlabel(r'$t\;\omega_c$',fontsize=28) axes.locator_params(axis='y', nbins=6) axes.locator_params(axis='x', nbins=6) axes.legend(loc=0) from qutip.ipynbtools import version_table version_table() ```
github_jupyter
``` import random g = 10 # number of genes m = 10 # max length of a gene, a gene is a collection of integers "v" v = 100 # max value for "v" genome = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)] genome def mutate(genome, t='deletion', p=0.1): if t == 'SNP': new_genome = [] for gene in genome: pg = random.random() if pg <= p: i = random.randint(0,len(gene)-1) SNP = gene[:] SNP[i] = random.randint(0,100) new_genome.append(SNP) print('SNP generated! From {} to {}'.format(gene,SNP)) else: new_genome.append(gene) return new_genome if t == 'deletion': new_genome = genome[:] for gene in genome: pg = random.random() if pg <= p: new_genome.remove(gene) print('Deleted {}'.format(gene)) #print('Deleted {} gene/s'.format(len(genome)-len(new_genome))) return new_genome if t == 'inversion': new_genome = [] for gene in genome: pg = random.random() if pg <= p: inverted = gene[::-1] new_genome.append(inverted) print('Inverted {} to {}'.format(gene,inverted)) else: new_genome.append(gene) return new_genome if t == 'duplication': new_genome = [] for gene in genome: pg = random.random() if pg <= p: duplicated = gene + gene new_genome.append(duplicated) print('Duplicated {} to {}'.format(gene,duplicated)) else: new_genome.append(gene) return new_genome if t == 'all': all_ = ['deletion','inversion','duplication','SNP'] new_genome = genome[:] for _ in all_: new_genome = mutate(new_genome, _, p) return new_genome def translocate(gA,gB, p=0.1): gA_brks = [] for _ in range(len(gA)): pg = random.random() if pg <= p: gA_brks.append(_) gB_brks = [] for _ in range(len(gB)): pg = random.random() if pg <= p: gB_brks.append(_) if len(gA_brks) and len(gB_brks): iA = random.sample(gA_brks,1)[0] iB = random.sample(gB_brks,1)[0] print('Breaks generated in gA[{}] and gB[{}]'.format(iA,iB)) derA = gA[:iA]+gB[iB:] derB = gB[:iB]+gB[iA:] print('Translocation done') return derA, derB print('Translocation failed!') return gA, gB genome = [[random.randint(0,v) for _ in 
range(random.randint(0,m))] for _ in range(g)] genome #SNP test SNP_genome = mutate(genome, t='SNP', p=0.5) SNP_genome #Deletion test del_genome = mutate(genome, t='deletion', p=0.1) del_genome #Inversion test inv_genome = mutate(genome, t='inversion', p=0.1) inv_genome #Duplication test dup_genome = mutate(genome, t='duplication', p=0.1) dup_genome #All test all_genome = mutate(genome, t='all', p=0.1) all_genome #Translocation test gA = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)] gB = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)] gA gB derA,derB = translocate(gA,gB,p=0.5) derA derB ```
github_jupyter
```
import pyspark.sql
import pyspark.sql.functions as sf

# Create (or reuse) the notebook's single SparkSession.
spark = pyspark.sql.SparkSession.Builder().getOrCreate()
```

# Watson Sales Product Sample Data

In this example, we want to have a look at the pivoting capabilities of Spark. Since pivoting is commonly used with sales data containing information for different product categories or countries, we will use a data set called "Watson Sales Product Sample Data" which was downloaded from https://www.ibm.com/communities/analytics/watson-analytics-blog/sales-products-sample-data/

# 1 Load and inspect data

First we load the data, which is provided as a single CSV file, which again is well supported by Apache Spark

```
basedir = "s3://dimajix-training/data"

# Read the CSV using its header row for column names and let Spark
# infer column types from the data.
data = spark.read\
    .option("header", True) \
    .option("inferSchema", True) \
    .csv(basedir + "/watson-sales-products/WA_Sales_Products_2012-14.csv")
```

### Inspect schema

Since we used the existing header information and also let Spark infer appropriate data types, let us inspect the schema now.

```
data.printSchema()
```

### Inspect pivoting candidates

Now let us find some good candidates for a pivoting column. A pivoting column shouldn't have too many distinct entries, otherwise the result probably doesn't make too much sense and doesn't help the business expert in interpretation. We can either use

```
data.select("Retailer type").distinct().count()
```

which will give us the number of distinct values for a single column, or we can use the Spark aggregate function `countDistinct` which allows us to retrieve information for multiple columns within a single `select`.

```
# countDistinct computes all five cardinalities in a single pass over the data.
result = data.select(
    sf.countDistinct("Retailer country"),
    sf.countDistinct("Retailer type"),
    sf.countDistinct("Product line"),
    sf.countDistinct("Product type"),
    sf.countDistinct("Quarter")
)
result.toPandas()
```

# 2 Pivoting by Product Line

The first example pivots by the product line, since there are only five different distinct values.
``` revenue_per_product_line = # YOUR CODE HERE revenue_per_product_line.toPandas() ``` ## 2.1 Exercise Craete an aggragated table with * Country and Product Line in Rows * The quantity for each quarter in different columns ``` # YOUR CODE HERE ``` # 3 Unpivoting again Sometimes you just need the opposite operation: You have a data set in pivoted format and want to unpivot it. There is no simple built in function provided by Spark, but you can construct the unpivoted table as follows * For every pivoted column: * Project data frame onto non-pivot columns * Add a new column with an appropriate name containing the name of the pivot column as its value * Add a new column with an appropriate name containing the values of the pivot column * Union together all these data frames ## 3.1 Specific Example Now let us perform these steps for the pivoted table above ``` revenue_camping = revenue_per_product_line.select( # YOUR CODE HERE ) revenue_golf = revenue_per_product_line.select( sf.col("Quarter"), sf.col("Retailer Country"), sf.lit("Golf Equipment").alias("Product line"), sf.col("Golf Equipment").alias("Revenue") ) revenue_mountaineering = revenue_per_product_line.select( sf.col("Quarter"), sf.col("Retailer Country"), sf.lit("Mountaineering Equipment").alias("Product line"), sf.col("Mountaineering Equipment").alias("Revenue") ) revenue_outdoor = revenue_per_product_line.select( sf.col("Quarter"), sf.col("Retailer Country"), sf.lit("Outdoor Protection").alias("Product line"), sf.col("Outdoor Protection").alias("Revenue") ) revenue_personal = revenue_per_product_line.select( sf.col("Quarter"), sf.col("Retailer Country"), sf.lit("Personal Accessories").alias("Product line"), sf.col("Personal Accessories").alias("Revenue") ) result = # YOUR CODE HERE result.limit(10).toPandas() ``` ## 3.2 Generic Approach Of course manually unpivoting is somewhat tedious, but we already see a pattern: * Select all non-pivot columns * Create a new column containing the pivot column name * 
Create a new column containing the pivot column values * Union together everything This can be done by writing some small Python functions as follows: ``` import functools # Unpivot a single column, thereby creating one data frame def unpivot_column(df, other, pivot_column, pivot_value, result_column): columns = [df[c] for c in other] + \ [sf.lit(pivot_value).alias(pivot_column)] + \ [df[pivot_value].alias(result_column)] return df.select(*columns) # Unpivot multiple columns by using the above method def unpivot(df, pivot_column, pivot_values, result_column): """ df - input data frame pivot_column - the name of the new column containg each pivot column name pivot_values - the list of pivoted column names result_column - the name of the column containing the values of the pivot columns """ common_columns = [f.name for f in df.schema.fields if not f.name in pivot_values] unpivot_dfs = [unpivot_column(df, common_columns, pivot_column, v, result_column) for v in pivot_values] return functools.reduce(lambda x,y: x.union(y), unpivot_dfs) ``` Let's test the function ``` product_lines = # YOUR CODE HERE result_per_product_line = # YOUR CODE HERE result_per_product_line.toPandas() ``` ## 3.3 Exercise Now unpivot the result of exercise 2.1. You can do that either manually or try using the generic function defined above.
github_jupyter
``` # export from local.imports import * from local.notebook.core import * from local.notebook.export import * import nbformat,inspect from nbformat.sign import NotebookNotary from nbconvert.preprocessors import ExecutePreprocessor from local.test import * from local.core import * # default_exp notebook.test ``` # Extracting tests from notebooks > The functions that grab the cells containing tests (filtering with potential flags) and execute them ``` # export _re_all_flag = re.compile(""" # Matches any line with #all_something and catches that something in a group: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace \#\s* # # then any number of whitespace all_(\S+) # all_ followed by a group with any non-whitespace chars \s* # any number of whitespace $ # end of line (since re.MULTILINE is passed) """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export def check_all_flag(cells): for cell in cells: if check_re(cell, _re_all_flag): return check_re(cell, _re_all_flag).groups()[0] nb = read_nb("35_tutorial_wikitext.ipynb") test_eq(check_all_flag(nb['cells']), 'slow') nb = read_nb("91_notebook_export.ipynb") assert check_all_flag(nb['cells']) is None # export _re_flags = re.compile(""" # Matches any line with a test flad and catches it in a group: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace \#\s* # # then any number of whitespace (slow|cuda|cpp) # all test flags \s* # any number of whitespace $ # end of line (since re.MULTILINE is passed) """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export def get_cell_flags(cell): if cell['cell_type'] != 'code': return [] return _re_flags.findall(cell['source']) test_eq(get_cell_flags({'cell_type': 'code', 'source': "#hide\n# slow\n"}), ['slow']) test_eq(get_cell_flags({'cell_type': 'code', 'source': "#hide\n# slow\n # cuda"}), ['slow', 'cuda']) test_eq(get_cell_flags({'cell_type': 'markdown', 'source': "#hide\n# slow\n # cuda"}), []) 
test_eq(get_cell_flags({'cell_type': 'code', 'source': "#hide\n"}), []) # export def _add_import_cell(mod): "Return an import cell for `mod`" return {'cell_type': 'code', 'execution_count': None, 'metadata': {'hide_input': True}, 'outputs': [], 'source': f"\nfrom local.{mod} import *"} # export _re_is_export = re.compile(r""" # Matches any text with #export or #exports flag: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace \#\s* # # then any number of whitespace exports? # export or exports \s* # any number of whitespace """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export _re_has_import = re.compile(r""" # Matches any text with import statement: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace import # # then any number of whitespace \s+ | \s* from \s+\S+\s+ import \s+ """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export class NoExportPreprocessor(ExecutePreprocessor): "An `ExecutePreprocessor` that executes not exported cells" @delegates(ExecutePreprocessor.__init__) def __init__(self, flags, **kwargs): self.flags = flags super().__init__(**kwargs) def preprocess_cell(self, cell, resources, index): if 'source' not in cell or cell['cell_type'] != "code": return cell, resources #if _re_is_export.search(cell['source']) and not _re_has_import.search(cell['source']): # return cell, resources for f in get_cell_flags(cell): if f not in self.flags: return cell, resources res = super().preprocess_cell(cell, resources, index) return res # export def test_nb(fn, flags=None): "Execute `nb` (or only the `show_doc` cells) with `metadata`" os.environ["IN_TEST"] = '1' try: nb = read_nb(fn) all_flag = check_all_flag(nb['cells']) if all_flag is not None and all_flag not in L(flags): return mod = find_default_export(nb['cells']) #if mod is not None: nb['cells'].insert(0, _add_import_cell(mod)) ep = NoExportPreprocessor(L(flags), timeout=600, kernel_name='python3') pnb = nbformat.from_dict(nb) 
ep.preprocess(pnb) except Exception as e: print(f"Error in {fn}") raise e finally: os.environ.pop("IN_TEST") test_nb("07_vision_core.ipynb") ``` ## Export- ``` #hide notebook2script(all_fs=True) ```
github_jupyter
``` from scripts.constants import SimulationConstants from scripts.epidemic_metrics import dead_ratio, infected_ratio %load_ext autoreload %autoreload 2 from scripts.simulation import init_run_simulation import scripts.visualization as viz viz.load_matplotlib() plt = viz.plt contants = SimulationConstants() metrics = {'dead_ratio': ('l1_layer', dead_ratio), 'infected_ratio': ('l1_layer', infected_ratio)} contants.n_agents = 100 contants.n_steps = 20000 out1, _, _ = init_run_simulation(contants.n_agents, contants.n_additional_virtual_links, contants.init_infection_fraction, contants.init_aware_fraction, contants.n_steps, contants.l1_params, contants.l2_params, contants.l2_voter_params, contants.l2_social_media_params, metrics) contants.n_agents = 1000 contants.n_steps = 20000 out2, _, _ = init_run_simulation(contants.n_agents, contants.n_additional_virtual_links, contants.init_infection_fraction, contants.init_aware_fraction, contants.n_steps, contants.l1_params, contants.l2_params, contants.l2_voter_params, contants.l2_social_media_params, metrics) %%time contants.n_agents = 5000 contants.n_steps = 100000 # ~100k is ok -- around 3 minutes out3, _, _ = init_run_simulation(contants.n_agents, contants.n_additional_virtual_links, contants.init_infection_fraction, contants.init_aware_fraction, contants.n_steps, contants.l1_params, contants.l2_params, contants.l2_voter_params, contants.l2_social_media_params, metrics) %%time contants.n_agents = 10000 contants.n_steps = 150000 # around 12 min -- single realisation for N_STEPS = 150k out4, _, _ = init_run_simulation(contants.n_agents, contants.n_additional_virtual_links, contants.init_infection_fraction, contants.init_aware_fraction, contants.n_steps, contants.l1_params, contants.l2_params, contants.l2_voter_params, contants.l2_social_media_params, metrics) plt.grid(alpha=0.1) plt.plot(out1['dead_ratio'], color='black', linewidth=3, label='dead ratio (N=100)') plt.plot(out1['infected_ratio'], color='red', linewidth=3, 
label='infected ratio (N=100)') plt.plot(out2['dead_ratio'], color='blue', linewidth=3, label='dead ratio (N=1000)') plt.plot(out2['infected_ratio'], color='green', linewidth=3, label='infected ratio (N=1000)') plt.plot(out3['dead_ratio'], color='brown', linewidth=3, label='dead ratio (N=5000)') plt.plot(out3['infected_ratio'], color='purple', linewidth=3, label='infected ratio (N=5000)') plt.plot(out4['dead_ratio'], color='orange', linewidth=3, label='dead ratio (N=10000)') plt.plot(out4['infected_ratio'], color='violet', linewidth=3, label='infected ratio (N=10000)') plt.xlabel('time') plt.legend(loc='center right', borderaxespad=0.1, fontsize=12, ncol=1, labelspacing=0.02, fancybox=False, columnspacing=0.3) # plt.savefig("../plots/measure_n_steps_n_agents.pdf") ```
github_jupyter
# Developing an AI application Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. <img src='assets/Flowers.png' width=500px> The project is broken down into multiple steps: * Load and preprocess the image dataset * Train the image classifier on your dataset * Use the trained classifier to predict image content We'll lead you through each part which you'll implement in Python. When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new. First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here. ## How to apply Deep Learning algorithm ### 1. 
Exploring the Data * Use head, describe, shape functions. * Define your transforms for the training, validation, and testing sets. ### 2. Building and training the classifier * Load a pre-trained network. * Define a new, untrained feed-forward network as a classifier, using activations and dropout. * Train the classifier layers using backpropagation using the pre-trained network to get the features. * Track the loss and accuracy on the validation set to determine the best hyperparameters. ### 3. Testing your network * Do validation on the test set and plot the graph. ### 4. Save the checkpoint * Save the model so you can load it later for making predictions. ### 5. Loading the checkpoint * You can come back to this project and keep working on it without having to retrain the network. ### 6. Class Prediction * What are the strengths of the model; when does it perform well? * What are the weaknesses of the model; when does it perform poorly? * What makes this model a good candidate for the problem, given what you know about the data? ### 7. Train and compare predicted results between models * Compare time on testing set * Compare accuracy score on testing set * Compare F-score on testing set * Compare results based on the size of testing set ### 8. Improve results by tuning model * Use grid search to find best parameters ### 9. 
Check feature importance * Use feature_importance_ attribute ``` import numpy as np import os, random import time from workspace_utils import active_session import torchvision from torchvision import transforms, datasets, models import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader from PIL import Image import matplotlib.pyplot as plt # allow to plot graph on jupyter notebook %matplotlib inline #higher resolution %config InlineBackend.figure_format = 'retina' ``` ## Load the data Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks. The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size. The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1. 
``` data_dir = 'flowers' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' # TODO: Define your transforms for the training, validation, and testing sets # Image transformations image_transforms = { 'train': transforms.Compose([ # use data augmentation on training sets transforms.RandomResizedCrop(224), transforms.RandomRotation(degrees=30), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # Imagenet standards ]), # no augmentation on validation and test sets 'valid': transforms.Compose([ transforms.Resize(size=256), transforms.CenterCrop(size=224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(size=256), transforms.CenterCrop(size=224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data = { 'train': datasets.ImageFolder(root=train_dir, transform=image_transforms['train']), 'valid': datasets.ImageFolder(root=valid_dir, transform=image_transforms['valid']), 'test': datasets.ImageFolder(root=test_dir, transform=image_transforms['test']), } dataloaders = { 'train': DataLoader(data['train'], batch_size=32, shuffle=True), 'val': DataLoader(data['valid'], batch_size=32, shuffle=True), 'test': DataLoader(data['test'], batch_size=32, shuffle=True) } ``` ### Label mapping You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers. ``` import json with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f) ``` # Building and training the classifier Now that the data is ready, it's time to build and train the classifier. 
As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features. We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do: * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use) * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout * Train the classifier layers using backpropagation using the pre-trained network to get the features * Track the loss and accuracy on the validation set to determine the best hyperparameters We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal! When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project. One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to GPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module. 
**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again. ``` # TODO: Build and train your network # Check GPU availability device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = models.vgg16(pretrained=True) # Freeze the parameters of densenet for param in model.parameters(): param.requires_grad = False # Custom Classifier classifier = nn.Sequential( nn.Linear(25088, 4096, bias=True), nn.ReLU(), nn.Dropout(), nn.Linear(4096, 102, bias=True), nn.LogSoftmax(dim=1) ) model.classifier = classifier criterion = nn.NLLLoss() # Only train the classifier parameters, feature parameters are frozen optimizer = optim.Adam(model.classifier.parameters(), lr=0.001) # Move to GPU or CPU mode model = model.to(device) with active_session(): epochs = 3 steps = 0 running_loss = 0 print_every = 10 train_losses, validation_losses = [], [] for epoch in range(epochs): for inputs, labels in dataloaders['train']: steps += 1 # Move inputs and labels to the GPU/CPU inputs, labels = inputs.to(device), labels.to(device) # Set gradient to zero so that you do the parameter update correctly. 
# Else the gradient would point in some other direction than the intended direction towards the minimum optimizer.zero_grad() # Propagation logps = model.forward(inputs) # Calculate loss loss = criterion(logps, labels) # Backpropagation loss.backward() # Update parameters based on the current gradient optimizer.step() running_loss += loss.item() # Print current error and accuracy in every n time if steps % print_every == 0: val_loss = 0 val_accuracy = 0 # Set to evaluation mode model.eval() # Deactivate autograde engine to save memory and time with torch.no_grad(): for inputs, labels in dataloaders['val']: inputs, labels = inputs.to(device), labels.to(device) logps = model.forward(inputs) batch_loss = criterion(logps, labels) val_loss += batch_loss.item() # Since our model outputs a LogSoftmax, find the real # percentages by reversing the log function ps = torch.exp(logps) # Get the top class and probability top_p, top_class = ps.topk(1, dim=1) # Check correct classes equals = top_class == labels.view(*top_class.shape) val_accuracy += torch.mean(equals.type(torch.FloatTensor)).item() train_losses.append(running_loss/len(dataloaders['train'])) validation_losses.append(val_loss/len(dataloaders['val'])) print(f"Epoch {epoch+1}/{epochs}.. " f"Train loss: {running_loss/print_every:.3f}.. " f"Validation loss: {val_loss/len(dataloaders['val']):.3f}.. " f"Validation accuracy: {val_accuracy/len(dataloaders['val']):.3f}") running_loss = 0 # Set to train mode model.train() ``` ## Testing your network It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well. 
``` plt.plot(train_losses, label='Training loss') plt.plot(validation_losses, label='Validation loss') plt.legend(frameon=False) # TODO: Do validation on the test set test_accuracy = 0 model.eval() with torch.no_grad(): for inputs, labels in dataloaders['test']: inputs, labels = inputs.to(device), labels.to(device) logps = model.forward(inputs) # Calculate accuracy ps = torch.exp(logps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) test_accuracy += torch.mean(equals.type(torch.FloatTensor)).item() print(f"Test accuracy: {test_accuracy/len(dataloaders['test']):.3f}") ``` ## Save the checkpoint Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on. ```model.class_to_idx = image_datasets['train'].class_to_idx``` Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now. 
``` # TODO: Save the checkpoint model.class_to_idx = data['train'].class_to_idx checkpoint = {'input_size': 25088, 'hidden_layers':[4096], 'output_size': 102, 'arch': 'vgg16', 'learning_rate': 0.001, 'batch_size': 32, 'classifier' : classifier, 'epochs': epochs, 'optimizer': optimizer.state_dict(), 'state_dict': model.state_dict(), 'class_to_idx': model.class_to_idx} torch.save(checkpoint, 'checkpoint.pth') ``` ## Loading the checkpoint At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network. ``` # TODO: Write a function that loads a checkpoint and rebuilds the model def load_checkpoint(filename): checkpoint = torch.load(filename) learning_rate = checkpoint['learning_rate'] model = getattr(torchvision.models, checkpoint['arch'])(pretrained=True) model.classifier = checkpoint['classifier'] model.epochs = checkpoint['epochs'] model.load_state_dict(checkpoint['state_dict']) model.class_to_idx = checkpoint['class_to_idx'] optimizer.load_state_dict(checkpoint['optimizer']) return model, optimizer nn_filename = 'checkpoint.pth' model, optimizer = load_checkpoint(nn_filename) saved_model = print(model) ``` # Inference for classification Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like ```python probs, classes = predict(image_path, model) print(probs) print(classes) > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339] > ['70', '3', '45', '62', '55'] ``` First you'll need to handle processing the input image such that it can be used in your network. 
## Image Preprocessing You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image. Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`. As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions. 
``` def process_image(image): ''' Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array ''' # TODO: Process a PIL image for use in a PyTorch model img_size = 256 crop_size = 224 im = Image.open(image) im = im.resize((img_size,img_size)) left = (img_size-crop_size)*0.5 right = left + crop_size upper = (img_size-crop_size)*0.5 lower = upper + crop_size im = im.crop((left, upper, right, lower)) im = np.array(im)/255 mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) im = (im - mean) / std return im.transpose(2,0,1) ``` To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions). ``` def imshow(image, ax=None, title=None): """Imshow for Tensor.""" if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes is the third dimension image = image.numpy().transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return ax ``` ## Class Prediction Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values. To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. 
You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well. Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes. ```python probs, classes = predict(image_path, model) print(probs) print(classes) > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339] > ['70', '3', '45', '62', '55'] ``` ``` def predict(image_path, model, topk=5): ''' Predict the class (or classes) of an image using a trained deep learning model. ''' # TODO: Implement the code to predict the class from an image file model.eval() model.cpu() image = process_image(image_path) image = torch.from_numpy(np.array([image])).float() image = Variable(image) with torch.no_grad(): logps = model.forward(image) ps = torch.exp(logps) probs, labels = torch.topk(ps, topk) class_to_idx_rev = {model.class_to_idx[k]: k for k in model.class_to_idx} classes = [] for label in labels.numpy()[0]: classes.append(class_to_idx_rev[label]) return probs.numpy()[0], classes img = random.choice(os.listdir('./flowers/test/56/')) img_path = './flowers/test/56/' + img with Image.open(img_path) as image: plt.imshow(image) prob, classes = predict(img_path, model) print(prob) print(classes) print([cat_to_name[x] for x in classes]) ``` ## Sanity Checking Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. 
It should look like this: <img src='assets/inference_example.png' width=300px> You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above. ``` # TODO: Display an image along with the top 5 classes probs, classes = predict(img_path, model) max_index = np.argmax(prob) max_probability = prob[max_index] label = classes[max_index] fig = plt.figure(figsize=(6,6)) ax1 = plt.subplot2grid((15,9), (0,0), colspan=9, rowspan=9) ax2 = plt.subplot2grid((15,9), (9,2), colspan=5, rowspan=5) ax1.axis('off') ax1.set_title(cat_to_name[label]) ax1.imshow(Image.open(img_path)) labels = [] for cl in classes: labels.append(cat_to_name[cl]) y_pos = np.arange(5) ax2.set_yticks(y_pos) ax2.set_yticklabels(labels) ax2.set_xlabel('Probability') ax2.invert_yaxis() ax2.barh(y_pos, prob, xerr=0, align='center', color='blue') plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/4_efficientdet/train%20-%20with%20validation%20dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Installation - Run these commands - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git - cd Monk_Object_Detection/4_efficientdet/installation - Select the right requirements file and run - cat requirements.txt | xargs -n 1 -L 1 pip install ``` ! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git # For colab use the command below ! cd Monk_Object_Detection/4_efficientdet/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install # For Local systems and cloud select the right CUDA version #! cd Monk_Object_Detection/4_efficientdet/installation && cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install ``` # About the network 1. Paper on EfficientDet: https://arxiv.org/abs/1911.09070 2. Blog 1 on EfficientDet: https://towardsdatascience.com/efficientdet-scalable-and-efficient-object-detection-review-4472ffc34fd9 3. 
Blog 2 on EfficientDet: https://medium.com/@nainaakash012/efficientdet-scalable-and-efficient-object-detection-ea05ccd28427 # COCO Format - 1 ## Dataset Directory Structure ../sample_dataset (root_dir) | |------ship (coco_dir) | | | |----images (img_dir) | | | |------Train (set_dir) (Train) | | | |---------img1.jpg | |---------img2.jpg | |---------..........(and so on) | | | |---annotations | |----| | |--------------------instances_Train.json (instances_<set_dir>.json) | |--------------------classes.txt - instances_Train.json -> In proper COCO format - classes.txt -> A list of classes in alphabetical order For TrainSet - root_dir = "../sample_dataset"; - coco_dir = "ship"; - img_dir = "images"; - set_dir = "Train"; Note: Annotation file name too coincides against the set_dir # COCO Format - 2 ## Dataset Directory Structure ../sample_dataset (root_dir) | |------ship (coco_dir) | | | |---ImagesTrain (set_dir) | |----| | |-------------------img1.jpg | |-------------------img2.jpg | |-------------------.........(and so on) | | | |---annotations | |----| | |--------------------instances_ImagesTrain.json (instances_<set_dir>.json) | |--------------------classes.txt - instances_Train.json -> In proper COCO format - classes.txt -> A list of classes in alphabetical order For TrainSet - root_dir = "../sample_dataset"; - coco_dir = "ship"; - img_dir = "./"; - set_dir = "ImagesTrain"; Note: Annotation file name too coincides against the set_dir # Sample Dataset Credits credits: https://www.tejashwi.io/object-detection-with-fizyr-retinanet/ ``` import os import sys sys.path.append("Monk_Object_Detection/4_efficientdet/lib/"); from train_detector import Detector gtf = Detector(); root_dir = "Monk_Object_Detection/example_notebooks/sample_dataset"; coco_dir = "ship"; img_dir = "./"; set_dir = "Images"; gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, batch_size=8, image_size=512, use_gpu=True) # Available models # model_name="efficientnet-b0" # model_name="efficientnet-b1" 
# model_name="efficientnet-b2" # model_name="efficientnet-b3" # model_name="efficientnet-b4" # model_name="efficientnet-b5" # model_name="efficientnet-b6" # model_name="efficientnet-b7" # model_name="efficientnet-b8" gtf.Model(model_name="efficientnet-b0"); # To resume training #gtf.Model(model_name="efficientnet-b0", load_pretrained_model_from="path to model.pth"); gtf.Set_Hyperparams(lr=0.0001, val_interval=1, es_min_delta=0.0, es_patience=0) gtf.Train(num_epochs=10, model_output_dir="trained/"); ``` # Inference ``` import os import sys sys.path.append("Monk_Object_Detection/4_efficientdet/lib/"); from infer_detector import Infer gtf = Infer(); gtf.Model(model_dir="trained/") f = open("Monk_Object_Detection/example_notebooks/sample_dataset/ship/annotations/classes.txt", 'r'); class_list = f.readlines(); f.close(); for i in range(len(class_list)): class_list[i] = class_list[i][:-1] class_list img_path = "Monk_Object_Detection/example_notebooks/sample_dataset/ship/test/img1.jpg"; scores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4); from IPython.display import Image Image(filename='output.jpg') img_path = "../sample_dataset/ship/test/img4.jpg"; scores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4); from IPython.display import Image Image(filename='output.jpg') img_path = "Monk_Object_Detection/example_notebooks/sample_dataset/ship/test/img5.jpg"; scores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4); from IPython.display import Image Image(filename='output.jpg') img_path = "Monk_Object_Detection/example_notebooks/sample_dataset/ship/test/img6.jpg"; scores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4); from IPython.display import Image Image(filename='output.jpg') ```
github_jupyter
# Generating conditional probability tables subject to constraints ``` import os from pathlib import Path from itertools import product import numpy as np import pandas as pd from fake_data_for_learning.fake_data_for_learning import ( BayesianNodeRV, FakeDataBayesianNetwork, SampleValue ) from fake_data_for_learning.utils import RandomCpt from fake_data_for_learning.probability_polytopes import ( MapMultidimIndexToLinear, ProbabilityPolytope, ExpectationConstraint ) ``` Suppose we want to generate data from a discrete Bayesian network, such as Product -> Days <- Rating, where e.g. Product is the (insurance) product name, Rating is rating strength (i.e. market price / technical price) for a submission, and Days is the number of days to generate a quote for the submission. The number of entries in probability and conditional probability tables to define this Bayesian network is $ | Product | + | Rating | + | Product | \times | Rating | \times | Days |$. For example, let us define Industry and Rating as follows ``` product_values = ['financial', 'liability', 'property'] product_type = BayesianNodeRV('product_type', np.array([0.2, 0.5, 0.3]), values=product_values) rating_values = range(2) rating = BayesianNodeRV('rating', np.array([0.3, 0.7])) ``` Suppose that Days is also discrete, e.g. 
``` days_values = range(4) ``` Then if we choose the ordering of the conditional probability table axes as Product, Rating, Days, we can generate the entries of the conditional probability table for Days conditioned on Product and Rating with `utils.RandomCpt`: ``` random_cpt = RandomCpt(len(product_values), len(rating_values), len(days_values)) X = random_cpt() X[0, 0, :].sum() ``` So the total number of probability table entries to specify is, as in the formula above, ``` f'Number of probability table entries: {len(product_values) + len(rating_values) + (len(product_values) * len(rating_values) * len(days_values))}' ``` It would be nice to specify certain properties of the matrix without having to change entries individually. For example, we may want to insist that \begin{equation*} E(D | P = \mathrm{property}) = 3.5 \\ E(D | P = \mathrm{financial}) = 1.0 \\ E(D | P = \mathrm{liability}) = 2.0 \end{equation*} Denote the entries of the conditional probability table as $$(\rho_{p, r | d})$$ Then the above constraints become \begin{equation*} \frac{1}{|R|} \sum_{r, d} d \, \rho_{\mathrm{property},\, r\, | d} = 3.5 \\ \frac{1}{|R|} \sum_{r, d} d \, \rho_{\mathrm{financial},\, r\, | d} = 1.0 \\ \frac{1}{|R|} \sum_{r, d} d \, \rho_{\mathrm{liability},\, r\, | d} = 2.0. \end{equation*} As $(\rho)$ is a conditional probability table, we also have the constraints \begin{equation*} 0 \leq \rho_{p,\,r\,|d} \leq 1 \textrm{ for all }(p,\,r,\,d),\\ \sum_{d} \rho_{p,\,r\,|d} = 1 \textrm{ for each pair } (p, \, r) \end{equation*} Together, these constraints define a convex polytope contained in the (probability) simplex $\Delta_{R-1} \subseteq \mathbb{R}^{R}$, where $R = |Product| \times |Rating| \times |Days|$ (see e.g. Chapter 1 of *Lectures on Algebraic Statistics*, Drton, Sturmfels, Sullivant). This polytope is defined as an intersection of half-spaces, i.e. using the so-called *H-representation* of the polytope, see *Lectures on Polytopes* by Ziegler, Chapters 0 and 1.
To generate a random (conditional) probability table subject to these constraints, the vertex-, or *V-representation*, of the probability polytope $P$ is much more useful: given a vertex matrix $V$, whose columns are the vertices of $P$ in $\mathbb{R}^R$, all points in $P$ can be obtained as $$ \begin{equation*} x = V \cdot t \end{equation*} $$ where $t \in \mathbb{R}^N$, with $N$ being the number of vertices of $P$, and $t$ satisfying $0 \leq t_i \leq 1$, $\sum t_i = 1$. Once we have determined the V-representation $V$, the problem of generating conditional probability tables subject to our given expectation value constraints reduces to the much simpler problem of generating points on the non-negative quadrant of the unit (hyper)cube in $\mathbb{R}^N$. Before we get to our goal of generating these probability tables for our hit ratio problem, let's look at elementary examples. ## (Conditional) Probability Polytopes The simplest example of a probability polytope is that of a Bernoulli random variable. ``` bernoulli = ProbabilityPolytope(('outcome',), dict(outcome=range(2))) A, b = bernoulli.get_probability_half_planes() print(A, '\n', b) ``` We convert the formulation $A x \leq b$ to the V-description ``` bernoulli.get_vertex_representation() tertiary = ProbabilityPolytope(('outcome',), dict(outcome=range(3))) tertiary.get_vertex_representation() conditional_bernoullis = ProbabilityPolytope( ('input', 'output'), dict(input=range(2), output=range(2)) ) conditional_bernoullis.get_vertex_representation() ``` The benefit of having the vertex-representation (V-representation) of the probability polytope is that generating random (conditional) probability tables is straightforward: we can get all elements of the probability polytope by taking convex combinations of the vertex (column) vectors. In the flattened coordinates, we have, e.g.
``` conditional_bernoullis.generate_flat_random_cpt() ``` In the multidimensional coordinates for conditional probability tables here, we have e.g. ``` conditional_bernoullis.generate_random_cpt() ``` ## Adding contraints on conditional expectation values ``` conditional_bernoullis.set_expectation_constraints( [ExpectationConstraint(equation=dict(input=1), moment=1, value=0.5)] ) conditional_bernoullis.get_expect_equations_col_indices(conditional_bernoullis.expect_constraints[0].equation) conditional_bernoullis.get_vertex_representation() conditional_bernoullis.generate_random_cpt() two_input_constrained_polytope = ProbabilityPolytope( ('input', 'more_input', 'output'), dict(input=['hi', 'low'], more_input=range(2), output=range(2)) ) two_input_constrained_polytope.set_expectation_constraints( [ExpectationConstraint(equation=dict(more_input=0), moment=1, value=0.25)] ) two_input_constrained_polytope.get_vertex_representation() ``` ## Hit rate polytope again ``` days_polytope = ProbabilityPolytope( ('product', 'rating', 'days'), coords = { 'product': product_values, 'rating': rating_values, 'days': days_values } ) days_polytope.set_expectation_constraints( [ ExpectationConstraint(equation=dict(product='financial'), moment=1, value=0.2), ExpectationConstraint(equation=dict(product='liability'), moment=1, value=0.9), ExpectationConstraint(equation=dict(product='property'), moment=1, value=0.5), ] ) days_cpt = days_polytope.generate_random_cpt() days_cpt ``` Now we create our Bayesian network with desired constraints on some expectation values ``` days = BayesianNodeRV('days', days_cpt, parent_names=['product_type', 'rating']) bn = FakeDataBayesianNetwork(product_type, rating)#, days) bn = FakeDataBayesianNetwork(product_type, rating, days) bn.rvs(10) ```
github_jupyter
``` # python David_2_2_2_train_detector.py --class "../../../CV_PyImageSearch/Dataset/Chapter_Specific/chp2_2_stop_sign/stop_sign_images --annotations ../../../CV_PyImageSearch/Dataset/Chapter_Specific/chp2_2_stop_sign/stop_sign_annotations --output ../../../CV_PyImageSearch/Dataset/Chapter_Specific/chp2_2_stop_sign/output/2020_StopSignTest.svm' # python David_2_2_2_train_detector.py # import the necessary packages from __future__ import print_function from imutils import paths from scipy.io import loadmat from skimage import io import argparse import dlib import sys # handle Python 3 compatibility if sys.version_info > (3,): long = int # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-c", "--class", required=True, help="Path to the CALTECH-101 class images") ap.add_argument("-a", "--annotations", required=True, help="Path to the CALTECH-101 class annotations") ap.add_argument("-o", "--output", required=True, help="Path to the output detector") # import sys # sys.argv[1:] = '-c stop_sign_images -a stop_sign_annotations -o output/stop_sign_detector.svm'.split() # sys.argv[1:] = '-c Airplane/image -a Airplane/annotations -o Airplane/output/airplane.svm'.split() sys.argv[1:] = '-c ../../../CV_PyImageSearch/Dataset/caltech101/101_ObjectCategories -a ../../../CV_PyImageSearch/Dataset/caltech101/Annotations -o 20200724Face_Detector.svm'.split() args = vars(ap.parse_args()) # grab the default training options for our HOG + Linear SVM detector initialize the # list of images and bounding boxes used to train the classifier print("[INFO] gathering images and bounding boxes...") options = dlib.simple_object_detector_training_options() images = [] boxes = [] # loop over the image paths for imagePath in paths.list_images(args["class"]): # extract the image ID from the image path and load the annotations file imageID = imagePath[imagePath.rfind("/") + 1:].split("_")[1] #print(imageID) id1= imagePath.find("\\") #print(id1) 
id2= imagePath[id1+1:] #print(id2) imageID = id2.replace(".jpg", "") #print(imageID) str = imageID[-4:] #print(str) #dir= "./Airplane1/annotations/" dir= "./Face_easy/annotations" #print(dir) p = "{}annotation_{}.mat".format(dir, str) print(p) annotations = loadmat(p)["box_coord"] #print(annotations) #print(dir) p = "{}annotation_{}.mat".format(dir, str) #print(p) annotations = loadmat(p)["box_coord"] #print(annotations) bb = [dlib.rectangle(left=long(x), top=long(y), right=long(w), bottom=long(h)) for (y, h, x, w) in annotations] #print(bb) boxes.append(bb) #print(boxes) #print(len(boxes)) # add the image to the list of images images.append(io.imread(imagePath)) #print(images) # train the object detector print("[INFO] training detector...") detector = dlib.train_simple_object_detector(images, boxes, options) # dump the classifier to file print("[INFO] dumping classifier to file...") detector.save(args["output"]) # visualize the results of the detector win = dlib.image_window() win.set_image(detector) dlib.hit_enter_to_continue() ```
github_jupyter
# Spatiotemporal distribution of AxFUCCI cells

```
# Required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy
import os
import seaborn as sns
from pyabc import (Distribution, History)

# Experimental data
# Outgrowth measurements indexed by (day, tail); the per-day mean outgrowth
# is used below to re-express bin positions along the AP axis.
outgrowth_df = pd.read_csv('./outgrowth.csv')
outgrowth_df.set_index(['day', 'tail'], inplace=True)
outgrowth_mean = outgrowth_df.groupby('day').mean()['outgrowth']
# AxFUCCI percentages per 100-um position bin, per day and tail.
percentage_df = pd.read_csv('./percentage_100um.csv')
df = percentage_df
for day in range(0,6):
    # Re-express each bin position relative to the day's mean outgrowth
    # (the -100 presumably shifts to the bin edge -- confirm), cast to int.
    df.loc[df['day'] == day, 'position'] = (outgrowth_mean[day] - (df.loc[df['day'] == day, 'position']-100)).astype(int)
percentage_df = df
percentage_df.set_index(['day', 'tail', 'position'], inplace=True)
# Keep only the labelled (green/magenta) fractions.
percentage_df = percentage_df.drop(['unlabelled'], axis=1)
experiments = percentage_df

# Fitting results for each animal tail and day
# Load the pyabc posterior stored in one sqlite DB per (day, tail) fit and
# record the posterior mean and std of each fitted parameter.
means,stds = {},{}
for day,df_day in percentage_df.groupby(level='day'):
    tails_mean, tails_std = {},{}
    for tail,df_animal in df_day.groupby(level='tail'):
        db_path = ("sqlite:///" + os.path.join("./fitting_results/", "sp_fitting-day="+str(day)+"-tail="+str(tail)+".db"))
        h = History(db_path)
        # get_distribution returns (particles_df, weights); df[0] is the particles.
        df = h.get_distribution(m=0)
        tails_mean[tail] = df[0].mean()
        tails_std[tail] = df[0].std()
    means[day] = pd.DataFrame.from_dict(tails_mean, orient='index')
    stds[day] = pd.DataFrame.from_dict(tails_std, orient='index')
means = pd.concat(means, names=['day','tail'])
# Convert the fitted 'sp' parameter into the same AP coordinate system used
# for the experimental positions above.
means['sp'] = outgrowth_mean-means['sp']
stds = pd.concat(stds, names=['day','tail'])
day_means = means.groupby('day').mean()
day_std = means.groupby('day').std()

# Kolmogorov-Smirnov statistic
# Per day, test whether the c1* and c2* fitted levels differ across tails for
# each channel; H0 (same distribution) is retained when p > significance.
significance = 0.05
for day in range(0,6):
    print("Day",day)
    green = scipy.stats.ks_2samp(means.xs(day,level='day')['c1g'],means.xs(day,level='day')['c2g'])
    magenta = scipy.stats.ks_2samp(means.xs(day,level='day')['c1m'],means.xs(day,level='day')['c2m'])
    print("Green:",green)
    # green[1] / magenta[1] is the KS p-value.
    print("H0:",green[1]>significance)
    print("Magenta",magenta)
    print("H0:",magenta[1]>significance)

# Fitting results plots
# For each day: per-tail scatter/line plots of measured green and magenta
# percentages along AP position, overlaid with the fitted step functions
# (step at the mean 'sp', levels c2* anterior and c1* posterior), saved as PNG.
for day in range(0,6):
    sp_mean = means.groupby('day')['sp'].mean().iloc[day]
    sp_std = means.groupby('day')['sp'].std().iloc[day]
    c1g = means.groupby('day')['c1g'].mean().iloc[day]
    c2g = means.groupby('day')['c2g'].mean().iloc[day]
    c1g_std = means.groupby('day')['c1g'].std().iloc[day]
    c2g_std = means.groupby('day')['c2g'].std().iloc[day]
    c1m = means.groupby('day')['c1m'].mean().iloc[day]
    c2m = means.groupby('day')['c2m'].mean().iloc[day]
    c1m_std = means.groupby('day')['c1m'].std().iloc[day]
    c2m_std = means.groupby('day')['c2m'].std().iloc[day]
    pos = experiments.sort_index().xs(day,level='day').groupby('position').mean().dropna().index
    data = experiments.sort_index().xs(day,level='day').reset_index().dropna()
    ax = sns.scatterplot(x='position', y='green', data=data,style='tail',color='green')
    ax = sns.lineplot(x='position', y='green', data=data,style='tail',color='green')
    # Fitted step for the green channel.
    ax.step([-3000,sp_mean,sp_mean,3000], [c2g,c2g,c1g,c1g], color='darkgreen',linewidth=5, alpha=0.5)
    ax = sns.scatterplot(x='position', y='magenta', data=data,color='magenta',style='tail')
    ax = sns.lineplot(x='position', y='magenta', data=data,color='magenta',style='tail')
    # Fitted step for the magenta channel.
    ax.step([-3000,sp_mean,sp_mean,3000], [c2m,c2m,c1m,c1m], color='darkmagenta',linewidth=5, alpha=0.5)
    # Hard-coded reference position with +/-1 and +/-2 sigma shaded bands for
    # days 4 and 5 (values presumably from a separate analysis -- confirm).
    if day == 4:
        plt.axvline(-717.65,color='black',linestyle='--')
        plt.axvspan(-717.65-271.9, -717.65+271.9, color='black', alpha=0.1)
        plt.axvspan(-717.65-2*271.9, -717.65+2*271.9, color='black', alpha=0.1)
    if day == 5:
        plt.axvline(-446.43,color='black',linestyle='--')
        plt.axvspan(-446.43-112.46, -446.43+112.46, color='black', alpha=0.1)
        plt.axvspan(-446.43-2*112.46, -446.43+2*112.46, color='black', alpha=0.1)
    title = 'Time = '+ str(day)+" dpa"
    plt.xlim(data['position'].min()-100,data['position'].max()+100)
    plt.ylim(0,110)
    plt.xlabel('AP Position' + ' (' + r'$\mu$'+'m)')
    plt.ylabel('G0/G1 and S/G2 AxFUCCI cells (%)')
    plt.suptitle(title,size='24')
    plt.rcParams.update({'font.size': 14})
    plt.legend([],[], frameon=False)
    plt.savefig('./fit_plot2/ap-border_'+str(day), dpi=300, bbox_inches='tight')
    plt.show()
```
github_jupyter
``` import scanpy as sc import pandas as pd import xarray as xr from numpy import ones from pandas_plink import read_plink1_bin from numpy.linalg import cholesky import matplotlib.pyplot as plt import time from limix.qc import quantile_gaussianize import cellregmap cellregmap from cellregmap import estimate_betas mydir = "/share/ScratchGeneral/anncuo/OneK1K/" input_files_dir = "/share/ScratchGeneral/anncuo/OneK1K/input_files_CellRegMap/" chrom = 8 ## sample mapping file ## this file will map cells to donors ## here, B cells only sample_mapping_file = input_files_dir+"smf_Bcells_noplasma.csv" sample_mapping = pd.read_csv(sample_mapping_file, dtype={"individual_long": str, "genotype_individual_id": str, "phenotype_sample_id": str}, index_col=0) sample_mapping.shape sample_mapping.head() ## extract unique individuals donors0 = sample_mapping["genotype_individual_id"].unique() donors0.sort() print("Number of unique donors: {}".format(len(donors0))) #### kinship file ## read in GRM (genotype relationship matrix; kinship matrix) kinship_file="/share/ScratchGeneral/anncuo/OneK1K/input_files_CellRegMap/grm_wide.csv" K = pd.read_csv(kinship_file, index_col=0) K.index = K.index.astype('str') assert all(K.columns == K.index) #symmetric matrix, donors x donors K = xr.DataArray(K.values, dims=["sample_0", "sample_1"], coords={"sample_0": K.columns, "sample_1": K.index}) K = K.sortby("sample_0").sortby("sample_1") donors = sorted(set(list(K.sample_0.values)).intersection(donors0)) print("Number of donors after kinship intersection: {}".format(len(donors))) ## subset to relevant donors K = K.sel(sample_0=donors, sample_1=donors) assert all(K.sample_0 == donors) assert all(K.sample_1 == donors) plt.matshow(K) ## and decompose such as K = hK @ hK.T (using Cholesky decomposition) hK = cholesky(K.values) hK = xr.DataArray(hK, dims=["sample", "col"], coords={"sample": K.sample_0.values}) assert all(hK.sample.values == K.sample_0.values) del K print("Sample mapping number of rows 
BEFORE intersection: {}".format(sample_mapping.shape[0])) ## subsample sample mapping file to donors in the kinship matrix sample_mapping = sample_mapping[sample_mapping["genotype_individual_id"].isin(donors)] print("Sample mapping number of rows AFTER intersection: {}".format(sample_mapping.shape[0])) ## use sel from xarray to expand hK (using the sample mapping file) hK_expanded = hK.sel(sample=sample_mapping["genotype_individual_id"].values) assert all(hK_expanded.sample.values == sample_mapping["genotype_individual_id"].values) hK_expanded.shape #### phenotype file # open anndata my_file = "/share/ScratchGeneral/anncuo/OneK1K/expression_objects/sce"+str(chrom)+".h5ad" adata = sc.read(my_file) # sparse to dense mat = adata.raw.X.todense() # make pandas dataframe mat_df = pd.DataFrame(data=mat.T, index=adata.raw.var.index, columns=adata.obs.index) # turn into xr array phenotype = xr.DataArray(mat_df.values, dims=["trait", "cell"], coords={"trait": mat_df.index.values, "cell": mat_df.columns.values}) phenotype = phenotype.sel(cell=sample_mapping["phenotype_sample_id"].values) del mat del mat_df phenotype.shape phenotype.head() #### genotype file ## read in genotype file (plink format) plink_folder = "/share/ScratchGeneral/anncuo/OneK1K/plink_files/" plink_file = plink_folder+"plink_chr"+str(chrom)+".bed" G = read_plink1_bin(plink_file) G G.shape # change this to select known eQTLs instead # Filter on specific gene-SNP pairs # eQTL from B cells (B IN + B Mem) Bcell_eqtl_file = input_files_dir+"fvf_Bcell_eqtls.csv" Bcell_eqtl = pd.read_csv(Bcell_eqtl_file, index_col = 0) Bcell_eqtl.head() ## SELL (chr1, index=30) ## REL (chr2, index=6) ## BLK (chr8, index=4) ## ORMDL3 (chr17, index=2) genes = Bcell_eqtl[Bcell_eqtl['chrom']==int(chrom)]['feature'].unique() genes # (1) gene name (feature_id) gene_name = genes[4] gene_name # select SNPs for a given gene leads = Bcell_eqtl[Bcell_eqtl['feature']==gene_name]['snp_id'].unique() leads #breakpoint() G_sel = 
G[:,G['snp'].isin(leads)] G_sel # expand out genotypes from cells to donors (and select relevant donors in the same step) G_expanded = G_sel.sel(sample=sample_mapping["individual_long"].values) # assert all(hK_expanded.sample.values == G_expanded.sample.values) G_expanded.shape del G #### context file # cells (B cells only) by PCs C_file = input_files_dir+"PCs_Bcells_noplasma.csv" C = pd.read_csv(C_file, index_col = 0) # C_file = input_files_dir+"PCs_Bcells.csv.pkl" # C = pd.read_pickle(C_file) C = xr.DataArray(C.values, dims=["cell", "pc"], coords={"cell": C.index.values, "pc": C.columns.values}) C = C.sel(cell=sample_mapping["phenotype_sample_id"].values) assert all(C.cell.values == sample_mapping["phenotype_sample_id"].values) C.shape # C_gauss = quantile_gaussianize(C) # select gene y = phenotype.sel(trait=gene_name) [(y == 0).astype(int).sum()/len(y)] plt.hist(y) plt.show() y = quantile_gaussianize(y) plt.hist(y) plt.show() n_cells = phenotype.shape[1] W = ones((n_cells, 1)) del phenotype start_time = time.time() GG = G_expanded.values print("--- %s seconds ---" % (time.time() - start_time)) # del G_expanded del G_sel snps = G_expanded["snp"].values snps # get MAF MAF_dir = "/share/ScratchGeneral/anncuo/OneK1K/snps_with_maf_greaterthan0.05/" myfile = MAF_dir+"chr"+str(chrom)+".SNPs.txt" df_maf = pd.read_csv(myfile, sep="\t") df_maf.head() import numpy as np mafs = np.array([]) for snp in snps: mafs = np.append(mafs, df_maf[df_maf["SNP"] == snp]["MAF"].values) mafs start_time = time.time() betas = estimate_betas(y=y, W=W, E=C.values[:,0:10], G=GG, hK=hK_expanded, maf=mafs) print("--- %s seconds ---" % (time.time() - start_time)) beta_G = betas[0] beta_GxC = betas[1][0] beta_G_df = pd.DataFrame({"chrom":G_expanded.chrom.values, "betaG":beta_G, "variant":G_expanded.snp.values}) beta_G_df.head() cells = phenotype["cell"].values snps = G_expanded["variant"].values beta_GxC_df = pd.DataFrame(data = beta_GxC, columns=snps, index=cells) beta_GxC_df.head() ## took over 
an hour to run for one SNP! gene_name folder = mydir + "CRM_interaction/Bcells_noplasma_Bcell_eQTLs/betas/" outfilename = f"{folder}{gene_name}" print(outfilename) beta_G_df.to_csv(outfilename+"_betaG.csv") beta_GxC_df.to_csv(outfilename+"_betaGxC.csv") ```
github_jupyter
# Set up Azure ML Automated Machine Learning on SQL Server 2019 CTP 2.4 big data cluster \# Prerequisites: \# - An Azure subscription and resource group \# - An Azure Machine Learning workspace \# - A SQL Server 2019 CTP 2.4 big data cluster with Internet access and a database named 'automl' \# - Azure CLI \# - kubectl command \# - The https://github.com/Azure/MachineLearningNotebooks repository downloaded (cloned) to your local machine \# In the 'automl' database, create a table named 'dbo.nyc_energy' as follows: \# - In SQL Server Management Studio, right-click the 'automl' database, select Tasks, then Import Flat File. \# - Select the file AzureMlCli\notebooks\how-to-use-azureml\automated-machine-learning\forecasting-energy-demand\nyc_energy.csv. \# - Using the "Modify Columns" page, allow nulls for all columns. \# Create an Azure Machine Learning Workspace using the instructions at https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace \# Create an Azure service principal. You can do this with the following commands: az login az account set --subscription *subscriptionid* \# The following command prints out the **appId** and **tenant**, \# which you insert into the indicated cell later in this notebook \# to allow AutoML to authenticate with Azure: az ad sp create-for-rbac --name *principlename* --password *password* \# Log into the master instance of SQL Server 2019 CTP 2.4: kubectl exec -it mssql-master-pool-0 -n *clustername* -c mssql-server -- /bin/bash mkdir /tmp/aml cd /tmp/aml \# **Modify** the following with your subscription_id, resource_group, and workspace_name: cat > config.json << EOF { "subscription_id": "123456ab-78cd-0123-45ef-abcd12345678", "resource_group": "myrg1", "workspace_name": "myws1" } EOF \# The directory referenced below is appropriate for the master instance of SQL Server 2019 CTP 2.4. 
cd /opt/mssql/mlservices/runtime/python/bin ./python -m pip install azureml-sdk[automl] ./python -m pip install --upgrade numpy ./python -m pip install --upgrade sklearn ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.png) ``` -- Enable external scripts to allow invoking Python sp_configure 'external scripts enabled',1 reconfigure with override GO -- Use database 'automl' USE [automl] GO -- This is a table to hold the Azure ML connection information. SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO CREATE TABLE [dbo].[aml_connection]( [Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY, [ConnectionName] [nvarchar](255) NULL, [TenantId] [nvarchar](255) NULL, [AppId] [nvarchar](255) NULL, [Password] [nvarchar](255) NULL, [ConfigFile] [nvarchar](255) NULL ) ON [PRIMARY] GO ``` # Copy the values from create-for-rbac above into the cell below ``` -- Use the following values: -- Leave the name as 'Default' -- Insert <tenant> returned by create-for-rbac above -- Insert <AppId> returned by create-for-rbac above -- Insert <password> used in create-for-rbac above -- Leave <path> as '/tmp/aml/config.json' INSERT INTO [dbo].[aml_connection] VALUES ( N'Default', -- Name N'11111111-2222-3333-4444-555555555555', -- Tenant N'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', -- AppId N'insertpasswordhere', -- Password N'/tmp/aml/config.json' -- Path ); GO -- This is a table to hold the results from the AutoMLTrain procedure. SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO CREATE TABLE [dbo].[aml_model]( [Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY, [Model] [varchar](max) NOT NULL, -- The model, which can be passed to AutoMLPredict for testing or prediction. [RunId] [nvarchar](250) NULL, -- The RunId, which can be used to view the model in the Azure Portal. 
[CreatedDate] [datetime] NULL, [ExperimentName] [nvarchar](100) NULL, -- Azure ML Experiment Name [WorkspaceName] [nvarchar](100) NULL, -- Azure ML Workspace Name [LogFileText] [nvarchar](max) NULL ) GO ALTER TABLE [dbo].[aml_model] ADD DEFAULT (getutcdate()) FOR [CreatedDate] GO -- This stored procedure uses automated machine learning to train several models -- and return the best model. -- -- The result set has several columns: -- best_run - ID of the best model found -- experiment_name - training run name -- fitted_model - best model found -- log_file_text - console output -- workspace - name of the Azure ML workspace where run history is stored -- -- An example call for a classification problem is: -- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName) -- exec dbo.AutoMLTrain @input_query=' -- SELECT top 100000 -- CAST([pickup_datetime] AS NVARCHAR(30)) AS pickup_datetime -- ,CAST([dropoff_datetime] AS NVARCHAR(30)) AS dropoff_datetime -- ,[passenger_count] -- ,[trip_time_in_secs] -- ,[trip_distance] -- ,[payment_type] -- ,[tip_class] -- FROM [dbo].[nyctaxi_sample] order by [hack_license] ', -- @label_column = 'tip_class', -- @iterations=10 -- -- An example call for forecasting is: -- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName) -- exec dbo.AutoMLTrain @input_query=' -- select cast(timeStamp as nvarchar(30)) as timeStamp, -- demand, -- precip, -- temp, -- case when timeStamp < ''2017-01-01'' then 0 else 1 end as is_validate_column -- from nyc_energy -- where demand is not null and precip is not null and temp is not null -- and timeStamp < ''2017-02-01''', -- @label_column='demand', -- @task='forecasting', -- @iterations=10, -- @iteration_timeout_minutes=5, -- @time_column_name='timeStamp', -- @is_validate_column='is_validate_column', -- @experiment_name='automl-sql-forecast', -- @primary_metric='normalized_root_mean_squared_error' SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO CREATE OR 
ALTER PROCEDURE [dbo].[AutoMLTrain] ( @input_query NVARCHAR(MAX), -- The SQL Query that will return the data to train and validate the model. @label_column NVARCHAR(255)='Label', -- The name of the column in the result of @input_query that is the label. @primary_metric NVARCHAR(40)='AUC_weighted', -- The metric to optimize. @iterations INT=100, -- The maximum number of pipelines to train. @task NVARCHAR(40)='classification', -- The type of task. Can be classification, regression or forecasting. @experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal. @iteration_timeout_minutes INT = 15, -- The maximum time in minutes for training a single pipeline. @experiment_timeout_minutes INT = 60, -- The maximum time in minutes for training all pipelines. @n_cross_validations INT = 3, -- The number of cross validations. @blacklist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that will not be used. -- The list of possible models can be found at: -- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings @whitelist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that can be used. -- The list of possible models can be found at: -- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings @experiment_exit_score FLOAT = 0, -- Stop the experiment if this score is acheived. @sample_weight_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that gives a sample weight. @is_validate_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that indicates if the row is for training or validation. -- In the values of the column, 0 means for training and 1 means for validation. @time_column_name NVARCHAR(255)='', -- The name of the timestamp column for forecasting. 
@connection_name NVARCHAR(255)='default' -- The AML connection to use. ) AS BEGIN DECLARE @tenantid NVARCHAR(255) DECLARE @appid NVARCHAR(255) DECLARE @password NVARCHAR(255) DECLARE @config_file NVARCHAR(255) SELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile FROM aml_connection WHERE ConnectionName = @connection_name; EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd import logging import azureml.core import pandas as pd import numpy as np from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig from sklearn import datasets import pickle import codecs from azureml.core.authentication import ServicePrincipalAuthentication from azureml.core.workspace import Workspace if __name__.startswith("sqlindb"): auth = ServicePrincipalAuthentication(tenantid, appid, password) ws = Workspace.from_config(path=config_file, auth=auth) project_folder = "./sample_projects/" + experiment_name experiment = Experiment(ws, experiment_name) data_train = input_data X_valid = None y_valid = None sample_weight_valid = None if is_validate_column != "" and is_validate_column is not None: data_train = input_data[input_data[is_validate_column] <= 0] data_valid = input_data[input_data[is_validate_column] > 0] data_train.pop(is_validate_column) data_valid.pop(is_validate_column) y_valid = data_valid.pop(label_column).values if sample_weight_column != "" and sample_weight_column is not None: sample_weight_valid = data_valid.pop(sample_weight_column).values X_valid = data_valid n_cross_validations = None y_train = data_train.pop(label_column).values sample_weight = None if sample_weight_column != "" and sample_weight_column is not None: sample_weight = data_train.pop(sample_weight_column).values X_train = data_train if experiment_timeout_minutes == 0: experiment_timeout_minutes = None if experiment_exit_score == 0: experiment_exit_score = None if blacklist_models == "": blacklist_models = 
None if blacklist_models is not None: blacklist_models = blacklist_models.replace(" ", "").split(",") if whitelist_models == "": whitelist_models = None if whitelist_models is not None: whitelist_models = whitelist_models.replace(" ", "").split(",") automl_settings = {} preprocess = True if time_column_name != "" and time_column_name is not None: automl_settings = { "time_column_name": time_column_name } preprocess = False log_file_name = "automl_errors.log" automl_config = AutoMLConfig(task = task, debug_log = log_file_name, primary_metric = primary_metric, iteration_timeout_minutes = iteration_timeout_minutes, experiment_timeout_minutes = experiment_timeout_minutes, iterations = iterations, n_cross_validations = n_cross_validations, preprocess = preprocess, verbosity = logging.INFO, X = X_train, y = y_train, path = project_folder, blacklist_models = blacklist_models, whitelist_models = whitelist_models, experiment_exit_score = experiment_exit_score, sample_weight = sample_weight, X_valid = X_valid, y_valid = y_valid, sample_weight_valid = sample_weight_valid, **automl_settings) local_run = experiment.submit(automl_config, show_output = True) best_run, fitted_model = local_run.get_output() pickled_model = codecs.encode(pickle.dumps(fitted_model), "base64").decode() log_file_text = "" try: with open(log_file_name, "r") as log_file: log_file_text = log_file.read() except: log_file_text = "Log file not found" returned_model = pd.DataFrame({"best_run": [best_run.id], "experiment_name": [experiment_name], "fitted_model": [pickled_model], "log_file_text": [log_file_text], "workspace": [ws.name]}, dtype=np.dtype(np.str)) ' , @input_data_1 = @input_query , @input_data_1_name = N'input_data' , @output_data_1_name = N'returned_model' , @params = N'@label_column NVARCHAR(255), @primary_metric NVARCHAR(40), @iterations INT, @task NVARCHAR(40), @experiment_name NVARCHAR(32), @iteration_timeout_minutes INT, @experiment_timeout_minutes INT, @n_cross_validations INT, 
@blacklist_models NVARCHAR(MAX), @whitelist_models NVARCHAR(MAX), @experiment_exit_score FLOAT, @sample_weight_column NVARCHAR(255), @is_validate_column NVARCHAR(255), @time_column_name NVARCHAR(255), @tenantid NVARCHAR(255), @appid NVARCHAR(255), @password NVARCHAR(255), @config_file NVARCHAR(255)' , @label_column = @label_column , @primary_metric = @primary_metric , @iterations = @iterations , @task = @task , @experiment_name = @experiment_name , @iteration_timeout_minutes = @iteration_timeout_minutes , @experiment_timeout_minutes = @experiment_timeout_minutes , @n_cross_validations = @n_cross_validations , @blacklist_models = @blacklist_models , @whitelist_models = @whitelist_models , @experiment_exit_score = @experiment_exit_score , @sample_weight_column = @sample_weight_column , @is_validate_column = @is_validate_column , @time_column_name = @time_column_name , @tenantid = @tenantid , @appid = @appid , @password = @password , @config_file = @config_file WITH RESULT SETS ((best_run NVARCHAR(250), experiment_name NVARCHAR(100), fitted_model VARCHAR(MAX), log_file_text NVARCHAR(MAX), workspace NVARCHAR(100))) END -- This procedure returns a list of metrics for each iteration of a training run. SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO CREATE OR ALTER PROCEDURE [dbo].[AutoMLGetMetrics] ( @run_id NVARCHAR(250), -- The RunId @experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal. @connection_name NVARCHAR(255)='default' -- The AML connection to use. 
) AS BEGIN DECLARE @tenantid NVARCHAR(255) DECLARE @appid NVARCHAR(255) DECLARE @password NVARCHAR(255) DECLARE @config_file NVARCHAR(255) SELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile FROM aml_connection WHERE ConnectionName = @connection_name; EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd import logging import azureml.core import numpy as np from azureml.core.experiment import Experiment from azureml.train.automl.run import AutoMLRun from azureml.core.authentication import ServicePrincipalAuthentication from azureml.core.workspace import Workspace auth = ServicePrincipalAuthentication(tenantid, appid, password) ws = Workspace.from_config(path=config_file, auth=auth) experiment = Experiment(ws, experiment_name) ml_run = AutoMLRun(experiment = experiment, run_id = run_id) children = list(ml_run.get_children()) iterationlist = [] metricnamelist = [] metricvaluelist = [] for run in children: properties = run.get_properties() if "iteration" in properties: iteration = int(properties["iteration"]) for metric_name, metric_value in run.get_metrics().items(): if isinstance(metric_value, float): iterationlist.append(iteration) metricnamelist.append(metric_name) metricvaluelist.append(metric_value) metrics = pd.DataFrame({"iteration": iterationlist, "metric_name": metricnamelist, "metric_value": metricvaluelist}) ' , @output_data_1_name = N'metrics' , @params = N'@run_id NVARCHAR(250), @experiment_name NVARCHAR(32), @tenantid NVARCHAR(255), @appid NVARCHAR(255), @password NVARCHAR(255), @config_file NVARCHAR(255)' , @run_id = @run_id , @experiment_name = @experiment_name , @tenantid = @tenantid , @appid = @appid , @password = @password , @config_file = @config_file WITH RESULT SETS ((iteration INT, metric_name NVARCHAR(100), metric_value FLOAT)) END -- This procedure predicts values based on a model returned by AutoMLTrain and a dataset. 
-- It returns the dataset with a new column added, which is the predicted value.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- AutoMLPredict: scores a dataset with a model previously produced by AutoMLTrain.
-- The model arrives as a base64-encoded pickle string, is rehydrated inside the
-- embedded Python script, and the rows from @input_query are returned unchanged
-- except for one extra column, "predicted", holding the model's predictions.
-- NOTE(review): the embedded script pops @label_column into y_test but never uses
-- it — presumably only to keep the label out of the feature matrix; confirm.
CREATE OR ALTER PROCEDURE [dbo].[AutoMLPredict] (
@input_query NVARCHAR(MAX), -- A SQL query returning data to predict on.
@model NVARCHAR(MAX), -- A model returned from AutoMLTrain.
@label_column NVARCHAR(255)='' -- Optional name of the column from input_query, which should be ignored when predicting
) AS
BEGIN
-- Run the scoring in-database through the SQL Server Python extensibility
-- framework; input_data / combined_output are the bound data-frame names.
EXEC sp_execute_external_script @language = N'Python',
@script = N'import pandas as pd
import azureml.core
import numpy as np
from azureml.train.automl import AutoMLConfig
import pickle
import codecs
model_obj = pickle.loads(codecs.decode(model.encode(), "base64"))
test_data = input_data.copy()
if label_column != "" and label_column is not None:
    y_test = test_data.pop(label_column).values
X_test = test_data
predicted = model_obj.predict(X_test)
combined_output = input_data.assign(predicted=predicted)
'
-- Bind the query result to the script input and declare the output frame.
, @input_data_1 = @input_query
, @input_data_1_name = N'input_data'
, @output_data_1_name = N'combined_output'
-- Scalar parameters forwarded into the Python scope.
, @params = N'@model NVARCHAR(MAX), @label_column NVARCHAR(255)'
, @model = @model
, @label_column = @label_column
END
github_jupyter
def q2cf(n1: int, n2: int):
    """Yield the continued-fraction terms of the rational n1/n2.

    Runs the Euclidean algorithm; each integer quotient is one term of
    the expansion.
    """
    while n2:
        term, resto = divmod(n1, n2)
        yield term
        n1, n2 = n2, resto


def sb_depth(n1: int, n2: int):
    """Return the Stern-Brocot depth of the rational n1/n2.

    The depth equals the sum of the continued-fraction terms of n1/n2.
    """
    return sum(q2cf(n1, n2))
""" assert factor logp_fn = factor.logp rows = [] keys, ranges = zip(*(param_ranges.items())) for values in itertools.product(*ranges): params = zip(keys, values) logp = logp_fn(params) rows.append([*values, logp, np.exp(logp)]) return pd.DataFrame(rows, columns=keys + ('logp', 'p')) ``` ## Parameter Space ``` a_space_np = np.array(f_r, dtype=np.object) a_space_th = th.shared(np.array([(frac.numerator, frac.denominator) for frac in f_r])) b_space_np = np.arange(-100, 100, 1, dtype=np.int) b_space_th = th.shared(b_space_np) print(a_space_th.get_value()[:10]) print(b_space_th.get_value()[:10]) sb_depths = np.vectorize(lambda a: sb_depth(a.numerator, a.denominator))(a_space_np) sb_depth_hist, _ = np.histogram(sb_depths, bins=R_MAX + 1) plt.bar(np.arange(0, R_MAX + 1, 1), 1/sb_depth_hist); n = R_MAX alpha = EXP_STEPS + 1 beta = (R_MAX - EXP_STEPS) + 1 betabin = np.vectorize(lambda k: st.betabinom.pmf(k, n, alpha, beta))(np.arange(0, R_MAX + 1, 1)) plt.bar(np.arange(0, R_MAX + 1, 1), betabin); foo = np.vectorize(lambda k: st.betabinom.pmf(k, n, alpha, beta))(sb_depths) plt.bar(np.arange(0, len(sb_depths), 1), foo); prior = betabin[sb_depths] * (1/sb_depth_hist)[sb_depths] plt.bar(np.arange(0, len(sb_depths), 1), prior) prior.sum() ``` ## Generating Data ``` size = 3 a_true = Fraction(1, 3) b_true = 15 x = np.linspace(100, 300, size) y_true = a_true * x + b_true y = y_true + np.random.normal(scale=ε_px/3, size=size) # Make data piecewise-linear # y[[-1, -2, -3, -4]] = y[-5] df = pd.DataFrame(zip(x,y), columns=['parent.width', 'child.width']) # a_i_true = np.where((a_choices == [a_i_true.numerator(), a_i_true.denominator()]).all(axis=1))[0][0] # a_i_true, a_space_np[a_i_true] # b_i_true = np.where((b_choices == 5))[0][0] # b_i_true, b_space_np[b_i_true] fig, ax = plt.subplots(figsize=(12, 8)) ax.plot(x, y_true, ':.', color='tab:green', label="True"); ax.plot(x, y, 'o', data=df, label="Noisy Samples") ax.set_xlabel('parent.width') ax.set_ylabel('child.width') ax.legend(); 
def mk_relaxed_model():
    """Build the relaxed (continuous) Bayesian linear-regression model.

    y ~ Normal(alpha * x + beta, sigma^2), with uniform priors on the slope
    and intercept and a half-Cauchy prior on the noise scale.
    Reads the module-level data `x`, `y` and the prior-bound constants.
    """
    model = pm.Model()
    with model:
        observed_x = pm.Data('x_data', x)
        # Half-Cauchy keeps the noise scale positive; scale ~ ε_px / 3.
        noise = pm.HalfCauchy('σ', beta=(ε_px + TINY)/3)
        intercept = pm.Uniform('β', lower=B_MIN, upper=B_MAX)
        slope = pm.Uniform('α', lower=A_MIN, upper=A_MAX)
        pm.Normal('y_obs', mu=slope*observed_x + intercept, sigma=noise, observed=y)
    return model
def candidate_bounds_indices(a_space, b_space, a_lower, a_upper, b_lower, b_upper):
    """Map continuous parameter bounds onto the nearest grid indices.

    For each bound, returns the index of the closest element of the
    corresponding parameter grid (a_space for slopes, b_space for
    intercepts), in the order (a_lo, a_hi, b_lo, b_hi).
    """
    def nearest(grid, value):
        # Index of the grid element at the smallest absolute distance.
        return np.abs(grid - value).argmin()

    return (
        nearest(a_space, a_lower),
        nearest(a_space, a_upper),
        nearest(b_space, b_lower),
        nearest(b_space, b_upper),
    )
colors='darkslategray') # ax.vlines(list(range(len(a_choices))), *ax.get_ylim(), colors='darkslategray') ax.yaxis.set_major_locator(plt.MaxNLocator(30)) ax.xaxis.set_major_locator(plt.MaxNLocator(100)) ax.add_artist(plt.Rectangle((a_i_l, b_i_l), (a_i_u - a_i_l), (b_i_u - b_i_l), color='y', fill=False, zorder=99)); ```
github_jupyter
def log_prob(theta):
    """Standard-normal log-probability with an artificial 5-8 ms busy-wait.

    The spin loop emulates an expensive model evaluation so that the
    parallelization benchmarks below have real work to amortize.
    """
    deadline = time.time() + np.random.uniform(0.005, 0.008)
    while time.time() < deadline:
        pass
    return -0.5 * np.sum(theta ** 2)
To start, let's sample the usual (serial) way: ``` import emcee np.random.seed(42) initial = np.random.randn(32, 5) nwalkers, ndim = initial.shape nsteps = 100 sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob) start = time.time() sampler.run_mcmc(initial, nsteps, progress=True) end = time.time() serial_time = end - start print("Serial took {0:.1f} seconds".format(serial_time)) ``` ## Multiprocessing The simplest method of parallelizing emcee is to use the [multiprocessing module from the standard library](https://docs.python.org/3/library/multiprocessing.html). To parallelize the above sampling, you could update the code as follows: ``` from multiprocessing import Pool with Pool() as pool: sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool) start = time.time() sampler.run_mcmc(initial, nsteps, progress=True) end = time.time() multi_time = end - start print("Multiprocessing took {0:.1f} seconds".format(multi_time)) print("{0:.1f} times faster than serial".format(serial_time / multi_time)) ``` I have 4 cores on the machine where this is being tested: ``` from multiprocessing import cpu_count ncpu = cpu_count() print("{0} CPUs".format(ncpu)) ``` We don't quite get the factor of 4 runtime decrease that you might expect because there is some overhead in the parallelization, but we're getting pretty close with this example and this will get even closer for more expensive models. ## MPI Multiprocessing can only be used for distributing calculations across processors on one machine. If you want to take advantage of a bigger cluster, you'll need to use MPI. In that case, you need to execute the code using the `mpiexec` executable, so this demo is slightly more convoluted. For this example, we'll write the code to a file called `script.py` and then execute it using MPI, but when you really use the MPI pool, you'll probably just want to edit the script directly. 
All parallel Python implementations work by spinning up multiple `python` processes with identical environments and then passing information between the processes using `pickle`.
def log_prob_data_global(theta):
    """Like log_prob_data, but reads the dataset from the module-level
    global `data`, so it is pickled once per worker instead of once per
    model call.

    The busy-wait emulates an expensive model; the return value is the
    standard-normal log-probability of theta.
    """
    _ = data[0]  # Touch the global dataset (stands in for real usage).
    deadline = time.time() + np.random.uniform(0.005, 0.008)
    while time.time() < deadline:
        pass
    return -0.5 * np.sum(theta ** 2)
github_jupyter
def conv_stack(data, filters, s):
    """Apply a 3x3 ReLU convolution with `filters` channels and stride `s`.

    Thin wrapper used to build the encoder/decoder towers; padding='same'
    preserves the spatial size (up to the stride).
    """
    layer = Conv2D(filters, (3, 3), strides=s, activation='relu', padding='same')
    return layer(data)
def image_a_b_gen(batch_size):
    """Yield ([L-channel batch, Inception embedding], ab-channel targets).

    Augments Xtrain with `datagen`, converts each RGB batch to CIELAB, and
    pairs the lightness channel (network input) with the ab channels scaled
    to [-1, 1] (network target, matching the tanh output layer).

    Bug fixed: the Inception embedding was computed twice per batch (the
    first result was assigned to `embed` and then discarded while the yield
    called create_inception_embedding again), doubling the cost of the most
    expensive step. The embedding is now computed once and reused.
    """
    for batch in datagen.flow(Xtrain, batch_size=batch_size):
        # Embedding input: grayscale image replicated back to 3 channels.
        grayscaled_rgb = gray2rgb(rgb2gray(batch))
        embed = create_inception_embedding(grayscaled_rgb)
        lab_batch = rgb2lab(batch)
        X_batch = lab_batch[:,:,:,0]
        X_batch = X_batch.reshape(X_batch.shape+(1,))
        Y_batch = lab_batch[:,:,:,1:] / 128  # ab channels scaled to [-1, 1]
        yield ([X_batch, embed], Y_batch)
tensorboard = TensorBoard(log_dir="/output") model.compile(optimizer='adam', loss='mse') model.fit_generator(image_a_b_gen(batch_size), callbacks=[tensorboard], epochs=1000, steps_per_epoch=20) # Save model model_json = model.to_json() with open("model.json", "w") as json_file: json_file.write(model_json) model.save_weights("color_tensorflow_real_mode.h5") #Make predictions on validation images color_me = [] for filename in os.listdir('/data/images/Test/'): color_me.append(img_to_array(load_img('/data/images/Test/'+filename))) color_me = np.array(color_me, dtype=float) color_me_embed = create_inception_embedding(color_me) color_me = rgb2lab(1.0/255*color_me)[:,:,:,0] color_me = color_me.reshape(color_me.shape+(1,)) # Test model output = model.predict([color_me, color_me_embed]) output = output * 128 # Output colorizations for i in range(len(output)): cur = np.zeros((256, 256, 3)) cur[:,:,0] = color_me[i][:,:,0] cur[:,:,1:] = output[i] imsave("result/img_"+str(i)+".png", lab2rgb(cur)) ```
github_jupyter
def calcular_pi(n: int) -> float:
    """Approximate pi with the first n terms of the Gregory-Leibniz series.

    pi = 4 * sum_{k=1..n} (-1)^(k+1) / (2k - 1)

    Parameters
    ----------
    n : int
        Number of series terms to accumulate.

    Returns
    -------
    float
        Approximation of pi.

    Examples
    --------
    >>> calcular_pi(3)
    3.466666666666667
    >>> calcular_pi(1000)
    3.140592653839794
    """
    # Terms are accumulated in increasing k, same order as a plain loop.
    suma_parcial = sum((-1) ** (k + 1) / (2 * k - 1) for k in range(1, n + 1))
    return 4 * suma_parcial
def calcular_e(n: int) -> float:
    """Approximate Euler's number e with the first n terms of sum 1/k!.

    e = sum_{k>=0} 1/k! = 1 + 1 + 1/2! + 1/3! + ...

    Improvements over the original: a single accumulation loop replaces the
    three special-cased branches, a docstring is added, and n <= 0 now
    returns 0 (empty sum) instead of raising UnboundLocalError.

    Parameters
    ----------
    n : int
        Number of series terms to accumulate.

    Returns
    -------
    float
        Approximation of e (0 for n <= 0).

    Examples
    --------
    >>> calcular_e(3)
    2.5
    >>> calcular_e(1000)
    2.7182818284590455
    """
    total = 0
    factorial = 1
    for k in range(n):
        if k > 0:
            factorial *= k  # running k! avoids recomputing from scratch
        total += 1 / factorial
    return total
def amigos(n1: int, n2: int) -> bool:
    """Return True when n1 and n2 are amicable numbers.

    Two numbers are amicable when the sum of the *proper* divisors of each
    one equals the other number: sigma(n1) == n2 and sigma(n2) == n1, where
    sigma(n) sums the divisors of n strictly smaller than n.

    Bug fixed: the previous version summed ALL divisors (including the
    number itself, via range(1, n+1)) and only compared the two totals, so
    any unrelated pair with equal divisor sums — e.g. amigos(6, 11), since
    sigma_total(6) == sigma_total(11) == 12 — was wrongly reported as True.

    Examples
    --------
    >>> amigos(220, 284)
    True
    >>> amigos(6, 5)
    False
    """
    def _suma_divisores_propios(n: int) -> int:
        # Proper divisors exclude n itself, hence range up to n - 1.
        return sum(i for i in range(1, n) if n % i == 0)

    return _suma_divisores_propios(n1) == n2 and _suma_divisores_propios(n2) == n1
def collatz(N: int) -> list:
    """Return the Collatz sequence starting at N and ending at 1.

    Each step halves an even value and maps an odd value to 3*value + 1;
    every intermediate value is recorded, the starting number included.

    Examples
    --------
    >>> collatz(9)
    [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
    """
    secuencia = [N]
    valor = N
    while valor != 1:
        # One branch expression instead of duplicated append calls.
        valor = valor // 2 if valor % 2 == 0 else 3 * valor + 1
        secuencia.append(valor)
    return secuencia
def goldbach(N: int):
    """Return a pair of primes (N1, N2) with N1 + N2 == N (Goldbach pair).

    The pair returned has the smallest possible first prime, which is
    exactly the pair the original implementation produced:
    goldbach(4) == (2, 2), goldbach(6) == (3, 3), goldbach(8) == (3, 5).

    Returns the string "Error" for N == 2 (kept for backward compatibility)
    and (0, 0) when no such pair exists (e.g. for most odd N).

    Performance: the original built the full prime list with an O(N^2)
    double loop and then scanned all prime pairs; this version tests
    candidates with trial division up to sqrt(p), roughly O(N * sqrt(N))
    in the worst case and far faster in practice (first hit wins).
    """
    if N == 2:
        return "Error"

    def _es_primo(p: int) -> bool:
        # Trial division up to sqrt(p); sufficient for the sizes used here.
        if p < 2:
            return False
        if p % 2 == 0:
            return p == 2
        d = 3
        while d * d <= p:
            if p % d == 0:
                return False
            d += 2
        return True

    # Smallest prime first: the first hit mirrors the original's result,
    # which always reported (smallest prime p with N-p prime, N-p).
    for p in range(2, N - 1):
        if _es_primo(p) and _es_primo(N - p):
            return (p, N - p)
    return (0, 0)
github_jupyter
# Depression Detection in Social Media Posts #### Imports ``` import warnings warnings.filterwarnings("ignore") import ftfy import matplotlib.pyplot as plt import nltk import numpy as np import pandas as pd import re from math import exp from numpy import sign from sklearn.metrics import classification_report, confusion_matrix, accuracy_score from gensim.models import KeyedVectors from nltk.corpus import stopwords from nltk import PorterStemmer from keras.models import Model, Sequential from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.layers import Conv1D, Dense, Input, LSTM, Embedding, Dropout, Activation, MaxPooling1D from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences ``` #### Constants ``` # Reproducibility np.random.seed(1234) DEPRES_NROWS = 3200 # number of rows to read from DEPRESSIVE_TWEETS_CSV RANDOM_NROWS = 12000 # number of rows to read from RANDOM_TWEETS_CSV MAX_SEQUENCE_LENGTH = 140 # Max tweet size MAX_NB_WORDS = 20000 EMBEDDING_DIM = 300 TRAIN_SPLIT = 0.6 TEST_SPLIT = 0.2 LEARNING_RATE = 0.1 EPOCHS= 10 ``` ## Section 1: Load Data Loading depressive tweets scraped from twitter using [TWINT](https://github.com/haccer/twint) and random tweets from Kaggle dataset [twitter_sentiment](https://www.kaggle.com/ywang311/twitter-sentiment/data). 
#### File Paths ``` #DEPRESSIVE_TWEETS_CSV = 'depressive_tweets.csv' DEPRESSIVE_TWEETS_CSV = 'depressive_tweets_processed.csv' RANDOM_TWEETS_CSV = 'Sentiment Analysis Dataset 2.csv' EMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin.gz' depressive_tweets_df = pd.read_csv(DEPRESSIVE_TWEETS_CSV, sep = '|', header = None, usecols = range(0,9), nrows = DEPRES_NROWS) random_tweets_df = pd.read_csv(RANDOM_TWEETS_CSV, encoding = "ISO-8859-1", usecols = range(0,4), nrows = RANDOM_NROWS) depressive_tweets_df.head() random_tweets_df.head() ``` ## Section 2: Data Processing ### Load Pretrained Word2Vec Model The pretrained vectors for the Word2Vec model is from [here](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit). Using a Keyed Vectors file, we can get the embedding of any word by calling `.word_vec(word)` and we can get all the words in the model's vocabulary through `.vocab`. ``` word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True) ``` ### Preprocessing Preprocessing the tweets in order to: * Remove links and images * Remove hashtags * Remove @ mentions * Remove emojis * Remove stop words * Remove punctuation * Get rid of stuff like "what's" and making it "what is' * Stem words so they are all the same tense (e.g. 
ran -> run) ``` # Expand Contraction cList = { "ain't": "am not", "aren't": "are not", "can't": "cannot", "can't've": "cannot have", "'cause": "because", "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'd've": "he would have", "he'll": "he will", "he'll've": "he will have", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am", "I've": "I have", "isn't": "is not", "it'd": "it had", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so is", "that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there had", "there'd've": "there would have", "there's": "there is", "they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we had", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we 
are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'alls": "you alls", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you had", "you'd've": "you would have", "you'll": "you you will", "you'll've": "you you will have", "you're": "you are", "you've": "you have" } c_re = re.compile('(%s)' % '|'.join(cList.keys())) def expandContractions(text, c_re=c_re): def replace(match): return cList[match.group(0)] return c_re.sub(replace, text) def clean_tweets(tweets): cleaned_tweets = [] for tweet in tweets: tweet = str(tweet) # if url links then dont append to avoid news articles # also check tweet length, save those > 10 (length of word "depression") if re.match("(\w+:\/\/\S+)", tweet) == None and len(tweet) > 10: #remove hashtag, @mention, emoji and image URLs tweet = ' '.join(re.sub("(@[A-Za-z0-9]+)|(\#[A-Za-z0-9]+)|(<Emoji:.*>)|(pic\.twitter\.com\/.*)", " ", tweet).split()) #fix weirdly encoded texts tweet = ftfy.fix_text(tweet) #expand contraction tweet = expandContractions(tweet) #remove punctuation tweet = ' '.join(re.sub("([^0-9A-Za-z \t])", " ", tweet).split()) #stop words stop_words = set(stopwords.words('english')) word_tokens = nltk.word_tokenize(tweet) filtered_sentence = [w for w in word_tokens if not w in stop_words] tweet = ' '.join(filtered_sentence) #stemming words tweet = PorterStemmer().stem(tweet) cleaned_tweets.append(tweet) return 
cleaned_tweets ``` Applying the preprocessing `clean_text` function to every element in the depressive tweets and random tweets data. ``` depressive_tweets_arr = [x for x in depressive_tweets_df[5]] random_tweets_arr = [x for x in random_tweets_df['SentimentText']] X_d = clean_tweets(depressive_tweets_arr) X_r = clean_tweets(random_tweets_arr) ``` ### Tokenizer Using a Tokenizer to assign indices and filtering out unfrequent words. Tokenizer creates a map of every unique word and an assigned index to it. The parameter called num_words indicates that we only care about the top 20000 most frequent words. ``` tokenizer = Tokenizer(num_words=MAX_NB_WORDS) tokenizer.fit_on_texts(X_d + X_r) ``` Applying the tokenizer to depressive tweets and random tweets data. ``` sequences_d = tokenizer.texts_to_sequences(X_d) sequences_r = tokenizer.texts_to_sequences(X_r) ``` Number of unique words in tokenizer. Has to be <= 20,000. ``` word_index = tokenizer.word_index print('Found %s unique tokens' % len(word_index)) ``` Pad sequences all to the same length of 140 words. ``` data_d = pad_sequences(sequences_d, maxlen=MAX_SEQUENCE_LENGTH) data_r = pad_sequences(sequences_r, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data_d tensor:', data_d.shape) print('Shape of data_r tensor:', data_r.shape) ``` ### Embedding Matrix The embedding matrix is a `n x m` matrix where `n` is the number of words and `m` is the dimension of the embedding. In this case, `m=300` and `n=20000`. We take the min between the number of unique words in our tokenizer and max words in case there are less unique words than the max we specified. 
``` nb_words = min(MAX_NB_WORDS, len(word_index)) embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM)) for (word, idx) in word_index.items(): if word in word2vec.vocab and idx < MAX_NB_WORDS: embedding_matrix[idx] = word2vec.word_vec(word) ``` ### Splitting and Formatting Data Assigning labels to the depressive tweets and random tweets data, and splitting the arrays into test (60%), validation (20%), and train data (20%). Combine depressive tweets and random tweets arrays and shuffle. ``` # Assigning labels to the depressive tweets and random tweets data labels_d = np.array([1] * DEPRES_NROWS) labels_r = np.array([0] * RANDOM_NROWS) # Splitting the arrays into test (60%), validation (20%), and train data (20%) perm_d = np.random.permutation(len(data_d)) idx_train_d = perm_d[:int(len(data_d)*(TRAIN_SPLIT))] idx_test_d = perm_d[int(len(data_d)*(TRAIN_SPLIT)):int(len(data_d)*(TRAIN_SPLIT+TEST_SPLIT))] idx_val_d = perm_d[int(len(data_d)*(TRAIN_SPLIT+TEST_SPLIT)):] perm_r = np.random.permutation(len(data_r)) idx_train_r = perm_r[:int(len(data_r)*(TRAIN_SPLIT))] idx_test_r = perm_r[int(len(data_r)*(TRAIN_SPLIT)):int(len(data_r)*(TRAIN_SPLIT+TEST_SPLIT))] idx_val_r = perm_r[int(len(data_r)*(TRAIN_SPLIT+TEST_SPLIT)):] # Combine depressive tweets and random tweets arrays data_train = np.concatenate((data_d[idx_train_d], data_r[idx_train_r])) labels_train = np.concatenate((labels_d[idx_train_d], labels_r[idx_train_r])) data_test = np.concatenate((data_d[idx_test_d], data_r[idx_test_r])) labels_test = np.concatenate((labels_d[idx_test_d], labels_r[idx_test_r])) data_val = np.concatenate((data_d[idx_val_d], data_r[idx_val_r])) labels_val = np.concatenate((labels_d[idx_val_d], labels_r[idx_val_r])) # Shuffling perm_train = np.random.permutation(len(data_train)) data_train = data_train[perm_train] labels_train = labels_train[perm_train] perm_test = np.random.permutation(len(data_test)) data_test = data_test[perm_test] labels_test = labels_test[perm_test] perm_val = 
np.random.permutation(len(data_val)) data_val = data_val[perm_val] labels_val = labels_val[perm_val] ``` ## Section 3: Building the Model ### Building Model (LSTM + CNN) The model takes in an input and then outputs a single number representing the probability that the tweet indicates depression. The model takes in each input sentence, replace it with it's embeddings, then run the new embedding vector through a convolutional layer. CNNs are excellent at learning spatial structure from data, the convolutional layer takes advantage of that and learn some structure from the sequential data then pass into a standard LSTM layer. Last but not least, the output of the LSTM layer is fed into a standard Dense model for prediction. ``` model = Sequential() # Embedded layer model.add(Embedding(len(embedding_matrix), EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)) # Convolutional Layer model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')) model.add(MaxPooling1D(pool_size=2)) model.add(Dropout(0.2)) # LSTM Layer model.add(LSTM(300)) model.add(Dropout(0.2)) model.add(Dense(1, activation='sigmoid')) ``` ### Compiling Model ``` model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['acc']) print(model.summary()) ``` ## Section 4: Training the Model The model is trained `EPOCHS` time, and Early Stopping argument is used to end training if the loss or accuracy don't improve within 3 epochs. 
``` early_stop = EarlyStopping(monitor='val_loss', patience=3) hist = model.fit(data_train, labels_train, \ validation_data=(data_val, labels_val), \ epochs=EPOCHS, batch_size=40, shuffle=True, \ callbacks=[early_stop]) ``` ### Results Summarize history for accuracy ``` plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() ``` Summarize history for loss ``` plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` Percentage accuracy of model ``` labels_pred = model.predict(data_test) labels_pred = np.round(labels_pred.flatten()) accuracy = accuracy_score(labels_test, labels_pred) print("Accuracy: %.2f%%" % (accuracy*100)) ``` f1, precision, and recall scores ``` print(classification_report(labels_test, labels_pred)) ``` ## Section 5: Comparing the Model to Base Line In order to evaluate the effectiveness of the LSTM + CNN model, a logistic regression model is trained with the same train data and the same number of epochs, and tested with the same test data. ### Logistic Regression Base Line Model ``` class LogReg: """ Class to represent a logistic regression model. """ def __init__(self, l_rate, epochs, n_features): """ Create a new model with certain parameters. :param l_rate: Initial learning rate for model. :param epoch: Number of epochs to train for. :param n_features: Number of features. """ self.l_rate = l_rate self.epochs = epochs self.coef = [0.0] * n_features self.bias = 0.0 def sigmoid(self, score, threshold=20.0): """ Prevent overflow of exp by capping activation at 20. 
:param score: A real valued number to convert into a number between 0 and 1 """ if abs(score) > threshold: score = threshold * sign(score) activation = exp(score) return activation / (1.0 + activation) def predict(self, features): """ Given an example's features and the coefficients, predicts the class. :param features: List of real valued features for a single training example. :return: Returns the predicted class (either 0 or 1). """ value = sum([features[i]*self.coef[i] for i in range(len(features))]) + self.bias return self.sigmoid(value) def sg_update(self, features, label): """ Computes the update to the weights based on a predicted example. :param features: Features to train on. :param label: Corresponding label for features. """ yhat = self.predict(features) e = label - yhat self.bias = self.bias + self.l_rate * e * yhat * (1-yhat) for i in range(len(features)): self.coef[i] = self.coef[i] + self.l_rate * e * yhat * (1-yhat) * features[i] return def train(self, X, y): """ Computes logistic regression coefficients using stochastic gradient descent. :param X: Features to train on. :param y: Corresponding label for each set of features. :return: Returns a list of model weight coefficients where coef[0] is the bias. """ for epoch in range(self.epochs): for features, label in zip(X, y): self.sg_update(features, label) return self.bias, self.coef def get_accuracy(y_bar, y_pred): """ Computes what percent of the total testing data the model classified correctly. :param y_bar: List of ground truth classes for each example. :param y_pred: List of model predicted class for each example. :return: Returns a real number between 0 and 1 for the model accuracy. 
""" correct = 0 for i in range(len(y_bar)): if y_bar[i] == y_pred[i]: correct += 1 accuracy = (correct / len(y_bar)) * 100.0 return accuracy ``` Training the logistic regression model ``` # Logistic Model logreg = LogReg(LEARNING_RATE, EPOCHS, len(data_train[0])) bias_logreg, weights_logreg = logreg.train(data_train, labels_train) y_logistic = [round(logreg.predict(example)) for example in data_test] ``` Getting the accuracy of the logistic regression model predicting the test data ``` # Compare accuracies accuracy_logistic = get_accuracy(y_logistic, labels_test) print('Logistic Regression Accuracy: {:0.3f}'.format(accuracy_logistic)) ```
github_jupyter
# Index Day/Night - Run in python This only needs to be done once ``` import pandas as pd import matplotlib.pyplot as plt from mpl_toolkits import mplot3d import numpy as np import cmocean import warnings from matplotlib import rcParams from mpl_toolkits.axes_grid1 import make_axes_locatable warnings.filterwarnings('ignore') #Pickled acoustic data df1022 = pd.read_pickle('processingFiles/df1022.pkl') df1023 = pd.read_pickle('processingFiles/df1023.pkl') dfMLS1 = pd.read_pickle('processingFiles/dfMLS1.pkl') dfMLS2 = pd.read_pickle('processingFiles/dfMLS2.pkl') dfMLS3 = pd.read_pickle('processingFiles/dfMLS3.pkl') dfMLS4 = pd.read_pickle('processingFiles/dfMLS4.pkl') # ENV data files dfEnv1022 = pd.read_csv('processingFiles/Env1022.csv') dfEnv1022.dtime = pd.to_datetime(dfEnv1022.dtime) dfEnv1023 = pd.read_csv('processingFiles/Env1023.csv') dfEnv1023.dtime = pd.to_datetime(dfEnv1023.dtime) def combineEnvAcoustic(dfEnv, dfAcoustic): dfEnv.dtime = pd.to_datetime(dfEnv.dtime) dfEnv = dfEnv.sort_values('dtime') dfEnv = dfEnv.set_index('dtime') dfAcoustic = dfAcoustic.sort_values('datetime') dfAcoustic = dfAcoustic.set_index('datetime') df = dfEnv.reindex(dfAcoustic.index, method='nearest') df = df.merge(dfAcoustic,on='datetime') return df df1022All= combineEnvAcoustic(dfEnv1022, df1022) df1023All= combineEnvAcoustic(dfEnv1023, df1023) dfSurvey1All = df1022All[(df1022All.index > '07-20-2018') & (df1022All.index < '08-21-2018') ] dfSurvey1All.name = 'First Complete Survey' df1023CS2 = df1023All[(df1023All.index > '08-24-2018 15:00:00') & (df1023All.index < '09-07-2018')] df1022CS2 = df1022All[(df1022All.index > '08-25-2018') & (df1022All.index < '09-11-2018 22:00:00') ] dfSurvey2All = pd.concat([df1023CS2, df1022CS2]) dfSurvey2All.name = 'Second Complete Survey' dfEnvMLS1 = dfEnv1023[(dfEnv1023.dtime > '07-20-2018 20:00:00') & (dfEnv1023.dtime < '08-04-2018 00:00:00')] dfEnvMLS1.name = 'MLS-1' dfEnvMLS2 = dfEnv1022[(dfEnv1022.dtime > '07-23-2018 20:00:00') & 
(dfEnv1022.dtime < '08-13-2018 00:00:00')] dfEnvMLS2.name = 'MLS-2' dfEnvMLS3 = pd.concat([dfEnv1023[(dfEnv1023.dtime > '08-13-2018 15:00:00') & (dfEnv1023.dtime < '08-16-2018 20:00:00')], dfEnv1022[(dfEnv1022.dtime > '08-22-2018 00:00:00') & (dfEnv1022.dtime < '08-29-2018 00:00:00')]]) dfEnvMLS3.name = 'MLS-3' dfEnvMLS4 = dfEnv1023[(dfEnv1023.dtime > '08-30-2018 20:00:00') & (dfEnv1023.dtime < '09-11-2018 17:00:00')] dfEnvMLS4.name = 'MLS-4' df1022All= combineEnvAcoustic(dfEnvMLS1, dfMLS1) test = df1022All.resample('H').mean() test['day'] = test.par > 10 test['survey'] = [1 for i in range(len(test))] csvMLS1 = test[['mwd','day','survey']] csvMLS1['hour'] = [i for i in range(len(csvMLS1))] df1022All= combineEnvAcoustic(dfEnvMLS2, dfMLS2) test = df1022All.resample('H').mean() test['day'] = test.par > 10 test['survey'] = [2 for i in range(len(test))] csvMLS2 = test[['mwd','day','survey']] csvMLS2['hour'] = [i for i in range(len(csvMLS2))] df1022All= combineEnvAcoustic(dfEnvMLS3, dfMLS3) test = df1022All.resample('H').mean() test['day'] = test.par > 10 test['survey'] = [3 for i in range(len(test))] csvMLS3 = test[['mwd','day','survey']] csvMLS3['hour'] = [i for i in range(len(csvMLS3))] df1022All= combineEnvAcoustic(dfEnvMLS4, dfMLS4) test = df1022All.resample('H').mean() test['day'] = test.par > 10 test['survey'] = [4 for i in range(len(test))] csvMLS4 = test[['mwd','day','survey']] csvMLS4['hour'] = [i for i in range(len(csvMLS4))] csvAll = pd.concat([csvMLS1,csvMLS2,csvMLS3,csvMLS4]) csvAll['julian']= csvAll.index.to_julian_date() csvAll.julian = csvAll.julian-pd.to_datetime('2017-12-31').to_julian_date() csvAll.day = csvAll.day.astype(int) csvAll.julian = csvAll.julian.astype(float) csvAll.to_csv('processingFiles/daynight.csv') ``` # Switch to R ``` df<-read.csv('processingFiles/daynight.csv') df <- df[complete.cases(df),] library(nlme) library(mgcv) library(dplyr) df2 = distinct(df,julian,.keep_all=TRUE) mod1<-gls(mwd ~ day*julian, data=df2, correl = corCAR1(form 
= ~julian)) # whether I use the survey as cetegorical or continuous summary(mod1) ``` # Spatial model - back to python This only needs to be done once to make csv versions of the dataframes for use in R. ``` import pandas as pd df = pd.read_pickle('processingFiles/dfMLS1Grid.pkl') df.to_csv('processingFiles/MLS1Grid.csv') df = pd.read_pickle('processingFiles/dfMLS2Grid.pkl') df.to_csv('processingFiles/MLS2Grid.csv') df = pd.read_pickle('processingFiles/dfMLS3Grid.pkl') df.to_csv('processingFiles/MLS3Grid.csv') df = pd.read_pickle('processingFiles/dfMLS4Grid.pkl') df.to_csv('processingFiles/MLS4Grid.csv') df = pd.read_pickle('processingFiles/dfS1Grid.pkl') df.to_csv('processingFiles/S1Grid.csv') df = pd.read_pickle('processingFiles/dfS2Grid.pkl') df.to_csv('processingFiles/S2Grid.csv') ``` ## R part ``` library(nlme) library(mgcv) library(mapproj) library(sp) # Read in all of the csv files and combine into small survey and big survey dataframes df1<-read.csv('processingFiles/MLS1Grid.csv') df1$survey=factor(1) df2<-read.csv('processingFiles/MLS2Grid.csv') df2$survey=factor(2) df3<-read.csv('processingFiles/MLS3Grid.csv') df3$survey=factor(3) df4<-read.csv('processingFiles/MLS4Grid.csv') df4$survey=factor(4) dfAllSmall <- rbind(df1, df2,df3,df4) df1<-read.csv('processingFiles/S1Grid.csv') df1$survey=factor(1) df2<-read.csv('processingFiles/S2Grid.csv') df2$survey=factor(2) dfAllLarge <- rbind(df1, df2) # Let's fit a model for the large scale survey dfAll <- dfAllLarge f.lm <- lm(nasc ~ survey, data=dfAll) x <- mapproject(dfAll$lonC, dfAll$latC, "albers", param=c(min(dfAll$latC), max(dfAll$latC))) dfAll$x <- x$x dfAll$y <- x$y f.Gaus <-gls(nasc ~ survey, data=dfAll, correl=corGaus(form= ~ x+y | survey, nugget=T)) summary(f.Gaus) plot(Variogram(f.Gaus)) anova(f.Gaus) # now we can fit the model for the small scale survey dfAll <- dfAllSmall f.lm <- lm(nasc ~ survey, data=dfAll) x <- mapproject(dfAll$lonC, dfAll$latC, "albers", param=c(min(dfAll$latC), max(dfAll$latC))) 
dfAll$x <- x$x dfAll$y <- x$y f.Gaus <-gls(nasc ~ survey, data=dfAll, correl=corGaus(form= ~ x+y | survey, nugget=T)) summary(f.Gaus) plot(Variogram(f.Gaus)) anova(f.Gaus) ``` # Target Strength As a function of time ``` # Start with the basic linear model library(nlme) library(mgcv) library(dplyr) df<-read.csv('processingFiles/targets6h.csv') mdl <- lm(y ~x,data=df) summary(mdl) # Then check the residuals par(mfrow=c(2,2)) plot(mdl) # and finally take a look at potential autocorrelation effects par(mfrow=c(1,2)) plot(residuals(mdl)) acf(residuals(mdl), lag.max=24) # The temporal autocorrelation doesn't seem significant, but here's the model df <- df[complete.cases(df),] df2 = distinct(df,x,.keep_all=TRUE) mod1<-gls(y ~ x, data=df2, correl = corCAR1(form = ~x)) summary(mod1) ``` ### Target strength Spatial version using gridded data ``` library(nlme) library(mgcv) library(mapproj) library(sp) # Read in all of the csv files and combine into small survey and big survey dataframes df1<-read.csv('processingFiles/gridTS1.csv') df1$survey=factor(1) df2<-read.csv('processingFiles/gridTS2.csv') df2$survey=factor(2) df3<-read.csv('processingFiles/gridTS3.csv') df3$survey=factor(3) df4<-read.csv('processingFiles/gridTS4.csv') df4$survey=factor(4) dfAllSmall <- rbind(df1, df2,df3,df4) dfAll <- dfAllSmall f.lm <- lm(ts ~ survey, data=dfAll) x <- mapproject(dfAll$lonC, dfAll$latC, "albers", param=c(min(dfAll$latC), max(dfAll$latC))) dfAll$x <- x$x dfAll$y <- x$y f.Gaus <-gls(ts ~ survey, data=dfAll, correl=corGaus(form= ~ x+y | survey, nugget=T)) summary(f.Gaus) plot(Variogram(f.Gaus)) anova(f.Gaus) a = summary(f.Gaus) a$tTable 10*log10(a$tTable[1]-a$tTable[5]) 10*log10(a$tTable[1]) 10*log10(a$tTable[1]+a$tTable[5]) 10*log10(a$tTable[2]-a$tTable[6]) 10*log10(a$tTable[2]) 10*log10(a$tTable[2]+a$tTable[6]) 10*log10(a$tTable[3]-a$tTable[7]) 10*log10(a$tTable[3]) 10*log10(a$tTable[3]+a$tTable[7]) 10*log10(a$tTable[4]-a$tTable[8]) 10*log10(a$tTable[4]) 
10*log10(a$tTable[4]+a$tTable[8]) ```
github_jupyter
# Feldman and Cousins intervals with asymptotics. This is a copy of `FC_interval_freq.ipynb` using the asymptotic formulae instead of toys. ``` import numpy as np import matplotlib.pyplot as plt import os import time import zfit from zfit.loss import UnbinnedNLL from zfit.minimize import Minuit zfit.settings.set_seed(10) from hepstats.hypotests.calculators import AsymptoticCalculator from hepstats.hypotests import ConfidenceInterval from hepstats.hypotests.parameters import POIarray from hepstats.hypotests.exceptions import POIRangeError from utils import one_minus_cl_plot, pltdist, plotfitresult ``` In this example we consider an experiment where the observable $x$ is simply the measured value of $\mu$ in an experiment with a Gaussian resolution with known width $\sigma = 1$. We will compute the confidence belt for a 90 % confidence level for the mean of the Gaussian $\mu$. We define a sampler below for a Gaussian pdf with $\sigma = 1$ using the `zfit` library, the sampler allows generating samples for different values of $\mu$. 1000 entries are generated for each sample. ``` bounds = (-10, 10) obs = zfit.Space('x', limits=bounds) mean = zfit.Parameter("mean", 0) sigma = zfit.Parameter("sigma", 1.0) model = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma) data = model.create_sampler(1000) data.resample() ``` Below is defined the negative-likelihood function which is needed to compute Feldman and Cousins intervals as described in [arXiv:1109.0714](https://arxiv.org/abs/1109.0714). The negative-likelihood function is minimised to compute the measured mean $x$ and its uncertainty $\sigma_x$. 
``` # Create the negative log likelihood nll = UnbinnedNLL(model=model, data=data) # Instantiate a minuit minimizer minimizer = Minuit(verbosity=0) # minimisation of the loss function minimum = minimizer.minimize(loss=nll) minimum.hesse(); print(minimum) x_err = minimum.params[mean]["minuit_hesse"]["error"] ``` To compute the confidence belt on $\mu$ 90 % CL intervals have to be computed for several values of the measured mean $x$. Samples are generated for $\mu = n \times \sigma_x$ with $n = -6, -5, -4, ..., 3, 4, 5, 6$, and fitted to measure the mean $x_n$. 90 % CL intervals are evaluated for each $x_n$ for the two following cases, $\mu > 0$ and $\mu$ unbounded. With `hepstats`, the intervals are obtained with the `ConfidenceInterval` object using a calculator. Here the calculator is the `AsymptoticCalculator` which computes the intervals using asymptotic formulae (see [Asymptotic formulae for likelihood-based tests of new physics](https://arxiv.org/pdf/1007.1727.pdf)), an example of a 68 % CL interval with the `AsymptoticCalculator` can be found [here](https://github.com/scikit-hep/hepstats/blob/master/notebooks/hypotests/confidenceinterval_asy_zfit.ipynb). The option `qtilde = True` should be used if $\mu > 0$. 
``` results = {} for n in np.arange(-6, 7, 1.0): x = n * x_err if n not in results: zfit.settings.set_seed(5) data.resample(param_values={mean: x}) minimum = minimizer.minimize(loss=nll) minimum.hesse(); results_n = {} results_n["x"] = minimum.params[mean]["value"] results_n["x_err"] = minimum.params[mean]["minuit_hesse"]["error"] calculator = AsymptoticCalculator(minimum, minimizer) x_min = results_n["x"] - results_n["x_err"]*3 x_max = results_n["x"] + results_n["x_err"]*3 if n < -1: x_max = max(0.5 * results_n["x_err"], x_max) poinull = POIarray(mean, np.linspace(x_min, x_max, 50)) results_n["calculator"] = calculator results_n["poinull"] = poinull else: results_n = results[n] calculator = results_n["calculator"] poinull = results_n["poinull"] if "mu_lower" not in results_n: for qtilde in [True, False]: while True: try: ci = ConfidenceInterval(calculator, poinull, qtilde=qtilde) interval = ci.interval(alpha=0.05, printlevel=0) break except POIRangeError: values = poinull.values poinull = POIarray(mean, np.concatenate([values, [values[-1] + np.diff(values)[0]]])) results_n["poinull"] = poinull if qtilde: results_n["mu_lower"] = interval["lower"] results_n["mu_upper"] = interval["upper"] else: results_n["mu_lower_unbound"] = interval["lower"] results_n["mu_upper_unbound"] = interval["upper"] results[n] = results_n ``` The plot of the confidence belt of $\mu$ at 90 % CL as function of the measured mean values $x$ (in unit of $\sigma_x$), for the bounded and unbounded case are shown below. 
``` f = plt.figure(figsize=(9, 8)) plt.plot([v["x"]/v["x_err"] for v in results.values()], [v["mu_upper_unbound"]/v["x_err"] for v in results.values()], color="black", label="90 % CL, no boundaries") plt.plot([v["x"]/v["x_err"] for v in results.values()], [v["mu_lower_unbound"]/v["x_err"] for v in results.values()], color="black") plt.plot([v["x"]/v["x_err"] for v in results.values()], [v["mu_upper"]/v["x_err"] for v in results.values()], "--", color="crimson", label="90 % CL, $\mu > 0$") plt.plot([v["x"]/v["x_err"] for v in results.values()], [v["mu_lower"]/v["x_err"] for v in results.values()], "--", color="crimson") plt.ylim(0.) plt.legend(fontsize=15) plt.ylabel("Mean $\mu$", fontsize=15) plt.xlabel("Measured mean $x$", fontsize=15); ``` For the unbounded and the $\mu > 0$ cases the plot reproduces the figure 3 and 10, respectively, of [A Unified Approach to the Classical Statistical Analysis of Small Signals, Gary J. Feldman, Robert D. Cousins](https://arxiv.org/pdf/physics/9711021.pdf).
github_jupyter
``` import matplotlib.pyplot as plt import numpy as np import pandas as pd import xarray as xr import cartopy.crs as ccrs import glob import os import scipy.stats from matplotlib import cm import seaborn as sns import dask import pickle from datetime import datetime import ast from dask.distributed import Client, LocalCluster if __name__ == "__main__": cluster=LocalCluster(host="tcp://127.0.0.1:2459",dashboard_address="127.0.0.1:2461",n_workers=4) client = Client(cluster) models = [x.split('/')[-1] for x in glob.glob("/terra/data/cmip5/global/rcp85/*")] dask.config.set(**{'array.slicing.split_large_chunks': False}) import warnings warnings.simplefilter("ignore") #annoying cftime serialization warning dic = {} for model in models: try: rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/"+str(model)+"/r1i1p1/mon/native/tas_*")) rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas rcp85 = rcp85.sel(time = slice('2000','2250')) hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/"+str(model)+"/r1i1p1/mon/native/tas_*")) hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas x = xr.concat([hist,rcp85],dim='time').load() x = x.sortby(x.time) x = x.resample(time='M').mean() dic[model] = x - hist.sel(time=slice('1979','2005')).mean(dim='time') except: if model == 'BNU-ESM': # no historical monthly data rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/"+str(model)+"/r1i1p1/mon/native/tas_*")) rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas rcp85 = rcp85.sel(time = slice('2000','2250')) hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/"+str(model)+"/r1i1p1/day/native/tas_*")) hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 
'nearest').tas hist = hist.resample(time='M').mean() x = xr.concat([hist,rcp85],dim='time').load() x = x.sortby(x.time) x = x.resample(time='M').mean() dic[model] = x - hist.sel(time=slice('1979','2005')).mean(dim='time') elif model == 'MPI-ESM-LR': # a problem with the later than 2100 data rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/"+str(model)+"/r1i1p1/mon/native/tas_*"))[0] rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas rcp85 = rcp85.sel(time = slice('2000','2250')) hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/"+str(model)+"/r1i1p1/mon/native/tas_*")) hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas x = xr.concat([hist,rcp85],dim='time').load() x = x.sortby(x.time) x = x.resample(time='M').mean() dic[model] = x - (x.sel(time=slice('1979','2005')).mean(dim='time')) elif model == 'CNRM-CM5': # a problem with the later than 2100 data rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/"+str(model)+"/r1i1p1/mon/native/tas_*"))[:2] rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas rcp85 = rcp85.sel(time = slice('2000','2250')) hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/"+str(model)+"/r1i1p1/mon/native/tas_*")) hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas x = xr.concat([hist,rcp85],dim='time').load() x = x.sortby(x.time) x = x.resample(time='M').mean() dic[model] = x - (x.sel(time=slice('1979','2005')).mean(dim='time')) #NOAA x = xr.open_mfdataset('/home/pmarsh/NOAA_2deg/air.2m.mon.mean.nc', decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').air x = x.sortby(x.time) x = x.resample(time='M').mean() x = x.sel(time=slice('1940','2016')) dic['NOAA'] = 
x - (x.sel(time=slice('1979','2005')).mean(dim='time')) #ERA5 - 1hr - daily avalable but missing some data x = xr.open_mfdataset(sorted(glob.glob('/terra/data/reanalysis/global/reanalysis/ECMWF/ERA5/1hr/native/tas_*')), decode_cf=True).sel(latitude = -34, method = 'nearest').sel(longitude = 18, method = 'nearest').tas x = x.resample(time='M').mean() x = x.sortby(x.time).load() dic['ERA5'] = x - (x.sel(time=slice('1979','2005')).mean(dim='time')) pickle.dump(dic, open( "monthly_tas_dic.p", "wb" ) ) client.close() ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
``` # Regresion Basica: Predecir eficiencia de gasolina <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/es/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/es/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/es/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Note: Nuestra comunidad de Tensorflow ha traducido estos documentos. Como las traducciones de la comunidad son basados en el "mejor esfuerzo", no hay ninguna garantia que esta sea un reflejo preciso y actual de la [Documentacion Oficial en Ingles](https://www.tensorflow.org/?hl=en). Si tienen sugerencias sobre como mejorar esta traduccion, por favor envian un "Pull request" al siguiente repositorio [tensorflow/docs](https://github.com/tensorflow/docs). Para ofrecerse como voluntario o hacer revision de las traducciones de la Comunidad por favor contacten al siguiente grupo [docs@tensorflow.org list](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs). En un problema de *regresion*, buscamos predecir la salida de un valor continuo como la probabilidad de un precio. En contraste en un problema de *Clasificacion*, buscamos seleccionar una clase de una lista de clases (por ejemplo, en donde una imagen contenga una manzana o una naranja queremos reconocer cual es la fruta en la imagen). 
Este libro usa el set de datos clasico [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) y construye un modelo para predecir la eficiencia de vehiculos de 1970 y 1980. Para hacer esto proveeremos el modelo con una descripcion de muchos automoviles de ese periodo. Esta descripcion incluye atributos como: Cilindros, desplazamiento, potencia y peso. Este ejemplo usa el API `tf.keras` , revise [Esta Guia](https://www.tensorflow.org/guide/keras) para obtener mas detalles. ``` # Use seaborn for pairplot !pip install seaborn from __future__ import absolute_import, division, print_function, unicode_literals import pathlib import matplotlib.pyplot as plt import pandas as pd import seaborn as sns try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers print(tf.__version__) ``` ## El set de Datos de MPG el set de datos esta disponible de el siguiente repositorio [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). ### Obtenga la data Primero descargue el set de datos. ``` dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data") dataset_path ``` Importelo usando pandas. ``` column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight', 'Acceleration', 'Model Year', 'Origin'] raw_dataset = pd.read_csv(dataset_path, names=column_names, na_values = "?", comment='\t', sep=" ", skipinitialspace=True) dataset = raw_dataset.copy() dataset.tail() ``` ### Limpie la data El set de datos contiene algunos valores desconocidos. ``` dataset.isna().sum() ``` **Para** Mantener este tutorial inicial sencillo eliminemos las siguientes filas. ``` dataset = dataset.dropna() ``` La columna de `"Origin"` realmente es categorica, no numerica. 
Entonces conviertala a un "one-hot": ``` origin = dataset.pop('Origin') dataset['USA'] = (origin == 1)*1.0 dataset['Europe'] = (origin == 2)*1.0 dataset['Japan'] = (origin == 3)*1.0 dataset.tail() ``` ### Dividamos la data en entrenamiento y prueba Ahora divida el set de datos en un set de entrenamiento y otro de pruebas. Usaremos el set de pruebas en la evaluacion final de nuestro modelo. ``` train_dataset = dataset.sample(frac=0.8,random_state=0) test_dataset = dataset.drop(train_dataset.index) ``` ### Inspeccione la data Revise rapidamente la distribucion conjunta de un par de columnas de el set de entrenamiento. ``` sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde") ``` Tambien revise las estadisticas generales: ``` train_stats = train_dataset.describe() train_stats.pop("MPG") train_stats = train_stats.transpose() train_stats ``` ### Separe las caracteristicas de las etiquetas. Separe el valor objetivo, o la "etiqueta" de las caracteristicas. Esta etiqueta es el valor que entrenara el modelo para predecir. ``` train_labels = train_dataset.pop('MPG') test_labels = test_dataset.pop('MPG') ``` ### Normalice la data Revise otra vez el bloque de `train_stats` que se presento antes y note la diferencia de rangos de cada caracteristica. Es una buena práctica normalizar funciones que utilizan diferentes escalas y rangos. Aunque el modelo * podría * converger sin normalización de características, dificulta el entrenamiento y hace que el modelo resultante dependa de la elección de las unidades utilizadas en la entrada. Nota: Aunque generamos intencionalmente estas estadísticas solo del conjunto de datos de entrenamiento, estas estadísticas también se utilizarán para normalizar el conjunto de datos de prueba. Necesitamos hacer eso para proyectar el conjunto de datos de prueba en la misma distribución en la que el modelo ha sido entrenado. 
``` def norm(x): return (x - train_stats['mean']) / train_stats['std'] normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) ``` Estos datos normalizados es lo que usaremos para entrenar el modelo. Precaución: las estadísticas utilizadas para normalizar las entradas aquí (media y desviación estándar) deben aplicarse a cualquier otro dato que se alimente al modelo, junto con la codificación de un punto que hicimos anteriormente. Eso incluye el conjunto de pruebas, así como los datos en vivo cuando el modelo se usa en producción. ## El modelo ### Construye el modelo Construyamos nuestro modelo. Aquí, utilizaremos un modelo `secuencial` con dos capas ocultas densamente conectadas y una capa de salida que devuelve un único valor continuo. Los pasos de construcción del modelo se envuelven en una función, `build_model`, ya que crearemos un segundo modelo, más adelante. ``` def build_model(): model = keras.Sequential([ layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]), layers.Dense(64, activation='relu'), layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model model = build_model() ``` ### Inspeccione el modelo Use el método `.summary` para imprimir una descripción simple del modelo ``` model.summary() ``` Ahora pruebe el modelo. Tome un lote de ejemplos `10` de los datos de entrenamiento y llame a` model.predict` en él. ``` example_batch = normed_train_data[:10] example_result = model.predict(example_batch) example_result ``` Parece estar funcionando, y produce un resultado de la forma y tipo esperados. ### Entrenar a la modelo Entrene el modelo durante 1000 épocas y registre la precisión de entrenamiento y validación en el objeto `history`. 
``` # Display training progress by printing a single dot for each completed epoch class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print('') print('.', end='') EPOCHS = 1000 history = model.fit( normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[PrintDot()]) ``` Visualice el progreso de entrenamiento del modelo usando las estadísticas almacenadas en el objeto `history`. ``` hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch hist.tail() def plot_history(history): hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Abs Error [MPG]') plt.plot(hist['epoch'], hist['mae'], label='Train Error') plt.plot(hist['epoch'], hist['val_mae'], label = 'Val Error') plt.ylim([0,5]) plt.legend() plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Square Error [$MPG^2$]') plt.plot(hist['epoch'], hist['mse'], label='Train Error') plt.plot(hist['epoch'], hist['val_mse'], label = 'Val Error') plt.ylim([0,20]) plt.legend() plt.show() plot_history(history) ``` Este gráfico muestra poca mejora, o incluso degradación en el error de validación después de aproximadamente 100 épocas. Actualicemos la llamada `model.fit` para detener automáticamente el entrenamiento cuando el puntaje de validación no mejore. Utilizaremos una * devolución de llamada de EarlyStopping * que pruebe una condición de entrenamiento para cada época. Si transcurre una cantidad determinada de épocas sin mostrar mejoría, entonces detiene automáticamente el entrenamiento. Puedes obtener más información sobre esta devolución de llamada [Aca](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping). 
``` model = build_model() # The patience parameter is the amount of epochs to check for improvement early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) history = model.fit(normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()]) plot_history(history) ``` El gráfico muestra que en el conjunto de validación, el error promedio generalmente es de alrededor de +/- 2 MPG. ¿Es esto bueno? Le dejaremos esa decisión a usted. Veamos qué tan bien generaliza el modelo al usar el conjunto ** test **, que no usamos al entrenar el modelo. Esto nos dice qué tan bien podemos esperar que el modelo prediga cuándo lo usamos en el mundo real. ``` loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae)) ``` ### Haga Predicciones Finalmente, prediga los valores de MPG utilizando datos en el conjunto de pruebas: ``` test_predictions = model.predict(normed_test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [MPG]') plt.ylabel('Predictions [MPG]') plt.axis('equal') plt.axis('square') plt.xlim([0,plt.xlim()[1]]) plt.ylim([0,plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100]) ``` Parece que nuestro modelo predice razonablemente bien. Echemos un vistazo a la distribución de errores. ``` error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error [MPG]") _ = plt.ylabel("Count") ``` No es del todo gaussiano, pero podríamos esperar eso porque el número de muestras es muy pequeño. ## Conclusion Este cuaderno introdujo algunas técnicas para manejar un problema de regresión. * El error cuadrático medio (MSE) es una función de pérdida común utilizada para problemas de regresión (se utilizan diferentes funciones de pérdida para problemas de clasificación). * Del mismo modo, las métricas de evaluación utilizadas para la regresión difieren de la clasificación. 
Una métrica de regresión común es el error absoluto medio (MAE). * Cuando las características de datos de entrada numéricos tienen valores con diferentes rangos, cada característica debe escalarse independientemente al mismo rango. * Si no hay muchos datos de entrenamiento, una técnica es preferir una red pequeña con pocas capas ocultas para evitar el sobreajuste. * La detención temprana es una técnica útil para evitar el sobreajuste.
github_jupyter
``` %load_ext watermark %watermark -p torch,pytorch_lightning,torchvision,torchmetrics,matplotlib %load_ext pycodestyle_magic %flake8_on --ignore W291,W293,E703 ``` <a href="https://pytorch.org"><img src="https://raw.githubusercontent.com/pytorch/pytorch/master/docs/source/_static/img/pytorch-logo-dark.svg" width="90"/></a> &nbsp; &nbsp;&nbsp;&nbsp;<a href="https://www.pytorchlightning.ai"><img src="https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/docs/source/_static/images/logo.svg" width="150"/></a> # Model Zoo -- VGG16 Trained on CIFAR-10 This notebook implements the VGG16 convolutional network [1] and applies it to CIFAR-10 digit classification. ![](../pytorch_ipynb/images/vgg16/vgg16-arch-table.png) ### References - [1] Simonyan, K., & Zisserman, A. (2014). [Very deep convolutional networks for large-scale image recognition](https://arxiv.org/abs/1409.1556). arXiv preprint arXiv:1409.1556. ## General settings and hyperparameters - Here, we specify some general hyperparameter values and general settings - Note that for small datatsets, it is not necessary and better not to use multiple workers as it can sometimes cause issues with too many open files in PyTorch. So, if you have problems with the data loader later, try setting `NUM_WORKERS = 0` instead. ``` BATCH_SIZE = 256 NUM_EPOCHS = 25 LEARNING_RATE = 0.001 NUM_WORKERS = 4 ``` ## Implementing a Neural Network using PyTorch Lightning's `LightningModule` - In this section, we set up the main model architecture using the `LightningModule` from PyTorch Lightning. - When using PyTorch Lightning, we can start with defining our neural network model in pure PyTorch, and then we use it in the `LightningModule` to get all the extra benefits that PyTorch Lightning provides. 
- In this case, we implement the VGG16 architecture from scratch in pure PyTorch below:
kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) ) self.features = nn.Sequential( self.block_1, self.block_2, self.block_3, self.block_4, self.block_5 ) self.classifier = nn.Sequential( nn.Linear(512, 4096), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(4096, num_classes), ) # self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) for m in self.modules(): if isinstance(m, torch.nn.Conv2d): #n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels #m.weight.data.normal_(0, np.sqrt(2. / n)) m.weight.detach().normal_(0, 0.05) if m.bias is not None: m.bias.detach().zero_() elif isinstance(m, torch.nn.Linear): m.weight.detach().normal_(0, 0.05) m.bias.detach().detach().zero_() def forward(self, x): x = self.features(x) # x = self.avgpool(x) x = x.view(x.size(0), -1) logits = self.classifier(x) return logits ``` - Next, we can define our `LightningModule` as a wrapper around our PyTorch model: ``` import pytorch_lightning as pl import torchmetrics # LightningModule that receives a PyTorch model as input class LightningModel(pl.LightningModule): def __init__(self, model, learning_rate): super().__init__() self.learning_rate = learning_rate # The inherited PyTorch module self.model = model # Save settings and hyperparameters to the log directory # but skip the model parameters self.save_hyperparameters(ignore=['model']) # Set up attributes for computing the accuracy self.train_acc = torchmetrics.Accuracy() self.valid_acc = torchmetrics.Accuracy() self.test_acc = torchmetrics.Accuracy() # Defining the forward method is only necessary # if you want to use a Trainer's .predict() method (optional) def forward(self, x): return self.model(x) # A common forward step to compute the loss and labels # this is used for training, validation, and testing below def _shared_step(self, batch): features, true_labels = batch logits = self(features) loss = 
torch.nn.functional.cross_entropy(logits, true_labels) predicted_labels = torch.argmax(logits, dim=1) return loss, true_labels, predicted_labels def training_step(self, batch, batch_idx): loss, true_labels, predicted_labels = self._shared_step(batch) self.log("train_loss", loss) # To account for Dropout behavior during evaluation self.model.eval() with torch.no_grad(): _, true_labels, predicted_labels = self._shared_step(batch) self.train_acc.update(predicted_labels, true_labels) self.log("train_acc", self.train_acc, on_epoch=True, on_step=False) self.model.train() return loss # this is passed to the optimzer for training def validation_step(self, batch, batch_idx): loss, true_labels, predicted_labels = self._shared_step(batch) self.log("valid_loss", loss) self.valid_acc(predicted_labels, true_labels) self.log("valid_acc", self.valid_acc, on_epoch=True, on_step=False, prog_bar=True) def test_step(self, batch, batch_idx): loss, true_labels, predicted_labels = self._shared_step(batch) self.test_acc(predicted_labels, true_labels) self.log("test_acc", self.test_acc, on_epoch=True, on_step=False) def configure_optimizers(self): optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) return optimizer ``` ## Setting up the dataset - In this section, we are going to set up our dataset. 
### Inspecting the dataset ``` from torchvision import datasets from torchvision import transforms from torch.utils.data import DataLoader train_dataset = datasets.CIFAR10(root='./data', train=True, transform=transforms.ToTensor(), download=True) train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, drop_last=True, shuffle=True) test_dataset = datasets.CIFAR10(root='./data', train=False, transform=transforms.ToTensor()) test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, drop_last=False, shuffle=False) from collections import Counter train_counter = Counter() for images, labels in train_loader: train_counter.update(labels.tolist()) print('\nTraining label distribution:') sorted(train_counter.items(), key=lambda pair: pair[0]) test_counter = Counter() for images, labels in test_loader: test_counter.update(labels.tolist()) print('\nTest label distribution:') sorted(test_counter.items(), key=lambda pair: pair[0]) ``` ### A quick visual check ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import torchvision for images, labels in train_loader: break plt.figure(figsize=(8, 8)) plt.axis("off") plt.title("Training Images") plt.imshow(np.transpose(torchvision.utils.make_grid( images[:64], padding=2, normalize=True), (1, 2, 0))) plt.show() ``` ### Performance baseline - Especially for imbalanced datasets, it's quite useful to compute a performance baseline. - In classification contexts, a useful baseline is to compute the accuracy for a scenario where the model always predicts the majority class -- you want your model to be better than that! 
``` majority_class = test_counter.most_common(1)[0] majority_class ``` - (To be fair, the classes in the test set are perfectly evenly distributed, so the majority class is an arbitrary choice in this case) ``` baseline_acc = majority_class[1] / sum(test_counter.values()) print('Accuracy when always predicting the majority class:') print(f'{baseline_acc:.2f} ({baseline_acc*100:.2f}%)') ``` ### Setting up a `DataModule` - There are three main ways we can prepare the dataset for Lightning. We can 1. make the dataset part of the model; 2. set up the data loaders as usual and feed them to the fit method of a Lightning Trainer -- the Trainer is introduced in the next subsection; 3. create a LightningDataModule. - Here, we are going to use approach 3, which is the most organized approach. The `LightningDataModule` consists of several self-explanatory methods as we can see below: ``` import os from torch.utils.data.dataset import random_split from torch.utils.data import DataLoader from torchvision import transforms class DataModule(pl.LightningDataModule): def __init__(self, data_path='./'): super().__init__() self.data_path = data_path def prepare_data(self): datasets.CIFAR10(root=self.data_path, download=True) self.train_transform = torchvision.transforms.Compose([ # torchvision.transforms.Resize((70, 70)), # torchvision.transforms.RandomCrop((64, 64)), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) self.test_transform = torchvision.transforms.Compose([ # torchvision.transforms.Resize((70, 70)), # torchvision.transforms.CenterCrop((64, 64)), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) return def setup(self, stage=None): train = datasets.CIFAR10(root=self.data_path, train=True, transform=self.train_transform, download=False) self.test = datasets.CIFAR10(root=self.data_path, train=False, transform=self.test_transform, download=False) self.train, 
self.valid = random_split(train, lengths=[45000, 5000]) def train_dataloader(self): train_loader = DataLoader(dataset=self.train, batch_size=BATCH_SIZE, drop_last=True, shuffle=True, num_workers=NUM_WORKERS) return train_loader def val_dataloader(self): valid_loader = DataLoader(dataset=self.valid, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=NUM_WORKERS) return valid_loader def test_dataloader(self): test_loader = DataLoader(dataset=self.test, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=NUM_WORKERS) return test_loader ``` - Note that the `prepare_data` method is usually used for steps that only need to be executed once, for example, downloading the dataset; the `setup` method defines the the dataset loading -- if you run your code in a distributed setting, this will be called on each node / GPU. - Next, lets initialize the `DataModule`; we use a random seed for reproducibility (so that the data set is shuffled the same way when we re-execute this code): ``` import torch torch.manual_seed(1) data_module = DataModule(data_path='./data') ``` ## Training the model using the PyTorch Lightning Trainer class - Next, we initialize our model. - Also, we define a call back so that we can obtain the model with the best validation set performance after training. - PyTorch Lightning offers [many advanced logging services](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) like Weights & Biases. 
Here, we will keep things simple and use the `CSVLogger`: ``` from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import CSVLogger pytorch_model = PyTorchVGG16(num_classes=10) lightning_model = LightningModel( pytorch_model, learning_rate=LEARNING_RATE) callbacks = [ModelCheckpoint( save_top_k=1, mode='max', monitor="valid_acc")] # save top 1 model logger = CSVLogger(save_dir="logs/", name="my-model") ``` - Now it's time to train our model: ``` import time trainer = pl.Trainer( max_epochs=NUM_EPOCHS, callbacks=callbacks, progress_bar_refresh_rate=50, # recommended for notebooks accelerator="auto", # Uses GPUs or TPUs if available devices="auto", # Uses all available GPUs/TPUs if applicable logger=logger, log_every_n_steps=100) start_time = time.time() trainer.fit(model=lightning_model, datamodule=data_module) runtime = (time.time() - start_time)/60 print(f"Training took {runtime:.2f} min in total.") ``` ## Evaluating the model - After training, let's plot our training ACC and validation ACC using pandas, which, in turn, uses matplotlib for plotting (you may want to consider a [more advanced logger](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) that does that for you): ``` import pandas as pd metrics = pd.read_csv(f"{trainer.logger.log_dir}/metrics.csv") aggreg_metrics = [] agg_col = "epoch" for i, dfg in metrics.groupby(agg_col): agg = dict(dfg.mean()) agg[agg_col] = i aggreg_metrics.append(agg) df_metrics = pd.DataFrame(aggreg_metrics) df_metrics[["train_loss", "valid_loss"]].plot( grid=True, legend=True, xlabel='Epoch', ylabel='Loss') df_metrics[["train_acc", "valid_acc"]].plot( grid=True, legend=True, xlabel='Epoch', ylabel='ACC') ``` - The `trainer` automatically saves the model with the best validation accuracy automatically for us, we which we can load from the checkpoint via the `ckpt_path='best'` argument; below we use the `trainer` instance to evaluate the best model on the test set: ``` 
trainer.test(model=lightning_model, datamodule=data_module, ckpt_path='best') ``` ## Predicting labels of new data - You can use the `trainer.predict` method on a new `DataLoader` or `DataModule` to apply the model to new data. - Alternatively, you can also manually load the best model from a checkpoint as shown below: ``` path = trainer.checkpoint_callback.best_model_path print(path) lightning_model = LightningModel.load_from_checkpoint( path, model=pytorch_model) lightning_model.eval(); ``` - Note that our PyTorch model, which is passed to the Lightning model requires input arguments. However, this is automatically being taken care of since we used `self.save_hyperparameters()` in our PyTorch model's `__init__` method. - Now, below is an example applying the model manually. Here, pretend that the `test_dataloader` is a new data loader. ``` test_dataloader = data_module.test_dataloader() all_true_labels = [] all_predicted_labels = [] for batch in test_dataloader: features, labels = batch with torch.no_grad(): logits = lightning_model(features) predicted_labels = torch.argmax(logits, dim=1) all_predicted_labels.append(predicted_labels) all_true_labels.append(labels) all_predicted_labels = torch.cat(all_predicted_labels) all_true_labels = torch.cat(all_true_labels) all_predicted_labels[:5] ``` Just as an internal check, if the model was loaded correctly, the test accuracy below should be identical to the test accuracy we saw earlier in the previous section. ``` test_acc = torch.mean((all_predicted_labels == all_true_labels).float()) print(f'Test accuracy: {test_acc:.4f} ({test_acc*100:.2f}%)') ``` ## Inspecting Failure Cases - In practice, it is often informative to look at failure cases like wrong predictions for particular training instances as it can give us some insights into the model behavior and dataset. - Inspecting failure cases can sometimes reveal interesting patterns and even highlight dataset and labeling issues. 
``` # Append the folder that contains the # helper_data.py, helper_plotting.py, and helper_evaluate.py # files so we can import from them import sys sys.path.append('../pytorch_ipynb') from helper_data import UnNormalize from helper_plotting import show_examples class_dict = {0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'} # We normalized each channel during training; here # we are reverting the normalization so that we # can plot them as images unnormalizer = UnNormalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) show_examples( model=lightning_model, data_loader=test_dataloader, unnormalizer=unnormalizer, class_dict=class_dict) from torchmetrics import ConfusionMatrix cmat = ConfusionMatrix(num_classes=len(class_dict)) for x, y in test_dataloader: pred = lightning_model(x) cmat(pred, y) cmat_tensor = cmat.compute() from helper_plotting import plot_confusion_matrix plot_confusion_matrix( cmat_tensor.numpy(), class_names=class_dict.values()) plt.show() ``` ## Single-image usage ``` %matplotlib inline import matplotlib.pyplot as plt ``` - Assume we have a single image as shown below: ``` from PIL import Image image = Image.open('data/cifar10_pngs/90_airplane.png') plt.imshow(image, cmap='Greys') plt.show() ``` - Note that we have to use the same image transformation that we used earlier in the `DataModule`. - While we didn't apply any image augmentation, we could use the `to_tensor` function from the torchvision library; however, as a general template that provides flexibility for more complex transformation chains, let's use the `Compose` class for this: ``` transform = data_module.train_transform image_chw = transform(image) ``` - Note that `ToTensor` returns the image in the CHW format. CHW refers to the dimensions and stands for channel, height, and width. 
``` print(image_chw.shape) ``` - However, the PyTorch / PyTorch Lightning model expectes images in NCHW format, where N stands for the number of images (e.g., in a batch). - We can add the additional channel dimension via `unsqueeze` as shown below: ``` image_nchw = image_chw.unsqueeze(0) print(image_nchw.shape) ``` - Now that we have the image in the right format, we can feed it to our classifier: ``` with torch.no_grad(): # since we don't need to backprop logits = lightning_model(image_nchw) probas = torch.softmax(logits, axis=1) predicted_label = torch.argmax(probas) int_to_str = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'} print(f'Predicted label: {int_to_str[predicted_label.item()]}') print(f'Class-membership probability {probas[0][predicted_label]*100:.2f}%') ```
github_jupyter
# Investigate HPS profile ``` %matplotlib inline import sys sys.path.append('/home/surchs/Repositories/abide_univariate/') import asdfc import pandas as pd import scipy as sp import numpy as np import patsy as pat import nibabel as nib import pathlib as pal import seaborn as sbn import matplotlib as mpl from matplotlib import gridspec import nilearn.input_data as nii from nilearn import plotting as nlp from matplotlib import pyplot as plt import matplotlib.patches as mpatches from matplotlib.colors import ListedColormap, LinearSegmentedColormap root_p = pal.Path('/home/surchs/Repositories/autismclassification/20190524_Validation_Data/') model1_p = root_p / 'validation_net_split_1_model_1_combined_p_values.tsv' pheno_valid_p = root_p / 'ABIDE_2_Pheno_PSM_matched_ados.tsv' resid_valid_p = root_p / 'abide_2_validation_residuals_regressed_with_abide_1.npy' seed_valid_p = root_p / 'Sebastian Urchs - abide_2_seed_maps_no_cereb_for_hien.npy' mask_p = '/home/surchs/Repositories/abide_univariate/data/ATLAS/MIST/Parcellations/MIST_mask_nocereb.nii.gz' atlas_p = '/home/surchs/Repositories/abide_univariate/data/ATLAS/MIST/Parcellations/MIST_20_nocereb.nii.gz' labels_p = '/home/surchs/Repositories/abide_univariate/data/ATLAS/MIST/Parcel_Information/MIST_20_nocereb.csv' completed_pheno_p = root_p / 'completed_abide_2_validation.tsv' figure_p = pal.Path('/home/surchs/VM_Transfer/HCS_paper_figures/fig4/') if not figure_p.is_dir(): figure_p.mkdir(parents=True) p_test = '/home/surchs/Repositories/abide_univariate/data/pheno/ABIDEII_Composite_Phenotypic.csv' p1_test = '/home/surchs/Repositories/abide_univariate/data/pheno/ABIDEII_Composite_Phenotypic.csv' pp = pd.read_csv(p_test, encoding = "ISO-8859-1") pp1 = pd.read_csv(p1_test, encoding = "ISO-8859-1") def mm2in(mm): return mm/25.4 ``` ## Load and preparation ``` pheno_valid = pd.read_csv(pheno_valid_p, sep='\t') model1 = pd.read_csv(model1_p, sep='\t') model1.rename(columns={'V1': 'p_ASD', 'V2': 'p_TDC'}, inplace=True) 
resid_valid = np.load(resid_valid_p) seed_valid = np.load(resid_valid_p) labels = pd.read_csv(labels_p, sep=';') mask_i = nib.load(str(mask_p)) atlas_i = nib.load(str(atlas_p)) voxel_masker = nii.NiftiMasker(mask_img=mask_i, standardize=False) voxel_masker.fit() ``` ``` # These need to be the values because the model 1 dataframe starts with index 1 pheno_valid.loc[:, 'is_hps'] = ((model1.p_ASD>0.2) & (model1.p_TDC<=0.2)).values pheno_valid.loc[:, 'p_ASD'] = model1.loc[:, 'p_ASD'].values pheno_valid.loc[:, 'p_TDC'] = model1.loc[:, 'p_TDC'].values # Compute actual indices to avoid trusting the DF indices hps_idx = np.where(pheno_valid.is_hps)[0] non_hps_idx = np.where(~pheno_valid.is_hps) non_hps_asd_idx = np.where((pheno_valid.is_hps) & (pheno_valid.DX_GROUP=='Autism'))[0] non_hps_con_idx = np.where((pheno_valid.is_hps) & (pheno_valid.DX_GROUP=='Control'))[0] ``` ## Describe symptom profile - with ADOS raw totals - with ADOS raw domain totals - with SRS raw totals T (standardized) ``` p_hps_asd = pheno_valid.query('is_hps and DX_GROUP=="Autism"') p_hps_con = pheno_valid.query('is_hps and DX_GROUP=="Control"') p_idi_asd = pheno_valid.query('not is_hps and DX_GROUP=="Autism"') p_idi_con = pheno_valid.query('not is_hps and DX_GROUP=="Control"') def get_counts(pheno, var, ados_values=range(1,11)): values = pheno[var].value_counts().keys().tolist() counts = pheno[var].value_counts().tolist() # Compute counts for each value in case there aren't return [counts[values.index(val)] if val in values else 0 for val in ados_values] def get_relative_counts(counts): return [count/sum(counts) for count in counts] ``` ## Boxplot + Swarmplot raw ADOS TOTAL ``` asd_idi_color = 'lightcoral' asd_hps_color = 'firebrick' con_idi_color = 'lightgreen' con_hps_color = 'darkgreen' from matplotlib import rcParams rcParams['font.sans-serif'] = ['Arial'] dpi = 300 scale_factor = 1 height = 55 * scale_factor width = 55 * scale_factor f = plt.figure(figsize=(mm2in(width),mm2in(height)), 
constrained_layout=True) ax = f.add_subplot(111) sbn.boxplot(x='DX_GROUP', y='ADOS_RAW_TOTAL_combined', data=pheno_valid.query('not is_hps'), ax=ax, palette=['white', 'white'], saturation=1, linewidth=1.2) sbn.stripplot(x='DX_GROUP', y='ADOS_RAW_TOTAL_combined', data=pheno_valid.query('not is_hps'), size=4,ax=ax, palette=['#8F09A7', 'lightgrey'], linewidth=0, alpha=0.3, jitter=0.25) sbn.swarmplot(x='DX_GROUP', y='ADOS_RAW_TOTAL_combined', data=pheno_valid.query('is_hps'), size=8,ax=ax, palette=['#8F09A7', 'lightgrey'], linewidth=1.5, edgecolor='#FE9D08') #for i,box in enumerate(ax.artists): # box.set_edgecolor('black') # # iterate over whiskers and median lines # for j in range(6*i,6*(i+1)): # ax.lines[j].set_color('black') ax.set_xticklabels(['ASD', 'NTC'], fontsize=10); ax.set_xlabel('Diagnosis', fontsize=10) ax.set_yticks(np.arange(0,26,5)) ax.set_ylim([-1, 26]) ax.set_ylabel('raw ADOS total', fontsize=10) f.savefig(figure_p / 'fig4_boxplot_rawADOS_new.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) f.savefig(figure_p / 'fig4_boxplot_rawADOS_new.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) # Make a stupid legend by hand dpi = 300 scale_factor = 1 height = 30 * scale_factor width = 30 * scale_factor f = plt.figure(figsize=(mm2in(width),mm2in(height)), constrained_layout=False) ax = f.add_subplot(111) ax.scatter(0,-0.5, s=100, color='lightgrey', edgecolor='#FE9D08', linewidth=0) ax.scatter(0,0.5, s=100, color='#8F09A7', edgecolor='#FE9D08', linewidth=0) ax.scatter(0,1.5, s=100, color='white', edgecolor='#FE9D08', linewidth=1.5) ax.set_ylim([-1,2]) ax.set_xlim([-1,4]) #ax.set_xlim([-0.2,2]) ax.set_axis_off() ax.annotate(f'identified by HRS', xy=(0.5,0.82), xytext=(0, 0), ha='left', va='center', xycoords='axes fraction', textcoords='offset points', fontsize=10) ax.annotate(f'ASD', xy=(0.5,0.49), xytext=(0, 0), ha='left', va='center', xycoords='axes fraction', textcoords='offset points', fontsize=10) ax.annotate(f'NTC', 
xy=(0.5,0.16), xytext=(0, 0), ha='left', va='center', xycoords='axes fraction', textcoords='offset points', fontsize=10) f.savefig(figure_p / 'fig4_boxplot_legend_new.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) f.savefig(figure_p / 'fig4_boxplot_legend_new.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) ``` ## Boxplot + Swarmplot proxy ADOS CSS ``` dpi = 300 scale_factor = 1 height = 55 * scale_factor width = 55 * scale_factor f = plt.figure(figsize=(mm2in(width),mm2in(height)), constrained_layout=True) ax = f.add_subplot(111) sbn.boxplot(x='DX_GROUP', y='ADOS_CSS_proxy_fully', data=pheno_valid.query('not is_hps'), ax=ax, palette=['white', 'white'], saturation=1, linewidth=1.2) sbn.stripplot(x='DX_GROUP', y='ADOS_CSS_proxy_fully', data=pheno_valid.query('not is_hps'), size=4,ax=ax, palette=['#8F09A7', 'lightgrey'], linewidth=0, alpha=0.17, jitter=0.3) sbn.swarmplot(x='DX_GROUP', y='ADOS_CSS_proxy_fully', data=pheno_valid.query('is_hps'), size=8,ax=ax, palette=['#8F09A7', 'lightgrey'], linewidth=1.5, edgecolor='#FE9D08') #for i,box in enumerate(ax.artists): # box.set_edgecolor('black') # # iterate over whiskers and median lines # for j in range(6*i,6*(i+1)): # ax.lines[j].set_color('black') ax.set_xticklabels(['ASD', 'NTC'], fontsize=10); ax.set_xlabel('Diagnosis', fontsize=10) ax.set_yticks(np.arange(1,11)) ax.set_ylabel('proxy ADOS CSS', fontsize=10) f.savefig(figure_p / 'fig4_boxplot_proxyADOS_new.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) f.savefig(figure_p / 'fig4_boxplot_proxyADOS_new.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) ``` # The brain profile - go through each seed region - average the HPS individuals - show the map ``` # Networks that we are actually using: # These are in R notation and thus start with 1 used_networks = np.array([18, 3, 9, 5, 16, 1, 13, 4, 12])-1 networks_not_used = np.array([2, 7, 10, 11, 6, 17, 8, 14, 15])-1 used_numbers = 
[labels.iloc[n].roi for n in used_networks] not_used_numbers = [labels.iloc[n].roi for n in networks_not_used] atlas = voxel_masker.transform(atlas_i) new_atlas = np.zeros_like(atlas) for n in used_numbers: new_atlas[atlas==n] = 1 new_atlas_i = voxel_masker.inverse_transform(new_atlas) def mm2in(mm): return mm/25.4 def seed_map_overlay(width, height, img, over_img, coords, path): fig = plt.figure(figsize=(width, height), constrained_layout=True) ax = fig.add_subplot(111) gp = nlp.plot_stat_map(img, cut_coords=coords, axes=ax, colorbar=False) gp.add_contours(over_img, filled=False, alpha=1, levels=[0.5], colors='limegreen', linewidths=2) fig.savefig(path, bbox_inches='tight', dpi=300, transparent=True, pad_inches=0) ``` ``` net_colors = ['#FFB647', # Light Orange '#FF907E', '#FF6047', '#BF1A00', '#881200', # 2nd group, shades of red '#BF7400', # Burnt Orange '#FFEC7E', '#FFE347', '#BFA200', # 4th group, shades of yellow '#6C98CA', '#3B72B0', '#09407D', # 5th group (prot), shades of blue '#9EEB75', '#77E13E', '#3AA600', # 6th group (prot), shades of green '#9D6ECD', '#783CB5', '#440981'] # 7th group (prot), shades of purple import matplotlib.patches as patches dpi=300 scale_factor = 1 height = 20 * scale_factor width = 55 * scale_factor coords = [(-2, 24, 24), (-2, 32, -18), (-22, -4, -16), (-41, -6, -34), (-48, -30, -4), (-18, -2, 4), (0, -57, 32), (0, 45, 0), (4, 42, 40)] for fig_idx, nid in enumerate(used_networks): fig = plt.figure(figsize=(mm2in(width),mm2in(height))) gs = gridspec.GridSpec(nrows=1, ncols=1) ax_img = fig.add_subplot(gs[0]) a = np.mean(resid_valid[hps_idx, :, nid], 0) img = voxel_masker.inverse_transform(a) # Make an image for just this seed region seed_vec = (voxel_masker.transform(atlas_i)==labels.iloc[nid].roi).squeeze() seed_img = voxel_masker.inverse_transform(seed_vec) gp = nlp.plot_stat_map(img, cut_coords=coords[fig_idx], axes=ax_img, colorbar=False, draw_cross=False, annotate=False, black_bg=False) gp.add_contours(seed_img, filled=False, 
alpha=1, levels=[0.5], colors=net_colors[fig_idx], linewidths=1) net_name = labels.iloc[nid]["label"] ax_img.set_axis_on() ax_img.patch.set_alpha(0) for spine in ax_img.spines.values(): spine.set_visible(False) ax_img.set_xticks([]) ax_img.set_yticks([]) ax_img.annotate(f'{net_name}', xy=(0,0), xytext=(17, -5), ha='left', va='top', xycoords='axes fraction', textcoords='offset points', fontsize=10) ax_img.add_patch(plt.Rectangle((0,-0.32),0.07, 0.2,facecolor=net_colors[fig_idx], edgecolor='black', linewidth = 1, clip_on=False)) fig.savefig(figure_p / f'hcs_avg_{net_name}_outline.png', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0) fig.savefig(figure_p / f'hcs_avg_{net_name}_outline.pdf', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0) ``` ## Plot the brain atlas with corresponding colors ``` order = np.array([18, 3, 9, 5, 16, 1, 13, 4, 12, 2, 7, 10, 11, 6, 17, 8, 14, 15])-1 # Manual partition part_dict = {1: [18], 2: [3, 9, 5, 16], 3: [1], 4: [13, 4, 12], 5: [2, 7, 10], 6: [11, 6, 17], 7: [8, 14, 15] } manual_part = np.array([p for i in order for p, v in part_dict.items() if i+1 in v]) atlas = atlas_i.get_fdata().astype(int) atlas_masked = np.zeros(shape=(atlas.shape)) atlas_ens1 = np.zeros(shape=(atlas.shape)) atlas_ens2 = np.zeros(shape=(atlas.shape)) idx = 1 for i in np.arange(1,8): for j in part_dict[i]: # convert j to real j real_j = labels.iloc[j-1].roi if i <=4: atlas_ens1[atlas==real_j] = idx atlas_masked[atlas==real_j] = 1 idx +=1 else: atlas_ens2[atlas==real_j] = i atlas_masked_i = nib.Nifti1Image(atlas_masked, affine=atlas_i.affine, header=atlas_i.header) atlas_ens1_i = nib.Nifti1Image(atlas_ens1, affine=atlas_i.affine, header=atlas_i.header) cmp = LinearSegmentedColormap.from_list('lala', net_colors, N=18) cut_coords = (10,5,0) dpi=300 scale_factor = 1 height = 20 * scale_factor width = 55 * scale_factor fig = plt.figure(figsize=(mm2in(width),mm2in(height))) ax = fig.add_subplot(111) gp = nlp.plot_roi(atlas_ens1_i, 
cmap=cmp, cut_coords=cut_coords, axes=ax, vmin=1, vmax=18, draw_cross=False, annotate=False); gp.add_contours(atlas_masked_i, filled=False, alpha=1, levels=[0.5], colors='black', linewidths=0.5) fig.savefig('/home/surchs/VM_Folders/HCS_paper_figures/fig4_ensemble1_maps_for_montage.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) fig.savefig('/home/surchs/VM_Folders/HCS_paper_figures/fig4_ensemble1_maps_for_montage.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) fig.savefig(figure_p / 'fig4_ensemble1_maps_for_montage.png', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0) fig.savefig(figure_p / 'fig4_ensemble1_maps_for_montage.pdf', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0) cut_coords = (0,0,-10) dpi=300 scale_factor = 1 height = 40 * scale_factor width = 110 * scale_factor fig = plt.figure(figsize=(mm2in(width),mm2in(height))) ax = fig.add_subplot(111) gp = nlp.plot_roi(atlas_ens1_i, cmap=cmp, cut_coords=cut_coords, axes=ax, vmin=1, vmax=18, draw_cross=False, annotate=False); gp.add_contours(atlas_masked_i, filled=False, alpha=1, levels=[0.5], colors='black', linewidths=0.5) ``` ## Color bar ``` dpi=300 scale_factor = 1 height = 20 * scale_factor width = 1.5 * scale_factor a = np.array([[0,1]]) plt.figure(figsize=(mm2in(width),mm2in(height))) img = plt.imshow(a, cmap=nlp.cm.cold_hot) plt.gca().set_visible(False) cax = plt.axes([0.1, 0.2, 0.8, 0.6]) plt.colorbar(cax=cax) cax.set_ylabel('a.u.', fontsize=10) cax.set_axis_on() cax.set_yticklabels(('', '', ''), fontsize=8); plt.savefig(figure_p / "colorbar.pdf", bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0) ```
github_jupyter
## Drawing Edgeworth Box ``` import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D %matplotlib inline def Edgeworth_box(u1, u2, util1, util2, MRS1, MRS2, ttl, ttl_margin= 1, top=0.7, fname=None): l1 = 0.00000001 x1 = np.arange(l1, u1, 0.01) x2 = np.arange(l1, u2, 0.01) X1, X2 = np.meshgrid(x1, x2) V1 = util1(X1, X2) V2 = util2(u1-X1, u2-X2) x, y = contract_curve(u1,u2, MRS1,MRS2,num_indiff = 10) xr = u1-x[::-1] # to use the same contours for Consumer 2. yr = u2-y[::-1] # to use the same contours for Consumer 2. clev1 = util1(x,y) clev2 = util2(xr, yr) Draw_Edgeworth_box(u1, u2, X1, X2, V1, V2, clev1,clev2, ttl, ttl_margin, top,contract1=x, contract2=y) if fname != None: plt.savefig(fname) from scipy.optimize import fsolve from scipy.optimize import fminbound def contract_curve(u1, u2, MRS1, MRS2, num_indiff = 10): xs = np.linspace(0,u1,num_indiff+2) xs = xs[1:-1] ys = [] for x in xs: y = fminbound(lambda y: (MRS1(x,y) - MRS2(u1-x, u2-y))**2, 0, u2) ys.append(min(max(np.asscalar(y),0),u2)) ys = np.asarray(ys) return xs, ys def Draw_Edgeworth_box(u1, u2,X1, X2, V1, V2, clev1,clev2, ttl=[], ttl_margin= 1, top=0.7,contract1=[], contract2=[]): # u1: the total amount of good 1 # u2: the total amount of good 2 # V1: levels of utility for consumer 1 # V2: levels of utility for consumer 2 # clev1: levels of contours (for consumer 1) you are interested in # clev2: levels of contours (for consumer 2) you are interested in # ttl: a title # ttl_margin: space added for the title # top: a location where the graph starts (if top=1, the title and the graph will overlap) xtcks = np.arange(0, u1+1) ytcks = np.arange(0,u2+1) # Adjustment of the title is bit annoying when we try to set xlabel on the top if len(ttl)>0: fig = plt.figure(figsize = (u1, u2/top)) ax1 = fig.add_subplot(1,1,1) plt.subplots_adjust(top=top) fig.suptitle(ttl) #plt.title(ttl) else: fig = plt.figure(figsize = (u1, u2)) col1 = 'tab:red' col2 = 'tab:blue' plt.contour(X1, X2, 
V1,clev1, linewidths = 1, colors=col1, linestyles = 'dashed') plt.contour(X1, X2, V2,clev2, linewidths = 1, colors = col2, linestyles = 'dashed') plt.xlim([0,u1]) plt.ylim([0,u2]) plt.xlabel('$x_{1,1}$', color = col1, fontsize = 13) plt.ylabel('$x_{1,2}$', color = col1, fontsize = 13) plt.xticks(xtcks, color = col1) plt.yticks(ytcks, color = col1) xplt = np.linspace(0,u1,12) yplt = np.linspace(0,u2,12) if len(contract1)>0: xplt[1:-1]=contract1 yplt[1:-1]=contract2 plt.plot(xplt,yplt, 'k--', alpha = 0.7) ax1 = plt.gca() ax2 = plt.twinx(ax1) plt.ylabel('$x_{2,2}$', color=col2 , fontsize = 13) plt.yticks(ytcks,ytcks[::-1], color = col2) # It's a bit hacky, but the following looks an easy way. ax3 = plt.twiny(ax1) plt.xlabel('$x_{2,1}$', color=col2, fontsize = 13) plt.xticks(xtcks,xtcks[::-1], color = col2) return fig ``` ### Edgeworth Box under Homothetic Preferences - ##### Q: Is a contract curve of homothetic preferences always a diagonal line of the Edgeworth Box ? - ##### A: If preferences are identical and strictly convex (i.e., strictly (quasi-) concave utility functions), Yes ##### Example with CES (with the same weights on two goods. 
)

```
def CES(x,y,dlt):
    return (x**dlt)/dlt + (y**dlt)/dlt

def Cobb_Douglas(x, y, al, bet):
    return (x**al)*(y**bet)

def MRS_CES(x,y,dlt):
    return (x**(dlt-1))/(y**(dlt-1))

def MRS_CD(x, y, al, bet):
    return (al*y)/(bet*x)

dlt1 = 0.5
dlt2 = dlt1
Edgeworth_box(u1=6, u2=3,
              util1 = lambda x,y: CES(x, y, dlt1),
              util2 =lambda x,y: CES(x, y, dlt2),
              MRS1 =lambda x, y: MRS_CES(x,y,dlt1),
              MRS2 =lambda x, y: MRS_CES(x,y,dlt2),
              ttl='$u_1=x^{\delta_1}/{\delta_1} + y^{\delta_1}/{\delta_1}$ : ${\delta_1}=$' + '{}\n'.format(dlt1)
                 +'$u_2=x^{\delta_2}/\delta_2 + y^{\delta_2}/\delta_2$ : ${\delta_2}=$' + '{}\n'.format(dlt2),
              ttl_margin= 1, top=0.7,
              fname='Edgeworth_identical1.png')

dlt1 = 0.5
dlt2 = -3
Edgeworth_box(u1=6, u2=3,
              util1 = lambda x,y: CES(x, y, dlt1),
              util2 =lambda x,y: CES(x, y, dlt2),
              MRS1 =lambda x, y: MRS_CES(x,y,dlt1),
              MRS2 =lambda x, y: MRS_CES(x,y,dlt2),
              ttl='$u_1=x^{\delta_1}/{\delta_1} + y^{\delta_1}/{\delta_1}$ : ${\delta_1}=$' + '{}\n'.format(dlt1)
                 +'$u_2=x^{\delta_2}/\delta_2 + y^{\delta_2}/\delta_2$ : ${\delta_2}=$' + '{}'.format(dlt2),
              ttl_margin= 1, top=0.7,
              fname='Edgeworth_not_identical1.png')
```

In the above graph, the weights on the two goods are the same, but the degree of complementarity for each consumer is different (Consumer 2's preferences exhibit higher complementarity than Consumer 1's).

##### Example with Cobb-Douglas (With different weights for two goods.)
```
al = 0.3
bet = 0.7
Edgeworth_box(u1=5, u2=5,
              util1 = lambda x,y: Cobb_Douglas(x, y, al,bet),
              util2 =lambda x,y: Cobb_Douglas(x, y, al,bet),
              MRS1 =lambda x, y: MRS_CD(x,y, al,bet),
              MRS2 =lambda x, y: MRS_CD(x,y, al,bet),
              ttl=r'$u_1=x^{\alpha_1}y^{\beta_1} : {\alpha_1}$'+ '={}, '.format(al) + r'$\beta_1$'+ '={}\n'.format(bet)
                 +r'$u_2=x^{\alpha_2}y^{\beta_2} : {\alpha_2}$'+ '={}, '.format(al) + r'$\beta_2$'+ '={}'.format(bet),
              ttl_margin= 1, top=0.8,
              fname='Edgeworth_identical2.png')
```

Even if good 2 is more important, the contract curve is still the diagonal, since the preferences are identical and homothetic (and strictly convex).

If we use different weights for the two consumers, then we obtain a different contract curve.

```
al = 0.3
bet = 0.7
asym = 0.4
Edgeworth_box(u1=5, u2=5,
              util1 = lambda x,y: Cobb_Douglas(x, y, al,bet),
              util2 =lambda x,y: Cobb_Douglas(x, y, al+asym,bet-asym),
              MRS1 =lambda x, y: MRS_CD(x,y, al,bet),
              MRS2 =lambda x, y: MRS_CD(x,y, al+asym,bet-asym),
              ttl=r'$u_1=x^{\alpha_1}y^{\beta_1} : {\alpha_1}$'+ '={:02.1f}, '.format(al) + r'$\beta_1$'+ '={:02.1f}\n'.format(bet)
                 +r'$u_2=x^{\alpha_2}y^{\beta_2} : {\alpha_2}$'+ '={:02.1f}, '.format(al+asym) + r'$\beta_2$'+ '={:02.1f}'.format(bet-asym),
              ttl_margin= 1, top=0.8,
              fname='Edgeworth_not_identical2.png')
```
github_jupyter
``` import os import networkx as nx import pandas as pd import numpy as np import pickle as pkl import itertools from sklearn.manifold import TSNE import stellargraph as sg from stellargraph import StellarGraph from stellargraph import globalvar from stellargraph.mapper import GraphSAGENodeGenerator from stellargraph.layer import GraphSAGE from tensorflow.keras import layers, optimizers, losses, metrics, Model from sklearn import preprocessing, feature_extraction, model_selection import matplotlib.pyplot as plt ``` ### Read the graph and the data ``` data_ab = pd.read_csv("../../../generated_csvs/guncontrol_daily_polarities_base.csv") graph_ab = nx.read_gexf("../../../graph/guncontrol_daily_largest.gexf") ``` ### Divide the data into classes ``` # n_classes can be 2, 4, 6 or 8 n_classes = 2 # polarity_type can be either "Polarity Neighbours" or "Polarity Following" polarity_type = "Polarity Neighbours" base_polarities = data_ab[polarity_type].dropna() base_polarities[base_polarities > 2.42] = 2.42 base_polarities[base_polarities < -2.42] = -2.42 if n_classes == 2: base_polarities = base_polarities.to_frame() base_polarities['Class'] = 0 base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1 elif n_classes == 4: base_polarities = base_polarities.to_frame() base_polarities['Class'] = 0 base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1 positive_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] >= 0], q = 0.5) base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles] = 2 base_polarities['Class'][base_polarities[polarity_type] < 0] = 3 negative_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] < 0], q = 0.5) base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles] = 4 elif n_classes == 6: base_polarities = base_polarities.to_frame() base_polarities['Class'] = 0 base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1 
positive_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] >= 0], q = [0.33, 0.66]) base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[0]] = 2 base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[1]] = 3 base_polarities['Class'][base_polarities[polarity_type] < 0] = 4 negative_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] < 0], q = [0.33, 0.66]) base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[1]] = 5 base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[0]] = 6 elif n_classes == 8: base_polarities = base_polarities.to_frame() base_polarities['Class'] = 0 base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1 positive_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] >= 0], q = [0.25, 0.5, 0.75]) base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[0]] = 2 base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[1]] = 3 base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[2]] = 4 base_polarities['Class'][base_polarities[polarity_type] < 0] = 5 negative_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] < 0], q = [0.25, 0.5, 0.75]) base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[2]] = 6 base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[1]] = 7 base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[0]] = 8 ``` ### Get the features ``` # relabel nodes as indexes d = dict(zip(data_ab['0'].values, data_ab['0'].index)) g2 = nx.relabel_nodes(graph_ab, d) ``` #### Use the node degrees as features ``` features_idx = np.load("../../../npy/guncontrol_daily/index.npy", allow_pickle = True) features_mat = np.load("../../../npy/guncontrol_daily/node_features_pca.npy", 
allow_pickle = True) map_nodes_to_features = {} for idx, feature in zip(range(0, len(features_idx)), features_idx): map_nodes_to_features[d[feature]] = idx for i in list(g2.nodes()): g2.nodes[i]['features'] = features_mat[map_nodes_to_features[i]] for node in list(g2.nodes()): g2.node[node]['label'] = 'default' ``` #### Alternative: use the eigenvectors as features ``` with open("../../../pickle/guncontrol_daily_eig.pkl", 'rb') as _p: _, eigvec = pkl.load(_p) map_nodes_to_eigvec = {} for idx, node_name in zip(range(0, len(list(graph_ab.nodes()))), list(graph_ab.nodes())): map_nodes_to_eigvec[d[node_name]] = idx for i in list(g2.nodes()): g2.nodes[i]['features'] = eigvec[map_nodes_to_eigvec[i], 1:] for node in list(g2.nodes()): g2.node[node]['label'] = 'default' ``` #### Alternative 2: use both the node degrees and the eigenvectors as features ``` features_idx = np.load("../../../npy/guncontrol_daily/index.npy", allow_pickle = True) features_mat = np.load("../../../npy/guncontrol_daily/node_features_pca.npy", allow_pickle = True) map_nodes_to_features = {} for idx, feature in zip(range(0, len(features_idx)), features_idx): map_nodes_to_features[d[feature]] = idx with open("../../../pickle/guncontrol_daily_eig.pkl", 'rb') as _p: _, eigvec = pkl.load(_p) map_nodes_to_eigvec = {} for idx, node_name in zip(range(0, len(list(graph_ab.nodes()))), list(graph_ab.nodes())): map_nodes_to_eigvec[d[node_name]] = idx for i in list(g2.nodes()): g2.nodes[i]['features'] = np.concatenate((features_mat[map_nodes_to_features[i]], eigvec[map_nodes_to_eigvec[i], 1:])) for node in list(g2.nodes()): g2.node[node]['label'] = 'default' ``` ### Train the model ``` # get the subgraph containing the known nodes stellar = StellarGraph.from_networkx(g2, node_features = 'features') subg = stellar.subgraph(base_polarities['Class'].index) train_labels, test_labels = model_selection.train_test_split( base_polarities['Class'], train_size=0.7, test_size=None, stratify=base_polarities['Class'], 
random_state=42, ) val_labels, test_labels = model_selection.train_test_split( test_labels, train_size=0.2, test_size=None, stratify=test_labels, random_state=100, ) target_encoding = preprocessing.LabelBinarizer() train_targets = target_encoding.fit_transform(train_labels) val_targets = target_encoding.transform(val_labels) test_targets = target_encoding.transform(test_labels) batch_size = 50 num_samples = [10, 10] generator = GraphSAGENodeGenerator(subg, batch_size, num_samples) train_gen = generator.flow(train_labels.index, train_targets, shuffle=True) graphsage_model = GraphSAGE( layer_sizes=[32, 32], generator=generator, bias=True, dropout=0.5, ) # choose the activation function and the loss based on the number of classes: if n_classes == 2: x_inp, x_out = graphsage_model.in_out_tensors() prediction = layers.Dense(units=train_targets.shape[1], activation="sigmoid")(x_out) model = Model(inputs=x_inp, outputs=prediction) model.compile( optimizer=optimizers.Adam(lr=0.005), loss=losses.binary_crossentropy, metrics=["acc"], ) else: x_inp, x_out = graphsage_model.in_out_tensors() prediction = layers.Dense(units=train_targets.shape[1], activation="softmax")(x_out) model = Model(inputs=x_inp, outputs=prediction) model.compile( optimizer=optimizers.Adam(lr=0.005), loss=losses.categorical_crossentropy, metrics=["acc"], ) val_gen = generator.flow(val_labels.index, val_targets) history = model.fit( train_gen, epochs=20, validation_data=val_gen, verbose=2, shuffle=False ) # Save the train / validation training history history_save_path = "../../../npy/classifier_history/guncontrol_daily/history_2_neighbours_node_degree.npy" np.save(history_save_path, history.history) ``` ### Test the model ``` test_gen = generator.flow(test_labels.index, test_targets) test_metrics = model.evaluate(test_gen) print("\nTest Set Metrics:") for name, val in zip(model.metrics_names, test_metrics): print("\t{}: {:0.4f}".format(name, val)) ``` ### Predict the unlabeled data ``` generator = 
GraphSAGENodeGenerator(stellar, batch_size, num_samples) hold_out_nodes = data_ab.index.difference(base_polarities.index) data_ab['Class'] = 0 hold_out_targets = target_encoding.transform(data_ab.loc[hold_out_nodes]["Class"]) hold_out_gen = generator.flow(hold_out_nodes, hold_out_targets) hold_out_predictions = model.predict(hold_out_gen) hold_out_predictions = target_encoding.inverse_transform(hold_out_predictions) pred = data_ab pred['Class'] = base_polarities['Class'] pred.loc[hold_out_nodes, 'Class'] = hold_out_predictions ``` ### Save the model and the predictions ``` model.save("../../../keras/guncontrol_daily/model") pred[['0', 'Class']].to_csv("../../../keras/guncontrol_daily/predictions.csv", index = False) ``` ### TSNE visualization ``` all_nodes = pred['Class'].index all_mapper = generator.flow(all_nodes) embedding_model = Model(inputs = x_inp, outputs = x_out) emb = embedding_model.predict(all_mapper) # save the embeddings np.save("../../../npy/embeddings/guncontrol_daily/emb.npy", emb) emb_trans = TSNE(n_components = 2, n_jobs = -1).fit_transform(emb) colours = [] for iter in range(0, len(map_nodes_to_features)): if pred.loc[map_nodes_to_features[iter], 'Class'] == 0: colours.append('b') else: colours.append('r') plt.figure(figsize = (20, 10)) plt.rcParams.update({'font.size': 22}) plt.scatter(emb_trans[:,0], emb_trans[:,1], c = colours, alpha = 0.5) plt.tick_params(labelsize = 20) plt.title("TSNE embedding for the 'gun control' data set") plt.xlabel("Dimension 1") plt.ylabel("Dimension 2") plt.grid() plt.show() plt.figure(figsize = (20, 10)) plt.rcParams.update({'font.size': 22}) plt.scatter(emb_trans[:,0], emb_trans[:,1], c = colours, alpha = 0.5) plt.tick_params(labelsize = 20) plt.title("TSNE embedding for the 'gun control' data set") plt.xlabel("Dimension 1") plt.ylabel("Dimension 2") plt.grid() plt.savefig("../../../plots/guncontrol_daily/tsne_30_neighbours.png") plt.close() ``` #### Save map to features from node degree ``` with 
open("../../../keras/guncontrol_daily/map_nodes_to_features.pkl", "wb") as _pkl: pkl.dump(map_nodes_to_features, _pkl) ``` #### Save map to features from eigenvectors ``` with open("../../../keras/guncontrol_daily/map_nodes_to_eigvec.pkl", "wb") as _pkl: pkl.dump(map_nodes_to_eigvec, _pkl) ```
github_jupyter
## Test monthly means with leap year for GNSS-RO data ``` import xarray as xr import matplotlib.pyplot as plt import numpy as np from netCDF4 import num2date import func as func ds_obs = xr.open_dataset('GPS-RO__CP_LR_5x5_2007-2018.nc') ds_era5 = xr.open_dataset('FULL-ERA5.monthmean.2007-2018.concat_new.nc') ds_erai = xr.open_dataset('erai.tp.monmean.nc') obs_y = ds_obs.resample(time='M', keep_attrs=True).mean() obs_y = obs_y.sel(lat=slice(-20,20)) #era_y = ds_era5.resample(time='Y', keep_attrs=True).mean() era_y = ds_era5.sel(lat=slice(-20,20)) ds_obs #erai_y = ds_erai.resample(time='Y', keep_attrs=True).mean() erai_y = ds_erai.sel(lat=slice(20,-20), time=slice('2007-01-01T00:00:00.000000000', '2018-12-31T00:00:00.000000000')) #era_y_mean= f.year_mean(era_y) #erai_y_mean = f.year_mean(erai_y) #obs_y_mean = f.year_mean(obs_y, obs=True) era_y_mean = func.w_average(era_y, ['ctpt', 'ctpz', 'tpt', 'tpz']) erai_y_mean = func.w_average(erai_y, ['ctpt', 'ctpz', 'tpt', 'tpz']) obs_y_mean = func.w_average(obs_y, ['CP_T', 'CP_z', 'LR_T', 'LR_z']) len(erai_y_mean[0]) f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) plt.suptitle('Cold point tropopause \n2007-2018 annual mean', fontsize=12) yr = np.linspace(2007,2018,144) lab1 = 'Era5' lab2 = 'Era-I' lab3 = 'GNSS-RO' ax1.plot(yr,era_y_mean[0], lw = 2,label = lab1) ax1.plot(yr, erai_y_mean[0], lw = 2, label = lab2) ax1.plot(yr,obs_y_mean[0] , lw = 2, label = lab3) ax1.legend() ax1.set(ylabel = 'Temperature [K]', xlabel = 'Year') ax2.plot(yr, era_y_mean[1], lw=2, label = lab1) ax2.plot(yr, erai_y_mean[1], lw=2, label = lab2) ax2.plot(yr, obs_y_mean[1], lw = 2, label = lab3) ax2.legend() ax2.set(ylabel = 'Height [km]', xlabel='Year') plt.tight_layout() plt.subplots_adjust(top=0.85) #plt.subplots_adjust(top=2) #plt.savefig('yearly_mean_cpt.png') f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) plt.suptitle('Lapse rate tropopause \n2007-2018 annual mean',fontsize=12) yr = np.linspace(2007,2018,144) ax1.plot(yr,era_y_mean[2], 
lw=2, label = lab1) ax1.plot(yr, erai_y_mean[2], lw = 2,label = lab2) ax1.plot(yr,obs_y_mean[2], lw = 2, label = lab3) ax1.legend() ax1.set(ylabel='Temperature [K]', xlabel='years') ax2.plot(yr, era_y_mean[3], lw = 2,label = lab1) ax2.plot(yr, erai_y_mean[3], lw = 2,label = lab2) ax2.plot(yr, obs_y_mean[3], lw = 2,label = lab3) ax2.legend() ax2.set(ylabel='Height [km]', xlabel='years') plt.tight_layout() plt.subplots_adjust(top=0.85) #plt.savefig('yearly_mean_lrt.png') f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) dlab1 = 'ERA5 - GNSS-RO' dlab2 = 'ERA-I - GNSS-RO' yr = np.linspace(2007,2018,144) ax1.plot(yr,era_y_mean[0] - obs_y_mean[0], lw = 1.5, label = dlab1) ax1.plot(yr, erai_y_mean[0] - obs_y_mean[0], lw = 1.5, label = dlab2) #plt.ylim(-0.75,0.75) ax1.set(title='Difference ERA5 & ERA-I and GNSS-RO \n Cold point temperature', ylabel = 'Temperature difference [K]') ax1.legend() ax2.plot(yr, era_y_mean[2] - obs_y_mean[2], lw = 1.5, label = dlab1) ax2.plot(yr, erai_y_mean[2] - obs_y_mean[2], lw = 1.5, label = dlab2) ax2.set(title='Difference ERA5 & ERA-I and GNSS-RO \n Lapse rate temperature', ylabel = 'Temperature difference [K]') ax2.legend() plt.tight_layout() #plt.savefig('monthly_diff_temp_cpt_lrt.png') f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) ax1.plot(yr,era_y_mean[1] - obs_y_mean[1], lw = 1.5, label = dlab1) ax1.plot(yr, erai_y_mean[1] - obs_y_mean[1], lw = 1.5, label= dlab2) #plt.ylim(-0.75,0.75) ax1.set(title='Difference ERA5 & ERA-I and GNSS-RO \n Cold point height', ylabel='Height difference [km]') ax1.legend() ax2.plot(yr, era_y_mean[3] - obs_y_mean[3], lw = 1.5, label = dlab1) ax2.plot(yr, erai_y_mean[3] - obs_y_mean[3], lw = 1.5, label = dlab2) ax2.set(title='Difference ERA5 & ERA-I and GNSS-RO \n Lapse rate height', ylabel='Height difference [km]') ax2.legend() plt.tight_layout() #plt.savefig('yearly_diff_height_cpt_lrt.png') f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4)) era_y.ctpt[12,:,:].plot(ax=ax1) 
# Lat/lon maps of cold-point tropopause temperature, time index 12, comparing
# ERA-Interim (middle panel) and GNSS-RO (right panel); the commented savefig
# name suggests this is January 2008 — confirm against the time coordinate.
erai_y.ctpt[12,:,:].plot(ax=ax2)
obs_y.CP_T[12,:,:].plot(ax=ax3)
plt.tight_layout()
#f.savefig('latlon_cpt_jan08.png')

# Same three-panel comparison (ERA5 / ERA-Interim / GNSS-RO) for time index 11
# (December 2007, per the commented savefig name).
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
era_y.ctpt[11,:,:].plot(ax=ax1)
erai_y.ctpt[11,:,:].plot(ax=ax2)
obs_y.CP_T[11,:,:].plot(ax=ax3)
plt.tight_layout()
#f.savefig('latlon_cpt_dec07')

# Quick-look map of the observed cold-point temperature at time index 3.
obs_y.CP_T[3,:,:].plot()

# Subset ERA-Interim to the pre-GNSS-RO-overlap period and compute the
# weighted-average cold-point temperature with the project helper.
erai_pre = ds_erai.sel(time=slice('2000-01-15', '2010-12-15'))
ds_erai.time[200:]
erai_pre.ctpt.time
era_pre_av = func.w_average(erai_pre, ['ctpt'])
era_pre_av[0]

# NOTE(review): xax spans 2006-2010 with 59 points while erai_pre was sliced
# from 2000-01-15 — confirm len(era_pre_av[0]) actually matches 59 and that
# the intended x-axis range is 2006-2010 before trusting this plot.
xax = np.linspace(2006,2010,59, dtype=int)
plt.figure()
plt.plot(xax,era_pre_av[0])
```
github_jupyter
# Transforming Vertical Coordinates A common need in the analysis of ocean and atmospheric data is to transform the vertical coordinate from its original coordinate (e.g. depth) to a new coordinate (e.g. density). Xgcm supports this sort of one-dimensional coordinate transform on `Axis` and `Grid` objects using the `transform` method. Two algorithms are implemented: - _Linear interpolation:_ Linear interpolation is designed to interpolate intensive quantities (e.g. temperature) from one coordinate to another. This method is suitable when the target coordinate is monotonically increasing or decreasing and the data variable is intensive. For example, you want to visualize oxygen on density surfaces from a z-coordinate ocean model. - _Conservative remapping:_ This algorithm is designed to conserve extensive quantities (e.g. transport, heat content). It requires knowledge of cell bounds in both the source and target coordinate. It also handles non-monotonic target coordinates. On this page, we explain how to use these coordinate transformation capabilities. ``` from xgcm import Grid import xarray as xr import numpy as np import matplotlib.pyplot as plt ``` ## 1D Toy Data Example First we will create a simple, one-dimensional dataset to illustrate how the `transform` function works. This dataset contains - a coordinate called `z`, representing the original depth coordinate - a data variable called `theta`, a function of `z`, which we want as our new vertical coordinate - a data variable called `phi`, akso a function of `z`, which represents the data we want to transform into this new coordinate space In an oceanic context `theta` might be density and `phi` might be oxygen. In an atmospheric context, `theta` might be potential temperature and `phi` might be potential vorticity. 
``` z = np.arange(2, 12) theta = xr.DataArray(np.log(z), dims=['z'], coords={'z': z}) phi = xr.DataArray(np.flip(np.log(z)*0.5+ np.random.rand(len(z))), dims=['z'], coords={'z':z}) ds = xr.Dataset({'phi': phi, 'theta': theta}) ds ``` Let's plot this data. Note that, for a simple 1D profile, we can easily visualize `phi` in `theta` space by simply plotting `phi` vs. `theta`. This is essentially a form of linear interpolation, performed automatically by matplotlib when it draws lines between the discrete points of our data. ``` def plot_profile(): fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=[8,5]) ds.theta.plot(ax=ax1, y='z', marker='.', yincrease=False) ds.phi.plot(ax=ax2, y='z', marker='.', yincrease=False) ds.swap_dims({'z': 'theta'}).phi.plot(ax=ax3, y='theta', marker='.', yincrease=False) fig.subplots_adjust(wspace=0.5) return ax3 plot_profile(); ``` ### Linear transformation Ok now lets transform `phi` to `theta` coordinates using linear interpolation. A key part of this is to define specific `theta` levels onto which we want to interpolate the data. ``` # First create an xgcm grid object grid = Grid(ds, coords={'Z': {'center':'z'}}, periodic=False) # define the target values in density, linearly spaced theta_target = np.linspace(0, 3, 20) # and transform phi_transformed = grid.transform(ds.phi, 'Z', theta_target, target_data=ds.theta) phi_transformed ``` Now let's see what the result looks like. ``` ax = plot_profile() phi_transformed.plot(ax=ax, y='theta', marker='.') ``` Not too bad. We can increase the number of interpolation levels to capture more of the small scale structure. ``` target_theta = np.linspace(0,3, 100) phi_transformed = grid.transform(ds.phi, 'Z', target_theta, target_data=ds.theta) ax = plot_profile() phi_transformed.plot(ax=ax, y='theta', marker='.') ``` Note that by default, values of `theta_target` which lie outside the range of `theta` have been masked (set to `NaN`). 
To disable this behavior, you can pass `mask_edges=False`; values outside the range of `theta` will be filled with the nearest valid value. ``` target_theta = np.linspace(0,3, 60) phi_transformed = grid.transform(ds.phi, 'Z', target_theta, target_data=ds.theta, mask_edges=False) ax = plot_profile() phi_transformed.plot(ax=ax, y='theta', marker='.') ``` ### Conservative transformation Conservative transformation is designed to preseve the total sum of `phi` over the `Z` axis. It presumes that `phi` is an _extensive quantity_, i.e. a quantity that is already volume weighted, with respect to the Z axis: for example, units of `Kelvins * meters` for heat content, rather than just `Kelvins`. The conservative method requires more input data at the moment. You have to not only specify the coordinates of the cell centers, but also the cell faces (or bounds/boundaries). In xgcm we achieve this by defining the bounding coordinates as the `outer` axis position. The target `theta` values are likewise intepreted as cell boundaries in `theta`-space. In this way, conservative transformation is similar to calculating a histogram. ``` # define the cell bounds in depth zc = np.arange(1,12)+0.5 # add them to the existing dataset ds = ds.assign_coords({'zc': zc}) ds # Recreate the grid object with a staggered `center`/`outer` coordinate layout grid = Grid(ds, coords={'Z':{'center':'z', 'outer':'zc'}}, periodic=False) grid ``` Currently the `target_data`(`theta` in this case) has to be located on the `outer` coordinate for the conservative method (compared to the `center` for the linear method). We can easily interpolate `theta` on the outer coordinate with the grid object. ``` ds['theta_outer'] = grid.interp(ds.theta, 'Z', boundary='fill') ds['theta_outer'] ``` Now lets transform the data using the conservative method. Note that the target values will now be interpreted as cell bounds and not cell centers as before. 
``` # define the target values in density theta_target = np.linspace(0,3, 20) # and transform phi_transformed_cons = grid.transform(ds.phi, 'Z', theta_target, method='conservative', target_data=ds.theta_outer) phi_transformed_cons phi_transformed_cons.plot(y='theta_outer', marker='.', yincrease=False) ``` There is no point in comparing `phi_transformed_cons` directly to `phi` or the results of linear interoplation, since here we have reinterpreted `phi` as an extensive quantity. However, we can verify that the sum of the two quantities over the Z axis is exactly the same. ``` ds.phi.sum().values phi_transformed_cons.sum().values ``` ## Realistic Data Example To illustrate these features in a more realistic example, we use data from the [NCEP Global Ocean Data Assimilation](https://www.cpc.ncep.noaa.gov/products/GODAS/) (GODAS). This data are available from the [Pangeo Cloud Data Library](https://catalog.pangeo.io/browse/master/ocean/GODAS/). We can see that this is a full, global, 4D ocean dataset. ``` from intake import open_catalog cat = open_catalog("https://raw.githubusercontent.com/pangeo-data/pangeo-datastore/master/intake-catalogs/ocean.yaml") ds = cat["GODAS"].to_dask() ds ``` The grid is missing an `outer` coordinate for the Z axis, so we will construct one. This will be needed for conservative interpolation. ``` level_outer_data = np.pad(ds.level_w.values, [1, 0]) level_outer = xr.DataArray( level_outer_data, dims=['level_outer'], coords={'level_outer': ('level_outer', level_outer_data)} ) ds = ds.assign_coords({'level_outer': level_outer}) ``` Now we create a `Grid` object for this dataset. ``` grid = Grid(ds, coords={'Z': {'center': 'level', 'outer': 'level_outer'}, 'X': {'center': 'lon', 'right': 'lon_u'}, 'Y': {'center': 'lat', 'right': 'lat_u'}}, periodic=['X']) grid ``` ### Linear Interpolation To illustrate linear interpolation, we will interpolate salinity onto temperature surface. 
``` # convert to standard units theta = ds.pottmp - 273.15 salt = 1000 * ds.salt target_theta_levels = np.arange(-2, 36) salt_on_theta = grid.transform(salt, 'Z', target_theta_levels, target_data=theta, method='linear') salt_on_theta ``` Note that the computation is lazy. (No data has been downloaded or computed yet.) We can trigger computation by plotting something. ``` salt_on_theta.isel(time=0).sel(pottmp=18).plot() salt_on_theta.isel(time=0).mean(dim='lon').plot(x='lat') ``` ## Conservative Interpolation To do conservative interpolation, we will attempt to calculate the meridional overturning in temperature space. Note that this is not a perfectly precise calculation, since the GODAS data are not exactly volume conserving as provided. However, it's sufficient to illustrate the basic principles of the calculation. To use conservative interpolation, we have to go from an intensive quantity (velocity) to an extensive one (velocity times cell thickness). We fill any missing values with 0, since they don't contribute to the transport. ``` thickness = grid.diff(ds.level_outer, 'Z') v_transport = ds.vcur * thickness v_transport = v_transport.fillna(0.).rename('v_transport') v_transport ``` We also need to interpolate `theta`, our target data for interoplation, to the same horizontal position as `v_transport`. This means moving from cell center to cell corner. This step introduces some considerable errors, particularly near the boundaries of bathymetry. (Xgcm currently has no special treatment for internal boundary conditions--see issue [222](https://github.com/xgcm/xgcm/issues/240).) ``` theta = grid.interp(theta,['X', 'Y'], boundary='extend').rename('theta') theta v_transport_theta = grid.transform(v_transport, 'Z', target_theta_levels, target_data=theta, method='conservative') v_transport_theta ``` Notice that this produced a warning. The `conservative` transformation method natively needs `target_data` to be provided on the cell bounds (here `level_outer`). 
Since transforming onto tracer coordinates is a very common scenario, xgcm uses linear interpolation to infer the values on the `outer` axis position. To demonstrate how to provide provide `target_data` on the outer grid position, we reproduce the steps xgcm executes internally: ``` theta_outer = grid.interp(theta,['Z'], boundary='extend') # the data cannot be chunked along the transformation axis theta_outer = theta_outer.chunk({'level_outer': -1}).rename('theta') theta_outer ``` When we apply the transformation we can see that the results in this case are equivalent: ``` v_transport_theta_manual = grid.transform(v_transport, 'Z', target_theta_levels, target_data=theta_outer, method='conservative') # Warning: this step takes a long time to compute. We will only compare the first time value xr.testing.assert_allclose(v_transport_theta_manual.isel(time=0), v_transport_theta.isel(time=0)) ``` Now we verify visually that the vertically integrated transport is conserved under this transformation. ``` v_transport.isel(time=0).sum(dim='level').plot(robust=True) v_transport_theta.isel(time=0).sum(dim='theta').plot(robust=True) ``` Finally, we attempt to plot a crude meridional overturning streamfunction for a single timestep. ``` dx = 110e3 * np.cos(np.deg2rad(ds.lat_u)) (v_transport_theta.isel(time=0) * dx).sum(dim='lon_u').cumsum(dim='theta').plot.contourf(x='lat_u', levels=31) ```
github_jupyter
# Transpose of a Matrix
#
# In this set of exercises, you will work with the transpose of a matrix. Your
# first task is to write a function that takes the transpose of a matrix. Think
# about how to use nested for loops efficiently. The second task will be to
# write a new matrix multiplication function that takes advantage of your
# matrix transposition function.

# ```
### TODO: Write a function called transpose() that
### takes in a matrix and outputs the transpose of the matrix


def transpose(matrix):
    """Return the transpose of `matrix` (a non-empty list of equal-length rows)."""
    matrix_transpose = []
    # Each column index of the input becomes one row of the output.
    for col in range(len(matrix[0])):
        new_row = []
        for row in matrix:
            new_row.append(row[col])
        matrix_transpose.append(new_row)
    return matrix_transpose


### TODO: Run the code in the cell below. If there is no
### output, then your answers were as expected
assert transpose([[5, 4, 1, 7], [2, 1, 3, 5]]) == [[5, 2], [4, 1], [1, 3], [7, 5]]
assert transpose([[5]]) == [[5]]
assert transpose([[5, 3, 2], [7, 1, 4], [1, 1, 2], [8, 9, 1]]) == [[5, 7, 1, 8], [3, 1, 1, 9], [2, 4, 2, 1]]
# ```

# ### Matrix Multiplication
#
# Now that you have your transpose function working, write a matrix
# multiplication function that takes advantage of the transpose. As part of the
# matrix multiplication code, you might want to re-use your dot product function
# from the matrix multiplication exercises. But you won't need your get_row and
# get_column functions anymore because the transpose essentially takes care of
# turning columns into row vectors. Remember that if matrix A is mxn and matrix
# B is nxp, then the resulting product will be mxp.
# ```
### TODO: Write a function called matrix_multiplication() that
### takes in two matrices and outputs the product of the two
### matrices

### TODO: Copy your dot_product() function here so that you can
### use it in your matrix_multiplication function


def dot_product(matrixA, matrixB):
    """Return the dot product of two equal-length vectors."""
    return sum(a * b for a, b in zip(matrixA, matrixB))


def matrix_multiplication(matrixA, matrixB):
    """Multiply matrixA (m x n) by matrixB (n x p), returning an m x p matrix.

    Transposing matrixB turns its columns into rows, so every output entry is
    a plain dot product of a row of matrixA with a row of the transpose.
    """
    matrixB_transpose = transpose(matrixB)
    return [
        [dot_product(row_a, row_bt) for row_bt in matrixB_transpose]
        for row_a in matrixA
    ]


### TODO: Run the code in the cell below. If there is no
### output, then your answers were as expected
assert matrix_multiplication([[5, 3, 1], [6, 2, 7]], [[4, 2], [8, 1], [7, 4]]) == [[51, 17], [89, 42]]
assert matrix_multiplication([[5]], [[4]]) == [[20]]
assert matrix_multiplication([[2, 8, 1, 2, 9], [7, 9, 1, 10, 5], [8, 4, 11, 98, 2], [5, 5, 4, 4, 1]], [[4], [2], [17], [80], [2]]) == [[219], [873], [8071], [420]]
assert matrix_multiplication([[2, 8, 1, 2, 9], [7, 9, 1, 10, 5], [8, 4, 11, 98, 2], [5, 5, 4, 4, 1]], [[4, 1, 2], [2, 3, 1], [17, 8, 1], [1, 3, 0], [2, 1, 4]]) == [[61, 49, 49], [83, 77, 44], [329, 404, 39], [104, 65, 23]]
# ```
github_jupyter
# 1장 한눈에 보는 머신러닝 #### 감사의 글 자료를 공개한 저자 오렐리앙 제롱과 강의자료를 지원한 한빛아카데미에게 진심어린 감사를 전합니다. ## 1.1 머신러닝이란? - 아서 새뮤얼(Artuhr Samuel), 1959 > 머신러닝은 **명시적인 프로그래밍** 없이 컴퓨터가 학습하는 능력을 갖추게 하는 연구 분야 - 톰 미첼(Tom Michell), 1977 > 어떤 작업 T에 대한 컴퓨터 프로그램의 성능을 P로 측정했을 때 > 경험 E로 인해 성능이 향상되었다면, > 이 컴퓨터 프로그램은 __작업 T와 성능 측정 P에 대해 경험 E로부터 학습한다__고 말한다. ### 머신러닝 프로그램 예제 * 스팸 필터: 스팸(spam)과 스팸이 아닌 메일(ham)의 샘플을 이용하여 스팸 메일 구분법 학습 ### 기본 용어 * __훈련 세트__(training set): 머신러닝 프로그램이 훈련(학습)하는 데 사용하는 데이터 집합 * __훈련 사례__ 혹은 __샘플__: 각각의 훈련 데이터 * 톰 미첼의 정의와의 연계 - 작업 T: 새로운 메일이 스팸 여부 판단 - 경험 E: 훈련 데이터셋 - 성능 P: 예를 들어, 정확히 분류된 메일의 비율 (다른 성능 측정 기준도 가능. 3장 참조) ## 1.2 왜 머신러닝을 사용하는가? ### 전통적인 프로그래밍 <div align="center"><img src="images/ch01/homl01-01.png" width="500"/></div> * 전통적인 프로그래밍 접근 방법은 다음과 같다. * **문제 연구**: 누군가가 문제를 해결하기 위해 해결책을 찾음 * **규칙 작성**: 결정된 규칙을 개발자가 프로그램을 작성 * **평가**: 만들어진 프로그램을 테스트 * 문제가 없다면 **론칭**, 문제가 있다면 **오차를 분석**한 후 처음 과정부터 다시 실시 #### 예제: 스팸 메일 분류 * **특정 단어**가 들어가면 스팸 메일로 처리 * 프로그램이 론칭된 후 새로운 스팸단어가 생겼을 때 소프트웨어는 이 단어를 자동으로 분류할 수 없음 * 개발자가 새로운 규칙을 업데이트 시켜줘야 함 * **새로운 규칙이 생겼을 때 사용자가 매번 업데이트를** 시켜줘야하기 때문에 유지 보수가 어려움 ### 머신러닝 <div align="center"><img src="images/ch01/homl01-02.png" width="500"/></div> #### 예제: 스팸 메일 분류 * 사용자가 스팸으로 지정한 메일에 '4U', 'For U', 'Buy Drugs" 등의 표현이 자주 등장하는 경우 그런 표현을 자동으로 인식하고 메일을 스팸으로 분류하도록 프로그램 스스로 학습 ### 머신러닝 학습 자동화 <div align="center"><img src="images/ch01/homl01-03.png" width="500"/></div> * 머신러닝 작업 흐름의 전체를 __머신러닝 파이프라인__ 또는 __MLOps(Machine Learning Operations, 머신러닝 운영)__라 부르며 자동화가 가능함. 
* 참조: [MLOps: 머신러닝의 지속적 배포 및 자동화 파이프라인](https://cloud.google.com/solutions/machine-learning/mlops-continuous-delivery-and-automation-pipelines-in-machine-learning) ### 머신러닝의 장점 <div align="center"><img src="images/ch01/homl01-04.png" width="500"/></div> * 전통적인 방식으로는 해결 방법이 없는 너무 복잡한 문제 해결 * 새로운 데이터에 쉽게 적응 가능한 시스템 * 데이터 마이닝(data mining): 복잡한 문제와 대량의 데이터에서 통찰 얻기 ## 1.3 적용 사례 ### 대표적인 머신러닝 적용 사례 * 이미지 분류 작업: 생산 라인에서 제품 이미지를 분석해 자동으로 분류 * 시맨틱 분할 작업: 뇌를 스캔하여 종양 진단 * 텍스트 분류(자연어 처리): 자동으로 뉴스 기사 분류 * 텍스트 분류: 토론 포럼에서 부정적인 코멘트를 자동으로 구분 * 텍스트 요약: 긴 문서를 자동으로 요약 * 자연어 이해 : 챗봇(chatbot) 또는 개인 비서 만들기 * 회귀 분석: 회사의 내년도 수익을 예측하기 * 음성 인식: 음성 명령에 반응하는 앱 * 이상치 탐지: 신용 카드 부정 거래 감지 * 군집 작업: 구매 이력을 기반 고객 분류 후 다른 마케팅 전략 계획 * 데이터 시각화: 고차원의 복잡한 데이터셋을 그래프로 효율적 표현 * 추천 시스템: 과거 구매 이력 관심 상품 추천 * 강화 학습: 지능형 게임 봇(bot) 만들기 ## 1.4 머신러닝 시스템 종류 ### 머신러닝 시스템 분류 기준 * 기준 1: 훈련 지도 여부 * 지도 학습 * 비지도 학습 * 준지도 학습 * 강화 학습 * 기준 2: 실시간 훈련 여부 * 온라인 학습 * 배치 학습 * 기준 3: 예측 모델 사용 여부 * 사례 기반 학습 * 모델 기반 학습 #### 분류 기준의 비배타성 * 분류 기준이 상호 배타적이지는 않음. * 스팸 필터 예제 * 심층 신경망 모델 활용 실시간 스팸 메일 분류 학습 가능 * 지도 학습 + 온라인 학습 + 모델 기반 학습 ### 훈련 지도 여부 구분 ### 지도 학습 * 훈련 데이터에 __레이블(label)__이라는 답 포함 * 레이블 대신에 __타깃(target)__이란 표현도 사용됨. 
* 대표적 지도 학습 * 분류 * 회귀 #### 분류 <div align="center"><img src="images/ch01/homl01-05.png" width="500"/></div> * 특성을 사용한 데이터 분류 * 예제: 스팸 필터 * 특성: 소속 정보, 특정 단어 포함 여부 등 * 레이블(타깃): 스팸 또는 햄 #### 회귀 <div align="center"><img src="images/ch01/homl01-06.png" width="400"/></div> * 특성을 사용하여 타깃(target) 수치 예측 * 예제: 중고차 가격 예측 * 특성: 주행거리, 연식, 브랜드 등 * 타깃: 중고차 가격 #### 중요한 지도학습 알고리즘들 * k-최근접 이웃(k-NNs) * 선형 회귀(linear regression) * 로지스틱 회귀(logistic regression) * 서포트 벡터 머신(support vector machines, SVCs) * 결정 트리(decision trees)와 랜덤 포레스트(random forests) * 신경망(neural networks) #### 주의사항 * 일부 회귀/분류 알고리즘을 분류/회귀에 사용 가능 * 예제: 로지스틱 회귀, SVM 등등 ### 비지도 학습 * __레이블 없는 훈련 데이터__를 이용하여 시스템 스스로 학습 <div align="center"><img src="images/ch01/homl01-07.png" width="410"/></div> * 대표적 비지도 학습 * 군집 * 시각화 * 차원 축소 * 연관 규칙 학습 #### 군집 <div align="center"><img src="images/ch01/homl01-08.png" width="400"/></div> * 데이터를 비슷한 특징을 가진 몇 개의 그룹으로 나누는 것 * 예제: 블로그 방문자들을 그룹으로 묶기: 남성, 여성, 주말, 주중, 만화책, SF 등등 * 대표적 군집 알고리즘 * k-평균 * DBSCAN * 계층 군집 분석 #### 시각화 * 다차원 특성을 가진 데이터셋을 2D 또는 3D로 표현하기 * 시각화를 하기 위해서는 데이터의 특성을 2가지로 줄여야함 * 데이터가 구성 패턴에 대한 정보 획득 가능 <div align="center"><img src="images/ch01/homl01-09.png" width="650"/></div> #### 차원 축소 * 데이터의 특성 수 줄이기 * 예제 * 특성 추출: 상관관계가 있는 여러 특성을 하나로 합치기 * 자동차의 주행거리와 연식은 상관관계가 높음. 따라서 차의 '마모정도'라는 하나의 특성으로 합칠 수 있음. * 차원 축소의 장점: 머신러닝 알고리즘의 성능 향상 * 훈련 실행 속도 빨라짐 * 메모리 사용 공간 줄어듬 #### 시각화와 차원축소 알고리즘 * 주성분 분석(PCA) * 커널 PCA * 지역적 선형 임베딩 * t-SNE #### 이상치 탐지(Outlier detection) * 정상 샘플을 이용하여 훈련 후 입력 샘플의 정상여부 판단 후 이상치(outliers) 자동 제거 <div align="center"><img src="images/ch01/homl01-10.png" width="500"/></div> * 예제: 부정거래 사용 감지, 제조 결함 잡아내기 등등 #### 특이치 탐지(Novelty detection) * 전혀 '오염되지 않은'(clean) 훈련 세트 활용 후 훈련 세트에 포함된 데이터와 달라 보이는 테이터 감지하기 #### 이상치 탐지 vs. 특이치 탐지 * 예제: 수 천장의 강아지 사진에 치와와 사진이 1%정도 포함되어 있는 경우 * 특이치 탐지 알고리즘은 새로운 치와와 사진을 특이한 것으로 간주하지 않음. * 반면에 비정상 탐지 알고리즘은 새로운 치와와 사진을 다른 강아지들과 다른 종으로 간주할 수 있음. #### 연관 규칙 학습 * 데이터 특성 간의 흥미로운 관계 찾기 * 예제: 마트 판매 기록 * 바비규 소스와 감자 구매와 스테이크 구매 사의 연관성이 밝혀지면 상품을 서로 가까이 진열해야 함. 
### 준지도 학습 * 레이블이 적용된 적은 수의 샘플이 주어졌을 때 유횽함. * 비지도 학습을 통해 군집을 분류한 후 샘플들을 활용해 지도 학습 실행 * 대부분 지도 학습과 비지도 학습 혼합 사용 #### 준지도 학습 예제 * 아래 그림 참조: 새로운 사례 `X`를 세모에 더 가깝다고 판단함. <div align="center"><img src="images/ch01/homl01-11.png" width="400"/></div> * 구글 포토 호스팅: 가족 사진 몇 장에만 레이블 적용. 이후 모든 사진에서 가족사진 확인 가능. ### 강화 학습 * 에이전트(학습 시스템)가 취한 행동에 대해 보상 또는 벌점을 주어 가장 큰 보상을 받는 방향으로 유도하기 <div align="center"><img src="images/ch01/homl01-12.png" width="400"/></div> * 예제: 딥마인드(DeepMind)의 알파고(AlphaGo) <div align="center"><img src="images/ch01/alphago01.png" width="500"/></div> ### 실시간 훈련 여부 구분 ### 배치 학습(batch learning) * 주어진 훈련 세트 전체를 사용해 오프라인에서 훈련 * 먼저 시스템을 훈련시킨 후 더 이상의 학습 없이 제품 시스템에 적용 * 단점 * 컴퓨팅 자원(cpu, gpu, 메모리, 저장장치 등)이 충분한 경우에만 사용 가능 * 새로운 데이터가 들어오면 처음부터 새롭게 학습해야 함. * 하지만 MLOps 등을 이용한 자동화 가능 ### 온라인 학습(online learing) * 하나씩 또는 적은 양의 데이터 묶음(미니배치, mini-batch)를 사용해 점진적으로 훈련 <div align="center"><img src="images/ch01/homl01-13.png" width="500"/></div> * 단점 * 나쁜 데이터가 주입되는 경우 시스템 성능이 점진적으로 떨어질 수 있음. * 지속적인 시스템 모니터링 필요 * 예제 * 주식가격 시스템 등 실시간 반영이 중요한 시스템 * 스마트폰 등 제한된 자원의 시스템 * 외부 메모리 학습: 매우 큰 데이터셋 활용하는 시스템 <div align="center"><img src="images/ch01/homl01-14.png" width="500"/></div> ### 예측 모델 사용 여부 구분 * 훈련 모델의 __일반화(generalization)__ 방식에 따른 분류 * 일반화 = '새로운 데이터에 대한 예측' ### 사례 기반 학습 * **샘플을 기억**하는 것이 훈련의 전부 * 예측을 위해 기존 샘플과의 **유사도** 측정 * 예제: k-최근접 이웃(k-NN, k-nearest neighbors) 알고리즘 * k-NN 활용 예제: 새로운 샘플 `X`가 기존에 세모인 샘플과의 유사도가 높기 때문에 세모로 분류. <div align="center"><img src="images/ch01/homl01-15.png" width="500"/></div> ### 모델 기반 학습 * 모델을 미리 지정한 후 훈련 세트를 사용해서 모델을 훈련시킴 * 훈련된 모델을 사용해 새로운 데이터에 대한 예측 실행 * 예제: 이 책에서 다루는 대부분의 알고리즘 * 예제: 학습된 모델을 이용하여 새로운 데이터 `X`를 세모 클래스로 분류 <div align="center"><img src="images/ch01/homl01-16.png" width="500"/></div> #### 선형 모델 학습 예제 * 목표: OECD 국가의 1인당 GDP(1인당 국가총생산)와 삶의 만족도 사이의 관계 파악 * 1인당 GDP가 증가할 수록 삶의 만족도가 선형으로 증가하는 것처럼 보임. 
<div align="center"><img src="images/ch01/homl01-17.png" width="500"/></div> * 데이터를 대표하는 하나의 직선(선형 모델)을 찾기 $$ \text{'삶의만족도'} = \theta_0 + \theta_1 \times \text{'1인당GDP'} $$ * 데이터를 대표할 수 있는 선형 방정식을 찾아야 함 <div align="center"><img src="images/ch01/homl01-18.png" width="500"/></div> * 학습되는 모델의 성능 평가 기준을 측정하여 가장 적합한 모델 학습 * 효용 함수: 모델이 얼마나 좋은지 측정 * 비용 함수: 모델이 얼마나 나쁜지 측정 * 아래 선형 모델이 최적! <div align="center"><img src="images/ch01/homl01-19.png" width="500"/></div> ## 1.5 머신러닝의 주요 도전 과제 ### 충분하지 않은 양의 훈련 데이터 * 간단한 문제라도 수천 개 이상의 데이터가 필요 * 이미지나 음성 인식 같은 문제는 수백만 개가 필요할 수도 있음 * 데이터가 부족하면 알고리즘 성능 향성 어려움 * 일반적으로 데이터가 많을 수록 모델의 성능 높아짐. <div align="center"><img src="images/ch01/homl01-20.png" width="400"/></div> ### 대표성 없는 훈련 데이터 * 샘플링 잡음: 우연에 의해 추가된 대표성이 없는 데이터 * 샘플링 편향: 표본 추출 방법이 잘못되어 한 쪽으로 쏠린 대표성이 없는 데이터 * 예제: 1인당 GDP와 삶의 만족도 관계 - 잡음: 빨강 네모 데이터가 추가 될 경우 선형 모델 달라짐. - 편향: OECD 국가중에서 이름에 영어 알파벳 W가 포함된 국가들은 삶의 만족도가 매우 높음. 하지만 일반화는 불가능. <div align="center"><img src="images/ch01/homl01-21.png" width="500"/></div> ### 낮은 품질의 데이터 처리 * 이상치 샘플이라면 고치거나 무시 * 특성이 누락되었다면 * 해당 특성을 제외 * 해당 샘플을 제외 * 누락된 값을 채움 * 해당 특성을 넣은 경우와 뺀 경우 각기 모델을 훈련 ### 관련이 없는 특성 * 풀려는 문제에 관련이 높은 특성을 찾아야 함 * 특성 선택: 준비되어 있는 특성 중 가장 유용한 특성을 찾음 * 특성 추출: 특성을 조합하여 새로운 특성을 만듦 ### 과대적합 * 훈련 세트에 특화되어 일반화 성능이 떨어지는 현상 <div align="center"><img src="images/ch01/homl01-22.png" width="500"/></div> * 규제를 적용해 과대적합을 감소시킬 수 있음 * 파라미터를 조정되는 과정에 규제 적용 * 파랑 점선이 규제를 적용해 훈련된 선형 모델임. <div align="center"><img src="images/ch01/homl01-23.png" width="500"/></div> ### 과소적합 * 모델이 너무 단순해서 훈련 세트를 잘 학습하지 못함 <div align="center"><img src="images/ch04/homl04-06.png" width="400"/></div> * 해결 방법 * 보다 많은 모델 파라미터를 사용하는 모델 적용 * 보다 좋은 특성 활용 * 보다 규제 강도 적용 ## 1.6 테스트와 검증 ### 검증 * 훈련된 모델의 성능 평가: 테스트 세트 활용 * 전체 데이터셋을 훈련 세트(80%)와 테스트 세트(20%)로 구분 * 훈련 세트: 모델 훈련용. * 테스트 세트: 모델 테스트용 * 데이터셋이 매우 크면 테스트 세트 비율을 낮출 수 있음. 
* 검증 기준: __일반화 오차__ * 새로운 샘플에 대한 오류 비율 * 학습된 모델의 일반화 성능의 기준 * 과대 적합: 훈련 오차에 비해 일반화 오차가 높은 경우 ### 하이퍼파라미터(hyper-parameter) * 알고리즘 학습 모델을 지정에 사용되는 파라미터 * 훈련 과정에 변하는 파라미터가 아님 * 하이퍼파라미터를 조절하면서 가장 좋은 성능의 모델 선정 ### 교차 검증 * 예비표본(홀드아웃, holdout) 검증 * 예비표본(검증세트): 훈련 세트의 일부로 만들어진 데이터셋 * 다양한 하이퍼파라미터 값을 사용하는 후보 모델을 평가하는 용도로 예비표본을 활용하는 기법 * 교차 검증 * 여러 개의 검증세트를 사용한 반복적인 예비표본 검증 적용 기법 * 장점: 교차 검증 후 모든 모델의 평가를 평균하면 훨씬 정확한 성능 측정 가능 * 단점: 훈련 시간이 검증 세트의 개수에 비례해 늘어남 ### 검증 예제: 데이터 불일치 * 모델 훈련에 사용된 데이터가 실전에 사용되는 데이터를 완벽하게 대변하지 못하는 경우 * 예제: 꽃이름 확인 알고리즘 * 인터넷으로 구한 꽃사진으로 모델 훈련 * 이후 직접 촬영한 사진으로 진행한 성능측정이 낮게 나오면 __데이터 불일치__ 가능성 높음 * 데이터 불일치 여부 확인 방법 * 훈련-개발 세트: 예를 들어, 인터넷에서 다운로드한 꽃사진의 일부로 이루어진 데이터셋 * 훈련-개발 세트를 제외한 나머지 꽃사진으로 모델 훈련 후, 훈련-개발 세트를 이용한 성능 평가 진행 * 훈련-개발 세트에 대한 평가가 좋은 경우: 과대적합 아님 * 훈련-개발 세티트에 평가는 좋지만 (실제 찍은 사진으로 이루어진) 검증 세트에 대한 평가 나쁜 경우: 데이터 불일치 * 다운로드한 사진을 실제 찍은 사진처럼 보이도록 전처리 한 후에 다시 훈련시키면 성능 향상시킬 수 있음. * 훈련-개발 세트에 대한 평가가 나쁜 경우: 과대적합 * 모델에 규제를 적용하거나 더 많은 훈련 데이터 활용해야 함.
github_jupyter
``` #Setup and imports import pandas as pd import numpy as np df = pd.read_csv('raw/jantojun2020.csv', dtype=object) existing = pd.read_csv('raw/airlines-corgis.csv') #For looking at our new dataset df.head(5).iloc[:,0:] #For looking at the old dataset existing.head(5) existing.iloc[0:10, 5:] #Start similar dataframe from new data new = pd.DataFrame() new['Airport.Code'] = df["ORIGIN"] #Create dictionary of airport codes : airport names codes = pd.Series(existing['Airport.Name'].values, existing['Airport.Code'].values).to_dict() #Add airport names to the new dataframe L = list(codes.keys()) + list(codes.values()) new['Airport.Name'] = new['Airport.Code'].str.extract('(' + '|'.join(L) + ')', expand=False).replace(codes) #Add date information new['Time.Label'] = df['YEAR'] + '/0' + df['MONTH'] new['Time.Month'] = df['MONTH'] months = {'1':'January', '2':'February', '3':'March', '4':'April', '5':'May', '6':'June'} new['Time.Month Name'] = new['Time.Month'].replace(months) new['Time.Year'] = df['YEAR'] #Create dataframe that will hold info about each individual flight new_indiv = new.copy(deep=True) #Ignore flights from airports not in original dataset new = new.dropna(subset=['Airport.Name']) #Reduce rows to one for each Airport.Code/Time.Label combo (like exisiting dataframe) new = new.drop_duplicates(subset=['Airport.Code', 'Time.Label']) #Copy over data from jantojun2020... 
Currently in delay per minutes new_indiv['Statistics.# of Delays.Carrier'] = df['CARRIER_DELAY'] new_indiv['Statistics.# of Delays.Late Aircraft'] = df['LATE_AIRCRAFT_DELAY'] new_indiv['Statistics.# of Delays.National Aviation System'] = df['NAS_DELAY'] new_indiv['Statistics.# of Delays.Security'] = df['SECURITY_DELAY'] new_indiv['Statistics.# of Delays.Weather'] = df['WEATHER_DELAY'] #Replace all delays of 0.0 minutes (no delays) with 0, otherwise replace with 1 (meaning there was a delay) new_indiv['Statistics.# of Delays.Carrier'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Carrier']] new_indiv['Statistics.# of Delays.Late Aircraft'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Late Aircraft']] new_indiv['Statistics.# of Delays.National Aviation System'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.National Aviation System']] new_indiv['Statistics.# of Delays.Security'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Security']] new_indiv['Statistics.# of Delays.Weather'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Weather']] #Create dict to map airline codes to names airlines = {'AA':'American Airlines Inc.', 'AS':'Alaska Airlines Inc.', 'B6':'JetBlue Airways', 'DL':'Delta Air Lines Inc.', 'F9':'Frontier Airlines Inc.', 'G4': np.nan, 'HA':'Hawaiian Airlines Inc.', 'NK':'Spirit Air Lines', 'UA':'United Air Lines Inc.', 'WN': 'Southwest Airlines Co.'} #Transfer over airline names new_indiv['Statistics.Carriers.Names'] = df['MKT_UNIQUE_CARRIER'] #Replace airline codes with names. 
Allegiant airlines not in original dataset, so its name is repalced with NaN new_indiv['Statistics.Carriers.Names'] = new_indiv['Statistics.Carriers.Names'].replace(airlines) #Transfer cancelled flights data new_indiv['Statistics.Flights.Cancelled'] = df['CANCELLED'].astype('int64') #Add delayed flights data (1=Delayed, 0=Not Delayed) df['DEP_DELAY_NEW'] = df['DEP_DELAY_NEW'].astype('float64') new_indiv['Statistics.Flights.Delayed'] = [1 if x > 0 else 0 for x in df['DEP_DELAY_NEW']] #[x if y == 0 else 0 for x in new_indiv['Statistics.Flights.Delayed'] for y in new_indiv['Statistics.Flights.Cancelled']] #No data in the new dataset on diverted or on-time flights #We cannot account for diverted, but we will count on-time as being neither cancelled or delayed #(1 = On-Time, 0 = Not On-Time) new_indiv['Statistics.Flights.On Time'] = new_indiv['Statistics.Flights.Cancelled'] + new_indiv['Statistics.Flights.Delayed'] new_indiv['Statistics.Flights.On Time'] = new_indiv['Statistics.Flights.On Time'].replace(2, 1) new_indiv['Statistics.Flights.On Time'] = [1-x for x in new_indiv['Statistics.Flights.On Time']] #Every flight that is cancelled has its delay status set to 0 (for sake of not double-counting in totals) new_indiv.loc[new_indiv['Statistics.Flights.Cancelled'] > 0, 'Statistics.Flights.Delayed'] = 0 #Transfer data of minutes of delays over new_indiv['Statistics.Minutes Delayed.Carrier'] = df['CARRIER_DELAY'] new_indiv['Statistics.Minutes Delayed.Late Aircraft'] = df['LATE_AIRCRAFT_DELAY'] new_indiv['Statistics.Minutes Delayed.National Aviation System'] = df['NAS_DELAY'] new_indiv['Statistics.Minutes Delayed.Security'] = df['SECURITY_DELAY'] new_indiv['Statistics.Minutes Delayed .Weather'] = df['WEATHER_DELAY'] #Drop and ignore and flights that are at airports not in the original dataset new_indiv = new_indiv.dropna(subset=['Airport.Name']) #Sort "new" df entries by data, then by airport new = new.sort_values(['Time.Label', 'Airport.Code']) #Adding total carrier delay 
info for each airport/data combo entry delays_car = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Carrier'].sum()) new['Statistics.# of Delays.Carrier'] = delays_car #Adding total late aircraft delay info for each airport/data combo entry delays_late = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Late Aircraft'].sum()) new['Statistics.# of Delays.Late Aircraft'] = delays_late #Adding total national aviation system delay info for each airport/data combo entry delays_nas = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.National Aviation System'].sum()) new['Statistics.# of Delays.National Aviation System'] = delays_nas #Adding total security delay info for each airport/data combo entry delays_sec = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Security'].sum()) new['Statistics.# of Delays.Security'] = delays_sec #Adding total weather delay info for each airport/data combo entry delays_wthr = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Weather'].sum()) new['Statistics.# of Delays.Weather'] = delays_wthr #Added as a placeholder for now new['Statistics.Carriers.Names'] = '' #Add number of carriers for each airport/date combo carriers = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.Carriers.Names'].nunique()) new['Statistics.Carriers.Total'] = carriers ##Add flights delayed for each airport/date combo cancelled = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.Flights.Cancelled'].sum()) new['Statistics.Flights.Cancelled'] = cancelled #Add flights cancelled for each airport/date combo delays = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.Flights.Delayed'].sum()) new['Statistics.Flights.Delayed'] = delays #Diverted flights arent in new dataset, so set to null new['Statistics.Flights.Diverted'] = np.nan #Placeholder for now 
new['Statistics.Flights.On Time'] = '' #Add total number of flights for each airport/date combo totals = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Airport.Name'].count()) new['Statistics.Flights.Total'] = totals new.to_csv('airlines-corgis.csv', header=False) ```
github_jupyter
#### ΠΡΟΣΟΧΗ: Τα joblib dumps των τελικών `corpus_tf_idf.pkl` και `som.pkl` δεν περιέχονται στο zip file καθώς είχαν απαγορευτικά μεγάλο μέγεθος. Αυτό ΔΕΝ οφείλεται σε δική μας ελλιπή υλοποίηση, αλλά σε μια ιδιομορφία του corpus που μας αντιστοιχεί και αναγκάζει ορισμένους πίνακες να αντιστοιχίζονται αχρείαστα σε float64. Το πρόβλημα το έχει δει ο κ. Σιόλας, ο οποίος μας έδωσε την άδεια να ανεβάσουμε τα pickles σε ένα drive ώστε να έχετε πρόσβαση σε αυτά. Μας διαβεβαίωσε πως δε θα υπάρξει βαθμολογική ποινή. Τα links των αρχείων: * `corpus_tf_idf.pkl` : https://drive.google.com/open?id=1q5G1fRPwNBhNUzkWNTAqvAZCzY1B0tJF * `som.pkl` : https://drive.google.com/open?id=1V5Je-RfpvQyCgm-F5UDGaPD88gXbdad8 Για οποιαδήποτε απορία ή πρόβλημα στα Links επικοινωνήστε μαζί μας. Για το θέμα που προέκυψε μπορείτε να απευθυνείτε στον κ. Σιόλα. # Neural Networks ECE NTUA Course 2019-20 ~ Team M.B.4 ## Lab Assingment #2: Unsupervised Learning (Recommendation System & SOM) ### A. The Team * Αβραμίδης Κλεάνθης ~ 03115117 * Κρατημένος Άγγελος ~ 03115025 * Πανίδης Κωνσταντίνος ~ 03113602 ### Requested Imports ``` import pandas as pd, numpy as np, scipy as sp import nltk, string, collections import time, joblib from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import PCA from sklearn.cluster import KMeans from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer from nltk.tokenize import word_tokenize import somoclu, matplotlib %matplotlib inline ``` ## Εισαγωγή του Dataset Το σύνολο δεδομένων με το οποίο θα δουλέψουμε είναι βασισμένο στο [Carnegie Mellon Movie Summary Corpus](http://www.cs.cmu.edu/~ark/personas/). Πρόκειται για ένα dataset με περίπου 40.000 περιγραφές ταινιών. Η περιγραφή κάθε ταινίας αποτελείται από τον τίτλο της, μια ή περισσότερες ετικέτες που χαρακτηρίζουν το είδος της ταινίας και τέλος τη σύνοψη της υπόθεσής της. 
Αρχικά εισάγουμε το dataset (χρησιμοποιήστε αυτούσιο τον κώδικα, δεν χρειάζεστε το αρχείο csv) στο dataframe `df_data_1`: ``` dataset_url = "https://drive.google.com/uc?export=download&id=1PdkVDENX12tQliCk_HtUnAUbfxXvnWuG" df_data_1 = pd.read_csv(dataset_url, sep='\t', header=None, quoting=3, error_bad_lines=False) ``` Κάθε ομάδα θα δουλέψει σε ένα μοναδικό υποσύνολο 5.000 ταινιών (διαφορετικό dataset για κάθε ομάδα) ως εξής 1. Κάθε ομάδα μπορεί να βρει [εδώ](https://docs.google.com/spreadsheets/d/1oEr3yuPg22lmMeqDjFtWjJRzmGQ8N57YIuV-ZOvy3dM/edit?usp=sharing) τον μοναδικό αριθμό της "Seed" από 1 έως 78. 2. Το data frame `df_data_2` έχει 78 γραμμές (ομάδες) και 5.000 στήλες. Σε κάθε ομάδα αντιστοιχεί η γραμμή του πίνακα με το `team_seed_number` της. Η γραμμή αυτή θα περιλαμβάνει 5.000 διαφορετικούς αριθμούς που αντιστοιχούν σε ταινίες του αρχικού dataset. 3. Στο επόμενο κελί αλλάξτε τη μεταβλητή `team_seed_number` με το Seed της ομάδας σας από το Google Sheet. 4. Τρέξτε τον κώδικα. Θα προκύψουν τα μοναδικά για κάθε ομάδα titles, categories, catbins, summaries και corpus με τα οποία θα δουλέψετε. ``` # βάλτε το seed που αντιστοιχεί στην ομάδα σας team_seed_number = 19 movie_seeds_url = "https://drive.google.com/uc?export=download&id=1RRoiOjhD0JB3l4oHNFOmPUqZHDphIdwL" df_data_2 = pd.read_csv(movie_seeds_url, header=None, error_bad_lines=False) # επιλέγεται my_index = df_data_2.iloc[team_seed_number,:].values titles = df_data_1.iloc[:, [2]].values[my_index] # movie titles (string) categories = df_data_1.iloc[:, [3]].values[my_index] # movie categories (string) bins = df_data_1.iloc[:, [4]] catbins = bins[4].str.split(',', expand=True).values.astype(np.float)[my_index] # movie categories in binary form summaries = df_data_1.iloc[:, [5]].values[my_index] # movie summaries (string) corpus = summaries[:,0].tolist() # list form of summaries ``` - Ο πίνακας **titles** περιέχει τους τίτλους των ταινιών. Παράδειγμα: 'Sid and Nancy'. 
- O πίνακας **categories** περιέχει τις κατηγορίες (είδη) της ταινίας υπό τη μορφή string. Παράδειγμα: '"Tragedy", "Indie", "Punk rock", "Addiction Drama", "Cult", "Musical", "Drama", "Biopic \[feature\]", "Romantic drama", "Romance Film", "Biographical film"'. Παρατηρούμε ότι είναι μια comma separated λίστα strings, με κάθε string να είναι μια κατηγορία. - Ο πίνακας **catbins** περιλαμβάνει πάλι τις κατηγορίες των ταινιών αλλά σε δυαδική μορφή ([one hot encoding](https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f)). Έχει διαστάσεις 5.000 x 322 (όσες οι διαφορετικές κατηγορίες). Αν η ταινία ανήκει στο συγκεκριμένο είδος η αντίστοιχη στήλη παίρνει την τιμή 1, αλλιώς παίρνει την τιμή 0. - Ο πίνακας **summaries** και η λίστα **corpus** περιλαμβάνουν τις συνόψεις των ταινιών (η corpus είναι απλά ο summaries σε μορφή λίστας). Κάθε σύνοψη είναι ένα (συνήθως μεγάλο) string. Παράδειγμα: *'The film is based on the real story of a Soviet Internal Troops soldier who killed his entire unit as a result of Dedovschina. The plot unfolds mostly on board of the prisoner transport rail car guarded by a unit of paramilitary conscripts.'* - Θεωρούμε ως **ID** της κάθε ταινίας τον αριθμό γραμμής της ή το αντίστοιχο στοιχείο της λίστας. Παράδειγμα: για να τυπώσουμε τη σύνοψη της ταινίας με `ID=99` (την εκατοστή) θα γράψουμε `print(corpus[99])`. ``` ID = 99 print(titles[ID]) print(categories[ID]) print(catbins[ID]) print(corpus[ID]) ``` # Υλοποίηση συστήματος συστάσεων ταινιών βασισμένο στο περιεχόμενο <img src="http://clture.org/wp-content/uploads/2015/12/Netflix-Streaming-End-of-Year-Posts.jpg" width="70%"> Η πρώτη εφαρμογή που θα αναπτύξετε θα είναι ένα [σύστημα συστάσεων](https://en.wikipedia.org/wiki/Recommender_system) ταινιών βασισμένο στο περιεχόμενο (content based recommender system). 
Τα συστήματα συστάσεων στοχεύουν στο να προτείνουν αυτόματα στο χρήστη αντικείμενα από μια συλλογή τα οποία ιδανικά θέλουμε να βρει ενδιαφέροντα ο χρήστης. Η κατηγοριοποίηση των συστημάτων συστάσεων βασίζεται στο πώς γίνεται η επιλογή (filtering) των συστηνόμενων αντικειμένων. Οι δύο κύριες κατηγορίες είναι η συνεργατική διήθηση (collaborative filtering) όπου το σύστημα προτείνει στο χρήστη αντικείμενα που έχουν αξιολογηθεί θετικά από χρήστες που έχουν παρόμοιο με αυτόν ιστορικό αξιολογήσεων και η διήθηση με βάση το περιεχόμενο (content based filtering), όπου προτείνονται στο χρήστη αντικείμενα με παρόμοιο περιεχόμενο (με βάση κάποια χαρακτηριστικά) με αυτά που έχει προηγουμένως αξιολογήσει θετικά. Το σύστημα συστάσεων που θα αναπτύξετε θα βασίζεται στο **περιεχόμενο** και συγκεκριμένα στις συνόψεις των ταινιών (corpus). ## Προσθήκη stop words που βρέθηκαν εμπειρικά οτι βελτιώνουν τις συστάσεις Προστέθηκαν ονόματα και συχνές λέξεις στην περιγραφή ταινιών (πχ plot,story,film) που δεν προσθέτουν αξία στο περιεχόμενο της περιγραφής: ``` nltk.download("stopwords") name_file = open("stopwords.txt",'r') names = [line.split(',') for line in name_file.readlines()] name_stopwords = names[0] for i in range(len(name_stopwords)): name_stopwords[i]=name_stopwords[i].strip() movie_words=["story","film","plot","about","movie",'000','mother','father','sister','brother','daughter','son','village' '10', '12', '15', '20','00', '01', '02', '04', '05', '06', '07', '08', '09', '100', '1000', '10th', '11', '120', '13', '13th', '14', '14th', '150', '15th', '16', '16th', '17', '18'] my_stopwords = stopwords.words('english') + movie_words + name_stopwords ``` ## Stemming & TF-IDF Προχωρούμε σε περαιτέρω επεξεργασία του corpus μας εστιάζοντας στο stem των λέξεων και αγνοώντας σχετικά λήμματα, προς αύξηση της αποδοτικότητας. 
Στη συνέχεια κάνουμε τη ζητούμενη μετατροπή σε tf-idf: ``` def thorough_filter(words): filtered_words = [] for word in words: pun = [] for letter in word: pun.append(letter in string.punctuation) if not all(pun): filtered_words.append(word) return filtered_words def preprocess_document(document): words = nltk.word_tokenize(document.lower()) porter_stemmer = PorterStemmer() stemmed_words = [porter_stemmer.stem(word) for word in words] return (" ".join(stemmed_words)) # απαραίτητα download για τους stemmer/lemmatizer/tokenizer nltk.download('wordnet') nltk.download('rslp') nltk.download('punkt') stemmed_corpus = [preprocess_document(corp) for corp in corpus] vectorizer = TfidfVectorizer(max_df=0.2, min_df=0.01, analyzer='word', stop_words = my_stopwords, ngram_range=(1,1)) corpus_tf_idf = vectorizer.fit_transform(stemmed_corpus).toarray() print("corpus after tf-idf",corpus_tf_idf.shape) joblib.dump(corpus_tf_idf, 'corpus_tf_idf.pkl') ``` Η συνάρτηση [TfidfVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) όπως καλείται εδώ **είναι βελτιστοποιημένη**. Οι επιλογές των μεθόδων και παραμέτρων που κάναμε έχουν **σημαντική επίδραση στην ποιότητα των συστάσεων, στη διαστατικότητα και τον όγκο των δεδομένων, κατά συνέπεια και στους χρόνους εκπαίδευσης**. ## Υλοποίηση του συστήματος συστάσεων Το σύστημα συστάσεων που θα παραδώσετε θα είναι μια συνάρτηση `content_recommender` με δύο ορίσματα `target_movie` και `max_recommendations`. Στην `target_movie` περνάμε το ID μιας ταινίας-στόχου για την οποία μας ενδιαφέρει να βρούμε παρόμοιες ως προς το περιεχόμενο (τη σύνοψη) ταινίες, `max_recommendations` στο πλήθος. 
Υλοποιήστε τη συνάρτηση ως εξής: - για την ταινία-στόχο, από το `corpus_tf_idf` υπολογίστε την [ομοιότητα συνημιτόνου](https://en.wikipedia.org/wiki/Cosine_similarity) της με όλες τις ταινίες της συλλογής σας - με βάση την ομοιότητα συνημιτόνου που υπολογίσατε, δημιουργήστε ταξινομημένο πίνακα από το μεγαλύτερο στο μικρότερο, με τα indices (`ID`) των ταινιών. Παράδειγμα: αν η ταινία με index 1 έχει ομοιότητα συνημιτόνου με 3 ταινίες \[0.2 1 0.6\] (έχει ομοιότητα 1 με τον εαύτό της) ο ταξινομημένος αυτός πίνακας indices θα είναι \[1 2 0\]. - Για την ταινία-στόχο εκτυπώστε: id, τίτλο, σύνοψη, κατηγορίες (categories) - Για τις `max_recommendations` ταινίες (πλην της ίδιας της ταινίας-στόχου που έχει cosine similarity 1 με τον εαυτό της) με τη μεγαλύτερη ομοιότητα συνημιτόνου (σε φθίνουσα σειρά), τυπώστε σειρά σύστασης (1 πιο κοντινή, 2 η δεύτερη πιο κοντινή κλπ), id, τίτλο, σύνοψη, κατηγορίες (categories) ``` def movie_info(movie_id): print(*titles[movie_id].flatten(),"~ ID:",movie_id) print("Category: ",*categories[movie_id].flatten()) def get_distances(target_movie_id,corpus): distances = np.zeros((corpus.shape[0])) for i in range(corpus.shape[0]): distances[i]=sp.spatial.distance.cosine(corpus[target_movie_id],corpus[i]) return distances def content_recommender(target_movie,max_recomendations): distances = get_distances(target_movie,corpus_tf_idf) similarity = np.argsort(distances) # similarity = 1-distance similarity = similarity[:max_recomendations+1] for i in similarity: if i==target_movie: print("Target Movie:",end=" ") movie_info(i) print("\nRecommendations:\n") else: movie_info(i); print() ``` ## Βελτιστοποίηση Αφού υλοποιήσετε τη συνάρτηση `content_recommender` χρησιμοποιήστε τη για να βελτιστοποιήσετε την `TfidfVectorizer`. Συγκεκριμένα, αρχικά μπορείτε να δείτε τι επιστρέφει το σύστημα για τυχαίες ταινίες-στόχους και για ένα μικρό `max_recommendations` (2 ή 3). 
Αν σε κάποιες ταινίες το σύστημα μοιάζει να επιστρέφει σημασιολογικά κοντινές ταινίες σημειώστε το `ID` τους. Δοκιμάστε στη συνέχεια να βελτιστοποιήσετε την `TfidfVectorizer` για τα συγκεκριμένα `ID` ώστε να επιστρέφονται σημασιολογικά κοντινές ταινίες για μεγαλύτερο αριθμό `max_recommendations`. Παράλληλα, όσο βελτιστοποιείτε την `TfidfVectorizer`, θα πρέπει να λαμβάνετε καλές συστάσεις για μεγαλύτερο αριθμό τυχαίων ταινιών. Μπορείτε επίσης να βελτιστοποιήσετε τη συνάρτηση παρατηρώντας πολλά φαινόμενα που το σύστημα εκλαμβάνει ως ομοιότητα περιεχομένου ενώ επί της ουσίας δεν είναι επιθυμητό να συνυπολογίζονται (δείτε σχετικά το [FAQ](https://docs.google.com/document/d/1-E4eQkVnTxa3Jb0HL9OAs11bugYRRZ7RNWpu7yh9G4s/edit?usp=sharing)). Ταυτόχρονα, μια άλλη κατεύθυνση της βελτιστοποίησης είναι να χρησιμοποιείτε τις παραμέτρους του `TfidfVectorizer` έτσι ώστε να μειώνονται οι διαστάσεις του Vector Space Model μέχρι το σημείο που θα αρχίσει να εμφανίζονται ποιοτικές επιπτώσεις. ``` corpus_tf_idf = joblib.load('corpus_tf_idf.pkl') content_recommender(120,5) ``` ## Επεξήγηση επιλογών και ποιοτική ερμηνεία Περιγράψτε πώς προχωρήσατε στις επιλογές σας για τη βελτιστοποίηση της `TfidfVectorizer`. Δώστε 10 παραδείγματα (IDs) που επιστρέφουν καλά αποτελέσματα μέχρι `max_recommendations` (5 και παραπάνω) και σημειώστε ποια είναι η θεματική που ενώνει τις ταινίες. ### Διαδικασία βελτιστοποίησης Ακολουθήσαμε την πρόταση της εκφώνησης της εργασίας και ξεκινήσαμε την τροποποίηση των παραμέτρων του TfidfVectorizer αρχικά για 2 προτάσεις, και στην συνέχεια για 5 προτάσεις. Αρχικά παρατηρήσαμε οτι η παράμετρος max_df του TfidfVectorizer δεν αφαιρούσε περισσότερα features για τιμές άνω του 0.4, και για τιμές χαμηλότερες απο αυτό μείωνε την ποιότητα των προτάσεων. Επίσης η παράμετρος ngram_range δοκιμάστηκε με τιμές (1,1)-only unigrams , (1,2)-unigrams and bigrams , (1,3)-unigrams bigrams and trigrams αλλα δεν επηρέασε θετικά τα αποτελέσματα οπότε αφέθηκε στην τιμη (1,1). 
Οπότε, η βελτιστοποίηση του TfidfVectorizer έγινε με την τροποποίηση της παραμέτρου min_df και την εισαγωγή stop words με ταυτόχρονη παρατήρηση των προτάσεων που προκύπτουν. Τα 10 ζητούμενα παραδείγματα: ``` movie_list = [10,20,21,100,120,222,540,2020,2859,3130] cats = ["Crime Fiction","Action/Adventure","Drama","Crime Fiction","Adventure", "Thriller","Horror","Science Fiction","Drama","Science Fiction"] for i in range(10): content_recommender(movie_list[i],5) print("Common category:",cats[i]," -------------------------------------------------------------\n") ``` # Τοπολογική και σημασιολογική απεικόνιση της ταινιών με χρήση SOM <img src="https://drive.google.com/uc?export=download&id=1R1R7Ds9UEfhjOY_fk_3wcTjsM0rI4WLl" width="60%"> ## Δημιουργία dataset Στη δεύτερη εφαρμογή θα βασιστούμε στις τοπολογικές ιδιότητες των Self Organizing Maps (SOM) για να φτιάξουμε ενά χάρτη (grid) δύο διαστάσεων όπου θα απεικονίζονται όλες οι ταινίες της συλλογής της ομάδας με τρόπο χωρικά συνεκτικό ως προς το περιεχόμενο και κυρίως το είδος τους. Η `build_final_set` αρχικά μετατρέπει την αραιή αναπαράσταση tf-idf της εξόδου της `TfidfVectorizer()` σε πυκνή (η [αραιή αναπαράσταση](https://en.wikipedia.org/wiki/Sparse_matrix) έχει τιμές μόνο για τα μη μηδενικά στοιχεία). Στη συνέχεια ενώνει την πυκνή `dense_tf_idf` αναπαράσταση και τις binarized κατηγορίες `catbins` των ταινιών ως επιπλέον στήλες (χαρακτηριστικά). Συνεπώς, κάθε ταινία αναπαρίσταται στο Vector Space Model από τα χαρακτηριστικά του TFIDF και τις κατηγορίες της. Τέλος, δέχεται ένα ορισμα για το πόσες ταινίες να επιστρέψει, με default τιμή όλες τις ταινίες (5000). Αυτό είναι χρήσιμο για να μπορείτε αν θέλετε να φτιάχνετε μικρότερα σύνολα δεδομένων ώστε να εκπαιδεύεται ταχύτερα το SOM. 
``` def build_final_set(doc_limit=5000, tf_idf_only=False): # convert sparse tf_idf to dense tf_idf representation dense_tf_idf = corpus_tf_idf[0:doc_limit,:] if tf_idf_only: # use only tf_idf final_set = dense_tf_idf else: # append the binary categories features horizontaly to the (dense) tf_idf features final_set = np.hstack((dense_tf_idf, catbins[0:doc_limit,:])) # η somoclu θέλει δεδομένα σε float32 return np.array(final_set, dtype=np.float32) final_set = build_final_set() ``` Τυπώνουμε τις διαστάσεις του τελικού dataset μας. Χωρίς βελτιστοποίηση του TFIDF θα έχουμε περίπου 50.000 χαρακτηριστικά. ``` final_set.shape ``` Με βάση την εμπειρία σας στην προετοιμασία των δεδομένων στην επιβλεπόμενη μάθηση, υπάρχει κάποιο βήμα προεπεξεργασίας που θα μπορούσε να εφαρμοστεί σε αυτό το dataset; >Θα μπορούσαμε με PCA να μειώσουμε τις διαστάσεις: ``` pca = PCA(n_components=0.97) pca_final_set = pca.fit_transform(final_set) print(pca_final_set.shape) print("Decrease of components:", round ((1-pca_final_set.shape[1]/final_set.shape[1])*100, 2 ),"%") ``` Παρατηρούμε οτι με διατήρηση 97% της διασποράς των χαρακτηριστικών μπορούμε να μειώσουμε τις διαστάσεις πάνω από 40%. ## Εκπαίδευση χάρτη SOM Θα δουλέψουμε με τη βιβλιοθήκη SOM ["Somoclu"](http://somoclu.readthedocs.io/en/stable/index.html). Καταρχάς διαβάστε το [function reference](http://somoclu.readthedocs.io/en/stable/reference.html) του somoclu. Θα δoυλέψουμε με χάρτη τύπου planar, παραλληλόγραμμου σχήματος νευρώνων με τυχαία αρχικοποίηση (όλα αυτά είναι default). Μπορείτε να δοκιμάσετε διάφορα μεγέθη χάρτη ωστόσο όσο ο αριθμός των νευρώνων μεγαλώνει, μεγαλώνει και ο χρόνος εκπαίδευσης. Για το training δεν χρειάζεται να ξεπεράσετε τα 100 epochs. Σε γενικές γραμμές μπορούμε να βασιστούμε στις default παραμέτρους μέχρι να έχουμε τη δυνατότητα να οπτικοποιήσουμε και να αναλύσουμε ποιοτικά τα αποτελέσματα. Ξεκινήστε με ένα χάρτη 10 x 10, 100 epochs training και ένα υποσύνολο των ταινιών (π.χ. 2000). 
Χρησιμοποιήστε την `time` για να έχετε μια εικόνα των χρόνων εκπαίδευσης. Ενδεικτικά, με σωστή κωδικοποίηση tf-idf, μικροί χάρτες για λίγα δεδομένα (1000-2000) παίρνουν γύρω στο ένα λεπτό ενώ μεγαλύτεροι χάρτες με όλα τα δεδομένα μπορούν να πάρουν 10-15 λεπτά ή και περισσότερο. ``` n_rows, n_columns = 30, 30 som = somoclu.Somoclu(n_columns, n_rows, compactsupport=False) %time som.train(final_set,epochs=100) ``` Λόγω του προβλήματος στο training των μοντέλων Somoclu που αναφέρεται στο import του μοντέλου, η εκπαίδευση έγινε στο Colab. Αποθηκεύουμε το προκύπτον μοντέλο και το λαμβάνουμε εδώ: ``` som = joblib.load("som.pkl") ``` ## Best matching units Μετά από κάθε εκπαίδευση αποθηκεύστε σε μια μεταβλητή τα best matching units (bmus) για κάθε ταινία. Τα bmus μας δείχνουν σε ποιο νευρώνα ανήκει η κάθε ταινία. Προσοχή: η σύμβαση των συντεταγμένων των νευρώνων είναι (στήλη, γραμμή) δηλαδή το ανάποδο από την Python. Με χρήση της [np.unique](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.unique.html) (μια πολύ χρήσιμη συνάρτηση στην άσκηση) αποθηκεύστε τα μοναδικά best matching units και τους δείκτες τους (indices) προς τις ταινίες. Σημειώστε ότι μπορεί να έχετε λιγότερα μοναδικά bmus από αριθμό νευρώνων γιατί μπορεί σε κάποιους νευρώνες να μην έχουν ανατεθεί ταινίες. Ως αριθμό νευρώνα θα θεωρήσουμε τον αριθμό γραμμής στον πίνακα μοναδικών bmus. ``` bmus, indices = np.unique(som.bmus,axis=0,return_index=True) ``` ## Ομαδοποίηση (clustering) Τυπικά, η ομαδοποίηση σε ένα χάρτη SOM προκύπτει από το unified distance matrix (U-matrix): για κάθε κόμβο υπολογίζεται η μέση απόστασή του από τους γειτονικούς κόμβους. Εάν χρησιμοποιηθεί μπλε χρώμα στις περιοχές του χάρτη όπου η τιμή αυτή είναι χαμηλή (μικρή απόσταση) και κόκκινο εκεί που η τιμή είναι υψηλή (μεγάλη απόσταση), τότε μπορούμε να πούμε ότι οι μπλε περιοχές αποτελούν clusters και οι κόκκινες αποτελούν σύνορα μεταξύ clusters. 
To somoclu δίνει την επιπρόσθετη δυνατότητα να κάνουμε ομαδοποίηση των νευρώνων χρησιμοποιώντας οποιονδήποτε αλγόριθμο ομαδοποίησης του scikit-learn. Στην άσκηση θα χρησιμοποιήσουμε τον k-Means. Για τον αρχικό σας χάρτη δοκιμάστε ένα k=20 ή 25. Οι δύο προσεγγίσεις ομαδοποίησης είναι διαφορετικές, οπότε περιμένουμε τα αποτελέσματα να είναι κοντά αλλά όχι τα ίδια. ``` som.cluster(KMeans(n_clusters=30)) ``` ## Αποθήκευση του SOM Επειδή η αρχικοποίηση του SOM γίνεται τυχαία και το clustering είναι και αυτό στοχαστική διαδικασία, οι θέσεις και οι ετικέτες των νευρώνων και των clusters θα είναι διαφορετικές κάθε φορά που τρέχετε τον χάρτη, ακόμα και με τις ίδιες παραμέτρους. Για να αποθηκεύσετε ένα συγκεκριμένο som και clustering χρησιμοποιήστε και πάλι την `joblib`. Μετά την ανάκληση ενός SOM θυμηθείτε να ακολουθήσετε τη διαδικασία για τα bmus. ``` joblib.dump(som,'som.pkl') ``` ## Οπτικοποίηση U-matrix, clustering και μέγεθος clusters Για την εκτύπωση του U-matrix χρησιμοποιήστε τη `view_umatrix` με ορίσματα `bestmatches=True` και `figsize=(15, 15)` ή `figsize=(20, 20)`. Τα διαφορετικά χρώματα που εμφανίζονται στους κόμβους αντιπροσωπεύουν τα διαφορετικά clusters που προκύπτουν από τον k-Means. Μπορείτε να εμφανίσετε τη λεζάντα του U-matrix με το όρισμα `colorbar`. Μην τυπώνετε τις ετικέτες (labels) των δειγμάτων, είναι πολύ μεγάλος ο αριθμός τους. ``` som.view_umatrix(bestmatches=True, colorbar=True,figsize=(15, 15)); matplotlib.pyplot.show() ``` Για μια δεύτερη πιο ξεκάθαρη οπτικοποίηση του clustering τυπώστε απευθείας τη μεταβλητή `clusters`. ``` print(som.clusters) ``` Τέλος, χρησιμοποιώντας πάλι την `np.unique` (με διαφορετικό όρισμα) και την `np.argsort` (υπάρχουν και άλλοι τρόποι υλοποίησης) εκτυπώστε τις ετικέτες των clusters (αριθμοί από 0 έως k-1) και τον αριθμό των νευρώνων σε κάθε cluster, με φθίνουσα ή αύξουσα σειρά ως προς τον αριθμό των νευρώνων. Ουσιαστικά είναι ένα εργαλείο για να βρίσκετε εύκολα τα μεγάλα και μικρά clusters. 
``` print("\nClusters sorted by increasing number of neurons: (cluster_index,num_neurons)") values,counts = np.unique(som.clusters,return_counts=True) sorted_counts = np.argsort(counts) print(np.array([list(values[sorted_counts]),list(counts[sorted_counts])])) ``` ## Σημασιολογική ερμηνεία των clusters Προκειμένου να μελετήσουμε τις τοπολογικές ιδιότητες του SOM και το αν έχουν ενσωματώσει σημασιολογική πληροφορία για τις ταινίες διαμέσου της διανυσματικής αναπαράστασης με το tf-idf και των κατηγοριών, χρειαζόμαστε ένα κριτήριο ποιοτικής επισκόπησης των clusters. Θα υλοποιήσουμε το εξής κριτήριο: Λαμβάνουμε όρισμα έναν αριθμό (ετικέτα) cluster. Για το cluster αυτό βρίσκουμε όλους τους νευρώνες που του έχουν ανατεθεί από τον k-Means. Για όλους τους νευρώνες αυτούς βρίσκουμε όλες τις ταινίες που τους έχουν ανατεθεί (για τις οποίες αποτελούν bmus). Για όλες αυτές τις ταινίες τυπώνουμε ταξινομημένη τη συνολική στατιστική όλων των ειδών (κατηγοριών) και τις συχνότητές τους. Αν το cluster διαθέτει καλή συνοχή και εξειδίκευση, θα πρέπει κάποιες κατηγορίες να έχουν σαφώς μεγαλύτερη συχνότητα από τις υπόλοιπες. Θα μπορούμε τότε να αναθέσουμε αυτήν/ές την/τις κατηγορία/ες ως ετικέτες κινηματογραφικού είδους στο cluster. Μπορείτε να υλοποιήσετε τη συνάρτηση αυτή όπως θέλετε. Μια πιθανή διαδικασία θα μπορούσε να είναι η ακόλουθη: 1. Ορίζουμε συνάρτηση `print_categories_stats` που δέχεται ως είσοδο λίστα με ids ταινιών. Δημιουργούμε μια κενή λίστα συνολικών κατηγοριών. Στη συνέχεια, για κάθε ταινία επεξεργαζόμαστε το string `categories` ως εξής: δημιουργούμε μια λίστα διαχωρίζοντας το string κατάλληλα με την `split` και αφαιρούμε τα whitespaces μεταξύ ετικετών με την `strip`. Προσθέτουμε τη λίστα αυτή στη συνολική λίστα κατηγοριών με την `extend`. Τέλος χρησιμοποιούμε πάλι την `np.unique` για να μετρήσουμε συχνότητα μοναδικών ετικετών κατηγοριών και ταξινομούμε με την `np.argsort`. Τυπώνουμε τις κατηγορίες και τις συχνότητες εμφάνισης ταξινομημένα. 
Χρήσιμες μπορεί να σας φανούν και οι `np.ravel`, `np.nditer`, `np.array2string` και `zip`. ``` def print_categories_stats(ID_list): total_categories = [] for i in ID_list: cat = [category.strip(" ").strip('"') for category in categories[i][0].split(",")] total_categories.extend(cat) result,counts = np.unique(total_categories,return_counts=True) sorted_counts = np.argsort(-counts) final = [(result[n],counts[n]) for n in sorted_counts] print("Overall Cluster Genres stats:") print(final); return ``` 2. Ορίζουμε τη βασική μας συνάρτηση `print_cluster_neurons_movies_report` που δέχεται ως όρισμα τον αριθμό ενός cluster. Με τη χρήση της `np.where` μπορούμε να βρούμε τις συντεταγμένες των bmus που αντιστοιχούν στο cluster και με την `column_stack` να φτιάξουμε έναν πίνακα bmus για το cluster. Προσοχή στη σειρά (στήλη - σειρά) στον πίνακα bmus. Για κάθε bmu αυτού του πίνακα ελέγχουμε αν υπάρχει στον πίνακα μοναδικών bmus που έχουμε υπολογίσει στην αρχή συνολικά και αν ναι προσθέτουμε το αντίστοιχο index του νευρώνα σε μια λίστα. Χρήσιμες μπορεί να είναι και οι `np.rollaxis`, `np.append`, `np.asscalar`. Επίσης πιθανώς να πρέπει να υλοποιήσετε ένα κριτήριο ομοιότητας μεταξύ ενός bmu και ενός μοναδικού bmu από τον αρχικό πίνακα bmus. ``` def where_is_same(a,b): return np.where(np.all(a==b,axis=1))[0] def print_cluster_neurons_movies_report(cluster): cluster_bmus = np.column_stack(np.where(som.clusters==cluster)[::-1]) li = [indices[where_is_same(bmu,bmus)] for bmu in cluster_bmus if bmu in bmus] li = [ind[0] for ind in li if len(ind)] return cluster_bmus ``` 3. Υλοποιούμε μια βοηθητική συνάρτηση `neuron_movies_report`. Λαμβάνει ένα σύνολο νευρώνων από την `print_cluster_neurons_movies_report` και μέσω της `indices` φτιάχνει μια λίστα με το σύνολο ταινιών που ανήκουν σε αυτούς τους νευρώνες. Στο τέλος καλεί με αυτή τη λίστα την `print_categories_stats` που τυπώνει τις στατιστικές των κατηγοριών. 
``` def neuron_movies_report(neurons): id_list = [] index = [where_is_same(som.bmus,neuron) for neuron in neurons] index = [list(i) for i in index if len(i)] for i in index: id_list += i return id_list ``` Μπορείτε βέβαια να προσθέσετε οποιαδήποτε επιπλέον έξοδο σας βοηθάει. Μια χρήσιμη έξοδος είναι πόσοι νευρώνες ανήκουν στο cluster και σε πόσους και ποιους από αυτούς έχουν ανατεθεί ταινίες. Θα επιτελούμε τη σημασιολογική ερμηνεία του χάρτη καλώντας την `print_cluster_neurons_movies_report` με τον αριθμό ενός cluster που μας ενδιαφέρει. Παράδειγμα εξόδου για ένα cluster (μη βελτιστοποιημένος χάρτης): ``` Overall Cluster Genres stats: [('"Horror"', 86), ('"Science Fiction"', 24), ('"B-movie"', 16), ('"Monster movie"', 10), ('"Creature Film"', 10), ('"Indie"', 9), ('"Zombie Film"', 9), ('"Slasher"', 8), ('"World cinema"', 8), ('"Sci-Fi Horror"', 7), ('"Natural horror films"', 6), ('"Supernatural"', 6), ('"Thriller"', 6), ('"Cult"', 5), ('"Black-and-white"', 5), ('"Japanese Movies"', 4), ('"Short Film"', 3), ('"Drama"', 3), ('"Psychological thriller"', 3), ('"Crime Fiction"', 3), ('"Monster"', 3), ('"Comedy"', 2), ('"Western"', 2), ('"Horror Comedy"', 2), ('"Archaeology"', 2), ('"Alien Film"', 2), ('"Teen"', 2), ('"Mystery"', 2), ('"Adventure"', 2), ('"Comedy film"', 2), ('"Combat Films"', 1), ('"Chinese Movies"', 1), ('"Action/Adventure"', 1), ('"Gothic Film"', 1), ('"Costume drama"', 1), ('"Disaster"', 1), ('"Docudrama"', 1), ('"Film adaptation"', 1), ('"Film noir"', 1), ('"Parody"', 1), ('"Period piece"', 1), ('"Action"', 1)]``` ``` cluster = 7 cluster_neurons = print_cluster_neurons_movies_report(cluster) id_list = neuron_movies_report(cluster_neurons) print_categories_stats(id_list) ``` Βλέπουμε πως το 7ο cluster συγκεντρώνει κυρίως ταινίες δράματος, με τις υπόλοιπες κατηγορίες να είναι αρκετά σπανιότερες. 
## Tips για το SOM και το clustering - Για την ομαδοποίηση ένα U-matrix καλό είναι να εμφανίζει και μπλε-πράσινες περιοχές (clusters) και κόκκινες περιοχές (ορίων). Παρατηρήστε ποια σχέση υπάρχει μεταξύ αριθμού ταινιών στο final set, μεγέθους grid και ποιότητας U-matrix. - Για το k του k-Means προσπαθήστε να προσεγγίζει σχετικά τα clusters του U-matrix (όπως είπαμε είναι διαφορετικοί μέθοδοι clustering). Μικρός αριθμός k δεν θα σέβεται τα όρια. Μεγάλος αριθμός θα δημιουργεί υπο-clusters εντός των clusters που φαίνονται στο U-matrix. Το τελευταίο δεν είναι απαραίτητα κακό, αλλά μεγαλώνει τον αριθμό clusters που πρέπει να αναλυθούν σημασιολογικά. - Σε μικρούς χάρτες και με μικρά final sets δοκιμάστε διαφορετικές παραμέτρους για την εκπαίδευση του SOM. Σημειώστε τυχόν παραμέτρους που επηρεάζουν την ποιότητα του clustering για το dataset σας ώστε να τις εφαρμόσετε στους μεγάλους χάρτες. - Κάποια τοπολογικά χαρακτηριστικά εμφανίζονται ήδη σε μικρούς χάρτες. Κάποια άλλα χρειάζονται μεγαλύτερους χάρτες. Δοκιμάστε μεγέθη 20x20, 25x25 ή και 30x30 και αντίστοιχη προσαρμογή των k. Όσο μεγαλώνουν οι χάρτες, μεγαλώνει η ανάλυση του χάρτη αλλά μεγαλώνει και ο αριθμός clusters που πρέπει να αναλυθούν. ## Ανάλυση τοπολογικών ιδιοτήτων χάρτη SOM Μετά το πέρας της εκπαίδευσης και του clustering θα έχετε ένα χάρτη με τοπολογικές ιδιότητες ως προς τα είδη των ταίνιών της συλλογής σας, κάτι αντίστοιχο με την εικόνα στην αρχή της Εφαρμογής 2 αυτού του notebook (η συγκεκριμένη εικόνα είναι μόνο για εικονογράφιση, δεν έχει καμία σχέση με τη συλλογή δεδομένων και τις κατηγορίες μας). Για τον τελικό χάρτη SOM που θα παράξετε για τη συλλογή σας, αναλύστε σε markdown με συγκεκριμένη αναφορά σε αριθμούς clusters και τη σημασιολογική ερμηνεία τους τις εξής τρεις τοπολογικές ιδιότητες του SOM: 1. *Δεδομένα που έχουν μεγαλύτερη πυκνότητα πιθανότητας στο χώρο εισόδου τείνουν να απεικονίζονται με περισσότερους νευρώνες στο χώρο μειωμένης διαστατικότητας. 
Δώστε παραδείγματα από συχνές και λιγότερο συχνές κατηγορίες ταινιών. Χρησιμοποιήστε τις στατιστικές των κατηγοριών στη συλλογή σας και τον αριθμό κόμβων που χαρακτηρίζουν.* ``` def categories_in_neuron(neuron): ID_list = neuron_movies_report([neuron]) total_categories = [] for i in ID_list: cat = [category.strip(" ").strip('"') for category in categories[i][0].split(",")] total_categories.extend(cat) result = np.unique(total_categories) sorted_counts = np.argsort(counts) return result categories_in_neurons = np.asarray([categories_in_neuron(i) for i in range(20)]) def number_of_neurons_in_category(category): i=0 for c in categories_in_neurons: if category in c: i+=1 return i # list of all categories distinct_categories=[] for i in range(5000): cat = [category.strip(" ").strip('"') for category in categories[i][0].split(",")] distinct_categories.extend(cat) distinct_categories=np.asarray(distinct_categories) distinct_categories=np.unique(distinct_categories) # all categories and their number of neurons ncategories=[] for c in distinct_categories: k=number_of_neurons_in_category(c) ncategories.append([c,k]) ncategories=np.asarray(ncategories) sorted_indices = np.argsort(ncategories[:,1]) ncategories = ncategories[sorted_indices] print("Most popular categories and their number of neurons:\n", ncategories[::-1][:5]) print("\nLeast popular categories and their number of neurons:\n", ncategories[::-1][-5:]) ``` Είναι εμφανές πως οι περισσότερο συχνές και οικείες κατηγορίες έχουν αντιστοιχηθεί σε μεγαλύτερο αριθμό νευρώνων. 2. *Μακρινά πρότυπα εισόδου τείνουν να απεικονίζονται απομακρυσμένα στο χάρτη. 
Υπάρχουν χαρακτηριστικές κατηγορίες ταινιών που ήδη από μικρούς χάρτες τείνουν να τοποθετούνται σε διαφορετικά ή απομονωμένα σημεία του χάρτη.* ``` cluster = 16 cluster_neurons = print_cluster_neurons_movies_report(cluster) id_list = neuron_movies_report(cluster_neurons) print_categories_stats(id_list) ``` Βλέπουμε πως για τα δύο μακρινά clusters 7, 16 όπως φαίνεται στον παραπάνω πίνακα των clusters, το είδος των ταινιών που αντιπροσωπεύεται διαφέρει σημαντικά (δράμα και κωμωδία). 3. *Κοντινά πρότυπα εισόδου τείνουν να απεικονίζονται κοντά στο χάρτη. Σε μεγάλους χάρτες εντοπίστε είδη ταινιών και κοντινά τους υποείδη.* ``` cluster = 11 cluster_neurons = print_cluster_neurons_movies_report(cluster) id_list = neuron_movies_report(cluster_neurons) print_categories_stats(id_list) ``` Παρατηρούμε όντως πως το cluster 11 που είναι γειτονικό του 16 έχει επίσης κλίση προς την κατηγορία "Κωμωδία" ενώ διευρύνει το είδος και προς τη συγγενή κατηγορία των οικογενειακών ταινιών. Προφανώς τοποθέτηση σε 2 διαστάσεις που να σέβεται μια απόλυτη τοπολογία δεν είναι εφικτή, αφενός γιατί δεν υπάρχει κάποια απόλυτη εξ ορισμού για τα κινηματογραφικά είδη ακόμα και σε πολλές διαστάσεις, αφετέρου γιατί πραγματοποιούμε μείωση διαστατικότητας. Εντοπίστε μεγάλα clusters και μικρά clusters που δεν έχουν σαφή χαρακτηριστικά. *Εντοπίστε clusters συγκεκριμένων ειδών που μοιάζουν να μην έχουν τοπολογική συνάφεια με γύρω περιοχές. Προτείνετε πιθανές ερμηνείες. Τέλος, εντοπίστε clusters που έχουν κατά την άποψή σας ιδιαίτερο ενδιαφέρον στη συλλογή της ομάδας σας και σχολιάστε:* ``` cluster = 14 cluster_neurons = print_cluster_neurons_movies_report(cluster) id_list = neuron_movies_report(cluster_neurons) print_categories_stats(id_list) ``` Παραπάνω έχουμε ένα παράδειγμα cluster (14) κατηγοριών κυρίως Ντοκιμαντέρ και κωμωδίες που δεν έχουν τόση σχέση με "Δράμα" ενώ βρίσκονται γειτονικά στον αντίστοιχο χάρτη (7).
Η ερμηνεία που δίνουμε για την τοπολογική τους συνάφεια είναι η δευτερεύουσα παρουσία ταινιών "Action", "Family" και στα 2 cluster που γεφυρώνει το χάσμα τους. Σύμφωνα με την ερώτηση 1, ένα ενδιαφέρον cluster για εμάς θα ήταν αυτό που αντιπροσωπεύει το "Crime fiction". Με inspection επιβεβαιώσαμε πως πρόκειται για τo cluster 27 που ωστόσο δεν έχει απόλυτη τοπολογική συνάφεια με τη γειτονιά του, ούτε μεγάλη έκταση. Εκτιμούμε πως αυτό οφείλεται στην ιδιότητα του είδους να υπάγεται σε μεγαλύτερες κατηγορίες όπως Drama, Thriller κλπ, κατηγορίες που είναι τοπολογικά κοντινές στα συγκεκριμένα clusters. ``` cluster = 27 cluster_neurons = print_cluster_neurons_movies_report(cluster) id_list = neuron_movies_report(cluster_neurons) print_categories_stats(id_list) ``` ## Παράδοση Άσκησης Στο zip file περιέχονται, εκτός από το παρόν notebook, ο κώδικας σε .py script καθώς και το αρχείο `stopwords.txt` που απαιτήθηκε κατά τη βελτιστοποίηση του tf-idf. Επίσης, παραδίδουμε και μια HTML μορφή του notebook καθώς πιθανότατα σε επόμενο τρέξιμο του clustering, τα νέα clusters που θα προκύψουν να μην ανταποκρίνονται σε αυτό που περιγράφουμε στην αναφορά.
github_jupyter
### Abstract This is an example to show to use use the basic API of TensorFlow, to construct a linear regression model. This notebook is an exercise adapted from [the Medium.com blog](https://medium.com/@saxenarohan97/intro-to-tensorflow-solving-a-simple-regression-problem-e87b42fd4845). Note that recent version of TensorFlow does have more advanced API such like LinearClassifier that provides the scikit-learn alike machine learning API. ``` import tensorflow as tf import numpy as np from sklearn.datasets import load_boston from sklearn.preprocessing import scale from matplotlib import pyplot as plt %matplotlib inline from matplotlib.pylab import rcParams rcParams['figure.figsize'] = 15,6 ``` Split the data into training, validation and test sets. ``` # Retrieve the data bunch = load_boston() print('total data shape:', bunch.data.shape) total_features = bunch.data[:, range(12)] total_prices = bunch.data[:, [12]] print('features shape:', total_features.shape, 'targe shape:', total_prices.shape) # new in 0.18 version # total_features, total_prices = load_boston(True) # Keep 300 samples for training train_features = scale(total_features[:300]) train_prices = total_prices[:300] print('training dataset:', len(train_features)) print('feature example:', train_features[0:1]) print('mean of feature 0:', np.asarray(train_features[:, 0]).mean()) # Keep 100 samples for validation valid_features = scale(total_features[300:400]) valid_prices = total_prices[300:400] print('validation dataset:', len(valid_features)) # Keep remaining samples as test set test_features = scale(total_features[400:]) test_prices = total_prices[400:] print('test dataset:', len(test_features)) ``` #### Linear Regression Model ``` w = tf.Variable(tf.truncated_normal([12, 1], mean=0.0, stddev=1.0, dtype=tf.float64)) b = tf.Variable(tf.zeros(1, dtype = tf.float64)) def calc(x, y): ''' linear regression model that return (prediction, L2_error) ''' # Returns predictions and error predictions = tf.add(b, 
def train(cost, learning_rate=0.025, epochs=300):
    ''' run the cost computation graph with gradient descent optimizer.

    Parameters
    ----------
    cost : tf.Tensor
        Scalar training loss built on the module-level training data
        (e.g. the result of calc(train_features, train_prices)).
    learning_rate : float, optional
        Step size for tf.train.GradientDescentOptimizer.
    epochs : int, optional
        Number of gradient-descent steps to run.

    Returns
    -------
    list
        errors[0] holds the 1-based epoch numbers, errors[1] the training
        loss observed after each step.
    '''
    # errors[0]: epoch indices; errors[1]: training loss per epoch.
    errors = [[], []]
    init = tf.global_variables_initializer()
    # Minimizing `cost` updates the module-level variables `w` and `b`
    # (they are the only trainable variables in the graph).
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    config = tf.ConfigProto()
    # Let the GPU allocator grow on demand instead of grabbing all memory.
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    with sess:
        sess.run(init)
        for i in range(epochs):
            # One optimizer step, then record the loss under that step.
            sess.run(optimizer)
            errors[0].append(i+1)
            errors[1].append(sess.run(cost))
        # Get the parameters of the linear regression model.
        print('weights:\n', sess.run(w))
        print('bias:', sess.run(b))
        # calc() adds new graph nodes that reuse the trained w and b, so the
        # validation/test errors must be evaluated while this session is open.
        valid_cost = calc(valid_features, valid_prices)[1]
        print('Validation error =', sess.run(valid_cost), '\n')
        test_cost = calc(test_features, test_prices)[1]
        print('Test error =', sess.run(test_cost), '\n')
    return errors
github_jupyter
# High Accuracy CNN for MNIST ### Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST. ``` # Exercise: Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST. # he following model uses 2 convolutional layers, followed by 1 pooling layer, then dropout 25%, # then a dense layer, another dropout layer but with 50% dropout, and finally the output layer. # It reaches about 99.2% accuracy on the test set. This places this model roughly in the top 20% # in the MNIST Kaggle competition (if we ignore the models with an accuracy greater than 99.79% # which were most likely trained on the test set, as explained by Chris Deotte in this post). # Can you do better? To reach 99.5 to 99.7% accuracy on the test set, you need to add image augmentation, # batch norm, use a learning schedule such as 1-cycle, and possibly create an ensemble. # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Is this notebook running on Colab or Kaggle? IS_COLAB = "google.colab" in sys.modules IS_KAGGLE = "kaggle_secrets" in sys.modules # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" # TensorFlow ≥2.0 is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" if not tf.config.list_physical_devices('GPU'): print("No GPU was detected. CNNs can be very slow without a GPU.") if IS_COLAB: print("Go to Runtime > Change runtime and select a GPU hardware accelerator.") if IS_KAGGLE: print("Go to Settings > Accelerator and select GPU.") # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) tf.random.set_seed(42) # To plot pretty figures %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." 
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH as <fig_id>.<fig_extension>.

    Parameters
    ----------
    fig_id : str
        Base name of the image file (no extension).
    tight_layout : bool, optional
        Apply plt.tight_layout() before saving.
    fig_extension : str, optional
        Image format/extension passed to plt.savefig.
    resolution : int, optional
        Output DPI passed to plt.savefig.
    """
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    # BUG FIX: the original built `path` and announced "Saving figure" but
    # never wrote the file — fig_extension and resolution were silently
    # ignored. Actually persist the current figure.
    plt.savefig(path, format=fig_extension, dpi=resolution)
github_jupyter
# [Module 2.1] 세이지 메이커 로컬 모드 및 스크립트 모드로 훈련 본 워크샵의 모든 노트북은 **<font color="red">conda_tensorflow2_p36</font>** 를 사용합니다. 이 노트북은 아래와 같은 작업을 합니다. - 1. 기본 환경 세팅 - 2. 노트북에서 세이지 메이커 스크립트 모드 스타일로 코드 변경 - 3. 세이지 메이커 로컬 모드로 훈련 - 4. 세이지 메이커의 호스트 모드로 훈련 - 5. 모델 아티펙트 경로 저장 --- # 1. 기본 환경 세팅 사용하는 패키지는 import 시점에 다시 재로딩 합니다. ``` %load_ext autoreload %autoreload 2 import sagemaker sagemaker_session = sagemaker.Session() bucket = sagemaker_session.default_bucket() prefix = "sagemaker/DEMO-pytorch-cnn-cifar10" role = sagemaker.get_execution_role() import tensorflow as tf print("tensorflow version: ", tf.__version__) %store -r train_dir %store -r validation_dir %store -r eval_dir %store -r data_dir ``` # 2. 노트북에서 세이지 메이커 스크립트 모드 스타일로 코드 변경 - Keras 버전의 스크래치 코드에서 세이지 메이커의 코드 변경을 참고 하세요. - `1.2.Train_Keras_Local_Script_Mode.ipynb` 참고 ``` # !pygmentize src/cifar10_tf2_sm.py ``` # 3. 세이지 메이커 로컬 모드로 훈련 본격적으로 학습을 시작하기 전에 로컬 모드를 사용하여 디버깅을 먼저 수행합니다. 로컬 모드는 학습 인스턴스를 생성하는 과정이 없이 로컬 인스턴스로 컨테이너를 가져온 후 곧바로 학습을 수행하기 때문에 코드를 보다 신속히 검증할 수 있습니다. Amazon SageMaker Python SDK의 로컬 모드는 TensorFlow 또는 MXNet estimator서 단일 인자값을 변경하여 CPU (단일 및 다중 인스턴스) 및 GPU (단일 인스턴스) SageMaker 학습 작업을 에뮬레이션(enumlate)할 수 있습니다. 로컬 모드 학습을 위해서는 docker-compose 또는 nvidia-docker-compose (GPU 인스턴스인 경우)의 설치가 필요합니다. 아래 코드 셀을 통해 본 노트북 환경에 docker-compose 또는 nvidia-docker-compose를 설치하고 구성합니다. 로컬 모드의 학습을 통해 여러분의 코드가 현재 사용 중인 하드웨어를 적절히 활용하고 있는지 확인하기 위한 GPU 점유와 같은 지표(metric)를 쉽게 모니터링할 수 있습니다. ### 로컬 모드로 훈련 실행 - 아래의 두 라인이 로컬모드로 훈련을 지시 합니다. ```python instance_type=instance_type, # local_gpu or local 지정 session = sagemaker.LocalSession(), # 로컬 세션을 사용합니다. ``` #### 로컬의 GPU, CPU 여부로 instance_type 결정 ``` import os import subprocess instance_type = "local_gpu" # GPU 사용을 가정 합니다. CPU 사용시에 'local' 로 정의 합니다. print("Instance type = " + instance_type) ``` 학습 작업을 시작하기 위해 `estimator.fit() ` 호출 시, Amazon ECS에서 Amazon SageMaker TensorFlow 컨테이너를 로컬 노트북 인스턴스로 다운로드합니다. `sagemaker.tensorflow` 클래스를 사용하여 SageMaker Python SDK의 Tensorflow Estimator 인스턴스를 생성합니다. 
인자값으로 하이퍼파라메터와 다양한 설정들을 변경할 수 있습니다. 자세한 내용은 [documentation](https://sagemaker.readthedocs.io/en/stable/using_tf.html#training-with-tensorflow-estimator)을 확인하시기 바랍니다. ``` hyperparameters = { 'epochs' : 1, 'learning-rate' : 0.001, 'print-interval' : 100, 'train-batch-size': 256, 'eval-batch-size': 512, 'validation-batch-size': 512, } from sagemaker.tensorflow import TensorFlow estimator = TensorFlow(base_job_name='cifar10', entry_point='cifar10_tf2_sm.py', source_dir='src', role=role, framework_version='2.4.1', py_version='py37', script_mode=True, hyperparameters= hyperparameters, train_instance_count=1, train_instance_type= instance_type) %%time estimator.fit({'train': f'file://{train_dir}', 'validation': f'file://{validation_dir}', 'eval': f'file://{eval_dir}'}) ``` # 4. 세이지 메이커의 호스트 모드로 훈련 ### 데이터 세트를 S3에 업로드 ``` dataset_location = sagemaker_session.upload_data(path=data_dir, key_prefix='data/DEMO-cifar10') display(dataset_location) hyperparameters = { 'epochs' : 20, 'learning-rate' : 0.001, 'print-interval' : 100, 'train-batch-size': 256, 'eval-batch-size': 512, 'validation-batch-size': 512, } from sagemaker.tensorflow import TensorFlow instance_type='ml.p3.8xlarge' sm_estimator = TensorFlow(base_job_name='cifar10', entry_point='cifar10_tf2_sm.py', source_dir='src', role=role, framework_version='2.4.1', py_version='py37', script_mode=True, hyperparameters= hyperparameters, train_instance_count=1, train_instance_type= instance_type) ``` ## SageMaker Host Mode 로 훈련 - `cifar10_estimator.fit(inputs, wait=False)` - 입력 데이터를 inputs로서 S3 의 경로를 제공합니다. - wait=False 로 지정해서 async 모드로 훈련을 실행합니다. - 실행 경과는 아래의 cifar10_estimator.logs() 에서 확인 합니다. ``` %%time sm_estimator.fit({'train':'{}/train'.format(dataset_location), 'validation':'{}/validation'.format(dataset_location), 'eval':'{}/eval'.format(dataset_location)}, wait=False) sm_estimator.logs() ``` # 5. 모델 아티펙트 저장 - S3 에 저장된 모델 아티펙트를 저장하여 추론시 사용합니다. 
``` tf2_script_artifact_path = sm_estimator.model_data print("script_tf_artifact_path: ", tf2_script_artifact_path) %store tf2_script_artifact_path ```
github_jupyter
**This notebook is an exercise in the [AI Ethics](https://www.kaggle.com/learn/ai-ethics) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/ai-fairness).** --- In the tutorial, you learned about different ways of measuring fairness of a machine learning model. In this exercise, you'll train a few models to approve (or deny) credit card applications and analyze fairness. Don't worry if you're new to coding: this exercise assumes no programming knowledge. # Introduction We work with a **synthetic** dataset of information submitted by credit card applicants. To load and preview the data, run the next code cell. When the code finishes running, you should see a message saying the data was successfully loaded, along with a preview of the first five rows of the data. ``` # Set up feedback system from learntools.core import binder binder.bind(globals()) from learntools.ethics.ex4 import * import pandas as pd from sklearn.model_selection import train_test_split # Load the data, separate features from target data = pd.read_csv("../input/synthetic-credit-card-approval/synthetic_credit_card_approval.csv") X = data.drop(["Target"], axis=1) y = data["Target"] # Break into training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) # Preview the data print("Data successfully loaded!\n") X_train.head() ``` The dataset contains, for each applicant: - income (in the `Income` column), - the number of children (in the `Num_Children` column), - whether the applicant owns a car (in the `Own_Car` column, the value is `1` if the applicant owns a car, and is else `0`), and - whether the applicant owns a home (in the `Own_Housing` column, the value is `1` if the applicant owns a home, and is else `0`) When evaluating fairness, we'll check how the model performs for users in different groups, as identified by the `Group` column: - The `Group` column breaks the users into two groups 
# Function to plot confusion matrix
def plot_confusion_matrix(estimator, X, y_true, y_pred, display_labels=["Deny", "Approve"],
                          include_values=True, xticks_rotation='horizontal',
                          values_format='', normalize=None, cmap=plt.cm.Blues):
    """Compute and draw a confusion matrix for the given labels/predictions.

    Returns a tuple ``(cm, display)``: ``cm`` is the raw confusion-matrix
    ndarray and ``display`` is the value returned by
    ``ConfusionMatrixDisplay.plot`` (callers use its ``ax_`` to set titles).

    NOTE(review): ``estimator`` and ``X`` are accepted but never used —
    presumably kept to mirror scikit-learn's deprecated
    ``plot_confusion_matrix`` signature; confirm before removing.
    NOTE(review): ``display_labels`` uses a mutable (list) default; harmless
    here since it is never mutated, but a tuple would be safer.
    """
    cm = confusion_matrix(y_true, y_pred, normalize=normalize)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels)
    return cm, disp.plot(include_values=include_values, cmap=cmap,
                         xticks_rotation=xticks_rotation, values_format=values_format)

# Function to evaluate the fairness of the model
def get_stats(X, y, model, group_one, preds):
    """Print fairness statistics and plot one confusion matrix per group.

    Reports, for Group A (``group_one`` False) and Group B (``group_one``
    True): approval counts and their share of all approvals, per-group and
    overall accuracy, and per-group sensitivity (true positive rate).

    Parameters
    ----------
    X, y : test features and true labels (indexable by the boolean mask).
    model : fitted classifier; only forwarded to plot_confusion_matrix,
        where it is currently unused.
    group_one : boolean mask — True for members of Group B.
    preds : predictions aligned with ``y``.
    """
    # Split labels/predictions/features by group membership.
    y_zero, preds_zero, X_zero = y[group_one==False], preds[group_one==False], X[group_one==False]
    y_one, preds_one, X_one = y[group_one], preds[group_one], X[group_one]
    # Demographic parity: approvals and each group's share of them.
    print("Total approvals:", preds.sum())
    print("Group A:", preds_zero.sum(), "({}% of approvals)".format(round(preds_zero.sum()/sum(preds)*100, 2)))
    print("Group B:", preds_one.sum(), "({}% of approvals)".format(round(preds_one.sum()/sum(preds)*100, 2)))
    # Equal accuracy: overall and per-group accuracy.
    print("\nOverall accuracy: {}%".format(round((preds==y).sum()/len(y)*100, 2)))
    print("Group A: {}%".format(round((preds_zero==y_zero).sum()/len(y_zero)*100, 2)))
    print("Group B: {}%".format(round((preds_one==y_one).sum()/len(y_one)*100, 2)))
    # One confusion-matrix plot per group; titles identify the group.
    cm_zero, disp_zero = plot_confusion_matrix(model, X_zero, y_zero, preds_zero)
    disp_zero.ax_.set_title("Group A")
    cm_one, disp_one = plot_confusion_matrix(model, X_one, y_one, preds_one)
    disp_one.ax_.set_title("Group B")
    # Equal opportunity: TPR = TP / (TP + FN), i.e. row 1 of the matrix.
    print("\nSensitivity / True positive rate:")
    print("Group A: {}%".format(round(cm_zero[1,1] / cm_zero[1].sum()*100, 2)))
    print("Group B: {}%".format(round(cm_one[1,1] / cm_one[1].sum()*100, 2)))
def visualize_model(model, feature_names, class_names=["Deny", "Approve"], impurity=False):
    """Draw the fitted decision tree and simplify each node's label.

    Parameters
    ----------
    model : fitted sklearn DecisionTreeClassifier.
    feature_names : names used to render split conditions.
    class_names : labels shown at the leaves.
    impurity : whether tree.plot_tree shows impurity values.

    Returns
    -------
    list
        The annotation artists produced by ``tree.plot_tree``.
        BUG FIX: the original returned None even though the calling cell
        assigns ``plot_list = visualize_model(...)``; return the list so the
        caller receives the plotted items.
    """
    plot_list = tree.plot_tree(model, feature_names=feature_names,
                               class_names=class_names, impurity=impurity)
    # Plain loop instead of a list comprehension used only for side effects.
    for item in plot_list:
        process_plot_item(item)
    return plot_list

def process_plot_item(item):
    """Trim a tree-node annotation to a single informative line in place.

    ``tree.plot_tree`` labels are multi-line ("condition\\nsamples\\n...\\nclass").
    Leaf nodes have no condition, so their text starts with "samples"; for
    those keep the final (class) line, otherwise keep the condition line.
    """
    split_string = item.get_text().split("\n")
    if split_string[0].startswith("samples"):
        # Leaf: show only the predicted class (last line).
        item.set_text(split_string[-1])
    else:
        # Internal node: show only the split condition (first line).
        item.set_text(split_string[0])
``` # Check your answer (Run this code cell to get credit!) q_2.check() ``` Next, you decide to remove group membership from the training data and train a new model. Do you think this will make the model treat the groups more equally? Run the next code cell to see how this new **group unaware** model performs. ``` # Create new dataset with gender removed X_train_unaware = X_train.drop(["Group"],axis=1) X_test_unaware = X_test.drop(["Group"],axis=1) # Train new model on new dataset model_unaware = tree.DecisionTreeClassifier(random_state=0, max_depth=3) model_unaware.fit(X_train_unaware, y_train) # Evaluate the model preds_unaware = model_unaware.predict(X_test_unaware) get_stats(X_test_unaware, y_test, model_unaware, X_test["Group"]==1, preds_unaware) ``` # 3) Varieties of fairness, part 2 How does this model compare to the first model you trained, when you consider **demographic parity**, **equal accuracy**, and **equal opportunity**? Once you have an answer, run the next code cell. ``` # Check your answer (Run this code cell to get credit!) q_3.check() ``` You decide to train a third potential model, this time with the goal of having each group have even representation in the group of approved applicants. (This is an implementation of group thresholds, which you can optionally read more about [here](https://pair-code.github.io/what-if-tool/ai-fairness.html).) Run the next code cell without changes to evaluate this new model. 
``` # Change the value of zero_threshold to hit the objective zero_threshold = 0.11 one_threshold = 0.99 # Evaluate the model test_probs = model_unaware.predict_proba(X_test_unaware)[:,1] preds_approval = (((test_probs>zero_threshold)*1)*[X_test["Group"]==0] + ((test_probs>one_threshold)*1)*[X_test["Group"]==1])[0] get_stats(X_test, y_test, model_unaware, X_test["Group"]==1, preds_approval) ``` # 4) Varieties of fairness, part 3 How does this final model compare to the previous models, when you consider **demographic parity**, **equal accuracy**, and **equal opportunity**? ``` # Check your answer (Run this code cell to get credit!) q_4.check() ``` This is only a short exercise to explore different types of fairness, and to illustrate the tradeoff that can occur when you optimize for one type of fairness over another. We have focused on model training here, but in practice, to really mitigate bias, or to make ML systems fair, we need to take a close look at every step in the process, from data collection to releasing a final product to users. For instance, if you take a close look at the data, you'll notice that on average, individuals from Group B tend to have higher income than individuals from Group A, and are also more likely to own a home or a car. Knowing this will prove invaluable to deciding what fairness criterion you should use, and to inform ways to achieve fairness. (*For instance, it would likely be a bad aproach, if you did not remove the historical bias in the data and then train the model to get equal accuracy for each group.*) In this course, we intentionally avoid taking an opinionated stance on how exactly to minimize bias and ensure fairness in specific projects. This is because the correct answers continue to evolve, since AI fairness is an active area of research. 
This lesson was a hands-on introduction to the topic, and you can continue your learning by reading blog posts from the [Partnership on AI](https://www.partnershiponai.org/research-lander/) or by following conferences like the [ACM Conference on Fairness, Accountability, and Transparency (ACM FAccT)](https://facctconference.org/). # Keep going Continue to **[learn how to use model cards](https://www.kaggle.com/var0101/model-cards)** to make machine learning models transparent to large audiences.
github_jupyter
# Parsing Text and the LDA output ## a.1) Opening pdfs and extracting their text Under the material for Lecture 3 I have added a folder called FOMC_pdf. This folder contains the transcripts of all the meetings that took place during the [Greenspan](https://en.wikipedia.org/wiki/Alan_Greenspan) era (August 11, 1987 to January 31st, 2006). We are some lines of code to parse those pdfs. ``` #load operating system module import os ``` This module is used to conduct operating system like tasks (such as opening a file, or listing the contents of a directory). ``` #Define the base directory containing the FOMC statements base_directory = "../../../../collection/python/data/transcript_raw_text" #Return a list containing the name of the files in the directory raw_doc = os.listdir(base_directory) #Sort the list in ascending order filelist = sorted(raw_doc)[:-1] filelist ``` To parse the text in the pdfs I will use the PyPDF2 module (but there are other ways to do it. See for example the Tika module). Warning: Depending on your Python configuration, you might not be able to use PyPDF2 directly. I downloaded Python using the Anaconda distribution and I needed to type directly on the terminal conda -forge pypdf2 After doing this, I was able to upload the PyPDF2 module ## a.2) Organizing the information in a data frame ``` import pandas as pd #load re to split the content of the pdfs by the occurrence of a pattern import re #Creates a data frame containing the date|s of the FOMC meetings date = pd.Series(data=filelist).apply(lambda x: x[0:10]) print(date) documents = [] for doc in filelist: with open("{}/{}".format(base_directory,doc),"r") as f: documents += f.read() for i in range(len(document)): interjections = re.split('MR. |MS. 
|CHAIRMAN |VICE CHAIRMAN ', document[i]) #Split the doc by interjections temp_df = pd.DataFrame(columns=['Date','Speaker','content'],index=range(len(interjections))) #Temporary data frame for j in range(len(interjections)): interjection = interjections[j].replace('\n',' ') #Replace page break (\n) with space temp_df['Date'].loc[j] = date[i] temp_df['Speaker'].loc[j] = interjection.split('.')[0] temp_df['content'].loc[j] = ''.join(interjection.split('.')[1:]) parsed_text = pd.concat([parsed_text,temp_df],ignore_index=True) parsed_text ``` We will focus only on Greenspan's interjections. ``` Greenspan_text = parsed_text.loc[parsed_text['Speaker'] == 'GREENSPAN'] Greenspan_text.index = range(sum(parsed_text['Speaker'] == 'GREENSPAN')) Greenspan_text ``` ## a.3) Bag of Words ``` Greenspan_corpus = list(Greenspan_text['content']) len(Greenspan_corpus) Greenspan_corpus[0] from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer() term_doc_matrix = vectorizer.fit_transform(Greenspan_corpus[0:1]).todense() vectorizer.get_feature_names() term_doc_matrix ``` ## a.4) Cloud of words If you got the Anaconda distribution of Python go the terminal and type conda install -c conda-forge wordcloud Here is a good tutorial on how to generate words of clouds: https://www.datacamp.com/community/tutorials/wordcloud-python ``` from wordcloud import WordCloud wordcloud = WordCloud(background_color='white', font_step = 3, stopwords='None', relative_scaling=1).generate(Greenspan_text['content'].loc[0]) Greenspan_text['content'].loc[0] Greenspan_text.content[0] ``` Display the generated image ``` import matplotlib.pyplot as plt plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.show() ``` Let's do it for the whole Greenspan interjections ``` text_aux = " ".join(interjection for interjection in Greenspan_text.content) len(text_aux) wordcloudG = WordCloud(background_color='white', font_step = 3, relative_scaling=1).generate(text_aux) 
plt.imshow(wordcloudG, interpolation='bilinear') plt.axis("off") plt.show() ``` ## a.5) LDA ### Tokenize ``` import nltk from nltk.tokenize import RegexpTokenizer tokenizer = RegexpTokenizer(r'\w+') tokens_example = tokenizer.tokenize(Greenspan_text['content'].loc[0]) Greenspan_text['content'].loc[0] tokens_example ``` ### Remove Stop Words ``` from nltk.corpus import stopwords nltk.download('stopwords') stopwords = stopwords.words('english') len(stopwords) stopped_tokens = [i for i in tokens_example if not i in stopwords] stopped_tokens ``` ### Stems ``` from nltk.stem.porter import PorterStemmer # Create p_stemmer of class PorterStemmer p_stemmer = PorterStemmer() p_stemmer.*? texts_G = [p_stemmer.stem(i) for i in stopped_tokens] texts_G ``` ### Loop to Tokenize, Remove Stop Words, Stem ``` texts = [] for i in range(0,len(Greenspan_text['content'])): tokens = tokenizer.tokenize(Greenspan_text['content'].loc[i]) stopped_tokens = [j for j in tokens if not j in stopwords] texts.append([p_stemmer.stem(j) for j in stopped_tokens]) Greenspan_text['content'].loc[1] texts len(Greenspan_text['content']) ``` ### LDA conda install -c anaconda gensim ``` import gensim from gensim import corpora, models dictionary = corpora.Dictionary(texts) dictionary.*? corpus = [dictionary.doc2bow(text) for text in texts] corpus[2] ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=2, id2word = dictionary, passes=20) for t in range(2): plt.figure() plt.imshow(WordCloud(background_color='white').fit_words(dict(ldamodel.show_topic(t,200))),interpolation='bilinear') plt.axis('off') plt.show ```
github_jupyter
## 03 Intro to PyTorch *special thanks to YSDA team for provided materials* What comes today: - Introduction to PyTorch - Automatic gradient computation - Logistic regression (it's a neural network, actually ;) ) ![img](https://pytorch.org/tutorials/_static/pytorch-logo-dark.svg) __This notebook__ will teach you to use pytorch low-level core. You can install it [here](http://pytorch.org/). __Pytorch feels__ differently than other frameworks (like tensorflow/theano) on almost every level. TensorFlow makes your code live in two "worlds" simultaneously: symbolic graphs and actual tensors. First you declare a symbolic "recipe" of how to get from inputs to outputs, then feed it with actual minibatches of data. In pytorch, __there's only one world__: all tensors have a numeric value. You compute outputs on the fly without pre-declaring anything. The code looks exactly as in pure numpy with one exception: pytorch computes gradients for you. And can run stuff on GPU. And has a number of pre-implemented building blocks for your neural nets. [And a few more things.](https://medium.com/towards-data-science/pytorch-vs-tensorflow-spotting-the-difference-25c75777377b) Let's dive into it! 
``` # !wget hhttps://raw.githubusercontent.com/neychev/harbour_ml2020/master/day03_Linear_classification/notmnist.py import numpy as np import torch print(torch.__version__) # numpy world x = np.arange(16).reshape(4,4) print("X :\n%s\n" % x) print("X.shape : %s\n" % (x.shape,)) print("add 5 :\n%s\n" % (x + 5)) print("X*X^T :\n%s\n" % np.dot(x,x.T)) print("mean over cols :\n%s\n" % (x.mean(axis=-1))) print("cumsum of cols :\n%s\n" % (np.cumsum(x,axis=0))) # pytorch world x = np.arange(16).reshape(4,4) x = torch.tensor(x, dtype=torch.float32) #or torch.arange(0,16).view(4,4) print ("X :\n%s" % x) print("X.shape : %s\n" % (x.shape,)) print ("add 5 :\n%s" % (x + 5)) print ("X*X^T :\n%s" % torch.matmul(x,x.transpose(1,0))) #short: x.mm(x.t()) print ("mean over cols :\n%s" % torch.mean(x,dim=-1)) print ("cumsum of cols :\n%s" % torch.cumsum(x,dim=0)) ``` #### NumPy and Pytorch As you can notice, pytorch allows you to hack stuff much the same way you did with numpy. This means that you can _see the numeric value of any tensor at any moment of time_. Debugging such code can be done with by printing tensors or using any debug tool you want (e.g. [gdb](https://wiki.python.org/moin/DebuggingWithGdb)). You could also notice the a few new method names and a different API. So no, there's no compatibility with numpy [yet](https://github.com/pytorch/pytorch/issues/2228) and yes, you'll have to memorize all the names again. Get excited! 
![img](http://i0.kym-cdn.com/entries/icons/original/000/017/886/download.jpg) For example, * If something takes a list/tuple of axes in numpy, you can expect it to take *args in pytorch * `x.reshape([1,2,8]) -> x.view(1,2,8)` * You should swap _axis_ for _dim_ in operations like mean or cumsum * `x.sum(axis=-1) -> x.sum(dim=-1)` * most mathematical operations are the same, but types an shaping is different * `x.astype('int64') -> x.type(torch.LongTensor)` To help you acclimatize, there's a [table](https://github.com/torch/torch7/wiki/Torch-for-Numpy-users) covering most new things. There's also a neat [documentation page](http://pytorch.org/docs/master/). Finally, if you're stuck with a technical problem, we recommend searching [pytorch forumns](https://discuss.pytorch.org/). Or just googling, which usually works just as efficiently. If you feel like you almost give up, remember two things: __GPU__ and __free gradients__. Besides you can always jump back to numpy with x.numpy() ### Warmup: trigonometric knotwork _inspired by [this post](https://www.quora.com/What-are-the-most-interesting-equation-plots)_ There are some simple mathematical functions with cool plots. For one, consider this: $$ x(t) = t - 1.5 * cos( 15 t) $$ $$ y(t) = t - 1.5 * sin( 16 t) $$ ``` import matplotlib.pyplot as plt %matplotlib inline t = torch.linspace(-10, 10, steps = 10000) # compute x(t) and y(t) as defined above x = <your_code_here> y = <your_code_here> plt.plot(x.numpy(), y.numpy()) ``` if you're done early, try adjusting the formula and seing how it affects the function ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ## Automatic gradients Any self-respecting DL framework must do your backprop for you. Torch handles this with the `autograd` module. 
The general pipeline looks like this: * When creating a tensor, you mark it as `requires_grad`: * __```torch.zeros(5, requires_grad=True)```__ * torch.tensor(np.arange(5), dtype=torch.float32, requires_grad=True) * Define some differentiable `loss = arbitrary_function(a)` * Call `loss.backward()` * Gradients are now available as ```a.grads``` __Here's an example:__ let's fit a linear regression on Boston house prices ``` from sklearn.datasets import load_boston boston = load_boston() plt.scatter(boston.data[:, -1], boston.target) from torch.autograd import Variable w = torch.zeros(1, requires_grad=True) b = torch.zeros(1, requires_grad=True) x = torch.tensor(boston.data[:,-1] / 10, dtype=torch.float32) y = torch.tensor(boston.target, dtype=torch.float32) y_pred = w * x + b loss = torch.mean( (y_pred - y)**2 ) # propagete gradients loss.backward() ``` The gradients are now stored in `.grad` of those variables that require them. ``` print("dL/dw = {}\n".format(w.grad)) print("dL/db = {}\n".format(b.grad)) ``` If you compute gradient from multiple losses, the gradients will add up at variables, therefore it's useful to __zero the gradients__ between iteratons. ``` from IPython.display import clear_output for i in range(100): y_pred = w * x + b loss = torch.mean( (y_pred - y)**2 ) loss.backward() w.data -= 0.05 * w.grad.data b.data -= 0.05 * b.grad.data #zero gradients w.grad.data.zero_() b.grad.data.zero_() # the rest of code is just bells and whistles if (i+1)%5==0: clear_output(True) plt.scatter(x.data.numpy(), y.data.numpy()) plt.scatter(x.data.numpy(), y_pred.data.numpy(), color='orange', linewidth=5) plt.show() print("loss = ", loss.data.numpy()) if loss.data.numpy() < 0.5: print("Done!") break ``` __Quest__: try implementing and writing some nonlinear regression. You can try quadratic features or some trigonometry, or a simple neural network. The only difference is that now you have more variables and a more complicated `y_pred`. 
**Remember!** ![img](https://media.giphy.com/media/3o751UMCYtSrRAFRFC/giphy.gif) When dealing with more complex stuff like neural network, it's best if you use tensors the way samurai uses his sword. # High-level pytorch So far we've been dealing with low-level torch API. While it's absolutely vital for any custom losses or layers, building large neura nets in it is a bit clumsy. Luckily, there's also a high-level torch interface with a pre-defined layers, activations and training algorithms. We'll cover them as we go through a simple image recognition problem: classifying letters into __"A"__ vs __"B"__. ``` from notmnist import load_notmnist X_train, y_train, X_test, y_test = load_notmnist(letters='AB') X_train, X_test = X_train.reshape([-1, 784]), X_test.reshape([-1, 784]) print("Train size = %i, test_size = %i"%(len(X_train),len(X_test))) for i in [0,1]: plt.subplot(1, 2, i + 1) plt.imshow(X_train[i].reshape([28,28])) plt.title(str(y_train[i])) ``` Let's start with layers. The main abstraction here is __`torch.nn.Module`__ ``` from torch import nn import torch.nn.functional as F print(nn.Module.__doc__) ``` There's a vast library of popular layers and architectures already built for ya'. This is a binary classification problem, so we'll train a __Logistic Regression with sigmoid__. $$P(y_i | X_i) = \sigma(W \cdot X_i + b) ={ 1 \over {1+e^{- [W \cdot X_i + b]}} }$$ ``` # create a network that stacks layers on top of each other model = nn.Sequential() # add first "dense" layer with 784 input units and 1 output unit. model.add_module('l1', nn.Linear(784, 1)) # add softmax activation for probabilities. 
# note: layer names must be unique
Again, to keep it from getting dirty, there's `torch.optim` module with pre-implemented algorithms: ``` opt = torch.optim.RMSprop(model.parameters(), lr=0.01) # here's how it's used: loss.backward() # add new gradients opt.step() # change weights opt.zero_grad() # clear gradients # dispose of old variables to avoid bugs later del x, y, y_predicted, loss, y_pred ``` ### Putting it all together ``` # create network again just in case model = nn.Sequential() model.add_module('first', nn.Linear(784, 1)) model.add_module('second', nn.Sigmoid()) opt = torch.optim.Adam(model.parameters(), lr=1e-3) history = [] for i in range(100): # sample 256 random images ix = np.random.randint(0, len(X_train), 256) x_batch = torch.tensor(X_train[ix], dtype=torch.float32) y_batch = torch.tensor(y_train[ix], dtype=torch.float32) # predict probabilities y_predicted = ### YOUR CODE assert y_predicted.dim() == 1, "did you forget to select first column with [:, 0]" # compute loss, just like before loss = ### YOUR CODE # compute gradients ### YOUR CODE # Adam step ### YOUR CODE # clear gradients ### YOUR CODE history.append(loss.data.numpy()) if i % 10 == 0: print("step #%i | mean loss = %.3f" % (i, np.mean(history[-10:]))) ``` __Debugging tips:__ * make sure your model predicts probabilities correctly. Just print them and see what's inside. * don't forget _minus_ sign in the loss function! It's a mistake 99% ppl do at some point. * make sure you zero-out gradients after each step. Srsly:) * In general, pytorch's error messages are quite helpful, read 'em before you google 'em. * if you see nan/inf, print what happens at each iteration to find our where exactly it occurs. * If loss goes down and then turns nan midway through, try smaller learning rate. (Our current loss formula is unstable). 
### Evaluation Let's see how our model performs on test data ``` # use your model to predict classes (0 or 1) for all test samples predicted_y_test = ### YOUR CODE predicted_y_test = np.array(predicted_y_test > 0.5) assert isinstance(predicted_y_test, np.ndarray), "please return np array, not %s" % type(predicted_y_test) assert predicted_y_test.shape == y_test.shape, "please predict one class for each test sample" assert np.in1d(predicted_y_test, y_test).all(), "please predict class indexes" accuracy = np.mean(predicted_y_test == y_test) print("Test accuracy: %.5f" % accuracy) assert accuracy > 0.95, "try training longer" print('Great job!') ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ``` ### More about pytorch: * Using torch on GPU and multi-GPU - [link](http://pytorch.org/docs/master/notes/cuda.html) * More tutorials on pytorch - [link](http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html) * Pytorch examples - a repo that implements many cool DL models in pytorch - [link](https://github.com/pytorch/examples) * Practical pytorch - a repo that implements some... other cool DL models... yes, in pytorch - [link](https://github.com/spro/practical-pytorch) * And some more - [link](https://www.reddit.com/r/pytorch/comments/6z0yeo/pytorch_and_pytorch_tricks_for_kaggle/)
github_jupyter
Please find jax implementation of this notebook here: https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1/15/attention_jax.ipynb <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/attention_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Basics of differentiable (soft) attention We show how to implement soft attention. Based on sec 10.3 of http://d2l.ai/chapter_attention-mechanisms/attention-scoring-functions.html. ``` import numpy as np import matplotlib.pyplot as plt import math from IPython import display try: import torch except ModuleNotFoundError: %pip install torch import torch from torch import nn from torch.nn import functional as F from torch.utils import data import random import os import time np.random.seed(seed=1) torch.manual_seed(1) !mkdir figures # for saving plots ``` # Masked soft attention ``` def sequence_mask(X, valid_len, value=0): """Mask irrelevant entries in sequences.""" maxlen = X.size(1) mask = torch.arange((maxlen), dtype=torch.float32, device=X.device)[None, :] < valid_len[:, None] X[~mask] = value return X def masked_softmax(X, valid_lens): """Perform softmax operation by masking elements on the last axis.""" # `X`: 3D tensor, `valid_lens`: 1D or 2D tensor if valid_lens is None: return nn.functional.softmax(X, dim=-1) else: shape = X.shape if valid_lens.dim() == 1: valid_lens = torch.repeat_interleave(valid_lens, shape[1]) else: valid_lens = valid_lens.reshape(-1) # On the last axis, replace masked elements with a very large negative # value, whose exponentiation outputs 0 X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-1e6) return nn.functional.softmax(X.reshape(shape), dim=-1) ``` Example. Batch size 2, feature size 2, sequence length 4. The valid lengths are 2,3. So the output has size (2,2,4), but the length dimension is full of 0s in the invalid locations. 
``` Y = masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])) print(Y) ``` Example. Batch size 2, feature size 2, sequence length 4. The valid lengths are (1,3) for batch 1, and (2,4) for batch 2. ``` Y = masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])) print(Y) ``` # Additive attention $$ \alpha(q,k) = w_v^T \tanh(W_q q + w_k k) $$ ``` class AdditiveAttention(nn.Module): def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs): super(AdditiveAttention, self).__init__(**kwargs) self.W_k = nn.Linear(key_size, num_hiddens, bias=False) self.W_q = nn.Linear(query_size, num_hiddens, bias=False) self.w_v = nn.Linear(num_hiddens, 1, bias=False) self.dropout = nn.Dropout(dropout) def forward(self, queries, keys, values, valid_lens): queries, keys = self.W_q(queries), self.W_k(keys) # After dimension expansion, shape of `queries`: (`batch_size`, no. of # queries, 1, `num_hiddens`) and shape of `keys`: (`batch_size`, 1, # no. of key-value pairs, `num_hiddens`). Sum them up with # broadcasting features = queries.unsqueeze(2) + keys.unsqueeze(1) features = torch.tanh(features) # There is only one output of `self.w_v`, so we remove the last # one-dimensional entry from the shape. Shape of `scores`: # (`batch_size`, no. of queries, no. of key-value pairs) scores = self.w_v(features).squeeze(-1) self.attention_weights = masked_softmax(scores, valid_lens) # Shape of `values`: (`batch_size`, no. of key-value pairs, value # dimension) return torch.bmm(self.dropout(self.attention_weights), values) # batch size 2. 1 query of dim 20, 10 keys of dim 2. queries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2)) # 10 values of dim 4 in each of the 2 batches. 
values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(2, 1, 1) print(values.shape) valid_lens = torch.tensor([2, 6]) attention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8, dropout=0.1) attention.eval() A = attention(queries, keys, values, valid_lens) print(A.shape) print(A) ``` The heatmap is uniform across the keys, since the keys are all 1s. However, the support is truncated to the valid length. ``` def show_heatmaps(matrices, xlabel, ylabel, titles=None, figsize=(2.5, 2.5), cmap="Reds"): display.set_matplotlib_formats("svg") num_rows, num_cols = matrices.shape[0], matrices.shape[1] fig, axes = plt.subplots(num_rows, num_cols, figsize=figsize, sharex=True, sharey=True, squeeze=False) for i, (row_axes, row_matrices) in enumerate(zip(axes, matrices)): for j, (ax, matrix) in enumerate(zip(row_axes, row_matrices)): pcm = ax.imshow(matrix.detach(), cmap=cmap) if i == num_rows - 1: ax.set_xlabel(xlabel) if j == 0: ax.set_ylabel(ylabel) if titles: ax.set_title(titles[j]) fig.colorbar(pcm, ax=axes, shrink=0.6) show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)), xlabel="Keys", ylabel="Queries") ``` # Dot-product attention $$ A = \text{softmax}(Q K^T/\sqrt{d}) V $$ ``` class DotProductAttention(nn.Module): """Scaled dot product attention.""" def __init__(self, dropout, **kwargs): super(DotProductAttention, self).__init__(**kwargs) self.dropout = nn.Dropout(dropout) # Shape of `queries`: (`batch_size`, no. of queries, `d`) # Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`) # Shape of `values`: (`batch_size`, no. of key-value pairs, value # dimension) # Shape of `valid_lens`: (`batch_size`,) or (`batch_size`, no. 
of queries) def forward(self, queries, keys, values, valid_lens=None): d = queries.shape[-1] # Set `transpose_b=True` to swap the last two dimensions of `keys` scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d) self.attention_weights = masked_softmax(scores, valid_lens) return torch.bmm(self.dropout(self.attention_weights), values) # batch size 2. 1 query of dim 2, 10 keys of dim 2. queries = torch.normal(0, 1, (2, 1, 2)) attention = DotProductAttention(dropout=0.5) attention.eval() attention(queries, keys, values, valid_lens) show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)), xlabel="Keys", ylabel="Queries") ```
github_jupyter
<a href="https://colab.research.google.com/github/yukinaga/lecture_pytorch/blob/master/lecture4/cnn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # CNNの実装 PyTorchを使って、畳み込みニューラルネットワーク(CNN)を実装します。 CNN自体はCNNの層を追加するのみで実装可能なのですが、今回はデータ拡張とドロップアウトの実装も行います。 ## CIFAR-10 torchvision.datasetsを使い、CIFAR-10を読み込みます。 CIFARは、約6万枚の画像にラベルをつけたたデータセットです。 以下のコードでは、CIFAR-10を読み込み、ランダムな25枚の画像を表示します。 ``` from torchvision.datasets import CIFAR10 import torchvision.transforms as transforms from torch.utils.data import DataLoader import numpy as np import matplotlib.pyplot as plt cifar10_data = CIFAR10(root="./data", train=False,download=True, transform=transforms.ToTensor()) cifar10_classes = np.array(["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]) print("データの数:", len(cifar10_data)) n_image = 25 # 表示する画像の数 cifar10_loader = DataLoader(cifar10_data, batch_size=n_image, shuffle=True) dataiter = iter(cifar10_loader) # イテレータ images, labels = dataiter.next() # 最初のバッチを取り出す plt.figure(figsize=(10,10)) # 画像の表示サイズ for i in range(n_image): plt.subplot(5,5,i+1) plt.imshow(np.transpose(images[i], (1, 2, 0))) # チャンネルを一番後ろに label = cifar10_classes[labels[i]] plt.title(label) plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に plt.show() ``` ## データ拡張 torchvision.transformsを使ってデータ拡張を行います。 今回は、cifar-10の画像に-30〜30°の回転、および0.8〜1.2倍のリサイズを行います。 これらの処理は、バッチを取り出す際に元の画像に対してランダムに加えられます。 ``` from torchvision.datasets import CIFAR10 import torchvision.transforms as transforms from torch.utils.data import DataLoader import numpy as np import matplotlib.pyplot as plt transform = transforms.Compose([transforms.RandomAffine([-30, 30], scale=(0.8, 1.2)), # 回転とリサイズ transforms.ToTensor()]) cifar10_data = CIFAR10(root="./data", train=False,download=True, transform=transform) cifar10_classes = np.array(["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", 
"ship", "truck"]) print("データの数:", len(cifar10_data)) n_image = 25 # 表示する画像の数 cifar10_loader = DataLoader(cifar10_data, batch_size=n_image, shuffle=True) dataiter = iter(cifar10_loader) # イテレータ images, labels = dataiter.next() # 最初のバッチを取り出す plt.figure(figsize=(10,10)) # 画像の表示サイズ for i in range(n_image): plt.subplot(5,5,i+1) plt.imshow(np.transpose(images[i], (1, 2, 0))) # チャンネルを一番後ろに label = cifar10_classes[labels[i]] plt.title(label) plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に plt.show() ``` ## データの前処理 ここからCNNを実装します。 データ拡張として、回転とリサイズ、および左右反転を行います。 また、学習が効率的になるように入力の平均値を0、標準偏差を1にします(標準化)。 DataLoaderは、訓練データ、テストデータそれぞれで設定しますが、テストデータにはミニバッチ法を適用しないのでバッチサイズは元データのサンプル数にします。 ``` from torchvision.datasets import CIFAR10 import torchvision.transforms as transforms from torch.utils.data import DataLoader affine = transforms.RandomAffine([-15, 15], scale=(0.8, 1.2)) # 回転とリサイズ flip = transforms.RandomHorizontalFlip(p=0.5) # 左右反転 normalize = transforms.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) # 平均値を0、標準偏差を1に to_tensor = transforms.ToTensor() transform_train = transforms.Compose([affine, flip, to_tensor, normalize]) transform_test = transforms.Compose([to_tensor, normalize]) cifar10_train = CIFAR10("./data", train=True, download=True, transform=transform_train) cifar10_test = CIFAR10("./data", train=False, download=True, transform=transform_test) # DataLoaderの設定 batch_size = 64 train_loader = DataLoader(cifar10_train, batch_size=batch_size, shuffle=True) test_loader = DataLoader(cifar10_test, batch_size=len(cifar10_test), shuffle=False) ``` ## モデルの構築 `nn.Module`モジュールを継承したクラスとして、モデルを構築します。 今回は、過学習を抑制するためにドロップアウトを導入します。 ``` import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) # 畳み込み層:(入力チャンネル数, フィルタ数、フィルタサイズ) self.pool = nn.MaxPool2d(2, 2) # プーリング層:(領域のサイズ, ストライド) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16*5*5, 256) # 全結合層 
self.dropout = nn.Dropout(p=0.5) # ドロップアウト:(p=ドロップアウト率) self.fc2 = nn.Linear(256, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16*5*5) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x net = Net() net.cuda() # GPU対応 print(net) ``` ## 学習 モデルを訓練します。 DataLoaderを使い、ミニバッチを取り出して訓練および評価を行います。 今回は、評価時にミニバッチ法は使わず、テストデータ全体を使って一度に誤差を計算します。 学習には時間がかかりますので、編集→ノートブックの設定のハードウェアアクセラレーターでGPUを選択しましょう。 ``` from torch import optim # 交差エントロピー誤差関数 loss_fnc = nn.CrossEntropyLoss() # 最適化アルゴリズム optimizer = optim.Adam(net.parameters()) # 損失のログ record_loss_train = [] record_loss_test = [] # 学習 x_test, t_test = iter(test_loader).next() x_test, t_test = x_test.cuda(), t_test.cuda() for i in range(20): # 20エポック学習 net.train() # 訓練モード loss_train = 0 for j, (x, t) in enumerate(train_loader): # ミニバッチ(x, t)を取り出す x, t = x.cuda(), t.cuda() # GPU対応 y = net(x) loss = loss_fnc(y, t) loss_train += loss.item() optimizer.zero_grad() loss.backward() optimizer.step() loss_train /= j+1 record_loss_train.append(loss_train) net.eval() # 評価モード y_test = net(x_test) loss_test = loss_fnc(y_test, t_test).item() record_loss_test.append(loss_test) if i%1 == 0: print("Epoch:", i, "Loss_Train:", loss_train, "Loss_Test:", loss_test) ``` ## 誤差の推移 訓練データ、テストデータで誤差の推移をグラフ表示します。 ``` import matplotlib.pyplot as plt plt.plot(range(len(record_loss_train)), record_loss_train, label="Train") plt.plot(range(len(record_loss_test)), record_loss_test, label="Test") plt.legend() plt.xlabel("Epochs") plt.ylabel("Error") plt.show() ``` ## 正解率 モデルの性能を把握するため、テストデータ使い正解率を測定します。 ``` correct = 0 total = 0 net.eval() # 評価モード for i, (x, t) in enumerate(test_loader): x, t = x.cuda(), t.cuda() # GPU対応 y = net(x) correct += (y.argmax(1) == t).sum().item() total += len(x) print("正解率:", str(correct/total*100) + "%") ``` ## 訓練済みのモデルを使った予測 訓練済みのモデルを使ってみましょう。 画像を入力し、モデルが機能していることを確かめます。 ``` cifar10_loader = DataLoader(cifar10_test, batch_size=1, shuffle=True) 
dataiter = iter(cifar10_loader) images, labels = dataiter.next() # サンプルを1つだけ取り出す plt.imshow(np.transpose(images[0], (1, 2, 0))) # チャンネルを一番後ろに plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に plt.show() net.eval() # 評価モード x, t = images.cuda(), labels.cuda() # GPU対応 y = net(x) print("正解:", cifar10_classes[labels[0]], "予測結果:", cifar10_classes[y.argmax().item()]) ```
github_jupyter
# Generative Adversarial Network In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits! GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out: * [Pix2Pix](https://affinelayer.com/pixsrv/) * [CycleGAN & Pix2Pix in PyTorch, Jun-Yan Zhu](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) * [A list of generative models](https://github.com/wiseodd/generative-models) The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes "fake" data to pass to the discriminator. The discriminator also sees real training data and predicts if the data it's received is real or fake. > * The generator is trained to fool the discriminator, it wants to output data that looks _as close as possible_ to real, training data. * The discriminator is a classifier that is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator. <img src='assets/gan_pipeline.png' width=70% /> The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector that the generator uses to construct its fake images. This is often called a **latent vector** and that vector space is called **latent space**. As the generator trains, it figures out how to map latent vectors to recognizable images that can fool the discriminator. If you're interested in generating only new images, you can throw out the discriminator after training. In this notebook, I'll show you how to define and train these adversarial networks in PyTorch and generate new images! 
``` %matplotlib inline import numpy as np import torch import matplotlib.pyplot as plt from torchvision import datasets import torchvision.transforms as transforms # number of subprocesses to use for data loading num_workers = 0 # how many samples per batch to load batch_size = 64 # convert data to torch.FloatTensor transform = transforms.ToTensor() # get the training datasets train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform) # prepare data loader train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers) ``` ### Visualize the data ``` # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # get one image from the batch img = np.squeeze(images[0]) fig = plt.figure(figsize = (3,3)) ax = fig.add_subplot(111) ax.imshow(img, cmap='gray') ``` --- # Define the Model A GAN is comprised of two adversarial networks, a discriminator and a generator. ## Discriminator The discriminator network is going to be a pretty typical linear classifier. To make this network a universal function approximator, we'll need at least one hidden layer, and these hidden layers should have one key attribute: > All hidden layers will have a [Leaky ReLu](https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU) activation function applied to their outputs. <img src='assets/gan_network.png' width=70% /> #### Leaky ReLu We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values. <img src='assets/leaky_relu.png' width=40% /> #### Sigmoid Output We'll also take the approach of using a more numerically stable loss function on the outputs. Recall that we want the discriminator to output a value 0-1 indicating whether an image is _real or fake_. 
> We will ultimately use [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss), which combines a `sigmoid` activation function **and** and binary cross entropy loss in one function. So, our final output layer should not have any activation function applied to it. ``` import torch.nn as nn import torch.nn.functional as F class Discriminator(nn.Module): def __init__(self, input_size, hidden_dim, output_size): super(Discriminator, self).__init__() self.fc1 = nn.Linear(input_size, hidden_dim*4) self.fc2 = nn.Linear(hidden_dim*4, hidden_dim*2) self.fc3 = nn.Linear(hidden_dim*2, hidden_dim) self.fc4 = nn.Linear(hidden_dim, output_size) self.dropout = nn.Dropout(0.3) def forward(self, x): # flatten image x = x.view(-1, 28*28) x = F.leaky_relu(self.fc1(x), 0.2) x = self.dropout(x) x = F.leaky_relu(self.fc2(x), 0.2) x = self.dropout(x) x = F.leaky_relu(self.fc3(x), 0.2) x = self.dropout(x) x = self.fc4(x) return x ``` ## Generator The generator network will be almost exactly the same as the discriminator network, except that we're applying a [tanh activation function](https://pytorch.org/docs/stable/nn.html#tanh) to our output layer. #### tanh Output The generator has been found to perform the best with $tanh$ for the generator output, which scales the output to be between -1 and 1, instead of 0 and 1. <img src='assets/tanh_fn.png' width=40% /> Recall that we also want these outputs to be comparable to the *real* input pixel values, which are read in as normalized values between 0 and 1. > So, we'll also have to **scale our real input images to have pixel values between -1 and 1** when we train the discriminator. I'll do this in the training loop, later on. 
``` class Generator(nn.Module): def __init__(self, input_size, hidden_dim, output_size): super(Generator, self).__init__() # define all layers self.fc1 = nn.Linear(input_size, hidden_dim) self.fc2 = nn.Linear(hidden_dim, hidden_dim*2) self.fc3 = nn.Linear(hidden_dim*2, hidden_dim*4) self.fc4 = nn.Linear(hidden_dim*4, output_size) self.dropout = nn.Dropout(0.3) def forward(self, x): # pass x through all layers x = F.leaky_relu(self.fc1(x), 0.2) # (input, negative_slope=0.2) x = self.dropout(x) x = F.leaky_relu(self.fc2(x), 0.2) x = self.dropout(x) x = F.leaky_relu(self.fc3(x), 0.2) x = self.dropout(x) # final layer with tanh applied x = F.tanh(self.fc4(x)) return x ``` ## Model hyperparameters ``` # Discriminator hyperparams # Size of input image to discriminator (28*28) input_size = 784 # Size of discriminator output (real or fake) d_output_size = 1 # Size of *last* hidden layer in the discriminator d_hidden_size = 32 # Generator hyperparams # Size of latent vector to give to generator z_size = 100 # Size of discriminator output (generated image) g_output_size = 784 # Size of *first* hidden layer in the generator g_hidden_size = 32 ``` ## Build complete network Now we're instantiating the discriminator and generator from the classes defined above. Make sure you've passed in the correct input arguments. ``` # instantiate discriminator and generator D = Discriminator(input_size, d_hidden_size, d_output_size) G = Generator(z_size, g_hidden_size, g_output_size) # check that they are as you expect print(D) print() print(G) ``` --- ## Discriminator and Generator Losses Now we need to calculate the losses. ### Discriminator Losses > * For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_real_loss + d_fake_loss`. * Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that. 
<img src='assets/gan_pipeline.png' width=70% /> The losses will by binary cross entropy loss with logits, which we can get with [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss). This combines a `sigmoid` activation function **and** and binary cross entropy loss in one function. For the real images, we want `D(real_images) = 1`. That is, we want the discriminator to classify the the real images with a label = 1, indicating that these are real. To help the discriminator generalize better, the labels are **reduced a bit from 1.0 to 0.9**. For this, we'll use the parameter `smooth`; if True, then we should smooth our labels. In PyTorch, this looks like `labels = torch.ones(size) * 0.9` The discriminator loss for the fake data is similar. We want `D(fake_images) = 0`, where the fake images are the _generator output_, `fake_images = G(z)`. ### Generator Loss The generator loss will look similar only with flipped labels. The generator's goal is to get `D(fake_images) = 1`. In this case, the labels are **flipped** to represent that the generator is trying to fool the discriminator into thinking that the images it generates (fakes) are real! ``` # Calculate losses def real_loss(D_out, smooth=False): batch_size = D_out.size(0) # label smoothing if smooth: # smooth, real labels = 0.9 labels = torch.ones(batch_size)*0.9 else: labels = torch.ones(batch_size) # real labels = 1 # numerically stable loss criterion = nn.BCEWithLogitsLoss() # calculate loss loss = criterion(D_out.squeeze(), labels) return loss def fake_loss(D_out): batch_size = D_out.size(0) labels = torch.zeros(batch_size) # fake labels = 0 criterion = nn.BCEWithLogitsLoss() # calculate loss loss = criterion(D_out.squeeze(), labels) return loss# Calculate losses ``` ## Optimizers We want to update the generator and discriminator variables separately. So, we'll define two separate Adam optimizers. 
``` import torch.optim as optim # learning rate for optimizers lr = 0.002 # Create optimizers for the discriminator and generator d_optimizer = optim.Adam(D.parameters(), lr) g_optimizer = optim.Adam(G.parameters(), lr) ``` --- ## Training Training will involve alternating between training the discriminator and the generator. We'll use our functions `real_loss` and `fake_loss` to help us calculate the discriminator losses in all of the following cases. ### Discriminator training 1. Compute the discriminator loss on real, training images 2. Generate fake images 3. Compute the discriminator loss on fake, generated images 4. Add up real and fake loss 5. Perform backpropagation + an optimization step to update the discriminator's weights ### Generator training 1. Generate fake images 2. Compute the discriminator loss on fake images, using **flipped** labels! 3. Perform backpropagation + an optimization step to update the generator's weights #### Saving Samples As we train, we'll also print out some loss statistics and save some generated "fake" samples. ``` import pickle as pkl # training hyperparams num_epochs = 100 # keep track of loss and generated, "fake" samples samples = [] losses = [] print_every = 400 # Get some fixed data for sampling. These are images that are held # constant throughout training, and allow us to inspect the model's performance sample_size=16 fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size)) fixed_z = torch.from_numpy(fixed_z).float() # train the network D.train() G.train() for epoch in range(num_epochs): for batch_i, (real_images, _) in enumerate(train_loader): batch_size = real_images.size(0) ## Important rescaling step ## real_images = real_images*2 - 1 # rescale input images from [0,1) to [-1, 1) # ============================================ # TRAIN THE DISCRIMINATOR # ============================================ d_optimizer.zero_grad() # 1. 
Train with real images # Compute the discriminator losses on real images # smooth the real labels D_real = D(real_images) d_real_loss = real_loss(D_real, smooth=True) # 2. Train with fake images # Generate fake images z = np.random.uniform(-1, 1, size=(batch_size, z_size)) z = torch.from_numpy(z).float() fake_images = G(z) # Compute the discriminator losses on fake images D_fake = D(fake_images) d_fake_loss = fake_loss(D_fake) # add up loss and perform backprop d_loss = d_real_loss + d_fake_loss d_loss.backward() d_optimizer.step() # ========================================= # TRAIN THE GENERATOR # ========================================= g_optimizer.zero_grad() # 1. Train with fake images and flipped labels # Generate fake images z = np.random.uniform(-1, 1, size=(batch_size, z_size)) z = torch.from_numpy(z).float() fake_images = G(z) # Compute the discriminator losses on fake images # using flipped labels! D_fake = D(fake_images) g_loss = real_loss(D_fake) # use real loss to flip labels # perform backprop g_loss.backward() g_optimizer.step() # Print some loss stats if batch_i % print_every == 0: # print discriminator and generator loss print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.format( epoch+1, num_epochs, d_loss.item(), g_loss.item())) ## AFTER EACH EPOCH## # append discriminator loss and generator loss losses.append((d_loss.item(), g_loss.item())) # generate and save sample, fake images G.eval() # eval mode for generating samples samples_z = G(fixed_z) samples.append(samples_z) G.train() # back to train mode # Save training generator samples with open('train_samples.pkl', 'wb') as f: pkl.dump(samples, f)import pickle as pkl ``` ## Training loss Here we'll plot the training losses for the generator and discriminator, recorded after each epoch. 
``` fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator') plt.plot(losses.T[1], label='Generator') plt.title("Training Losses") plt.legend() ``` ## Generator samples from training Here we can view samples of images from the generator. First we'll look at the images we saved during training. ``` # helper function for viewing a list of passed in sample images def view_samples(epoch, samples): fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True) for ax, img in zip(axes.flatten(), samples[epoch]): img = img.detach() ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) im = ax.imshow(img.reshape((28,28)), cmap='Greys_r') # Load samples from generator, taken while training with open('train_samples.pkl', 'rb') as f: samples = pkl.load(f) ``` These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make. ``` # -1 indicates final epoch's samples (the last in the list) view_samples(-1, samples) ``` Below I'm showing the generated images as the network was training, every 10 epochs. ``` rows = 10 # split epochs into 10, so 100/10 = every 10 epochs cols = 6 fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True) for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes): for img, ax in zip(sample[::int(len(sample)/cols)], ax_row): img = img.detach() ax.imshow(img.reshape((28,28)), cmap='Greys_r') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) ``` It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s. ## Sampling from the generator We can also get completely new images from the generator by using the checkpoint we saved after training. 
**We just need to pass in a new latent vector $z$ and we'll get new samples**! ``` # randomly generated, new latent vectors sample_size=16 rand_z = np.random.uniform(-1, 1, size=(sample_size, z_size)) rand_z = torch.from_numpy(rand_z).float() G.eval() # eval mode # generated samples rand_images = G(rand_z) # 0 indicates the first set of samples in the passed in list # and we only have one batch of samples, here view_samples(0, [rand_images]) ```
github_jupyter
![title](yesbank_feature_banner.png) # YES BANK DATATHON ## Machine Learning Challenge Round 3 - EDA ### Data Description The data given is of credit records of individuals with certain attributes. ``` import numpy as np import pandas as pd import seaborn as sns %matplotlib inline import matplotlib.pyplot as plt train=pd.read_csv('Yes_Bank_Train.csv') test=pd.read_csv('Yes_Bank_Test_int.csv') train.info() sub=pd.read_csv('sample_clusters.csv') train.head() train.describe(include='all').T test.describe(include='all').T ``` Most of them are Categorical value, lets have a view at the features and what they signify a. **serial number** : unique identification key b. **account_info :** Categorized details of existing accounts of the individuals. The balance of money in account provided is stated by this variable * A11 signifies 0 (excluding 0) or lesser amount credited to current checking account. (Amounts are in units of certain currency) * A12 signifies greater than 0 (including 0) and lesser than 200 (excluding 200) units of currency * A13 signifies amount greater than 200 (including 200) being recorded in the account * A14 signifies no account details provided* c. **duration_month** : Duration in months for which the credit is existing d. **credit_history** : This categorical variable signifies the credit history of the individual who has taken the loan * A30 signifies that no previous loans has been taken or all loans taken have been payed back. * A31 signifies that all loans from the current bank has been payed off. Loan information of other banks are not available. * A32 signifies loan exists but till now regular installments have been payed back in full amount. * A33 signifies that significant delays have been seen in repayment of loan installments. * A34 signifies other loans exist at the same bank. Irregular behaviour in repayment.* e. 
**purpose**: This variable signifies why the loan was taken * A40 signifies that the loan is taken to buy a new car * A41 signifies that the loan was taken to buy a old car * A42 signifies that the loan is taken to buy furniture or equipment * A43 signifies that the loan is taken to buy radio or TV * A44 signifies that the loan is taken to buy domestic appliances * A45 signifies that the loan is taken for repairing purposes * A46 signifies that the loan is taken for education * A47 signifies that the loan is taken for vacation * A48 signifies that the loan is taken for re skilling * A49 signifies that the loan is taken for business and establishment * A410 signifies other purposes* f. **credit_amount**: The numerical variable signifies the amount credited to the individual (in units of a certain currency)(**TARGET**) g. **savings_account**: This variable signifies details of the amount present in savings account of the individual: * A61 signifies that less than 100 units (excluding 100) of currency is present * A62 signifies that greater than 100 units (including 100) and less than 500 (excluding 500) units of currency is present * A63 signifies that greater than 500 (including 500) and less than 1000 (excluding 1000) units of currency is present. * A64 signifies that greater than 1000 (including 1000) units of currency is present. * A65 signifies that no savings account details is present on record* h. **employment_s**: Catergorical variable that signifies the employment status of everyone who has been alloted loans * A71 signifies that the individual is unemployed * A72 signifies that the individual has been employed for less than a year * A73 signifies that the individual has been employed for more than a year but less than four years * A74 signifies that the individual has been employed more than four years but less than seven years * A75 signifies that the individual has been employed for more than seven years* i. 
**poi**: This numerical variable signifies what percentage of disposable income is spent on loan interest amount. j. ***personal_status**: This categorical variable signifies the personal status of the individual * A91 signifies that the individual is a separated or divorced male * A92 signifies female individuals who are separated or divorced * A93 signifies unmarried males * A94 signifies married or widowed males * A95 signifies single females* k. **gurantors**: Categorical variable which signifies if any other individual is involved with an individual loan case * A101 signifies that only a single individual is involved in the loan application * A102 signifies that one or more co-applicant is present in the loan application * A103 signifies that guarantor are present.* l. **resident_since**: Numerical variable that signifies for how many years the applicant has been a resident m. **property_type**: This qualitative variable defines the property holding information of the individual * A121 signifies that the individual holds real estate property * A122 signifies that the individual holds a building society savings agreement or life insurance * A123 signifies that the individual holds cars or other properties * A124 signifies that property information is not available* n. **age**: Numerical variable that signifies age in number of years o. **installment_type**: This variable signifies other installment types taken * A141 signifies installment to bank * A142 signifies installment to outlets or stores * A143 signifies that no information is present* p. **housing_type**: This is a categorical variable that signifies which type of housing does a applicant have. * A151 signifies that the housing is on rent * A152 signifies that the housing is owned by the applicant * A153 signifies that no loan amount is present on the housing and there is no expense for the housing) * q. **credits_no**: Numerical variable for number of credits taken by the person r. 
**job_type**: Signifies the employment status of the person * A171 signifies that the individual is unemployed or unskilled and is a non-resident * A172 signifies that the individual is unskilled but is a resident * A173 signifies that the individual is a skilled employee or official * A174 signifies that the individual is involved in management or is self-employed or a highly qualified employee or officer* s. **liables**: Signifies number of persons dependent on the applicant t. **telephone**: Signifies if the individual has a telephone or not * A191 signifies that no telephonic records are present * A192 signifies that a telephone is registered with the customer’s name* u. **foreigner**: Signifies if the individual is a foreigner or not (considering the country of residence of the bank) * A201 signifies that the individual is a foreigner * A202 signifies that the individual is a resident* ``` plt.figure(figsize=(12,9)) sns.countplot(train.account_info) plt.figure(figsize=(12,9)) sns.pairplot(train.drop(['serial number','liables'],axis=1),hue='account_info') plt.figure(figsize=(12,9)) sns.pairplot(train.drop(['serial number','liables'],axis=1),hue='credit_history') plt.figure(figsize=(12,9)) sns.jointplot(train.age,train.credit_amount,kind='hex') # sns.jointplot(train.poi,train.credit_amount,kind='scatter') sns.lmplot('duration_month','credit_amount',train,hue='credit_history') ``` ### One Hot Encoded ``` dftrain=pd.get_dummies(train,drop_first=True) dftrain.head() ``` **Correlation** ``` plt.figure(figsize=(15,10)) sns.heatmap(dftrain.corr()) plt.figure(figsize=(12,9)) plt.scatter(dftrain['serial number'],dftrain.credit_amount) ``` **Lets have a try at train test split to see where we are at** ``` X,y=dftrain.drop(['serial number','credit_amount'],axis=1),dftrain.credit_amount from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1994) ``` ![title](yesbank_feature_banner.png) # 
YES BANK DATATHON ## Machine Learning Challenge Round 3 - Prediction ### Ensemble Taking **RMSE** as the Eval Metric ``` from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import mean_squared_error rf=RandomForestRegressor(n_estimators=420) rf.fit(X_train,y_train) p=rf.predict(X_test) print(np.sqrt(mean_squared_error(y_test,p))) ``` Feature Importances ``` col=pd.DataFrame({'col':X.columns,'imp':rf.feature_importances_}).sort_values('imp',ascending=False) col ``` Taking top 45 values ``` main_col=col.col.values[:45] ``` Taking and Encoding Test Data as well ``` dftest=pd.get_dummies(test,drop_first=True) dftest.head() ``` Using **Kfold** ``` X1=X[main_col] dftest1=dftest[main_col] from sklearn.metrics import mean_squared_error err=[] pdd=[] from sklearn.model_selection import KFold fold=KFold(n_splits=4,shuffle=True) for train_index, test_index in fold.split(X1): X_train, X_test = X1.iloc[train_index], X1.iloc[test_index] y_train, y_test = y[train_index], y[test_index] rf=RandomForestRegressor(n_estimators=420,max_features=10) rf.fit(X_train,y_train) err.append(np.sqrt(mean_squared_error(y_test,rf.predict(X_test)))) p=rf.predict(dftest1) pdd.append(p) np.mean(err,axis=0) pdd_mean=np.mean(pdd,axis=0) pdd_mean ``` ### Neural Network ``` from sklearn.neural_network import MLPClassifier, MLPRegressor mlp=MLPRegressor(hidden_layer_sizes=(120,30,), activation="relu", max_iter=500, random_state=8,solver='adam') mlp.fit(X_train,y_train) p=mlp.predict(X_test) print(np.sqrt(mean_squared_error(y_test,p))) mlp.fit(X,y) pred=mlp.predict(dftest.drop('serial number',axis=1)) pred ``` Taking Avg ``` main_p=(pdd_mean+pred)/2 sub=pd.DataFrame({'serial number':test['serial number'],'credit_amount':main_p}) sub.head() sub.to_csv('stack_main.csv',index=False) ```
github_jupyter
``` %matplotlib inline from jax.config import config; config.update("jax_enable_x64", True) import jax.numpy as jnp from jax import grad, jit, value_and_grad import numpy as np from matplotlib import pyplot as plt from matplotlib import ticker, colors @jit def loss_lik(mu,v): b1 = 0.5; b2 = 0.01; a1 = 2.0; a2 = 5.0; ls = b1*(mu**2+v-2.0*a1*mu+a1**2)+b2*((mu**3+3.0*mu*v)-3.0*a2*(mu**2+v)+3.0*(a2**2)*mu-a2**3)+4.0/v return ls @jit def loss_pre(params): (mu,s) = params return loss_lik(mu,1.0/s) + jnp.log(s)/2 loss_f_pre = jit(value_and_grad(loss_pre)) def gd(init_params, loss_fun, step_size, num_iters): J_history = np.zeros(num_iters+1) mu_hist, s_hist = np.zeros(num_iters+1), np.zeros(num_iters+1) #For plotting cur_params = init_params for i in range(num_iters): (val,g) = loss_fun(cur_params) #Euclidean gradient mu_hist[i] = cur_params[0] s_hist[i] = cur_params[1] J_history[i] = val cur_params = cur_params - step_size* g #GD (val,_) = loss_fun(cur_params) J_history[num_iters] = val mu_hist[num_iters] = cur_params[0] s_hist[num_iters] = cur_params[1] return J_history, mu_hist, s_hist def ngd_pre(init_params, loss_fun, step_size, num_iters): J_history = np.zeros(num_iters+1) mu_hist, s_hist = np.zeros(num_iters+1), np.zeros(num_iters+1) #For plotting cur_params = init_params for i in range(num_iters): (mu,s)=cur_params (val,(g_mu,g_s)) = loss_fun(cur_params) ng = jnp.array( [g_mu/s, 2.0*(s**2)*g_s ] ) #Natural gradient mu_hist[i] = cur_params[0] s_hist[i] = cur_params[1] J_history[i] = val cur_params = cur_params - step_size* ng #NGD (val,_) = loss_fun(cur_params) J_history[num_iters] = val mu_hist[num_iters] = cur_params[0] s_hist[num_iters] = cur_params[1] return J_history, mu_hist, s_hist #Setup of meshgrid of theta values mu_list, s_list = np.meshgrid(np.linspace(-10,10,200),np.logspace(-1,0.2,800)) #Computing the cost function for each theta combination zs = np.array( [loss_pre( jnp.array([mu,s]) ) for mu,s in zip(np.ravel(mu_list), np.ravel(s_list)) ] ) Z = 
zs.reshape(mu_list.shape) mu_0 = -8.0 s_0 = 1.0 max_num_iters = 200 init_params = jnp.array([mu_0,s_0]) gd_pre_history, mu_gd_pre_hist, s_gd_pre_hist = gd(init_params, loss_f_pre, step_size = 1e-2, num_iters=max_num_iters) anglesx_gd_pre = np.array(mu_gd_pre_hist)[1:] - np.array(mu_gd_pre_hist)[:-1] anglesy_gd_pre = np.array(s_gd_pre_hist)[1:] - np.array(s_gd_pre_hist)[:-1] init_params = jnp.array([mu_0,s_0]) ngd_pre_history, mu_ngd_pre_hist, s_ngd_pre_hist = ngd_pre(init_params, loss_f_pre, step_size = 1e-2, num_iters=max_num_iters) anglesx_ngd_pre = np.array(mu_ngd_pre_hist)[1:] - np.array(mu_ngd_pre_hist)[:-1] anglesy_ngd_pre = np.array(s_ngd_pre_hist)[1:] - np.array(s_ngd_pre_hist)[:-1] fig = plt.figure(figsize = (16,8)) ax = fig.add_subplot(1, 2, 1) ax.contour(mu_list, s_list, Z, 50, cmap = 'jet') ax.quiver(mu_gd_pre_hist[:-1], s_gd_pre_hist[:-1], anglesx_gd_pre, anglesy_gd_pre, label='GD $(\mu,s)$', scale_units = 'xy', angles = 'xy', scale = 1, color = 'g', alpha = .9) ax.quiver(mu_ngd_pre_hist[:-1], s_ngd_pre_hist[:-1], anglesx_ngd_pre, anglesy_ngd_pre, label='NGD $(\mu,s)$', scale_units = 'xy', angles = 'xy', scale = 1, color = 'r', alpha = .9) ax.set_xlabel('$\mu$', fontsize=18) ax.set_ylabel('$s$', fontsize=18) ax.legend(loc='upper right', fontsize=18) ax = fig.add_subplot(1, 2, 2) ax.plot(np.array(list(range(0, max_num_iters+1))),gd_pre_history,label='GD $(\mu,s)$',color='g') ax.plot(np.array(list(range(0, max_num_iters+1))),ngd_pre_history,label='NGD $(\mu,s)$',color='r') ax.legend(loc='upper right', fontsize=18) ax.set_xlabel('# of iters', fontsize=18) ax.set_ylabel('loss', fontsize=18) plt.tight_layout() plt.savefig('gd_vs_ngd.png') plt.show() ```
github_jupyter
Week 5 Notebook: Building a Deep Learning Model =============================================================== Now, we'll look at a deep learning model based on low-level track features. ``` import tensorflow.keras as keras import numpy as np from sklearn.metrics import roc_curve, auc import matplotlib.pyplot as plt import uproot import tensorflow import yaml with open('definitions.yml') as file: # The FullLoader parameter handles the conversion from YAML # scalar values to Python the dictionary format definitions = yaml.load(file, Loader=yaml.FullLoader) features = definitions['features'] spectators = definitions['spectators'] labels = definitions['labels'] nfeatures = definitions['nfeatures'] nspectators = definitions['nspectators'] nlabels = definitions['nlabels'] ntracks = definitions['ntracks'] ``` ## Data Generators A quick aside on data generators. As training on large datasets is a key component of many deep learning approaches (and especially in high energy physics), and these datasets no longer fit in memory, it is imporatant to write a data generator which can automatically fetch data. 
Here we modify one from: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly ``` from DataGenerator import DataGenerator help(DataGenerator) # load training and validation generators train_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/train/ntuple_merged_10.root'] val_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/train/ntuple_merged_11.root'] train_generator = DataGenerator(train_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, remove_mass_pt_window=False, remove_unlabeled=True, max_entry=8000) val_generator = DataGenerator(val_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, remove_mass_pt_window=False, remove_unlabeled=True, max_entry=2000) ``` ## Test Data Generator Note that the track array has a different "shape." There are also less than the requested `batch_size=1024` because we remove unlabeled samples. ``` X, y = train_generator[1] print(X.shape) print(y.shape) ``` Note this generator can be optimized further (storing the data file locally, etc.). It's important to note that I/O is often a bottleneck for training big networks. 
## Fully Connected Neural Network Classifier ``` from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense, BatchNormalization, Flatten import tensorflow.keras.backend as K # define dense keras model inputs = Input(shape=(ntracks, nfeatures,), name='input') x = BatchNormalization(name='bn_1')(inputs) x = Flatten(name='flatten_1')(x) x = Dense(64, name='dense_1', activation='relu')(x) x = Dense(32, name='dense_2', activation='relu')(x) x = Dense(32, name='dense_3', activation='relu')(x) outputs = Dense(nlabels, name='output', activation='softmax')(x) keras_model_dense = Model(inputs=inputs, outputs=outputs) keras_model_dense.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) print(keras_model_dense.summary()) # define callbacks from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau early_stopping = EarlyStopping(monitor='val_loss', patience=5) reduce_lr = ReduceLROnPlateau(patience=5, factor=0.5) model_checkpoint = ModelCheckpoint('keras_model_dense_best.h5', monitor='val_loss', save_best_only=True) callbacks = [early_stopping, model_checkpoint, reduce_lr] # fit keras model history_dense = keras_model_dense.fit(train_generator, validation_data=val_generator, steps_per_epoch=len(train_generator), validation_steps=len(val_generator), max_queue_size=5, epochs=20, shuffle=False, callbacks=callbacks, verbose=0) # reload best weights keras_model_dense.load_weights('keras_model_dense_best.h5') plt.figure() plt.plot(history_dense.history['loss'], label='Loss') plt.plot(history_dense.history['val_loss'], label='Val. loss') plt.xlabel('Epoch') plt.legend() plt.show() ``` ## Deep Sets Classifier This model uses the `Dense` layer of Keras, but really it's more like the Deep Sets architecture applied to jets, the so-caled Particle-flow network approach{cite:p}`Komiske:2018cqr,NIPS2017_6931`. We are applying the same fully connected neural network to each track. 
Then the `GlobalAveragePooling1D` layer sums over the tracks (actually it takes the mean). ``` from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense, BatchNormalization, GlobalAveragePooling1D import tensorflow.keras.backend as K # define Deep Sets model with Dense Keras layer inputs = Input(shape=(ntracks, nfeatures,), name='input') x = BatchNormalization(name='bn_1')(inputs) x = Dense(64, name='dense_1', activation='relu')(x) x = Dense(32, name='dense_2', activation='relu')(x) x = Dense(32, name='dense_3', activation='relu')(x) # sum over tracks x = GlobalAveragePooling1D(name='pool_1')(x) x = Dense(100, name='dense_4', activation='relu')(x) outputs = Dense(nlabels, name='output', activation='softmax')(x) keras_model_deepset = Model(inputs=inputs, outputs=outputs) keras_model_deepset.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) print(keras_model_deepset.summary()) # define callbacks from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau early_stopping = EarlyStopping(monitor='val_loss', patience=5) reduce_lr = ReduceLROnPlateau(patience=5, factor=0.5) model_checkpoint = ModelCheckpoint('keras_model_deepset_best.h5', monitor='val_loss', save_best_only=True) callbacks = [early_stopping, model_checkpoint, reduce_lr] # fit keras model history_deepset = keras_model_deepset.fit(train_generator, validation_data=val_generator, steps_per_epoch=len(train_generator), validation_steps=len(val_generator), max_queue_size=5, epochs=20, shuffle=False, callbacks=callbacks, verbose=0) # reload best weights keras_model_deepset.load_weights('keras_model_deepset_best.h5') plt.figure() plt.plot(history_deepset.history['loss'], label='Loss') plt.plot(history_deepset.history['val_loss'], label='Val. 
loss') plt.xlabel('Epoch') plt.legend() plt.show() # load testing file test_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/test/ntuple_merged_0.root'] test_generator = DataGenerator(test_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, remove_mass_pt_window=True, remove_unlabeled=True) # run model inference on test data set predict_array_dense = [] predict_array_deepset = [] label_array_test = [] for t in test_generator: label_array_test.append(t[1]) predict_array_dense.append(keras_model_dense.predict(t[0])) predict_array_deepset.append(keras_model_deepset.predict(t[0])) predict_array_dense = np.concatenate(predict_array_dense, axis=0) predict_array_deepset = np.concatenate(predict_array_deepset, axis=0) label_array_test = np.concatenate(label_array_test, axis=0) # create ROC curves fpr_dense, tpr_dense, threshold_dense = roc_curve(label_array_test[:,1], predict_array_dense[:,1]) fpr_deepset, tpr_deepset, threshold_deepset = roc_curve(label_array_test[:,1], predict_array_deepset[:,1]) # plot ROC curves plt.figure() plt.plot(tpr_dense, fpr_dense, lw=2.5, label="Dense, AUC = {:.1f}%".format(auc(fpr_dense, tpr_dense)*100)) plt.plot(tpr_deepset, fpr_deepset, lw=2.5, label="Deep Sets, AUC = {:.1f}%".format(auc(fpr_deepset, tpr_deepset)*100)) plt.xlabel(r'True positive rate') plt.ylabel(r'False positive rate') plt.semilogy() plt.ylim(0.001, 1) plt.xlim(0, 1) plt.grid(True) plt.legend(loc='upper left') plt.show() ``` We see the more structurally-aware Deep Sets model does better than a simple fully conneted neural network appraoch.
github_jupyter
#Este programa contém os códigos demonstrados
dataset_to_array = np.array(dataset) #transforma o dataframe em array para facilitar a escolha dos dados a serem utilizados target = dataset_to_array[:,57] # esse é o vetor de saída (target) target= target.astype('int') #indica o tipo de dados # target[target>0] = 1 # 0 para o coração saudável e 1 para problema detectado target ``` **Iniciando a previsão** ``` #dados coletados pelos sensores dataset_sensor = np.column_stack(( dataset_to_array[:,11], # pressão sanguínea em repouso dataset_to_array[:,33], # frequencia máxima atingida dataset_to_array[:,34], # frequencia cardíaca em repouso dataset_to_array[:,35], # pico de pressão sanguínea durante exercício dataset_to_array[:,36], # pico de pressão sanguínea durante exercício dataset_to_array[:,38] # pressão sanguínea em repouso )) print(dataset_sensor) ## dataset_sensor_teste = np.column_stack(( ## [ i[0] for i in dataset_sensor if i[0] != np.empty and i[1] != np.empty and i[2] != np.empty_like and i[3] != np.empty_like and i[4] != np.empty_like and i[5] != np.empty_like ], ## [ i[1] for i in dataset_sensor if i[0] != np.empty and i[1] != np.empty_like and i[2] != np.empty_like and i[3] != np.empty_like and i[4] != np.empty_like and i[5] != np.empty_like ], ## [ i[2] for i in dataset_sensor if i[0] != np.empty and i[1] != np.empty_like and i[2] != np.empty_like and i[3] != np.empty_like and i[4] != np.empty_like and i[5] != np.empty_like ], ## [ i[3] for i in dataset_sensor if i[0] != np.empty and i[1] != np.empty_like and i[2] != np.empty_like and i[3] != np.empty_like and i[4] != np.empty_like and i[5] != np.empty_like ], ## [ i[4] for i in dataset_sensor if i[0] != np.empty and i[1] != np.empty_like and i[2] != np.empty_like and i[3] != np.empty_like and i[4] != np.empty_like and i[5] != np.empty_like ], ## [ i[5] for i in dataset_sensor if i[0] != np.empty and i[1] != np.empty_like and i[2] != np.empty_like and i[3] != np.empty_like and i[4] != np.empty_like and i[5] != np.empty_like ] ## )) print("=====") # 
print(dataset_sensor_teste) ## print("-------------------------------") ## print(dataset_sensor.shape) ## print(dataset_sensor_teste.shape) ## print("-------------------------------") ##print(dataset_sensor.info()) #dataset com os dados médicos do paciente dataset_medico = np.column_stack((dataset_to_array[:,4] , # localização da dor dataset_to_array[:,6] , # alivio após o cansaço dataset_to_array[:,9] , # tipo de dor dataset_to_array[:,39], # angina induzida pelo exercício (1 = sim; 0 = nao) dataset.age, # idade dataset.sex , # sexo dataset.hypertension # hipertensão )) print(dataset_medico) print(dataset_medico.shape) #concatena as duas bases de dados print("Medico : ", dataset_medico.shape) print("Sensor : ", dataset_sensor.shape) dataset_paciente=np.concatenate((dataset_medico,dataset_sensor),axis=1) print(dataset_paciente) print("Resultado", dataset_paciente.shape) print("Paciente: ", dataset_paciente.shape) print("Target: ", target.shape) #encontrando os dados para treinamento e teste X_train, X_test, y_train, y_test = train_test_split(dataset_paciente, target, random_state = 223) print("X treino : ", X_train.shape) print("X test : ", X_test.shape) #cria o objeto SVM modelSVM = SVC(kernel = 'linear', verbose=True) #escolha do kernel polinomial #aplica o treinamento ao modelo modelSVM.fit(X_train, y_train) ``` **Analisando a performance do modelo** ``` previsao = modelSVM.predict(X_test) #aplica o modelo para os dados de teste #encontra a acuracia do modelo de previsão utilizando o SVM accuracia = accuracy_score(y_test, previsao) print ("Acuracia utilizando o SVM :" , accuracia , "\nEm porcentagem : ", round(accuracia*100) , "%\n") #criando a matriz de confusão import pandas as pd import seaborn as sn import matplotlib.pyplot as plt cm = confusion_matrix(y_test, previsao) #gera a matriz de confusão df_cm = pd.DataFrame(cm, index = [i for i in "01234"],columns = [i for i in "01234"]) #cria o df com as classes plt.figure(figsize = (10,7)) #indica o tamanho da 
figura sn.heatmap(df_cm, annot=True) #plota a figura ``` **Modificando o Dataset** ``` #vamos escolher apenas 13 atributos para realizar a previsão de doenças cardíacas dataset_to_array = np.array(dataset) label = dataset_to_array[:,57] # "Target" classes binárias 0 e 1 label = label.astype('int') label[label>0] = 1 # Quando os dados são 0 está saldável e 1 doente label #encontrando os dados para treinamento e teste X_train, X_test, y_train, y_test = train_test_split(dataset_paciente, label, random_state = 223) #cria o objeto SVM modelSVM = SVC(kernel = 'linear', verbose=True, degree=3) #escolha do kernel polinomial :: linear, poly, rbf #aplica o treinamento ao modelo modelSVM.fit(X_train, y_train) previsao = modelSVM.predict(X_test) #aplica o modelo para os dados de teste #encontra a acuracia do modelo de previsão utilizando o SVM accuracia = accuracy_score(y_test, previsao) print ("Acuracia utilizando o SVM :" , accuracia , "\nEm porcentagem : ", round(accuracia*100) , "%\n") #criando a matriz de confusão import pandas as pd import seaborn as sn import matplotlib.pyplot as plt cm = confusion_matrix(y_test, previsao) #gera a matriz de confusão df_cm = pd.DataFrame(cm, index = [i for i in "01"],columns = [i for i in "01"]) #cria o df com as classes plt.figure(figsize = (10,7)) #indica o tamanho da figura sn.heatmap(df_cm, annot=True) #plota a figura from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report y_pred = previsao y_true = y_test print(accuracy_score(y_true, y_pred)) print(classification_report(y_true, y_pred)) ##accuracy_score(y_true, y_pred, normalize=False) ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import pandas as pd from tqdm import tqdm from astropy.table import Table import astropy.units as u import os # Using `batman` to create & fit fake transit import batman # Using astropy BLS and scipy curve_fit to fit transit from astropy.timeseries import BoxLeastSquares # Using emcee & corner to find and plot (e, w) distribution with MCMC import emcee import corner # Using dynesty to do the same with nested sampling import dynesty import scipy.constants as c # And importing `photoeccentric` import photoeccentric as ph %load_ext autoreload %autoreload 2 # pandas display option pd.set_option('display.float_format', lambda x: '%.5f' % x) ``` 1. Choose 3 planets with e from a Gaussian distrbution with (0.5, 0.2) 2. Fit them using photoeccentric, find fit e and w 3. Implement Van Eylen equation to find underlying e dist on surface ``` true_es = np.random.normal(loc=0.5, scale=0.2, size=3) true_ws = np.random.uniform(low=-90, high=270, size=3) true_es nwalk = 64 nsteps = 1000 ndiscard = 500 arrlen = (nsteps-ndiscard)*nwalk smass_kg = 1.9885e30 # Solar mass (kg) srad_m = 696.34e6 # Solar radius (m) muirhead_data = pd.read_csv("datafiles/Muirhead2013_isochrones/muirhead_data_incmissing.txt", sep=" ") # ALL Kepler planets from exo archive planets = pd.read_csv('datafiles/exoplanetarchive/cumulative_kois.csv') # Take the Kepler planet archive entries for the planets in Muirhead et al. 
2013 sample spectplanets = pd.read_csv('spectplanets.csv') # Kepler-Gaia Data kpgaia = Table.read('datafiles/Kepler-Gaia/kepler_dr2_4arcsec.fits', format='fits').to_pandas(); # Kepler-Gaia data for only the objects in our sample muirhead_gaia = pd.read_csv("muirhead_gaia.csv") # Combined spectroscopy data + Gaia/Kepler data for our sample muirhead_comb = pd.read_csv('muirhead_comb.csv') # Only targets from table above with published luminosities from Gaia muirhead_comb_lums = pd.read_csv('muirhead_comb_lums.csv') # Kepler ID for Kepler-1582 b kepid = 9710326 kepname = spectplanets.loc[spectplanets['kepid'] == kepid].kepler_name.values[0] kp737b = muirhead_comb.loc[muirhead_comb['KIC'] == kepid] KOI = 947 isodf = pd.read_csv("datafiles/isochrones/iso_lums_" + str(kepid) + ".csv") mstar = isodf["mstar"].mean() mstar_err = isodf["mstar"].std() rstar = isodf["radius"].mean() rstar_err = isodf["radius"].std() rho_star, mass, radius = ph.find_density_dist_symmetric(mstar, mstar_err, rstar, rstar_err, arrlen) period, period_uerr, period_lerr, rprs, rprs_uerr, rprs_lerr, a_arc, a_uerr_arc, a_lerr_arc, i, e_arc, w_arc = ph.planet_params_from_archive(spectplanets, kepname) # We calculate a_rs to ensure that it's consistent with the spec/Gaia stellar density. 
a_rs = ph.calc_a(period*86400.0, mstar*smass_kg, rstar*srad_m) a_rs_err = np.mean((a_uerr_arc, a_lerr_arc)) print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar) print('Period (Days): ', period, 'Rp/Rs: ', rprs) print('a/Rs: ', a_rs) print('i (deg): ', i) inc = 89.99 ``` ## First Planet ``` # 30 minute cadence cadence = 0.02142857142857143 time = np.arange(-300, 300, cadence) e w # Define e and w, calculate flux from transit model e = true_es[0] w = true_ws[0] flux = ph.integratedlc(time, period, rprs, a_rs, e, i, w, 0.0) # Adding some gaussian noise on the order of Kepler noise (by eyeball) noise = np.random.normal(0,0.0001,len(time)) nflux = flux+noise flux_err = np.array([0.0001]*len(nflux)) plt.errorbar(time, nflux, yerr=flux_err, fmt='o') plt.xlabel('Time') plt.ylabel('Flux') plt.xlim(-0.5, 0.5) plt.axvline(0.0, c='r', label='Transit midpoint') plt.legend() transitmpt = 0 midpoints = np.unique(np.sort(np.concatenate((np.arange(transitmpt, time[0], -period), np.arange(transitmpt, time[-1], period))))) ``` ## Fitting the transit ``` # Remove Out of Transit Data ttime = [] tflux = [] tflux_err = [] for i in range(len(midpoints)): m, b, t1bjd, t1, fnorm, fe1 = ph.do_linfit(time, nflux, flux_err, midpoints[i], 11, 5) ttime.append(t1bjd) tflux.append(fnorm) tflux_err.append(fe1) ttime = np.array(ttime).flatten() tflux = np.array(tflux).flatten() tflux_err = np.array(tflux_err).flatten() tflux = np.nan_to_num(tflux, nan=1.0) tflux_err = np.nan_to_num(tflux_err, nan=np.nanmedian(tflux_err)) priortransform = [3., 27., 1., 0., 15., 64., 2., 88., 0.1, transitmpt] nbuffer = 11 dres, perDists, rpDists, arsDists, incDists, t0Dist = ph.fit_keplc_dynesty(KOI, midpoints, ttime, tflux, tflux_err, priortransform, arrlen, nbuffer, spectplanets, muirhead_comb) #perDists np.savetxt('S1periods.csv', perDists, delimiter=',') np.savetxt('S1rprs.csv', rpDists, delimiter=',') np.savetxt('S1ars.csv', arsDists, delimiter=',') np.savetxt('S1inc.csv', incDists, 
delimiter=',') np.savetxt('S1t0.csv', t0Dist, delimiter=',') t0Dists = t0Dist per_f = ph.mode(perDists) rprs_f = ph.mode(rpDists) a_f = ph.mode(arsDists) i_f = ph.mode(incDists) t0_f = ph.mode(t0Dists) # Create a light curve with the fit parameters fit1 = ph.integratedlc_fitter(ttime, per_f, rprs_f, a_f, i_f, t0_f) plt.errorbar(ttime, tflux, yerr=tflux_err, c='blue', alpha=0.5, label='Original LC') plt.plot(ttime, fit1, c='red', alpha=1.0, label='Fit LC') #plt.xlim(-0.1, 0.1) plt.legend() print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar) print('\n') print('Input params:') print('Rp/Rs: ', rprs) print('a/Rs: ', a_rs) print('i (deg): ', i) print('\n') print('Fit params:') print('Rp/Rs: ', rprs_f) print('a/Rs: ', a_f) print('i (deg): ', i_f) ``` ### Determining T14 and T23 ``` pdist = perDists rdist = rpDists adist = arsDists idist = incDists t0dist = t0Dists T14dist = ph.get_T14(pdist, rdist, adist, idist) T14errs = ph.get_sigmas(T14dist) T23dist = ph.get_T23(pdist, rdist, adist, idist) T23errs = ph.get_sigmas(T23dist) ``` # Get $g$ ``` gs, rho_c = ph.get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist) g_mean = ph.mode(gs) g_sigma = np.mean(np.abs(ph.get_sigmas(gs))) g_mean g_sigma #Guesses w_guess = 0.0 e_guess = 0.0 solnx = (w_guess, e_guess) pos = solnx + 1e-4 * np.random.randn(32, 2) nwalkers, ndim = pos.shape sampler = emcee.EnsembleSampler(nwalkers, ndim, ph.log_probability, args=(g_mean, g_sigma), threads=4) sampler.run_mcmc(pos, 5000, progress=True); labels = ["w", "e"] flat_samples = sampler.get_chain(discard=100, thin=15, flat=True) fig = corner.corner(flat_samples, labels=labels, title_kwargs={"fontsize": 12}, truths=[w, e], plot_contours=True) ``` ## Second Planet ``` # 30 minute cadence cadence = 0.02142857142857143 time = np.arange(-300, 300, cadence) i # Define e and w, calculate flux from transit model e = true_es[1] w = true_ws[1] flux = ph.integratedlc(time, period, rprs, a_rs, e, i, w, 0.0) inc # Adding some gaussian 
noise on the order of Kepler noise (by eyeball) noise = np.random.normal(0,0.0001,len(time)) nflux = flux+noise flux_err = np.array([0.0001]*len(nflux)) plt.errorbar(time, flux, yerr=flux_err, fmt='o') plt.xlabel('Time') plt.ylabel('Flux') plt.xlim(-0.5, 0.5) plt.axvline(0.0, c='r', label='Transit midpoint') plt.legend() transitmpt = 0 midpoints = np.unique(np.sort(np.concatenate((np.arange(transitmpt, time[0], -period), np.arange(transitmpt, time[-1], period))))) ``` ## Fitting the transit ``` # Remove Out of Transit Data ttime = [] tflux = [] tflux_err = [] for i in range(len(midpoints)): m, b, t1bjd, t1, fnorm, fe1 = ph.do_linfit(time, nflux, flux_err, midpoints[i], 11, 5) ttime.append(t1bjd) tflux.append(fnorm) tflux_err.append(fe1) ttime = np.array(ttime).flatten() tflux = np.array(tflux).flatten() tflux_err = np.array(tflux_err).flatten() tflux = np.nan_to_num(tflux, nan=1.0) tflux_err = np.nan_to_num(tflux_err, nan=np.nanmedian(tflux_err)) priortransform = [3., 27., 1., 0., 15., 64., 2., 88., 0.1, transitmpt] nbuffer = 11 ms, bs, timesBJD, timesPhase, fluxNorm, fluxErrs, perDists, rpDists, arsDists, incDists, t0Dist = ph.fit_keplc_dynesty(KOI, midpoints, ttime, tflux, tflux_err, priortransform, arrlen, nbuffer, spectplanets, muirhead_comb) perDists np.savetxt('Speriods.csv', perDists, delimiter=',') np.savetxt('Srprs.csv', rpDists, delimiter=',') np.savetxt('Sars.csv', arsDists, delimiter=',') np.savetxt('Sinc.csv', incDists, delimiter=',') np.savetxt('St0.csv', t0Dists, delimiter=',') per_f = ph.mode(perDists) rprs_f = ph.mode(rpDists) a_f = ph.mode(arsDists) i_f = ph.mode(incDists) t0_f = ph.mode(t0Dists) # Create a light curve with the fit parameters fit1 = ph.integratedlc_fitter(time1, per_f, rprs_f, a_f, i_f, t0_f) plt.errorbar(time1, nflux1, yerr=fluxerr1, c='blue', alpha=0.5, label='Original LC') plt.plot(time1, fit1, c='red', alpha=1.0, label='Fit LC') #plt.xlim(-0.1, 0.1) plt.legend() print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', 
rstar) print('\n') print('Input params:') print('Rp/Rs: ', rprs) print('a/Rs: ', a_rs) print('i (deg): ', i) print('\n') print('Fit params:') print('Rp/Rs: ', rprs_f) print('a/Rs: ', a_f) print('i (deg): ', i_f) ``` ### Determining T14 and T23 ``` T14dist = ph.get_T14(pdist, rdist, adist, idist) T14errs = ph.get_sigmas(T14dist) T23dist = ph.get_T23(pdist, rdist, adist, idist) T23errs = ph.get_sigmas(T23dist) ``` # Get $g$ ``` gs, rho_c = ph.get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist) g_mean = ph.mode(gs) g_sigma = np.mean(np.abs(ph.get_sigmas(gs))) g_mean g_sigma #Guesses w_guess = 0.0 e_guess = 0.0 solnx = (w_guess, e_guess) pos = solnx + 1e-4 * np.random.randn(32, 2) nwalkers, ndim = pos.shape sampler = emcee.EnsembleSampler(nwalkers, ndim, ph.log_probability, args=(g_mean, g_sigma), threads=4) sampler.run_mcmc(pos, 5000, progress=True); labels = ["w", "e"] flat_samples = sampler.get_chain(discard=100, thin=15, flat=True) fig = corner.corner(flat_samples, labels=labels, title_kwargs={"fontsize": 12}, truths=[w, e], plot_contours=True) ``` ## Third Planet ``` # 30 minute cadence cadence = 0.02142857142857143 time = np.arange(-300, 300, cadence) # Define e and w, calculate flux from transit model e = true_es[2] w = true_ws[2] flux = ph.integratedlc(time, period, rprs, a_rs, e, i, w, 0.0) # Adding some gaussian noise on the order of Kepler noise (by eyeball) noise = np.random.normal(0,0.0001,len(time)) nflux = flux+noise flux_err = np.array([0.0001]*len(nflux)) plt.errorbar(time, nflux, yerr=flux_err, fmt='o') plt.xlabel('Time') plt.ylabel('Flux') plt.xlim(-0.5, 0.5) plt.axvline(0.0, c='r', label='Transit midpoint') plt.legend() transitmpt = 0 midpoints = np.unique(np.sort(np.concatenate((np.arange(transitmpt, time[0], -period), np.arange(transitmpt, time[-1], period))))) ``` ## Fitting the transit ``` # Remove Out of Transit Data ttime = [] tflux = [] tflux_err = [] for i in range(len(midpoints)): m, b, t1bjd, t1, fnorm, fe1 = ph.do_linfit(time, 
nflux, flux_err, midpoints[i], 11, 5) ttime.append(t1bjd) tflux.append(fnorm) tflux_err.append(fe1) ttime = np.array(ttime).flatten() tflux = np.array(tflux).flatten() tflux_err = np.array(tflux_err).flatten() tflux = np.nan_to_num(tflux, nan=1.0) tflux_err = np.nan_to_num(tflux_err, nan=np.nanmedian(tflux_err)) priortransform = [3., 27., 1., 0., 15., 64., 2., 88., 0.1, transitmpt] nbuffer = 11 ms, bs, timesBJD, timesPhase, fluxNorm, fluxErrs, perDists, rpDists, arsDists, incDists, t0Dist = ph.fit_keplc_dynesty(KOI, midpoints, ttime, tflux, tflux_err, priortransform, arrlen, nbuffer, spectplanets, muirhead_comb) perDists np.savetxt('Speriods.csv', perDists, delimiter=',') np.savetxt('Srprs.csv', rpDists, delimiter=',') np.savetxt('Sars.csv', arsDists, delimiter=',') np.savetxt('Sinc.csv', incDists, delimiter=',') np.savetxt('St0.csv', t0Dists, delimiter=',') per_f = ph.mode(perDists) rprs_f = ph.mode(rpDists) a_f = ph.mode(arsDists) i_f = ph.mode(incDists) t0_f = ph.mode(t0Dists) # Create a light curve with the fit parameters fit1 = ph.integratedlc_fitter(time1, per_f, rprs_f, a_f, i_f, t0_f) plt.errorbar(time1, nflux1, yerr=fluxerr1, c='blue', alpha=0.5, label='Original LC') plt.plot(time1, fit1, c='red', alpha=1.0, label='Fit LC') #plt.xlim(-0.1, 0.1) plt.legend() print('Stellar mass (Msun): ', mstar, 'Stellar radius (Rsun): ', rstar) print('\n') print('Input params:') print('Rp/Rs: ', rprs) print('a/Rs: ', a_rs) print('i (deg): ', i) print('\n') print('Fit params:') print('Rp/Rs: ', rprs_f) print('a/Rs: ', a_f) print('i (deg): ', i_f) ``` ### Determining T14 and T23 ``` T14dist = ph.get_T14(pdist, rdist, adist, idist) T14errs = ph.get_sigmas(T14dist) T23dist = ph.get_T23(pdist, rdist, adist, idist) T23errs = ph.get_sigmas(T23dist) ``` # Get $g$ ``` gs, rho_c = ph.get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist) g_mean = ph.mode(gs) g_sigma = np.mean(np.abs(ph.get_sigmas(gs))) g_mean g_sigma #Guesses w_guess = 0.0 e_guess = 0.0 solnx = (w_guess, 
e_guess) pos = solnx + 1e-4 * np.random.randn(32, 2) nwalkers, ndim = pos.shape sampler = emcee.EnsembleSampler(nwalkers, ndim, ph.log_probability, args=(g_mean, g_sigma), threads=4) sampler.run_mcmc(pos, 5000, progress=True); labels = ["w", "e"] flat_samples = sampler.get_chain(discard=100, thin=15, flat=True) fig = corner.corner(flat_samples, labels=labels, title_kwargs={"fontsize": 12}, truths=[w, e], plot_contours=True) ``` Probability Grid ``` mumesh = np.linspace(0, 1, 100) sigmesh = np.linspace(0.01, 0.3, 100) mus, sigmas = np.meshgrid(mumesh, sigmesh) # Vet 100 values from each e distribution fit_es = [np.random.normal(loc=0.2, scale=0.05, size=100), np.random.normal(loc=0.3, scale=0.05, size=100), np.random.normal(loc=0.4, scale=0.05, size=100)] fit_ws = [np.random.normal(loc=90, scale=10, size=100), np.random.normal(loc=-90, scale=10, size=100), np.random.normal(loc=0.0, scale=10, size=100)] import scipy # for each planet # Planet 1: true_es[0] pethetasum1 = np.zeros((100,100)) # Calculating p(obs|theta) for 10,000 grid points, for N posterior values for 1 panet for n1 in tqdm(range(len(mus))): # For each grid point x for n2 in range(len(mus[0])): # For each grid point y mu_test = mus[n1][n2] sig_test = sigmas[n1][n2] # x, y of grid point for N in range(len(fit_es[0])): # For each posterior value (out of 100) pethetasum1[n1][n2] += scipy.stats.norm.pdf(fit_es[0][N], loc=mu_test, scale=sig_test) fig, ax = plt.subplots(figsize=(6,6)) ax.imshow(pethetasum1, extent=[0, 1, 0.01, 0.3], aspect = 'auto') ax.set_xlabel('mean eccentricity') ax.set_ylabel('sigma') # Planet 2: true_es[1] pethetasum2 = np.zeros((100,100)) # Calculating p(obs|theta) for 10,000 grid points, for N posterior values for 1 panet for n1 in tqdm(range(len(mus))): # For each grid point x for n2 in range(len(mus[0])): # For each grid point y mu_test = mus[n1][n2] sig_test = sigmas[n1][n2] # x, y of grid point for N in range(len(fit_es[1])): # For each posterior value (out of 100) 
pethetasum2[n1][n2] += scipy.stats.norm.pdf(fit_es[1][N], loc=mu_test, scale=sig_test) fig, ax = plt.subplots(figsize=(6,6)) ax.imshow(pethetasum2, extent=[0, 1, 0.01, 0.3], aspect = 'auto') ax.set_xlabel('mean eccentricity') ax.set_ylabel('sigma') # Planet 2: true_es[1] pethetasum3 = np.zeros((100,100)) # Calculating p(obs|theta) for 10,000 grid points, for N posterior values for 1 panet for n1 in tqdm(range(len(mus))): # For each grid point x for n2 in range(len(mus[0])): # For each grid point y mu_test = mus[n1][n2] sig_test = sigmas[n1][n2] # x, y of grid point for N in range(len(fit_es[1])): # For each posterior value (out of 100) pethetasum3[n1][n2] += scipy.stats.norm.pdf(fit_es[2][N], loc=mu_test, scale=sig_test) fig, ax = plt.subplots(figsize=(6,6)) ax.imshow(pethetasum3, extent=[0, 1, 0.01, 0.3], aspect = 'auto') ax.set_xlabel('mean eccentricity') ax.set_ylabel('sigma') P = pethetasum1*pethetasum2*pethetasum3 P = P/np.sqrt(100*100) fig, ax = plt.subplots(figsize=(6,6)) ax.imshow(P, extent=[0, 1, 0.01, 0.3], aspect = 'auto') ax.set_xlabel('mean eccentricity') ax.set_ylabel('sigma') ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Using the SavedModel format <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/beta/guide/saved_model"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> A SavedModel contains a complete TensorFlow program, including weights and computation. It does not require the original model building code to run, which makes it useful for sharing or deploying (with [TFLite](https://tensorflow.org/lite), [TensorFlow.js](https://js.tensorflow.org/), [TensorFlow Serving](https://www.tensorflow.org/tfx/serving/tutorials/Serving_REST_simple), or [TFHub](https://tensorflow.org/hub)). 
If you have code for a model in Python and want to load weights into it, see the [guide to training checkpoints](./checkpoints.ipynb). For a quick introduction, this section exports a pre-trained Keras model and serves image classification requests with it. The rest of the guide will fill in details and discuss other ways to create SavedModels. ``` from __future__ import absolute_import, division, print_function, unicode_literals try: %tensorflow_version 2.x # Colab only. except Exception: pass import tensorflow as tf from matplotlib import pyplot as plt import numpy as np file = tf.keras.utils.get_file( "grace_hopper.jpg", "https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg") img = tf.keras.preprocessing.image.load_img(file, target_size=[224, 224]) plt.imshow(img) plt.axis('off') x = tf.keras.preprocessing.image.img_to_array(img) x = tf.keras.applications.mobilenet.preprocess_input( x[tf.newaxis,...]) ``` We'll use an image of Grace Hopper as a running example, and a Keras pre-trained image classification model since it's easy to use. Custom models work too, and are covered in detail later. ``` #tf.keras.applications.vgg19.decode_predictions labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt') imagenet_labels = np.array(open(labels_path).read().splitlines()) pretrained_model = tf.keras.applications.MobileNet() result_before_save = pretrained_model(x) print() decoded = imagenet_labels[np.argsort(result_before_save)[0,::-1][:5]+1] print("Result before saving:\n", decoded) ``` The top prediction for this image is "military uniform". ``` tf.saved_model.save(pretrained_model, "/tmp/mobilenet/1/") ``` The save-path follows a convention used by TensorFlow Serving where the last path component (`1/` here) is a version number for your model - it allows tools like Tensorflow Serving to reason about the relative freshness. 
SavedModels have named functions called signatures. Keras models export their forward pass under the `serving_default` signature key. The [SavedModel command line interface](#saved_model_cli) is useful for inspecting SavedModels on disk: ``` !saved_model_cli show --dir /tmp/mobilenet/1 --tag_set serve --signature_def serving_default ``` We can load the SavedModel back into Python with `tf.saved_model.load` and see how Admiral Hopper's image is classified. ``` loaded = tf.saved_model.load("/tmp/mobilenet/1/") print(list(loaded.signatures.keys())) # ["serving_default"] ``` Imported signatures always return dictionaries. ``` infer = loaded.signatures["serving_default"] print(infer.structured_outputs) ``` Running inference from the SavedModel gives the same result as the original model. ``` labeling = infer(tf.constant(x))[pretrained_model.output_names[0]] decoded = imagenet_labels[np.argsort(labeling)[0,::-1][:5]+1] print("Result after saving and loading:\n", decoded) ``` ## Serving the model SavedModels are usable from Python, but production environments typically use a dedicated service for inference. This is easy to set up from a SavedModel using TensorFlow Serving. See the [TensorFlow Serving REST tutorial](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/tutorials/Serving_REST_simple.ipynb) for more details about serving, including instructions for installing `tensorflow_model_server` in a notebook or on your local machine. As a quick sketch, to serve the `mobilenet` model exported above just point the model server at the SavedModel directory: ```bash nohup tensorflow_model_server \ --rest_api_port=8501 \ --model_name=mobilenet \ --model_base_path="/tmp/mobilenet" >server.log 2>&1 ``` Then send a request. 
```python !pip install requests import json import numpy import requests data = json.dumps({"signature_name": "serving_default", "instances": x.tolist()}) headers = {"content-type": "application/json"} json_response = requests.post('http://localhost:8501/v1/models/mobilenet:predict', data=data, headers=headers) predictions = numpy.array(json.loads(json_response.text)["predictions"]) ``` The resulting `predictions` are identical to the results from Python. ### SavedModel format A SavedModel is a directory containing serialized signatures and the state needed to run them, including variable values and vocabularies. ``` !ls /tmp/mobilenet/1 # assets saved_model.pb variables ``` The `saved_model.pb` file contains a set of named signatures, each identifying a function. SavedModels may contain multiple sets of signatures (multiple MetaGraphs, identified with the `tag_set` argument to `saved_model_cli`), but this is rare. APIs which create multiple sets of signatures include [`tf.Estimator.experimental_export_all_saved_models`](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#experimental_export_all_saved_models) and in TensorFlow 1.x `tf.saved_model.Builder`. ``` !saved_model_cli show --dir /tmp/mobilenet/1 --tag_set serve ``` The `variables` directory contains a standard training checkpoint (see the [guide to training checkpoints](./checkpoints.ipynb)). ``` !ls /tmp/mobilenet/1/variables ``` The `assets` directory contains files used by the TensorFlow graph, for example text files used to initialize vocabulary tables. It is unused in this example. SavedModels may have an `assets.extra` directory for any files not used by the TensorFlow graph, for example information for consumers about what to do with the SavedModel. TensorFlow itself does not use this directory. ### Exporting custom models In the first section, `tf.saved_model.save` automatically determined a signature for the `tf.keras.Model` object. 
This worked because Keras `Model` objects have an unambiguous method to export and known input shapes. `tf.saved_model.save` works just as well with low-level model building APIs, but you will need to indicate which function to use as a signature if you're planning to serve a model. ``` class CustomModule(tf.Module): def __init__(self): super(CustomModule, self).__init__() self.v = tf.Variable(1.) @tf.function def __call__(self, x): return x * self.v @tf.function(input_signature=[tf.TensorSpec([], tf.float32)]) def mutate(self, new_v): self.v.assign(new_v) module = CustomModule() ``` This module has two methods decorated with `tf.function`. While these functions will be included in the SavedModel and available if the SavedModel is reloaded via `tf.saved_model.load` into a Python program, without explicitly declaring the serving signature tools like Tensorflow Serving and `saved_model_cli` cannot access them. `module.mutate` has an `input_signature`, and so there is enough information to save its computation graph in the SavedModel already. `__call__` has no signature and so this method needs to be called before saving. ``` module(tf.constant(0.)) tf.saved_model.save(module, "/tmp/module_no_signatures") ``` For functions without an `input_signature`, any input shapes used before saving will be available after loading. Since we called `__call__` with just a scalar, it will accept only scalar values. ``` imported = tf.saved_model.load("/tmp/module_no_signatures") assert 3. == imported(tf.constant(3.)).numpy() imported.mutate(tf.constant(2.)) assert 6. == imported(tf.constant(3.)).numpy() ``` The function will not accept new shapes like vectors. ```python imported(tf.constant([3.])) ``` <pre> ValueError: Could not find matching function to call for canonicalized inputs ((<tf.Tensor 'args_0:0' shape=(1,) dtype=float32>,), {}). Only existing signatures are [((TensorSpec(shape=(), dtype=tf.float32, name=u'x'),), {})]. 
</pre> `get_concrete_function` lets you add input shapes to a function without calling it. It takes `tf.TensorSpec` objects in place of `Tensor` arguments, indicating the shapes and dtypes of inputs. Shapes can either be `None`, indicating that any shape is acceptable, or a list of axis sizes. If an axis size is `None` then any size is acceptable for that axis. `tf.TensorSpecs` can also have names, which default to the function's argument keywords ("x" here). ``` module.__call__.get_concrete_function(x=tf.TensorSpec([None], tf.float32)) tf.saved_model.save(module, "/tmp/module_no_signatures") imported = tf.saved_model.load("/tmp/module_no_signatures") assert [3.] == imported(tf.constant([3.])).numpy() ``` Functions and variables attached to objects like `tf.keras.Model` and `tf.Module` are available on import, but many Python types and attributes are lost. The Python program itself is not saved in the SavedModel. We didn't identify any of the functions we exported as a signature, so it has none. ``` !saved_model_cli show --dir /tmp/module_no_signatures --tag_set serve ``` ## Identifying a signature to export To indicate that a function should be a signature, specify the `signatures` argument when saving. ``` call = module.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32)) tf.saved_model.save(module, "/tmp/module_with_signature", signatures=call) ``` Notice that we first converted the `tf.function` to a `ConcreteFunction` with `get_concrete_function`. This is necessary because the function was created without a fixed `input_signature`, and so did not have a definite set of `Tensor` inputs associated with it. ``` !saved_model_cli show --dir /tmp/module_with_signature --tag_set serve --signature_def serving_default imported = tf.saved_model.load("/tmp/module_with_signature") signature = imported.signatures["serving_default"] assert [3.] == signature(x=tf.constant([3.]))["output_0"].numpy() imported.mutate(tf.constant(2.)) assert [6.] 
== signature(x=tf.constant([3.]))["output_0"].numpy() assert 2. == imported.v.numpy() ``` We exported a single signature, and its key defaulted to "serving_default". To export multiple signatures, pass a dictionary. ``` @tf.function(input_signature=[tf.TensorSpec([], tf.string)]) def parse_string(string_input): return imported(tf.strings.to_number(string_input)) signatures = {"serving_default": parse_string, "from_float": imported.signatures["serving_default"]} tf.saved_model.save(imported, "/tmp/module_with_multiple_signatures", signatures) !saved_model_cli show --dir /tmp/module_with_multiple_signatures --tag_set serve ``` `saved_model_cli` can also run SavedModels directly from the command line. ``` !saved_model_cli run --dir /tmp/module_with_multiple_signatures --tag_set serve --signature_def serving_default --input_exprs="string_input='3.'" !saved_model_cli run --dir /tmp/module_with_multiple_signatures --tag_set serve --signature_def from_float --input_exprs="x=3." ``` ## Fine-tuning imported models Variable objects are available, and we can backprop through imported functions. ``` optimizer = tf.optimizers.SGD(0.05) def train_step(): with tf.GradientTape() as tape: loss = (10. - imported(tf.constant(2.))) ** 2 variables = tape.watched_variables() grads = tape.gradient(loss, variables) optimizer.apply_gradients(zip(grads, variables)) return loss for _ in range(10): # "v" approaches 5, "loss" approaches 0 print("loss={:.2f} v={:.2f}".format(train_step(), imported.v.numpy())) ``` ## Control flow in SavedModels Anything that can go in a `tf.function` can go in a SavedModel. With [AutoGraph](./autograph.ipynb) this includes conditional logic which depends on Tensors, specified with regular Python control flow. 
``` @tf.function(input_signature=[tf.TensorSpec([], tf.int32)]) def control_flow(x): if x < 0: tf.print("Invalid!") else: tf.print(x % 3) to_export = tf.Module() to_export.control_flow = control_flow tf.saved_model.save(to_export, "/tmp/control_flow") imported = tf.saved_model.load("/tmp/control_flow") imported.control_flow(tf.constant(-1)) # Invalid! imported.control_flow(tf.constant(2)) # 2 imported.control_flow(tf.constant(3)) # 0 ``` ## SavedModels from Estimators Estimators export SavedModels through [`tf.Estimator.export_saved_model`](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#export_saved_model). See the [guide to Estimator](https://www.tensorflow.org/guide/estimators) for details. ``` input_column = tf.feature_column.numeric_column("x") estimator = tf.estimator.LinearClassifier(feature_columns=[input_column]) def input_fn(): return tf.data.Dataset.from_tensor_slices( ({"x": [1., 2., 3., 4.]}, [1, 1, 0, 0])).repeat(200).shuffle(64).batch(16) estimator.train(input_fn) serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn( tf.feature_column.make_parse_example_spec([input_column])) export_path = estimator.export_saved_model( "/tmp/from_estimator/", serving_input_fn) ``` This SavedModel accepts serialized `tf.Example` protocol buffers, which are useful for serving. But we can also load it with `tf.saved_model.load` and run it from Python. ``` imported = tf.saved_model.load(export_path) def predict(x): example = tf.train.Example() example.features.feature["x"].float_list.value.extend([x]) return imported.signatures["predict"]( examples=tf.constant([example.SerializeToString()])) print(predict(1.5)) print(predict(3.5)) ``` `tf.estimator.export.build_raw_serving_input_receiver_fn` allows you to create input functions which take raw tensors rather than `tf.train.Example`s. 
## Load a SavedModel in C++ The C++ version of the SavedModel [loader](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/saved_model/loader.h) provides an API to load a SavedModel from a path, while allowing SessionOptions and RunOptions. You have to specify the tags associated with the graph to be loaded. The loaded version of SavedModel is referred to as SavedModelBundle and contains the MetaGraphDef and the session within which it is loaded. ```C++ const string export_dir = ... SavedModelBundle bundle; ... LoadSavedModel(session_options, run_options, export_dir, {kSavedModelTagTrain}, &bundle); ``` <a id=saved_model_cli/> ## Details of the SavedModel command line interface You can use the SavedModel Command Line Interface (CLI) to inspect and execute a SavedModel. For example, you can use the CLI to inspect the model's `SignatureDef`s. The CLI enables you to quickly confirm that the input Tensor dtype and shape match the model. Moreover, if you want to test your model, you can use the CLI to do a sanity check by passing in sample inputs in various formats (for example, Python expressions) and then fetching the output. ### Install the SavedModel CLI Broadly speaking, you can install TensorFlow in either of the following two ways: * By installing a pre-built TensorFlow binary. * By building TensorFlow from source code. If you installed TensorFlow through a pre-built TensorFlow binary, then the SavedModel CLI is already installed on your system at pathname `bin\saved_model_cli`. If you built TensorFlow from source code, you must run the following additional command to build `saved_model_cli`: ``` $ bazel build tensorflow/python/tools:saved_model_cli ``` ### Overview of commands The SavedModel CLI supports the following two commands on a `MetaGraphDef` in a SavedModel: * `show`, which shows a computation on a `MetaGraphDef` in a SavedModel. * `run`, which runs a computation on a `MetaGraphDef`. 
### `show` command A SavedModel contains one or more `MetaGraphDef`s, identified by their tag-sets. To serve a model, you might wonder what kind of `SignatureDef`s are in each model, and what are their inputs and outputs. The `show` command let you examine the contents of the SavedModel in hierarchical order. Here's the syntax: ``` usage: saved_model_cli show [-h] --dir DIR [--all] [--tag_set TAG_SET] [--signature_def SIGNATURE_DEF_KEY] ``` For example, the following command shows all available MetaGraphDef tag-sets in the SavedModel: ``` $ saved_model_cli show --dir /tmp/saved_model_dir The given SavedModel contains the following tag-sets: serve serve, gpu ``` The following command shows all available `SignatureDef` keys in a `MetaGraphDef`: ``` $ saved_model_cli show --dir /tmp/saved_model_dir --tag_set serve The given SavedModel `MetaGraphDef` contains `SignatureDefs` with the following keys: SignatureDef key: "classify_x2_to_y3" SignatureDef key: "classify_x_to_y" SignatureDef key: "regress_x2_to_y3" SignatureDef key: "regress_x_to_y" SignatureDef key: "regress_x_to_y2" SignatureDef key: "serving_default" ``` If a `MetaGraphDef` has *multiple* tags in the tag-set, you must specify all tags, each tag separated by a comma. For example: <pre> $ saved_model_cli show --dir /tmp/saved_model_dir --tag_set serve,gpu </pre> To show all inputs and outputs TensorInfo for a specific `SignatureDef`, pass in the `SignatureDef` key to `signature_def` option. This is very useful when you want to know the tensor key value, dtype and shape of the input tensors for executing the computation graph later. 
For example: ``` $ saved_model_cli show --dir \ /tmp/saved_model_dir --tag_set serve --signature_def serving_default The given SavedModel SignatureDef contains the following input(s): inputs['x'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: x:0 The given SavedModel SignatureDef contains the following output(s): outputs['y'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y:0 Method name is: tensorflow/serving/predict ``` To show all available information in the SavedModel, use the `--all` option. For example: <pre> $ saved_model_cli show --dir /tmp/saved_model_dir --all MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs: signature_def['classify_x2_to_y3']: The given SavedModel SignatureDef contains the following input(s): inputs['inputs'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: x2:0 The given SavedModel SignatureDef contains the following output(s): outputs['scores'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y3:0 Method name is: tensorflow/serving/classify ... signature_def['serving_default']: The given SavedModel SignatureDef contains the following input(s): inputs['x'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: x:0 The given SavedModel SignatureDef contains the following output(s): outputs['y'] tensor_info: dtype: DT_FLOAT shape: (-1, 1) name: y:0 Method name is: tensorflow/serving/predict </pre> ### `run` command Invoke the `run` command to run a graph computation, passing inputs and then displaying (and optionally saving) the outputs. Here's the syntax: ``` usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET --signature_def SIGNATURE_DEF_KEY [--inputs INPUTS] [--input_exprs INPUT_EXPRS] [--input_examples INPUT_EXAMPLES] [--outdir OUTDIR] [--overwrite] [--tf_debug] ``` The `run` command provides the following three ways to pass inputs to the model: * `--inputs` option enables you to pass numpy ndarray in files. * `--input_exprs` option enables you to pass Python expressions. 
* `--input_examples` option enables you to pass `tf.train.Example`. #### `--inputs` To pass input data in files, specify the `--inputs` option, which takes the following general format: ```bsh --inputs <INPUTS> ``` where *INPUTS* is either of the following formats: * `<input_key>=<filename>` * `<input_key>=<filename>[<variable_name>]` You may pass multiple *INPUTS*. If you do pass multiple inputs, use a semicolon to separate each of the *INPUTS*. `saved_model_cli` uses `numpy.load` to load the *filename*. The *filename* may be in any of the following formats: * `.npy` * `.npz` * pickle format A `.npy` file always contains a numpy ndarray. Therefore, when loading from a `.npy` file, the content will be directly assigned to the specified input tensor. If you specify a *variable_name* with that `.npy` file, the *variable_name* will be ignored and a warning will be issued. When loading from a `.npz` (zip) file, you may optionally specify a *variable_name* to identify the variable within the zip file to load for the input tensor key. If you don't specify a *variable_name*, the SavedModel CLI will check that only one file is included in the zip file and load it for the specified input tensor key. When loading from a pickle file, if no `variable_name` is specified in the square brackets, whatever that is inside the pickle file will be passed to the specified input tensor key. Otherwise, the SavedModel CLI will assume a dictionary is stored in the pickle file and the value corresponding to the *variable_name* will be used. #### `--input_exprs` To pass inputs through Python expressions, specify the `--input_exprs` option. This can be useful for when you don't have data files lying around, but still want to sanity check the model with some simple inputs that match the dtype and shape of the model's `SignatureDef`s. For example: ```bsh `<input_key>=[[1],[2],[3]]` ``` In addition to Python expressions, you may also pass numpy functions. 
For example: ```bsh `<input_key>=np.ones((32,32,3))` ``` (Note that the `numpy` module is already available to you as `np`.) #### `--input_examples` To pass `tf.train.Example` as inputs, specify the `--input_examples` option. For each input key, it takes a list of dictionary, where each dictionary is an instance of `tf.train.Example`. The dictionary keys are the features and the values are the value lists for each feature. For example: ```bsh `<input_key>=[{"age":[22,24],"education":["BS","MS"]}]` ``` #### Save output By default, the SavedModel CLI writes output to stdout. If a directory is passed to `--outdir` option, the outputs will be saved as `.npy` files named after output tensor keys under the given directory. Use `--overwrite` to overwrite existing output files.
github_jupyter
# Import Dependencies ``` import warnings warnings.filterwarnings('ignore') import keras import matplotlib.pyplot as plt ``` ## Define Types ``` from typing import Tuple ImageShape = Tuple[int, int] GrayScaleImageShape = Tuple[int, int, int] ``` # MNIST Sandbox Baseline Example This sandbox example is meant mostly to establish a few baselines for model performance to compare against, and also to get the basic Keras neural network architecture set up. I split the training and testing data and then one-hot encode the targets (one column per target, so ten columns after encoding). ``` from keras.datasets import mnist import matplotlib.pyplot as plt from typing import Tuple import numpy as np Dataset = Tuple[np.ndarray, np.ndarray] #download mnist data and split into train and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data() print(f"The shape of X_train is {X_train.shape}") print(f"The shape of y_train is {y_train.shape}") print(f"The shape of X_test is {X_test.shape}") print(f"The shape of y_test is {y_test.shape} - some example targets: {y_test[:5]}") mnist_image_shape: ImageShape = X_train.shape[1:] print(mnist_image_shape) from keras.utils import to_categorical OneHotEncodedTarget = np.ndarray Categories = int encoded_y_train: OneHotEncodedTarget = to_categorical(y_train) encoded_y_test: OneHotEncodedTarget = to_categorical(y_test) print(f"One-hot encoding y_train {y_train.shape} -> {encoded_y_train.shape}") print(f"One-hot encoding y_test {y_test.shape} -> {encoded_y_test.shape}") K: Categories = encoded_y_test.shape[1] ``` # Vanilla CNN Implementation Build a vanilla CNN implementation, with two convolutional layers, 64 and 32 filters each, with kernel size of `3 x 3`. Then the values are flattened and fed into the final softmax classification dense layer for predictions. 
``` from keras.models import Sequential, Model from keras.layers import Dense, Conv2D, Flatten, Input from tensorflow.python.framework.ops import Tensor import warnings warnings.filterwarnings('ignore') # define model architecture and hyperparameters NUM_FILTERS_L1 = 64 NUM_FILTERS_L2 = 32 KERNEL_SIZE = 3 # the images are 28 x 28 (pixel size) x 1 (grayscale - if RGB, then 3) input_dims: GrayScaleImageShape = (28,28,1) def build_vanilla_cnn(filters_layer1:int, filters_layer2:int, kernel_size:int, input_dims: GrayScaleImageShape)-> Model: inputs: Tensor = Input(shape=input_dims) x: Tensor = Conv2D(filters=filters_layer1, kernel_size=kernel_size, activation='relu')(inputs) x: Tensor = Conv2D(filters=filters_layer2, kernel_size=kernel_size, activation='relu')(x) x: Tensor = Flatten()(x) predictions = Dense(K, activation="softmax")(x) print(predictions) #compile model using accuracy to measure model performance model: Model = Model(inputs=inputs, outputs=predictions) model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=['accuracy']) return model model: Model = build_vanilla_cnn(NUM_FILTERS_L1, NUM_FILTERS_L2, KERNEL_SIZE, input_dims) ``` ## Helper Function to Expand Tensor Dimensions By 1 ``` X_train.reshape((60000,1,28,28)) def expand_tensor_shape(X_train: np.ndarray)-> np.ndarray: new_shape: Tuple = X_train.shape + (1,) # new_tensor = X_train.reshape(new_shape).reshape((-1,1,28,28)) new_tensor = X_train.reshape(new_shape) print(f"Expanding shape from {X_train.shape} to {new_tensor.shape}") return new_tensor X_train_expanded: np.ndarray = expand_tensor_shape(X_train) X_test_expanded: np.ndarray = expand_tensor_shape(X_test) # train model and retrieve history # from keras.callbacks import History # history: History = model.fit(X_train_expanded, encoded_y_train, # validation_data=(X_test_expanded, encoded_y_test), epochs=2, batch_size=2058) ``` ## Global Average Pooling Layer Output shape of convolutional layer is typically `batch size x number of 
filters x width x height`. The GAP layer will take the average of the width/height axis and return a vector of length equal to the number of filters. ``` from keras import backend as K np.reshape(X_train_expanded, (-1,1,28,28)).shape from keras.layers import Dense, Conv2D, Flatten, Input, MaxPool2D ###### from keras.layers import Layer, Lambda, Input from tensorflow.python.framework.ops import Tensor from keras.models import Sequential, Model from keras.layers import Dense, Conv2D, Flatten, Input, MaxPool2D from tensorflow.python.framework.ops import Tensor def global_average_pooling(x: Layer): return K.mean(x, axis = (2,3)) def global_average_pooling_shape(input_shape): # return the dimensions corresponding with batch size and number of filters return (input_shape[0], input_shape[-1]) def build_global_average_pooling_layer(function, output_shape): return Lambda(pooling_function, output_shape) inputs: Tensor = Input(shape=(28,28,1)) x: Tensor = Conv2D(filters=32, kernel_size=5, activation='relu')(inputs) # x: Tensor = MaxPool2D()(x) # x: Tensor = Conv2D(filters=64, kernel_size=5, activation='relu')(x) x: Tensor = Lambda(lambda x: K.mean(x, axis=(1,2)), output_shape=global_average_pooling_shape)(x) # x: Tensor = Dense(128, activation="relu")(x) predictions: Tensor = Dense(10, activation="softmax")(x) model: Model = Model(inputs=inputs, outputs=predictions) model.summary() model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=['accuracy']) from keras.callbacks import History history: History = model.fit(X_train_expanded, encoded_y_train, validation_data=(X_test_expanded, encoded_y_test), epochs=100, batch_size=5126) ``` ## Save the Class Activation Model Weights ``` import cv2 from keras.layers import Layer, Lambda def global_average_pooling(x: Layer): return K.mean(x, axis = (2,3)) def global_average_pooling_shape(input_shape): # return only the first two dimensions (batch size and number of filters) return input_shape[0:2] def 
build_global_average_pooling_layer(function, output_shape): return Lambda(pooling_function, output_shape) def get_output_layer(model, layer_name): # get the symbolic outputs of each "key" layer (we gave them unique names). layer_dict = dict([(layer.name, layer) for layer in model.layers]) layer = layer_dict[layer_name] return layer # persist mode save_filepath: str = "basic_cam.h5" model.save(save_filepath) first_image = X_train[5] first_image = first_image.reshape(28,28,1) img = np.array(first_image).reshape(1, 28, 28, 1) img.shape plt.imshow(img.reshape((28,28))) #img = np.array([np.transpose(np.float32(first_image), (2, 0, 1))]) ``` ## Load Basic Model (since the model files are so large, they cannot be pushed to Github- just email me for a copy of the `.h5` model files) ``` from keras.models import load_model model = load_model("basic_cam.h5") dense_10_layer: Layer = model.layers[-1] dense_10_weights = dense_10_layer.get_weights()[0] print(f"Dense 10 weights: {dense_10_weights.shape}") dense_128_layer: Layer = model.layers[-2] dense_128_weights = dense_128_layer.get_weights()[0] print(f"Dense 128 weights: {dense_128_weights.shape}") ``` ## Map the Final Class Activation Map Back to the Original Input Shapes and Visualize ``` import keras.backend as K class_weights = model.layers[-1].get_weights()[0] final_conv_layer = get_output_layer(model, "conv2d_1") get_output = K.function([model.layers[0].input], [final_conv_layer.output, model.layers[-1].output]) [conv_outputs, predictions] = get_output([img]) conv_outputs = conv_outputs[0,:,:,:] print(conv_outputs.shape) print(class_weights.shape) def make_cam(conv_outputs, class_weights, original_shape, target_class): cam = np.zeros(dtype=np.float32, shape = conv_outputs.shape[0:2]) for i, w in enumerate(class_weights[:, target_class]): cam += w * conv_outputs[:,:,i] cam /= np.max(cam) return cv2.resize(cam, (28, 28)) def make_heatmap(cam): heatmap = cv2.applyColorMap(np.uint8(255*cam), cv2.COLORMAP_JET) 
heatmap[np.where(cam < 0.1)] = 0 return heatmap cam = make_cam(conv_outputs, class_weights, original_shape=(28,28), target_class=2) false_cam = make_cam(conv_outputs, class_weights, original_shape=(28,28), target_class=4) false2_cam = make_cam(conv_outputs, class_weights, original_shape=(28,28), target_class=5) heatmap = make_heatmap(cam) false_heatmap = make_heatmap(false_cam) false2_heatmap = make_heatmap(false2_cam) new_img = heatmap*0.5 + img final_img = new_img.reshape((28,28,3)) # f, axarr = plt.subplots(2,1) # axarr[0,0].imshow(heatmap) # axarr[0,1].imshow(img.reshape(28,28)) imgs = [heatmap, img.reshape(28,28)] fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(15, 15)) axes[0].imshow(heatmap) axes[0].set_title("Activation Map for 2") axes[1].imshow(false_heatmap) axes[1].set_title("Activation Map for 4") axes[2].imshow(false2_heatmap) axes[2].set_title("Activation Map for 5") axes[3].imshow(img.reshape((28,28))) axes[3].set_title("True Image") import matplotlib.pyplot as plt plt.imshow(img.reshape((28,28))) cam /= np.max(cam) import keras.backend as K from tensorflow.python.framework.ops import Tensor dense_weights = model.layers[-2].get_weights()[0] softmax_weights = model.layers[-1].get_weights()[0] dense_weights.shape softmax_weights.shape final_conv_layer = get_output_layer(model, "conv2d_28") final_conv_layer.output import keras.backend as K from tensorflow.python.framework.ops import Tensor class_weights: np.ndarray = model.layers[-1].get_weights()[0] # class weights is of shape 32 x 10 (number of filter outputs x classes) print(f"Class weights is shape {class_weights.shape}") final_conv_layer: Conv2D = get_output_layer(model, "conv2d_28") input_tensor: Tensor = model.layers[0].input final_conv_layer_output: Tensor = final_conv_layer.output model_class_weights: Tensor = model.layers[-1].output # K.function is a function factory that accepts arbitrary input layers and outputs arbitrary output layers get_output = K.function([input_tensor], 
[final_conv_layer_output, model_class_weights]) [conv_outputs, predictions] = get_output([img]) print("Conv2D output shape:", conv_outputs.shape) # should match the shape of the outputs from the Conv2D layer print("Predictions:", predictions.shape) np.argmax(predictions) conv_outputs = conv_outputs[0,:,:,:] # [conv_outputs, predictions] = get_output([img]) # conv_outputs = conv_outputs[0, :, :, :] class_weights.shape # Create the class activation map class_activation_map = np.zeros(dtype=np.float32, shape=conv_outputs.shape[1:3]) class_activation_map.shape #Reshape to the network input shape (3, w, h). img = np.array([np.transpose(np.float32(original_img), (2, 0, 1))]) #Get the 512 input weights to the softmax. class_weights = model.layers[-1].get_weights()[0] final_conv_layer = get_output_layer(model, "conv5_3") get_output = K.function([model.layers[0].input], \ [final_conv_layer.output, model.layers[-1].output]) [conv_outputs, predictions] = get_output([img]) conv_outputs = conv_outputs[0, :, :, :] #Create the class activation map. 
cam = np.zeros(dtype = np.float32, shape = conv_outputs.shape[1:3]) target_class = 1 for i, w in enumerate(class_weights[:, target_class]): cam += w * conv_outputs[i, :, :] ``` # Everything Below This Section Is Doodling ``` image_path = original_img = cv2.imread(image_path, 1) width, height, _ = original_image.shape def build_vanilla_cnn(filters_layer1:int, filters_layer2:int, kernel_size:int, input_dims: GrayScaleImageShape)-> Model: inputs: Tensor = Input(shape=input_dims) x: Tensor = Conv2D(filters=filters_layer1, kernel_size=kernel_size, activation='relu')(inputs) x: Tensor = Conv2D(filters=filters_layer2, kernel_size=kernel_size, activation='relu')(x) x: Tensor = build_global_average_pooling_layer(global_average_pooling, ) predictions = Dense(K, activation="softmax")(x) print(predictions) #compile model using accuracy to measure model performance model: Model = Model(inputs=inputs, outputs=predictions) model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=['accuracy']) return model from keras.layers import merge def build_model(input_dim): inputs = Input(shape=input_dim) # ATTENTION PART STARTS HERE attention_probs = Dense(input_dim, activation='softmax', name='attention_vec')(inputs) attention_mul = merge([inputs, attention_probs], output_shape=32, name='attention_mul', mode='mul') # ATTENTION PART FINISHES HERE attention_mul = Dense(64)(attention_mul) output = Dense(1, activation='sigmoid')(attention_mul) model = Model(input=[inputs], output=output) return model inputs = Input(shape=input_dims) attention_probs = Dense(input_dims, activation='softmax', name='attention_vec')(inputs) ``` ## Compile and Fit Model ``` X_train.reshape((60000,1,28,28)) def expand_tensor_shape(X_train: np.ndarray)-> np.ndarray: new_shape: Tuple = X_train.shape + (1,) print(f"Expanding shape from {X_train.shape} to {new_shape}") return X_train.reshape(new_shape) X_train_expanded: np.ndarray = expand_tensor_shape(X_train) X_test_expanded: np.ndarray = 
expand_tensor_shape(X_test) ``` # FEI Face Dataset ``` from PIL.JpegImagePlugin import JpegImageFile image: JpegImageFile = load_img('1-01.jpg') ```
github_jupyter
```
import os
import glob
import math
import time
import pandas as pd
import numpy as np
import scipy as sc
from sklearn.model_selection import KFold
import warnings
import matplotlib.pyplot as plt
import matplotlib
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
import torch
import torch.nn as nn
import random
import seaborn as sns; sns.set_theme()
import torch.nn.functional as F
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from matplotlib.pyplot import figure
from IPython import display
from pandas.plotting import scatter_matrix
from sklearn.metrics import r2_score
from sklearn import svm
from numpy import std
from numpy import mean
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from matplotlib import cm
from sklearn.metrics import confusion_matrix

# Prefer GPU when available; all tensors below are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
warnings.filterwarnings('ignore')
pd.set_option('max_columns', 300)

# Load the integrated (Seurat) feature matrix and transpose so that rows are
# samples and columns are features.
train_test_seurat = pd.read_csv('./integrate.csv')
train_test_seurat = train_test_seurat.T

# Drop zero-variance columns: they carry no signal and break standardization.
train_test_seurat_std = train_test_seurat.std()
column_names = list(train_test_seurat.columns)
columns_remove = []
for i in range(train_test_seurat.shape[1]):
    if train_test_seurat_std[i] == 0:
        columns_remove.append(column_names[i])
train_test_seurat = train_test_seurat.drop(columns_remove, axis=1)
# NOTE(review): this re-adds the first removed (zero-variance) column name as
# a copy of column 0 -- presumably a placeholder/bias feature; confirm intent.
train_test_seurat[columns_remove[0]] = train_test_seurat.iloc[:, 0]
train_test_seurat.shape
# Fixed row split: first 90000 rows are the training block, the rest test.
train_seurat = train_test_seurat.iloc[:90000, :]
test_seurat = train_test_seurat.iloc[90000:, :]
```

Load train and test data

# 1. 
Load data and preprocessing

## 1.1 Load train and test data

```
train = pd.read_csv('./MLR_Project_train.csv')
test = pd.read_csv('./MLR_Project_test.csv')
```

Show the data format and dimension

```
train.head()
test.head()
```

## 1.3 Show the maximum return of train and test

```
# Upper bound on achievable return: sum of all positive targets.
train_max = np.sum(train['TARGET'][train['TARGET']>0])
test_max = np.sum(test['TARGET'][test['TARGET']>0])
print('Maximum return of training set:', train_max)
print('Maximum return of testing set:', test_max)

# Ridge baseline on the Seurat features; "selected" rows are those with a
# positive predicted value.
# NOTE(review): the print labels say "naive random selection" but the numbers
# come from this Ridge model -- the wording looks stale.
reg = Ridge(alpha=0.5).fit(pd.DataFrame(train_seurat.iloc[:, :]), train['TARGET'])
pred = reg.predict(pd.DataFrame(train_seurat.iloc[:, :]))
pred_test = reg.predict(pd.DataFrame(test_seurat.iloc[:, :]))
train_res = np.sum(train['TARGET'][pred>0])
test_res = np.sum(test['TARGET'][pred_test>0])
print(f'Train naive random selection percentage return: {train_res/train_max*100}%')
print(f'Test naive random selection percentage return: {test_res/test_max*100}%')
```

### 1.3.1 Remove the Unnamed columns in dataframe

```
train = train.loc[:, ~train.columns.str.contains('^Unnamed')]
test = test.loc[:, ~test.columns.str.contains('^Unnamed')]
train.shape

# Pairwise feature crosses: one new column per (i, j) product of the
# original (non-TARGET) columns; quadratic in the column count.
train_ = pd.DataFrame()
for i in range(train.shape[1]-1):
    for j in range(train.shape[1]-1):
        train_[str(i)+'_'+str(j)+'_feat'] = train.iloc[:, i] * train.iloc[:, j]
# Keep TARGET as the last column after appending the crosses.
train_target = pd.DataFrame(train['TARGET'])
train = train.drop(['TARGET'], axis = 1)
train = pd.concat([train, train_], axis = 1)
train['TARGET'] = train_target

test_ = pd.DataFrame()
for i in range(test.shape[1]-1):
    for j in range(test.shape[1]-1):
        test_[str(i)+'_'+str(j)+'_feat'] = test.iloc[:, i] * test.iloc[:, j]
test_target = pd.DataFrame(test['TARGET'])
test = test.drop(['TARGET'], axis = 1)
test = pd.concat([test, test_], axis = 1)
test['TARGET'] = test_target
```

## 5.5 Autoencoder Resnet model

```
# Binary classification target: 1 where TARGET > 0, else 0
# ((sign + 1) // 2 maps -1 -> 0, +1 -> 1).
input_features = train.drop(['TARGET'], axis=1).to_numpy()
output_features = pd.DataFrame((np.sign(train['TARGET'])+1)//2).to_numpy()
X_test = test.drop(['TARGET'], axis=1).to_numpy()
Y_test = pd.DataFrame((np.sign(test['TARGET'])+1)//2).to_numpy()

X_train, X_val, Y_train, Y_val = train_test_split(input_features, output_features, test_size=0.2, random_state=42)

####
# Keep dataframe copies of the same split (same random_state) so the realized
# returns can be looked up by row later.
train_data, val_data = train_test_split(train, test_size=0.2, random_state=42)
test_data = test
####

# Best achievable return per split (sum of positive targets).
auto_train_max = np.sum(train_data['TARGET'][train_data['TARGET']>0])
auto_val_max = np.sum(val_data['TARGET'][val_data['TARGET']>0])
auto_test_max = np.sum(test['TARGET'][test['TARGET']>0])

print('Train X shape:', X_train.shape)
print('Validation X shape:', X_val.shape)
print('Test X shape:', X_test.shape)
print('Train Y shape:', Y_train.shape)
print('Val Y shape:', Y_val.shape)
print('Test Y shape:', Y_test.shape)
print('train_max:', auto_train_max)
print('val_max:', auto_val_max)
print('test_max:', auto_test_max)

# Convert to torch tensors and insert a channel axis: (N, D) -> (N, 1, D).
train_input = torch.from_numpy(X_train)
train_output = torch.from_numpy(Y_train)
val_input = torch.from_numpy(X_val)
val_output = torch.from_numpy(Y_val)
test_input = torch.from_numpy(X_test)
test_output = torch.from_numpy(Y_test)
train_input = torch.unsqueeze(train_input, 1)
val_input = torch.unsqueeze(val_input, 1)
test_input = torch.unsqueeze(test_input, 1)
train_input = train_input.float()
train_output = train_output.float()
val_input = val_input.float()
val_output = val_output.float()
test_input = test_input.float()
test_output = test_output.float()

# NOTE(review): after unsqueeze, axis 1 is the inserted channel axis (size 1),
# while nn.Linear consumes the LAST axis (the feature count). Reading the
# model width from shape[1] here looks inconsistent -- verify whether
# shape[-1] / shape[2] was intended.
input_feature = train_input.shape[1]
output_feature = 1
# print('input_feature:', input_feature)
# print('output_feature:', output_feature)

train_input = train_input.to(device)
train_output = train_output.to(device)
val_input = val_input.to(device)
val_output = val_output.to(device)
test_input = test_input.to(device)
test_output = test_output.to(device)

def seed_everything(seed=1234):
    # Pin every RNG source for reproducible runs.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

seed_everything()

# auto-encoder: the base model definition follows in the next cell.
model class Autoencoder(nn.Module): def __init__(self): super(Autoencoder, self).__init__() self.linear1 = nn.Linear(input_feature, input_feature//2) self.linear2 = nn.Linear(input_feature//2, input_feature//4) self.linear3 = nn.Linear(input_feature//4, input_feature//16) self.linear4 = nn.Linear(input_feature//16, input_feature//16) self.linear5 = nn.Linear(input_feature//16, input_feature//16) self.linear6 = nn.Linear(input_feature//16, input_feature//16) self.batchnorm_1 = nn.BatchNorm1d(input_feature//2) self.batchnorm_2 = nn.BatchNorm1d(input_feature//4) self.batchnorm_3 = nn.BatchNorm1d(input_feature//16) self.linear = nn.Linear(input_feature//16, 1) nn.init.constant_(self.linear1.weight, 0.1) nn.init.constant_(self.linear2.weight, 0.1) nn.init.constant_(self.linear3.weight, 0.1) nn.init.constant_(self.linear4.weight, 0.1) nn.init.constant_(self.linear.weight, 0.1) self.relu = nn.ReLU() # self.leakyrelu = nn.LeakyReLU(0.1) self.dropout = nn.Dropout(0.15) self.softmax = nn.Softmax() def forward(self, x): x = self.linear1(x) # x = self.batchnorm_1(x) x = self.relu(x) x = self.dropout(x) x = self.linear2(x) # x = self.batchnorm_2(x) x = self.relu(x) # x = self.dropout(x) x = self.linear3(x) # x = self.batchnorm_3(x) x = self.relu(x) x = self.linear6(x) x = self.relu(x) output = self.linear(x) return output.float() batch_size = 100000 train_ds = TensorDataset(train_input, train_output) train_dl = DataLoader(train_ds, batch_size= batch_size, shuffle=False) %matplotlib inline def fit(num_epochs, model, loss_fn, train_input, train_output, val_input, val_output, test_input, test_output, model_path): best_loss = float('inf') train_pred_output = [] val_pred_output = [] train_error = [] val_error = [] test_error = [] epochs = [] train_returns = [] val_returns = [] test_returns = [] train_sum = [] val_sum = [] test_sum = [] for epoch in range(num_epochs): for x,y in train_dl: model = model.train() opt.zero_grad() pred = model(x) y = torch.reshape(y, (y.shape[0], 1)) loss 
= loss_fn(pred, y) loss.backward() opt.step() if epoch % 50 == 0: model = model.eval() train_pred = model(train_input) train_pred_index = (torch.sign(train_pred)+1)//2 train_output = torch.reshape(train_output, (train_output.shape[0], 1)) train_loss = loss_fn(train_output, train_pred) # train_loss = loss_fn(train_pred, train_output.long().squeeze()) train_loss = train_loss.cpu().detach().numpy() val_pred = model(val_input) val_pred_index = (torch.sign(val_pred)+1)//2 val_output = torch.reshape(val_output, (val_output.shape[0], 1)) val_loss = loss_fn(val_output, val_pred) # val_loss = loss_fn(val_pred, val_output.long().squeeze()) val_loss = val_loss.cpu().detach().numpy() test_pred = model(test_input) test_pred_index = (torch.sign(test_pred)+1)//2 test_output = torch.reshape(test_output, (test_output.shape[0], 1)) test_loss = loss_fn(test_output, test_pred) # test_loss = loss_fn(test_pred, test_output.long().squeeze()) test_loss = test_loss.cpu().detach().numpy() epochs.append(epoch) train_error.append(math.log(train_loss+1)) val_error.append(math.log(val_loss+1)) test_error.append(math.log(test_loss+1)) # figure, ax = plt.subplots(1, 2, figsize = (20, 7)) # ax = ax.flatten() # figure, ax = plt.subplots(1, 4, figsize = (22, 5)) # ax = ax.flatten() # plt.grid(False) # train_conf = confusion_matrix(train_output, train_pred_index) # g1 = sns.heatmap(train_conf, cmap="YlGnBu",cbar=False, ax=ax[0], annot = True) # g1.set_ylabel('True Target') # g1.set_xlabel('Predict Target') # g1.set_title('Train dataset') # plt.grid(False) # val_conf = confusion_matrix(val_output, val_pred_index) # g2 = sns.heatmap(val_conf, cmap="YlGnBu",cbar=False, ax=ax[1], annot = True) # g2.set_ylabel('True Target') # g2.set_xlabel('Predict Target') # g2.set_title('Val dataset') # plt.grid(False) # test_conf = confusion_matrix(test_output, test_pred_index) # g3 = sns.heatmap(test_conf, cmap="YlGnBu",cbar=False, ax=ax[2], annot = True) # g3.set_ylabel('True Target') # g3.set_xlabel('Predict 
Target') # g3.set_title('Test dataset') train_pred_np = train_pred_index.cpu().detach().numpy() train_output_np = train_output.cpu().detach().numpy() val_pred_np = val_pred_index.cpu().detach().numpy() val_output_np = val_output.cpu().detach().numpy() test_pred_np = test_pred_index.cpu().detach().numpy() test_output_np = test_output.cpu().detach().numpy() # train_max_value = max(max(train_output_np), max(train_pred_np)) # train_min_value = min(min(train_output_np), min(train_pred_np)) # val_max_value = max(max(val_output_np), max(val_pred_np)) # val_min_value = min(min(val_output_np), min(val_pred_np)) # test_max_value = max(max(test_output_np), max(test_pred_np)) # test_min_value = min(min(test_output_np), min(test_pred_np)) # ax[0].scatter(train_output_np, train_pred_np, s = 20, alpha=0.3, c='blue') # ax[1].scatter(val_output_np, val_pred_np, s = 20, alpha=0.3, c='red') # ax[2].scatter(test_output_np, test_pred_np, s = 20, alpha=0.3, c='green') # ax[0].plot(epochs, train_error, c='blue') # ax[0].plot(epochs, val_error, c='red') # ax[0].plot(epochs, test_error, c='green') # ax[0].set_title('Errors vs Epochs', fontsize=15) # ax[0].set_xlabel('Epoch', fontsize=10) # ax[0].set_ylabel('Errors', fontsize=10) # ax[0].legend(['train', 'valid', 'test']) # ax[0].set_xlim([train_min_value, train_max_value]) # ax[0].set_ylim([train_min_value, train_max_value]) # ax[0].set_title('Trainig data', fontsize=15) # ax[0].set_xlabel('Target', fontsize=10) # ax[0].set_ylabel('Prediction', fontsize=10) # ax[0].plot([train_min_value, train_max_value], [train_min_value, train_max_value], 'k-') # ax[1].set_xlim([val_min_value, val_max_value]) # ax[1].set_ylim([val_min_value, val_max_value]) # ax[1].set_title('Validation data', fontsize=15) # ax[1].set_xlabel('Target', fontsize=10) # ax[1].set_ylabel('Prediction', fontsize=10) # ax[1].plot([val_min_value, val_max_value], [val_min_value, val_max_value], 'k-') # ax[2].set_xlim([test_min_value, test_max_value]) # 
ax[2].set_ylim([test_min_value, test_max_value]) # ax[2].set_title('Testing data', fontsize=15) # ax[2].set_xlabel('Target', fontsize=10) # ax[2].set_ylabel('Prediction', fontsize=10) # ax[2].plot([test_min_value, test_max_value], [test_min_value, test_max_value], 'k-') # ax[3].plot(epochs, train_error, c='blue') # ax[3].plot(epochs, val_error, c='red') # ax[3].plot(epochs, test_error, c='green') # ax[3].set_title('Training and Validation error', fontsize=15) # ax[3].set_xlabel('Epochs', fontsize=10) # ax[3].set_ylabel('MSE error', fontsize=10) # display.clear_output(wait=True) # display.display(pl.gcf()) # print('Epoch ', epoch, 'Train_loss: ', train_loss*1000, ' Validation_loss: ', val_loss*100, ' Test_loss: ', test_loss*100) # print(train_pred_np.shape, train_pred_np) # print(train_pred, train_pred_np) train_pred_np = np.squeeze(train_pred_np) val_pred_np = np.squeeze(val_pred_np) test_pred_np = np.squeeze(test_pred_np) train_res = np.sum(train_data['TARGET'][train_pred_np>0]) train_output_check = np.squeeze(train_output_np) train_check = np.sum(train_data['TARGET'][train_output_check>0]) val_res = np.sum(val_data['TARGET'][val_pred_np>0]) val_output_check = np.squeeze(val_output_np) val_check = np.sum(val_data['TARGET'][val_output_check>0]) test_res = np.sum(test_data['TARGET'][test_pred_np>0]) test_output_check = np.squeeze(test_output_np) test_check = np.sum(test_data['TARGET'][test_output_check>0]) # train_returns.append(train_res) # val_returns.append(val_res) # test_returns.append(test_res) # ax[1].plot(epochs, train_returns, c='blu`e') # ax[1].plot(epochs, val_returns, c='red') # ax[1].plot(epochs, test_returns, c='green') # ax[1].legend(['train', 'valid', 'test']) # ax[1].set_title('Return vs Epochs', fontsize=15) # ax[1].set_xlabel('Epoch', fontsize=10) # ax[1].set_ylabel('Returns', fontsize=10) # display.clear_output(wait=True) # display.display(pl.gcf()) train_sum.append(train_res) val_sum.append(val_res) test_sum.append(test_res) # print(f'Checks: 
{train_check/auto_train_max*100}%, {val_check/auto_val_max*100}%, {test_check/auto_test_max*100}%') # print(f'Maximum sum train return {train_res}, Total train return: {auto_train_max}, Maximum train percentage return: {train_res/auto_train_max*100}%') # print(f'Maximum sum train return {val_res}, Total train return: {auto_val_max}, Maximum train percentage return: {val_res/auto_val_max*100}%') # print(f'Maximum sum test return {test_res}, Total test return: {auto_test_max}, Maximum test percentage return: {test_res/auto_test_max*100}%') # print('Epoch:', epoch, 'Train loss:', train_loss, 'Val loss:', val_loss, 'Test loss:', test_loss) print(f'Epoch: {epoch}, Train loss: {train_loss}, Train return: {train_res/auto_train_max*100}%, Val loss: {val_loss}, Val return: {val_res/auto_val_max*100}%, Test loss: {test_loss}, Test return: {test_res/auto_test_max*100}%') if val_loss < best_loss: torch.save(model.state_dict(), model_path) best_loss = val_loss # train_pred_output.append([train_pred.cpu().detach().numpy(), train_output.cpu().detach().numpy()]) # val_pred_output.append([val_pred.cpu().detach().numpy(), val_output.cpu().detach().numpy()]) return train_sum, val_sum, test_sum num_epochs = 20000 learning_rate = 0.001 loss_fn = F.mse_loss seed_everything() model = Autoencoder() model = model.to(device) opt = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) train_sum_1, val_sum_1, test_sum_1 = fit(num_epochs, model, loss_fn, train_input, train_output, val_input, val_output, test_input, test_output, 'model_path_cnn') # fig.savefig("auto_encoder.png", bbox_inches='tight', dpi=600) # model = Autoencoder_model() # model.load_state_dict(torch.load(model_path)) # model.eval() ```
github_jupyter
# Introduction

So... you've got some interesting property of the network uncovered. Is it real? How can you trust that what you've found **didn't arise by random chance**?

One useful way of thinking about this is by using generative models of random graphs. By "generative" and "random", we mean that the graph was generated using some statistical random model underlying it. This is essentially in the "Bayesian" tradition of modelling.

The alternative is a non-parametric way of thinking. This is where we don't assume some model that generated the data, and instead perform randomization on the data on hand to calculate statistics. Here, we assume that our randomization procedure is an appropriate random model for the network.

## Statistics Primer

**Discrete Distributions:**

- Bernoulli: probability p of success in 1 try (e.g. one flip of coin).
- Binomial: probability p of success given n number of tries. `n * bernoulli trials` follows binomial distribution.
- Poisson: processes with a per-unit rate.

**Continuous Distributions:**

- Uniform: equal probability over the range of probable values. Can also be made discrete.
- Normal: everyone's favourite.

We can also make up an arbitrary distribution of our own. :-)

## Statistical Test of Significance of Busy Routes

If we think back to the graph made in the previous notebook, what kind of process might one imagine that gave rise to the data?

**brainstorm session...**

Group work.

## Discrete uniform distribution over all routes

Assumptions of this model:

1. Riders choose a start station with uniform probability.
2. They then choose a destination station with uniform probability.
3. They ride it and the data are recorded.

Under this model, what is the distribution of the number of rides taken throughout the system?

We can probably solve this analytically, but since we have ultra-cheap computational capabilities at our fingertips, might as well do it the brute-force way.
``` import networkx as nx G = nx.read_gpickle('datasets/divvy_2013/divvy_graph.pkl') total_trips = sum([d['count'] for _,_,d in G.edges(data=True)]) print(total_trips) ``` ### Exercise Calculate the expected number of trips between any two stations, assuming self-loops are allowed. ### Exercise Can you plot a histogram of the number of trips taken on each route? ### Exercise Can you figure out the 2.5th, 97.5th, and 100th percentile of this distribution? Hint: You may wish to use `numpy`'s `percentile` function. ``` import numpy as np ``` Computing the interval between the 2.5th to the 97.5th percentile effectively gives you a centered 95% mass of the distribution. Knowing this range can be quite useful in other data analysis cases. ### Live Class Exercise Together, we will implement a random model for the number of trips that are taken between two stations. In this procedure, we will copy the nodes graph `G` into a new graph `G_random`. This can be done by: G_random = nx.Graph() G_random.add_nodes_from(G.nodes(data=True)) Following that, we will manually map-reduce this computation. ``` # Grab all the nodes from G. G_random = nx.Graph() G_random.add_nodes_from(G.nodes(data=True)) G_random.nodes(data=True) # Implement the procedure by brute force. You will find it takes a bit of time.......... but because we have so many people # in the class, we can brute force parallelize this :-). from random import sample, choice import math n = 40 # the total number of students with Python 3.4 installed. for i in range(math.ceil(total_trips/n)): if i % 1000 == 0: print(i) n1 = choice(G_random.nodes()) n2 = choice(G_random.nodes()) #note: n1 and n2 might well be the same nodes. if (n1, n2) not in G_random.edges(): G_random.add_edge(n1, n2, count=0) else: G_random.edge[n1][n2]['count'] += 1 your_id = 1 # change this number to the ID given to you. 
nx.write_gpickle(G_random, 'datasets/divvy_2013/divvy_random{0}.pkl'.format(your_id)) # Load the graphs with randomly re-distributed trip counts as a list of graphs. import os def is_pkl(filename): """ Checks if a file is a pickled graph. """ if filename.split('.')[-1] == 'pkl': return True else: return False def is_divvy_random_graph(filename): """ Checks if it's a Divvy graph. """ if 'divvy_random' in filename: return True else: return False # Load the data into a list of graphs. divvy_random_graphs = [] for f in os.listdir('datasets/divvy_2013'): if is_pkl(f) and is_divvy_random_graph(f): g = nx.read_gpickle('datasets/divvy_2013/{0}'.format(f)) graphs.append(g) # Plot the distribution of counts in a re-distributed set of trips. import matplotlib.pyplot as plt %matplotlib inline G_random_master = nx.Graph() for g in graphs: for sc, sk, d in g.edges(data=True): if (sc, sk) not in G_random_master.edges(): G_random_master.add_edge(sc, sk, count=d['count']) if (sc, sk) in G_random_master.edges(): G_random_master.edge[sc][sk]['count'] += d['count'] # Remove edges that have no counts for sc, sk, d in G_random_master.edges(data=True): if d['count'] == 0: G_random_master.remove_edge(sc, sk) # Plot the distribution of counts throughout the network counts = [d['count'] for _,_,d in G_random_master.edges(data=True)] plt.bar(Counter(counts).keys(), Counter(counts).values()) plt.show() nx.write_gpickle(G_random_master, 'datasets/divvy_2013/divvy_random_master.pkl') ``` **Think about it...** Given the distribution of trip counts in the randomly re-distributed trips, what can we infer about the popularity of certain routes? Are they likely to have shown up in this random model?
github_jupyter
``` import matplotlib #matplotlib.use('Agg') %matplotlib tk %autosave 180 import matplotlib.pyplot as plt from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) import matplotlib.cm as cm from matplotlib import gridspec import parmap import numpy as np import pandas as pd import os import parmap import shutil import cv2 import scipy.io as sio from Specgram import Specgram import csv import glob2 from scipy import stats import scipy from mpl_toolkits.axes_grid1.inset_locator import inset_axes class Visualize(): def __init__(self): self.clr_ctr = 0 # self.animal_names = ['IA1','IA2','IA3','IJ1','IJ2','AQ2'] # self.colors = ['black','blue','red','green','magenta','pink','cyan'] # self.n_colors = [10,19,23,17,20,48] # self.filter=False def load_data(self, fname): self.data = np.load(fname) def format_plot(self, ax): ''' Formats plots for decision choice with 50% and 0 lines ''' # meta data xlims = [-10,10] ylims = [0.4,1.0] ax.plot([0,0], [ylims[0],ylims[1]], 'r--', linewidth=3, color='black', alpha=.5) ax.plot([xlims[0],xlims[1]], [0.5,0.5], 'r--', linewidth=3, color='black', alpha=.5) ax.set_xlim(-10,10) ax.set_ylim(0.4,1.0) def format_plot2(self, ax): ''' Format plots for time prediction ''' # meta data xlims = [-10,0] ylims = [0.0,1.0] ax.plot([0,0], [ylims[0],ylims[1]], 'r--', linewidth=3, color='black', alpha=.5) ax.plot([xlims[0],xlims[1]], [0.1,0.1], 'r--', linewidth=3, color='black', alpha=.5) ax.set_xlim(xlims[0],xlims[1]) ax.set_ylim(ylims[0],ylims[1]) # plt.legend(fontsize=20) def plot_decision_choice(self, clr, label, title=None, ax=None): # if ax is None: ax=plt.subplot(111) # t = np.arange(-9.5, 10.5, 1) # mean = self.data.mean(1) std = np.std(self.data,axis=1) ax.plot(t, mean, c=clr, label = label, linewidth=4) ax.fill_between(t, mean-std, mean+std, alpha = 0.2) self.format_plot(ax) plt.legend(fontsize=20) if title is not None: plt.title(title) # def plot_decision_choice(self, clr, label, 
ax=None): # # # if ax is None: # ax=plt.subplot(111) # # # t = np.arange(-9.5, 10.5, 1) # # # mean = self.data.mean(1) # std = np.std(self.data,axis=1) # ax.plot(t, mean, # c=clr, # label = label, # linewidth=4) # ax.fill_between(t, mean-std, mean+std, alpha = 0.2) # self.format_plot(ax) # plt.legend(fontsize=20) def plot_significant(self, clr, label, animal_id, session): # fig=plt.figure() ax=plt.subplot(111) t = np.arange(-9.5, 10.5, 1) # mean = self.data.mean(1) std = np.std(self.data,axis=1) plt.plot(t, mean, c=clr, label = label, linewidth=4) plt.fill_between(t, mean-std, mean+std, alpha = 0.2) # grab end points control = np.hstack((self.data[-2:].flatten(), self.data[:2].flatten())) #control = self.data[:3].flatten() print ("controL ", control.shape) sig = [] for k in range(self.data.shape[0]): if True: res = stats.ks_2samp(self.data[k], control) else: res = scipy.stats.ttest_1samp(self.data[k], 0.5) print (res) sig.append(res[1]) sig=np.array(sig)[None] print (sig) thresh = 0.05 idx = np.where(sig>thresh) sig[idx]=np.nan vmin = np.nanmin(sig) vmax = np.nanmax(sig) vmin=0.0 vmax=0.05 axins = ax.inset_axes((0,.95,1,.05)) axins.set_xticks([]) axins.set_yticks([]) im = axins.imshow(sig,vmin=vmin,vmax=vmax, aspect='auto', cmap='viridis_r') # ticks = np.round(np.linspace(vmin, vmax, 4),3) print ("vmin, vmax; ", vmin, vmax, "ticks: ", ticks) fmt = '%1.3f' # cbar = fig.colorbar(im, ax=ax, shrink=0.4, ticks=ticks, format = fmt) cbar.ax.tick_params(labelsize=16) cbar.ax.set_title('Pval', rotation=0, fontsize=16) self.format_plot(ax) plt.title(animal_id + " session: "+str(session)) # def plot_trends(self, clr, label, ax): # t = np.arange(-9.5, 10.5, 1) colors = plt.cm.magma(np.linspace(0,1,self.n_colors)) # mean = self.data.mean(1) std = np.std(self.data,axis=1) ax.plot(t, mean, c=colors[clr], label = label, linewidth=4) self.format_plot(ax) def plot_animal_decision_longitudinal_matrix(self, animal_name, lockout=False, ax=None): if ax is None: fig=plt.figure() 
ax=plt.subplot(111) # root_dir = self.main_dir+animal_name+'/' fnames = np.sort(glob2.glob(root_dir+'SVM_scores_'+animal_name+"*")) # #fig=plt.figure() img =[] for fname in fnames: if 'lockout' not in fname: if lockout==False: self.load_data(fname) temp = self.data.mean(1) else: # idx = fname.find('SVM_scores_'+animal_name) fname2 = fname[:idx+12]+"_lockout"+fname[idx+12:] self.load_data(fname[:idx+14]+"_lockout"+fname[idx+14:]) temp = self.data.mean(1) if self.filter: temp = self.filter(temp) img.append(temp) img=np.array(img) ax.imshow(img) plt.xticks(np.arange(0,img.shape[1],2), np.arange(-9.5,10,2)) plt.ylabel("Study day") plt.title(animal_name) plt.suptitle("lockout: "+str(lockout)) #ticks = np.linspace(vmin,vmax,4) #cbar = fig.colorbar(im, ax=axes[5], # ticks=ticks) def plot_animal_decision_longitudinal(self, animal_name): animal_names = ['IA1','IA2','IA3','IJ1','IJ2','AQ2'] idx=animal_names.index(animal_name) # root_dir = self.main_dir+animal_name+'/' fnames = np.sort(glob2.glob(root_dir+'SVM_scores_'+animal_name+"*")) # fig=plt.figure() ax1=plt.subplot(1,2,1) ax1.set_title("All") ax2=plt.subplot(1,2,2) ax2.set_title("Lockout") vis.n_colors = self.n_colors[idx] ctr=0 for fname in fnames: if 'lockout' not in fname: print (fname) self.load_data(fname) self.plot_trends(ctr,'all',ax1) idx = fname.find('SVM_scores_'+animal_name) fname2 = fname[:idx+12]+"_lockout"+fname[idx+12:] print (fname2) self.load_data(fname[:idx+14]+"_lockout"+fname[idx+14:]) self.plot_trends(ctr,'lockout', ax2) ctr+=1 plt.suptitle(animal_name) def plot_animal_time_longitudinal(self, animal_name): animal_names = ['IA1','IA2','IA3','IJ1','IJ2','AQ2'] idx=animal_names.index(animal_name) # root_dir = self.main_dir+animal_name+'/' fnames = np.sort(glob2.glob(root_dir+'SVM_scores_'+animal_name+"*")) # fig=plt.figure() ax1=plt.subplot(1,2,1) ax1.set_title("All") ax2=plt.subplot(1,2,2) ax2.set_title("Lockout") self.n_colors = self.ncolors[idx] ctr=0 for fname in fnames: if 'lockout' not in fname: 
print (fname) self.load_data(fname) self.plot_trends(ctr,'all',ax1) idx = fname.find('SVM_scores_'+animal_name) fname2 = fname[:idx+12]+"_lockout"+fname[idx+12:] print (fname2) self.load_data(fname[:idx+14]+"_lockout"+fname[idx+14:]) self.plot_trends(ctr,'lockout', ax2) ctr+=1 plt.suptitle(animal_name) def plot_animal_decision_AUC_longitudinal(self): # ax1=plt.subplot(121) ax2=plt.subplot(122) # for animal_name in self.animal_names: # root_dir = self.main_dir+animal_name+'/' fnames = np.sort(glob2.glob(root_dir+'SVM_scores_'+animal_name+"*")) # time points 0 to 10 for decoding... width = [0,10] # only look at probs > 0.5 thresh = 0.6 # auc1 = [] auc2 = [] for fname in fnames: if 'lockout' not in fname: self.load_data(fname) temp = self.data.mean(1)[width[0]:width[1]] idx = np.where(temp>thresh)[0] temp = temp[idx] auc1.append(temp.sum(0)) # load lockout version idx = fname.find('SVM_scores_'+animal_name) fname2 = fname[:idx+12]+"_lockout"+fname[idx+12:] self.load_data(fname[:idx+14]+"_lockout"+fname[idx+14:]) temp = self.data.mean(1)[width[0]:width[1]] idx = np.where(temp>thresh)[0] temp = temp[idx] auc2.append(temp.sum(0)) # auc1 = np.array(auc1) auc2 = np.array(auc2) t = np.arange(auc1.shape[0])/(auc1.shape[0]-1) # ax1.plot(t, np.poly1d(np.polyfit(t, auc1, 1))(t), linewidth=4, c=self.colors[self.clr_ctr], label=self.animal_names[self.clr_ctr]) # ax2.plot(t, np.poly1d(np.polyfit(t, auc2, 1))(t), 'r--', linewidth=4, c=self.colors[self.clr_ctr]) self.clr_ctr+=1 ax1.set_xlim(0,1) ax2.set_xlim(0,1) #ax1.set_ylim(9,13) #ax2.set_ylim(9,13) ax1.set_title("All") ax2.set_title("Lockout") ax1.set_xlabel("Duration of study") ax2.set_xlabel("Duration of study") plt.suptitle("AUC fits to SVM decision prediction", fontsize=20) ax1.legend(fontsize=20) def plot_decision_time(self, clr, label, ax=None): # if ax is None: ax=plt.subplot(111) # t = np.arange(-9.5, 0.5, 1) # print (self.data.shape) temp = [] for k in range(self.data.shape[1]): temp.append(self.data[:,k,k]) 
temp=np.array(temp) # mean = temp.mean(1) std = np.std(temp,axis=1) plt.plot(t, mean, c=clr, label = label, linewidth=4) plt.fill_between(t, mean-std, mean+std, color=clr, alpha = 0.1) plt.legend(fontsize=16) self.format_plot2(ax) def plot_decision_time_animal(self, animal_name): # select dataset and # of recordings t = np.arange(-9.5, 0.5, 1) idx=self.animal_names.index(animal_name) colors = plt.cm.magma(np.linspace(0,1,self.n_colors[idx])) root_dir = self.main_dir+animal_name+'/' fnames = np.sort(glob2.glob(root_dir+'conf_10_'+animal_name+"*")) # traces1 = [] traces2 = [] for fname in fnames: if 'lockout' not in fname: self.load_data(fname) temp = [] for k in range(self.data.shape[1]): temp.append(self.data[:,k,k]) traces1.append(temp) # load lockout version idx = fname.find('conf_10_'+animal_name) fname2 = fname[:idx+11]+"_lockout"+fname[idx+11:] self.load_data(fname2) temp = [] for k in range(self.data.shape[1]): temp.append(self.data[:,k,k]) traces2.append(temp) traces1=np.array(traces1) traces2=np.array(traces2) #print (traces1.shape) # ax1=plt.subplot(1,2,1) ax1.set_title("all") for k in range(traces1.shape[0]): mean=traces1[k].mean(1) #print (mean.shape) ax1.plot(t, mean, c=colors[k], linewidth=4) self.format_plot2(ax1) # ax2=plt.subplot(1,2,2) ax2.set_title("lockout") for k in range(traces2.shape[0]): mean=traces2[k].mean(1) #print (mean.shape) ax2.plot(t, mean, c=colors[k], linewidth=4) self.format_plot2(ax2) plt.suptitle(animal_name) def plot_decision_time_animal_matrix(self, animal_name): # root_dir = self.main_dir+animal_name+'/' fnames = np.sort(glob2.glob(root_dir+'conf_10_'+animal_name+"*")) # traces1 = [] traces2 = [] for fname in fnames: if 'lockout' not in fname: self.load_data(fname) temp = [] for k in range(self.data.shape[1]): temp.append(self.data[:,k,k].mean(0)) traces1.append(temp) # load lockout version idx = fname.find('conf_10_'+animal_name) fname2 = fname[:idx+11]+"_lockout"+fname[idx+11:] self.load_data(fname2) temp = [] for k in 
range(self.data.shape[1]): temp.append(self.data[:,k,k].mean(0)) traces2.append(temp) traces1=np.array(traces1) traces2=np.array(traces2) print (traces1.shape) # ax1=plt.subplot(1,4,1) ax1.set_title("all") ax1.imshow(traces1,vmin=0,vmax=1.0) # ax2=plt.subplot(1,4,2) ax2.set_title("lockout") ax2.imshow(traces2,vmin=0,vmax=1.0) plt.suptitle(animal_name) # ax2=plt.subplot(1,4,3) ax2.set_title("all - lockout") ax2.imshow(traces1-traces2,vmin=0,vmax=.25) plt.suptitle(animal_name) # ax2=plt.subplot(1,4,4) ax2.set_title("lockout - all") ax2.imshow(traces2-traces1,vmin=0,vmax=.25) plt.suptitle(animal_name) def plot_decision_time_all_matrix(self): vmin = 0 vmax = 0.75 axes=[] fig=plt.figure() for a in range(6): axes.append(plt.subplot(2,3,a+1)) # root_dir = self.main_dir+self.animal_names[a]+'/' fnames = np.sort(glob2.glob(root_dir+'conf_10_'+self.animal_names[a]+"*")) # traces1 = [] for fname in fnames: if 'lockout' not in fname: self.load_data(fname) temp = [] for k in range(self.data.shape[1]): temp.append(self.data[:,k,k].mean(0)) if self.filter: temp = self.filter_trace(temp) traces1.append(temp) traces1=np.array(traces1) axes[a].set_title(self.animal_names[a]) im = axes[a].imshow(traces1,vmin=vmin,vmax=vmax) plt.xticks(np.arange(0,traces1.shape[1],2), np.arange(-9.5,0,2)) plt.ylabel("Study day") ticks = np.linspace(vmin,vmax,4) cbar = fig.colorbar(im, ax=axes[5], ticks=ticks) #cbar.ax.tick_params(labelsize=16) #cbar.ax.set_title('Pval', rotation=0, # fontsize=16) def filter_trace(self,trace,width=1): box = np.ones(width)/width trace_smooth = np.convolve(trace, box, mode='same') return trace_smooth # LEVER PULL vis = Visualize() # lever-related data vis.main_dir = '/media/cat/1TB/yuki/yongxu/lever pull/' ############################################# ############## DECISION TYPE ################ ############################################# animal_id = 'AQ2' session = 10 fname = vis.main_dir+'/'+animal_id+'/SVM_scores_'+animal_id+'_'+str(session)+'.npy' # 
vis.load_data(fname) # vis.plot_decision_choice('red','all') # fname = vis.main_dir+'/'+animal_id+'/SVM_scores_'+animal_id+'_lockout_'+str(session)+'.npy' # vis.load_data(fname) # vis.plot_decision_choice('blue','lockout') # # # # vis.load_data(fname) # vis.plot_significant('red','all',animal_id, session) # # # vis.plot_animal_decision_longitudinal('IA3') # # vis.plot_animal_decision_AUC_longitudinal() # # # # lockout=False for ctr, name in enumerate(vis.animal_names): ax=plt.subplot(2,3,ctr+1) vis.plot_animal_decision_longitudinal_matrix(name, lockout, ax) ############################################# ############## DECISION TIME ################ ############################################# # #?vis.main_dir = '/media/cat/1TB/yuki/yongxu/lever pull/' # fname = '/media/cat/1TB/yuki/yongxu/lever pull/IA1/conf_10_IA1_0.npy' # vis.load_data(fname) # vis.plot_decision_time('red','all') # animal_name = "AQ2" # vis.plot_decision_time_animal(animal_name) # animal_name = 'AQ2' # vis.plot_decision_time_animal_matrix(animal_name) # # # vis.filter=False # vis.plot_decision_time_all_matrix() ############################################# ############## BODY MOVEMENTS ############### ############################################# # body movement related data vis.main_dir = '/media/cat/1TB/yuki/yongxu/body movement/' # # # body_part = 'right_paw' # fname = vis.main_dir+'/SVM_scores_'+body_part+'_1.npy' # vis.load_data(fname) # vis.plot_decision_choice('red','1', body_part) # fname = vis.main_dir+'/SVM_scores_'+body_part+'_2.npy' # vis.load_data(fname) # vis.plot_decision_choice('blue','2', body_part) # # # fname = vis.main_dir+'/conf_10_left_paw.npy' # vis.load_data(fname) # vis.plot_decision_time('red','left') # fname = vis.main_dir+'/conf_10_right_paw.npy' # vis.load_data(fname) # vis.plot_decision_time('blue','right') # fname = vis.main_dir+'/conf_10_tongue.npy' # vis.load_data(fname) # vis.plot_decision_time('green','tongue') ############################################# 
############## DECISION TIME ################ ############################################# TO DO - 1tail test + Yongxu to provide empoirical distributions - AUC: use >0.5% also stat significant points - Also maybe only look at time <0.0 - dataset-non midline filtered ?? RNNs - # from sklearn.decomposition import PCA def pca_denoise(X): # reshape for PCA X = X.reshape(X.shape[0]*X.shape[1], X.shape[2]*X.shape[3]) pca = PCA() pca.fit(X) mu = np.mean(X, axis=0) nComp =100 Xnew = np.dot(pca.transform(X)[:,:nComp], pca.components_[:nComp,:]) Xnew += mu X_deshaped = Xnew.reshape(X.shape[0],X.shape[1], X.shape[2],X.shape[3]) return X_deshaped X = np.random.rand(10,101,128,128) X_denoised = pca_denoise(X_2D) # # grab stm data # X = np.random.rand(10,101,128,128) # print (X.shape) # # reshape for PCA # X_2D = X.reshape(X.shape[0]*X.shape[1], X.shape[2]*X.shape[3]) # print (X_2D.shape) # # Run pca # print ("DATA IN: ", X_2D.shape) # X_denoised = pca_denoise(X_2D) # print ("DATA OUT: ", X_denoised.shape) # X_deshaped = X_denoised.reshape(X.shape[0],X.shape[1], X.shape[2],X.shape[3]) # print ("Final data: ", X_deshaped.shape) import torch from torch import nn from torch.autograd import Variable device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # X = np.random.rand(10,101,128,128) data_in = data_in.reshape(X.shape[0]*X.shape[1],X.shape[2]*X.shape[3]) print (" Data going into GPU SVD: ", data_in.shape) # move data to device a = torch.from_numpy(np.float32(data_in)).float().to(device) print ("Input array size: ", a.shape) print ("...starting GPU-svd") u, s, v = torch.svd(a) print ("u: ", u.shape) print ("s: ", s.shape) print ("v: ", v.shape) # scale temporal component by singular vals torch.save(v,root_dir+'v_gpu.pt') torch.save(s,root_dir+'s_gpu.pt') torch.save(u,root_dir+'u_gpu.pt') v = v.cpu().data.numpy() s = s.cpu().data.numpy() u = u.cpu().data.numpy() np.save(root_dir+'v.npy',v) np.save(root_dir+'s.npy',s) np.save(root_dir+'u.npy',u) Vs = 
(v*s).transpose(1,0).reshape(-1,128,128) np.save(root_dir+'Vs.npy',Vs) print ("Vs: ", Vs.shape) print ("u: ", u.shape) print ("DONE") # Data in: (39050, 128, 128) # Data going into GPU SVD: (39050, 16384) # Input array size: torch.Size([39050, 16384]) # ...starting GPU-svd # u: torch.Size([39050, 16384]) # s: torch.Size([16384]) # v: torch.Size([16384, 16384]) # Vs: (16384, 128, 128) # u: (39050, 16384) # DONE ```
github_jupyter
# Learn Python for Economic Computation: A Crash Course https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ > James Caton > > james.caton@ndsu.edu > > Cameron Harwick > > charwick@brockport.edu ### Table of Contents [Introduction](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Introduction.ipynb) [Chapter 1: The Essentials](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%201%20-%20The%20Essentials.ipynb) [Chapter 2: Working with Lists](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%202%20-%20Working%20With%20Lists.ipynb) [Chapter 3: Building Functions](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%203%20-%20Building%20Functions.ipynb) [Chapter 4: Classes and Methods](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%204%20-%20Classes%20and%20Methods.ipynb) [Chapter 5: Introduction to numpy, pandas, and matplotlib](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%205%20-%20Introduction%20to%20numpy%2C%20pandas%2C%20and%20matplotlib.ipynb) [Chapter 6: Importing, Cleaning, and Analyzing Data](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%206%20-%20Importing%2C%20Cleaning%2C%20and%20Analyzing%20Data.ipynb) [Chapter 7: Building an OLS Regression Model](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%207%20-%20Building%20an%20OLS%20Regression%20Model.ipynb) [Chapter 8: Advanced Data Analysis](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ/blob/master/Textbook/Chapter%208%20-%20Advanced%20Data%20Analysis.ipynb) ## Introduction: Learn Python for Economic Computation More than ever, programming skills are essential for professional success. This is true whether your work is in business or in academia. 
Further, programming is often not enough. You must also be able to work with statistical software or libraries. While many books exist to teach you how to program, there are none that I know of that do a good job of providing training in statistics and econometrics while also providing good programming habits.

## The Beginning

In learning to program, it is good to have a strong grasp of the fundamental pieces of programming before moving on to complex structures. Many will learn to use modules - libraries - without understanding the fundamentals of Python. For those like ourselves who use Python for a variety of tasks - especially agent-based modeling and data processing and visualization - it is useful to incur the upfront cost of learning the programming fundamentals.

Not long after Jim first began learning to program in Python, he benefited from working with Cameron, who was already familiar with programming syntax and who also wanted to learn the language. We explored Python with small projects, both of us growing comfortable with the language. The building blocks we learned in the process will be used to help you grow comfortable with Python as well. This book is the fruit of our own development.

The aim of this book is to quickly provide the skills necessary to use Python for the purpose of economic computation. The book is constructed for those who are just learning Python. Each example in the text and each problem at the end of every chapter will help you develop programming fluency. You will learn by doing. Even if you do not feel like you understand the programs that you build, it is important for you to _reconstruct every script_ presented in the book. As you build programs, we encourage you to consider different means by which you could create a similar program and also to consider how you might develop the script further. You will learn how to work with objects and functions that are essential to data management and statistical programming.
These include working with basic math functions, lists, dictionaries, tuples, if-statements, "for-loops", classes, and reading and writing files in Python. As we develop understanding of the core interface, we will also build a statistical package. First, you will learn how to install Python quickly and easily.

### Why Python?

You may wonder why you should learn Python as compared to R or MATLAB. Although R and MATLAB are both powerful, they are not well-suited for general-purpose programming in the way that Java or C++ are. Python does meet this criterion. Python is intuitive, not differing as much as R from the syntax and structure of traditional programming languages. If you learn Python for data analysis, you will also have a head-start on programming for other purposes like web design, natural language processing, graphical user interfaces, and so on. By the time you finish this book, you will be well prepared to develop a practice that includes a broad set of applications.

### Installation

In this book, we will use Python 3. All examples will be generated from Spyder, the Integrated Development Environment provided in Anaconda. Download the Anaconda package at:

> https://www.anaconda.com/distribution/#download-section

Download the latest installer for Python 3. If you know your system is 64-bit, choose the 64-bit installer. Otherwise, choose the 32-bit installer. **If you have not installed Python on your system before, check the option “Add Anaconda to the system Path environment variable.”** This option is “Not recommended” by the installation as it gives Anaconda preference over previously installed software. For our purposes, this should not be a problem.

Once you have completed the installation, if you would like to install any package, open the command line (in Windows this is PowerShell) and type conda install package-name where package-name is replaced with the package you wish to install. For example, you can install numpy with conda install numpy.
If a package is not available in Anaconda, you can install it using pip install package-name. The latest version of Anaconda will recognize a library installed in this manner. It is sometimes possible to install unofficial releases of packages with Anaconda, but this will not be necessary for the material covered. Unless otherwise specified, the packages we will be using are included in with the Anaconda installation. When you are ready to build your first program, open Spyder. You can find it by using the search function for your operating system. I recommend that you place it somewhere on your desktop or pin it to your taskbar. Python Scripts As you follow along with the book, you may prefer to view the .py scripts in your text editor. All scripts can be found at [the github](https://github.com/jlcatonjr/Learn-Python-for-Stats-and-Econ) for this book.
github_jupyter
# 정규 표현식 시작하기 ## 정규 표현식의 기초, 메타 문자 정규 표현식에서 사용하는 메타 문자(meta characters)에는 다음과 같은 것이 있다. ※ 메타 문자란 원래 그 문자가 가진 뜻이 아닌 특별한 용도로 사용하는 문자를 말한다. - 사용하는 메타 문자: . ^ $ * + ? { } [ ] \ | ( ) 정규 표현식에 위 메타 문자를 사용하면 특별한 의미를 갖게 된다. 자, 그러면 가장 간단한 정규 표현식부터 시작해 각 메타 문자의 의미와 사용법을 알아보자. ## 문자 클래스 [ ] 우리가 가장 먼저 살펴볼 메타 문자는 바로 문자 클래스(character class)인 [ ]이다. 문자 클래스로 만들어진 정규식은 "[ ] 사이의 문자들과 매치"라는 의미를 갖는다. >※ 문자 클래스를 만드는 메타 문자인 [ ] 사이에는 어떤 문자도 들어갈 수 있다. 즉 정규 표현식이 [abc]라면 이 표현식의 의미는 "a, b, c 중 한 개의 문자와 매치"를 뜻한다. 이해를 돕기 위해 문자열 "a", "before", "dude"가 정규식 [abc]와 어떻게 매치되는지 살펴보자. >- "a"는 정규식과 일치하는 문자인 "a"가 있으므로 매치 >- "before"는 정규식과 일치하는 문자인 "b"가 있으므로 매치 >- "dude"는 정규식과 일치하는 문자인 a, b, c 중 어느 하나도 포함하고 있지 않으므로 매치되지 않음 [ ] 안의 두 문자 사이에 하이픈(-)을 사용하면 두 문자 사이의 범위(From - To)를 의미한다. 예를 들어 [a-c]라는 정규 표현식은 [abc]와 동일하고 [0-5]는 [012345]와 동일하다. 다음은 하이픈(-)을 사용한 문자 클래스의 사용 예이다. >- [a-zA-Z] : 알파벳 모두 >- [0-9] : 숫자 문자 클래스([ ]) 안에는 어떤 문자나 메타 문자도 사용할수 있지만 주의해야 할 메타 문자가 1가지 있다. 그것은 바로 ^인데, 문자 클래스 안에 ^ 메타 문자를 사용할 경우에는 반대(not)라는 의미를 갖는다. 예를 들어 [^0-9]라는 정규 표현식은 숫자가 아닌 문자만 매치된다. ### [자주 사용하는 문자 클래스] [0-9] 또는 [a-zA-Z] 등은 무척 자주 사용하는 정규 표현식이다. 이렇게 자주 사용하는 정규식은 별도의 표기법으로 표현할 수 있다. 다음을 기억해 두자. >- \d - 숫자와 매치, [0-9]와 동일한 표현식이다. >- \D - 숫자가 아닌 것과 매치, [^0-9]와 동일한 표현식이다. >- \s - whitespace 문자와 매치, [ \t\n\r\f\v]와 동일한 표현식이다. 맨 앞의 빈 칸은 공백문자(space)를 의미한다. >- \S - whitespace 문자가 아닌 것과 매치, [^ \t\n\r\f\v]와 동일한 표현식이다. >- \w - 문자+숫자(alphanumeric)와 매치, [a-zA-Z0-9_]와 동일한 표현식이다. >- \W - 문자+숫자(alphanumeric)가 아닌 문자와 매치, [^a-zA-Z0-9_]와 동일한 표현식이다. 대문자로 사용된 것은 소문자의 반대임을 추측할 수 있다. ## Dot(.) 정규 표현식의 Dot(.) 메타 문자는 줄바꿈 문자인 \n을 제외한 모든 문자와 매치됨을 의미한다. >※ 나중에 배우겠지만 정규식을 작성할 때 re.DOTALL 옵션을 주면 \n 문자와도 매치된다. 다음 정규식을 보자. >a.b 위 정규식의 의미는 다음과 같다. >"a + 모든문자 + b" 즉 a와 b라는 문자 사이에 어떤 문자가 들어가도 모두 매치된다는 의미이다. 이해를 돕기 위해 문자열 "aab", "a0b", "abc"가 정규식 a.b와 어떻게 매치되는지 살펴보자. >- -"aab"는 가운데 문자 "a"가 모든 문자를 의미하는 .과 일치하므로 정규식과 매치된다. >- "a0b"는 가운데 문자 "0"가 모든 문자를 의미하는 .과 일치하므로 정규식과 매치된다. >- "abc"는 "a"문자와 "b"문자 사이에 어떤 문자라도 하나는있어야 하는 이 정규식과 일치하지 않으므로 매치되지 않는다. 다음 정규식을 보자. 
>a[.]b 이 정규식의 의미는 다음과 같다. >"a + Dot(.)문자 + b" 따라서 정규식 a[.]b는 "a.b" 문자열과 매치되고, "a0b" 문자열과는 매치되지 않는다. ※ 만약 앞에서 살펴본 문자 클래스([]) 내에 Dot(.) 메타 문자가 사용된다면 이것은 "모든 문자"라는 의미가 아닌 문자 . 그대로를 의미한다. 혼동하지 않도록 주의하자. ## 반복 (*) 다음 정규식을 보자. > ca*t 이 정규식에는 반복을 의미하는 * 메타 문자가 사용되었다. 여기에서 사용한 *은 * 바로 앞에 있는 문자 a가 0부터 무한대로 반복될 수 있다는 의미이다. ※ 여기에서 * 메타 문자의 반복 개수가 무한대라고 표현했는데 사실 메모리 제한으로 2억 개 정도만 가능하다고 한다. 즉 다음과 같은 문자열이 모두 매치된다. ![image.png](attachment:image.png) ## 반복 (+) 반복을 나타내는 또 다른 메타 문자로 +가 있다. +는 최소 1번 이상 반복될 때 사용한다. 즉 *가 반복 횟수 0부터라면 +는 반복 횟수 1부터인 것이다. 다음 정규식을 보자. >ca+t 위 정규식의 의미는 다음과 같다. >"c + a(1번 이상 반복) + t" 위 정규식에 대한 매치여부는 다음 표와 같다. ![image.png](attachment:image.png) ## 반복 ({m,n}, ?) 여기에서 잠깐 생각해 볼 게 있다. 반복 횟수를 3회만 또는 1회부터 3회까지만으로 제한하고 싶을 수도 있지 않을까? { } 메타 문자를 사용하면 반복 횟수를 고정할 수 있다. {m, n} 정규식을 사용하면 반복 횟수가 m부터 n까지 매치할 수 있다. 또한 m 또는 n을 생략할 수도 있다. 만약 {3,}처럼 사용하면 반복 횟수가 3 이상인 경우이고 {,3}처럼 사용하면 반복 횟수가 3 이하를 의미한다. 생략된 m은 0과 동일하며, 생략된 n은 무한대(2억 개 미만)의 의미를 갖는다. ※ {1,}은 +와 동일하고, {0,}은 *와 동일하다. { }을 사용한 몇 가지 정규식을 살펴보자. ### {m} >ca{2}t 위 정규식의 의미는 다음과 같다. >"c + a(반드시 2번 반복) + t" 위 정규식에 대한 매치여부는 다음 표와 같다. ![image.png](attachment:image.png) ### {m, n} >ca{2,5}t 위 정규식의 의미는 다음과 같다: >"c + a(2~5회 반복) + t" 위 정규식에 대한 매치여부는 다음 표와 같다. ![image.png](attachment:image.png) ### ? 반복은 아니지만 이와 비슷한 개념으로 ? 이 있다. ? 메타문자가 의미하는 것은 {0, 1} 이다. 다음 정규식을 보자. ab?c 위 정규식의 의미는 다음과 같다: "a + b(있어도 되고 없어도 된다) + c" 위 정규식에 대한 매치여부는 다음 표와 같다. ![image.png](attachment:image.png) 즉 b 문자가 있거나 없거나 둘 다 매치되는 경우이다. *, +, ? 메타 문자는 모두 {m, n} 형태로 고쳐 쓰는 것이 가능하지만 가급적 이해하기 쉽고 표현도 간결한 *, +, ? 메타 문자를 사용하는 것이 좋다. 지금까지 아주 기초적인 정규 표현식에 대해서 알아보았다. 알아야 할 것들이 아직 많이 남아 있지만 그에 앞에서 파이썬으로 이러한 정규 표현식을 어떻게 사용할 수 있는지 먼저 알아보기로 하자. ## 파이썬에서 정규 표현식을 지원하는 re 모듈 파이썬은 정규 표현식을 지원하기 위해 re(regular expression의 약어) 모듈을 제공한다. re 모듈은 파이썬을 설치할 때 자동으로 설치되는 기본 라이브러리로 사용 방법은 다음과 같다. > import re > p = re.compile('ab*') re.compile을 사용하여 정규 표현식(위 예에서는 ab*)을 컴파일한다. re.compile의 결과로 돌려주는 객체 p(컴파일된 패턴 객체)를 사용하여 그 이후의 작업을 수행할 것이다. ※ 정규식을 컴파일할 때 특정 옵션을 주는 것도 가능한데, 이에 대해서는 뒤에서 자세히 살펴본다. 
※ 패턴이란 정규식을 컴파일한 결과이다. ``` import re p=re.compile('ab*') m=p.match('abbbb') # 해당 함수는 바로 다음내용에 설명. print(m) # 매치됨. ``` ## 정규식을 이용한 문자열 검색 이제 컴파일된 패턴 객체를 사용하여 문자열 검색을 수행해 보자. 컴파일된 패턴 객체는 다음과 같은 4가지 메서드를 제공한다. ![image.png](attachment:image.png) match, search는 정규식과 매치될 때는 match 객체를 돌려주고, 매치되지 않을 때는 None을 돌려준다. 이들 메서드에 대한 간단한 예를 살펴보자. ※ match 객체란 정규식의 검색 결과로 돌려주는 객체이다. 우선 다음과 같은 패턴을 만들어 보자. > import re > p = re.compile('[a-z]+') ### match match 메서드는 문자열의 처음부터 정규식과 매치되는지 조사한다. 위 패턴에 match 메서드를 수행해 보자. ``` import re p=re.compile('[a-z]+') m=p.match('python') print(m) ``` "python" 문자열은 [a-z]+ 정규식에 부합되므로 match 객체를 돌려준다. ``` m=p.match('3 python') print(m) ``` "3 python" 문자열은 처음에 나오는 문자 3이 정규식 [a-z]+에 부합되지 않으므로 None을 돌려준다. match의 결과로 match 객체 또는 None을 돌려주기 때문에 파이썬 정규식 프로그램은 보통 다음과 같은 흐름으로 작성한다. ``` p = re.compile('[a-z]+') m = p.match( 'string goes here' ) if m: # 객체에 아무 내용이 없는 none이면 false print('Match found: ', m.group()) else: print('No match') ``` 즉 match의 결괏값이 있을 때만 그다음 작업을 수행하겠다는 것이다. ### search 컴파일된 패턴 객체 p를 가지고 이번에는 search 메서드를 수행해 보자. ``` m=p.search("python") print(m) ``` "python" 문자열에 search 메서드를 수행하면 match 메서드를 수행했을 때와 동일하게 매치된다. ``` m=p.search("3 python") print(m) ``` "3 python" 문자열의 첫 번째 문자는 "3"이지만 search는 문자열의 처음부터 검색하는 것이 아니라 문자열 전체를 검색하기 때문에 "3 " 이후의 "python" 문자열과 매치된다. 이렇듯 match 메서드와 search 메서드는 문자열의 처음부터 검색할지의 여부에 따라 다르게 사용해야 한다. ### findall 이번에는 findall 메서드를 수행해 보자. ``` result=p.findall("life is too shor") print(result) ``` "life is too short" 문자열의 'life', 'is', 'too', 'short' 단어를 각각 [a-z]+ 정규식과 매치해서 리스트로 돌려준다. ### finditer 이번에는 finditer 메서드를 수행해 보자. ``` result=p.finditer("life is too short") print(result) for r in result : print(r) ``` finditer는 findall과 동일하지만 그 결과로 반복 가능한 객체(iterator object)를 돌려준다. 반복 가능한 객체가 포함하는 각각의 요소는 match 객체이다. ## match 객체의 메서드 자, 이제 match 메서드와 search 메서드를 수행한 결과로 돌려준 match 객체에 대해 알아보자. 앞에서 정규식을 사용한 문자열 검색을 수행하면서 아마도 다음과 같은 궁금증이 생겼을 것이다. - 어떤 문자열이 매치되었는가? - 매치된 문자열의 인덱스는 어디서부터 어디까지인가? match 객체의 메서드를 사용하면 이 같은 궁금증을 해결할 수 있다. 
다음 표를 보자. method 목적 group() 매치된 문자열을 돌려준다. start() 매치된 문자열의 시작 위치를 돌려준다. end() 매치된 문자열의 끝 위치를 돌려준다. span() 매치된 문자열의 (시작, 끝)에 해당하는 튜플을 돌려준다. 다음 예로 확인해 보자. ``` m=p.match("python") m.group() m.start() m.end() m.span() ``` 예상한 대로 결괏값이 출력되는 것을 확인할 수 있다. match 메서드를 수행한 결과로 돌려준 match 객체의 start()의 결괏값은 항상 0일 수밖에 없다. 왜냐하면 match 메서드는 항상 문자열의 시작부터 조사하기 때문이다. 만약 search 메서드를 사용했다면 start() 값은 다음과 같이 다르게 나올 것이다. ``` m=p.search('3 python') m.group() m.start() m.end() m.span() ``` [모듈 단위로 수행하기] 지금까지 우리는 re.compile을 사용하여 컴파일된 패턴 객체로 그 이후의 작업을 수행했다. re 모듈은 이것을 좀 축약한 형태로 사용할 수 있는 방법을 제공한다. 다음 예를 보자. ``` p=re.compile('[a-z]+') m=p.match("python") ``` 위 코드가 축약된 형태는 다음과 같다. ``` m=re.match("[a-z]+", "python") m.start() ``` 위 예처럼 사용하면 컴파일과 match 메서드를 한 번에 수행할 수 있다. 보통 한 번 만든 패턴 객체를 여러번 사용해야 할 때는 이 방법보다 re.compile을 사용하는 것이 편하다. ## 컴파일 옵션 정규식을 컴파일할 때 다음 옵션을 사용할 수 있다. - DOTALL(S) - . 이 줄바꿈 문자를 포함하여 모든 문자와 매치할 수 있도록 한다. - IGNORECASE(I) - 대소문자에 관계없이 매치할 수 있도록 한다. - MULTILINE(M) - 여러줄과 매치할 수 있도록 한다. (^, $ 메타문자의 사용과 관계가 있는 옵션이다) - VERBOSE(X) - verbose 모드를 사용할 수 있도록 한다. (정규식을 보기 편하게 만들수 있고 주석등을 사용할 수 있게된다.) 옵션을 사용할 때는 re.DOTALL처럼 전체 옵션 이름을 써도 되고 re.S처럼 약어를 써도 된다. ### DOTALL, S . 메타 문자는 줄바꿈 문자(\n)를 제외한 모든 문자와 매치되는 규칙이 있다. 만약 \n 문자도 포함하여 매치하고 싶다면 re.DOTALL 또는 re.S 옵션을 사용해 정규식을 컴파일하면 된다. 다음 예를 보자. ``` import re p=re.compile('a.b') m=p.match("a\nb") print(m) ``` 정규식이 a.b인 경우 문자열 a\nb는 매치되지 않음을 알 수 있다. 왜냐하면 \n은 . 메타 문자와 매치되지 않기 때문이다. \n 문자와도 매치되게 하려면 다음과 같이 re.DOTALL 옵션을 사용해야 한다. ``` p=re.compile('a.b', re.DOTALL) m=p.match('a\nb') print(m) ``` 보통 re.DOTALL 옵션은 여러 줄로 이루어진 문자열에서 \n에 상관없이 검색할 때 많이 사용한다. ### IGNORECASE, I re.IGNORECASE 또는 re.I 옵션은 대소문자 구별 없이 매치를 수행할 때 사용하는 옵션이다. 다음 예제를 보자. ``` p=re.compile('[a-z]',re.I) p.match("python") p.match("Python") p.match("PYTHON") ``` [a-z] 정규식은 소문자만을 의미하지만 re.I 옵션으로 대소문자 구별 없이 매치된다. 
### MULTILINE, M ![image.png](attachment:image.png) ``` import re p=re.compile("^python\s\w+") data = '''python one life is too short python two you need python python three''' print(p.findall(data)) ``` 정규식 ^python\s\w+은 python이라는 문자열로 시작하고 그 뒤에 whitespace, 그 뒤에 단어가 와야 한다는 의미이다. 검색할 문자열 data는 여러 줄로 이루어져 있다. 이 스크립트를 실행하면 위와 같은 결과를 돌려준다. ^ 메타 문자에 의해 python이라는 문자열을 사용한 첫 번째 줄만 매치된 것이다. 하지만 ^ 메타 문자를 문자열 전체의 처음이 아니라 각 라인의 처음으로 인식시키고 싶은 경우도 있을 것이다. 이럴 때 사용할 수 있는 옵션이 바로 re.MULTILINE 또는 re.M이다. 위 코드를 다음과 같이 수정해 보자. ``` p=re.compile("^python\s\w+", re.MULTILINE) data = '''python one life is too short python two you need python python three''' print(p.findall(data)) ``` re.MULTILINE 옵션으로 인해 ^ 메타 문자가 문자열 전체가 아닌 각 줄의 처음이라는 의미를 갖게 되었다. 이 스크립트를 실행하면 위와 같은 결과가 출력된다. 즉 re.MULTILINE 옵션은 ^, $ 메타 문자를 문자열의 각 줄마다 적용해 주는 것이다. ### VERBOSE, X 지금껏 알아본 정규식은 매우 간단하지만 정규식 전문가들이 만든 정규식을 보면 거의 암호수준이다. 정규식을 이해하려면 하나하나 조심스럽게 뜯어보아야만 한다. 이렇게 이해하기 어려운 정규식을 주석 또는 줄 단위로 구분할 수 있다면 얼마나 보기 좋고 이해하기 쉬울까? 방법이 있다. 바로 re.VERBOSE 또는 re.X 옵션을 사용하면 된다. 다음 예를 보자. ``` charref = re.compile(r'&[#](0[0-7]+|[0-9]+|x[0-9a-fA-F]+);') ``` 위 정규식이 쉽게 이해되는가? 이제 다음 예를 보자. ``` charref = re.compile(r""" &[#] # Start of a numeric entity reference ( 0[0-7]+ # Octal form | [0-9]+ # Decimal form | x[0-9a-fA-F]+ # Hexadecimal form ) ; # Trailing semicolon """, re.VERBOSE) ``` 첫 번째와 두 번째 예를 비교해 보면 컴파일된 패턴 객체인 charref는 모두 동일한 역할을 한다. 하지만 정규식이 복잡할 경우 두 번째처럼 주석을 적고 여러 줄로 표현하는 것이 훨씬 가독성이 좋다는 것을 알 수 있다. re.VERBOSE 옵션을 사용하면 문자열에 사용된 whitespace는 컴파일할 때 제거된다(단 [ ] 안에 사용한 whitespace는 제외). 그리고 줄 단위로 #기호를 사용하여 주석문을 작성할 수 있다. ## 백슬래시 문제 정규 표현식을 파이썬에서 사용할 때 혼란을 주는 요소가 한 가지 있는데, 바로 백슬래시(\)이다. 예를 들어 어떤 파일 안에 있는 "\section" 문자열을 찾기 위한 정규식을 만든다고 가정해 보자. >\section 이 정규식은 \s 문자가 whitespace로 해석되어 의도한 대로 매치가 이루어지지 않는다. 위 표현은 다음과 동일한 의미이다. >[ \t\n\r\f\v]ection 의도한 대로 매치하고 싶다면 다음과 같이 변경해야 한다. > \\\section 즉 위 정규식에서 사용한 \ 문자가 문자열 자체임을 알려 주기 위해 백슬래시 2개를 사용하여 이스케이프 처리를 해야 한다. 따라서 위 정규식을 컴파일하려면 다음과 같이 작성해야 한다. 
``` p=re.compile("\\section") ``` 그런데 여기에서 또 하나의 문제가 발견된다. 위처럼 정규식을 만들어서 컴파일하면 실제 파이썬 정규식 엔진에는 파이썬 문자열 리터럴 규칙에 따라 \\이 \로 변경되어 \section이 전달된다. ※ 이 문제는 위와 같은 정규식을 파이썬에서 사용할 때만 발생한다(파이썬의 리터럴 규칙). 유닉스의 grep, vi 등에서는 이러한 문제가 없다. 결국 정규식 엔진에 \\ 문자를 전달하려면 파이썬은 \\\\처럼 백슬래시를 4개나 사용해야 한다. ``` p = re.compile('\\\\section') ``` 이렇게 해야만 원하는 결과를 얻을 수 있다. 하지만 너무 복잡하지 않은가? 만약 위와 같이 \를 사용한 표현이 계속 반복되는 정규식이라면 너무 복잡해서 이해하기 쉽지않을 것이다. 이러한 문제로 인해 파이썬 정규식에는 Raw String 규칙이 생겨나게 되었다. 즉 컴파일해야 하는 정규식이 Raw String임을 알려 줄 수 있도록 파이썬 문법을 만든 것이다. 그 방법은 다음과 같다. ``` p = re.compile(r'\\section') ``` 위와 같이 정규식 문자열 앞에 r 문자를 삽입하면 이 정규식은 Raw String 규칙에 의하여 백슬래시 2개 대신 1개만 써도 2개를 쓴 것과 동일한 의미를 갖게 된다. ※ 만약 백슬래시를 사용하지 않는 정규식이라면 r의 유무에 상관없이 동일한 정규식이 될 것이다.
github_jupyter
``` # Import libraries import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib as mpl from scipy import stats import statsmodels.api as sm import warnings from itertools import product from datetime import datetime warnings.filterwarnings('ignore') plt.style.use('seaborn-poster') import requests from pandas.io.json import json_normalize import math import random import decimal import scipy.linalg import numpy.random as nrand from selenium import webdriver from selenium.common.exceptions import NoSuchElementException import pandas as pd def get_reddit_data(subreddit, date): """ Gets top 26 frontpage titles from 'subreddit' on 'date :param subreddit: ex: 'r/bitcoin' :param date: in 'YYYYMMDD' :return titles: a list of strings of titles """ titles = [] url = "https://web.archive.org/web/" + date + "/reddit.com/" + subreddit #print(url) driver.get(url) try: sitetable = driver.find_element_by_id("siteTable") posts = sitetable.find_elements_by_tag_name("div") for post in posts: if len(post.find_elements_by_class_name("title")) > 0: title = post.find_element_by_class_name("title").text titles.append(title) titles = set(titles) return titles except NoSuchElementException: return ['0'] * 26 def format_date(date): # for way-way-back machine urls """ Reformats date so that wayback machine will like it :param date: in datetime64 :return: """ year = str(date.year) month = str(date.month) if len(month) < 2: month = "0" + month day = str(date.day) if len(day) < 2: day = "0" + day return year + month + day # def get_reddit_dataframe(begin, fin, subreddit, writefile): # """ # Makes a big dataframe indexed by a DatetimeIndex for every day from begin to fin. Values are top reddit posts, columns # separate titles on a given day/row. 
# :param begin: starting date of dataframe # :param fin: ending date of dataframe # :param subreddit: subreddit to scrape # :return: none # """ # timeindex = pd.DatetimeIndex(freq='d', start=begin, end=fin) # data = pd.DataFrame() # for date in timeindex: # fdate = format_date(date) # titles = get_reddit_data(subreddit, fdate) # i = 1 # for title in titles: # probably can do this without a for loop # data.at[date, i] = title # i += 1 # data.to_csv() # driver = webdriver.Firefox() # subreddit = 'r/bitcoin' # begin = '2018-01-31' # fin = '2018-02-13' # #writefile = 'data/text/2018_2_14_832.csv' # #get_reddit_dataframe(begin, fin, subreddit, writefile) # driver.quit() # #pd.read_csv(filepath_or_buffer='test_data/redditData.csv', infer_datetime_format=True) fdate='2018-01-31' #get_reddit_data(subreddit, fdate) sent= pd.read_csv('reddit_sentiment_data.csv') sent['Avg'] = sent.mean(axis=1) sent['Date']=sent['Unnamed: 0'] sent=sent[['Date','Avg']] import datetime sent['Date']=pd.to_datetime(sent['Date'], format='%Y/%m/%d') sent.index=sent['Date'] sent=sent[['Avg']] prices= sent######### prices['Weighted Price']=prices['Avg'] prices=prices[['Weighted Price']] prices.head() # Resampling to daily frequency prices_daily = prices.resample('d').mean() # Resampling to monthly frequency prices_month = prices.resample('M').mean() # Resampling to annual frequency prices_annual = prices.resample('A-DEC').mean() # Resampling to quarterly frequency prices_Q = prices.resample('Q-DEC').mean() b=requests.get('https://www.quandl.com/api/v3/datasets/BCHARTS/BITSTAMPUSD') A=json_normalize(b.json()) data= A['dataset.data'].all() df = pd.DataFrame(data) df.columns=A['dataset.column_names'].all() date=df['Date'] df=df[::-1] #df.reindex(index=range(0,len(date))) df.index= range(0,len(date)) worth=df[['Date','Weighted Price']] import datetime worth['Date']=pd.to_datetime(worth['Date'], format='%Y/%m/%d') worth.index=worth['Date'] worth=worth[['Weighted Price']] 
worth=worth.loc['2015-02-01':'2018-02-09'] # # N=100 # # movmean=[] # # movstd=[] # # for p in range(N,len(prices)): # # A= np.mean(prices[p-N:p]) # # movmean.append(A) # # X=np.std(prices[p-N:p]) # # movstd.append(X) # # plt.plot(movmean,label = 'Moving Mean') # # plt.plot(movstd, label = 'Moving Standard Deviation') rolworth = worth.rolling(window=1,center=True).mean() rolprice = prices.rolling(window=100,center=True).mean() fig = plt.figure(figsize=[30, 10]) fig, ax1 = plt.subplots() ax2 = ax1.twinx() a2=ax2.plot(rolworth, color='red', label='Moving Price Avg') a1=ax1.plot(rolprice, color='blue', label='Amplified Moving Sentiment Avg') ax1.set_xlabel('Time') ax1.set_ylabel('Sentiment Data', color='b') ax2.set_ylabel('Bitcoin Price (USD)', color='r') lns = a1+a2 labs = [l.get_label() for l in lns] ax1.legend(lns, labs, loc=2) plt.show() rolworth = worth.rolling(window=10,center=True).mean() rolworthstd=worth.rolling(window=50,center=True).std() plt.plot(worth,label='Unfiltered') plt.plot(rolworth,color='red', label='Moving Price AVG') plt.plot(rolworthstd, color='blue', label='Moving Price STD') plt.title('Bitcoin Rolling Mean & Standard Deviation') plt.xlabel('Year-Month') plt.ylabel('Bitcoin Price USD') plt.legend() plt.show() ################## prices_trains=prices[0:round(len(prices)*.87)] prices=prices_trains prices fig = plt.figure(figsize=[20, 13]) plt.suptitle('Bitcoin Exchange (Mean) at Different Time Intervals ', fontsize=22) plt.subplot(221) plt.plot(prices_daily['Weighted Price']) plt.ylabel('Price ($)') plt.xlabel('Time (Daily Intervals)') plt.subplot(222) plt.plot(prices_month['Weighted Price']) plt.ylabel('Price ($)') plt.xlabel('Time (Monthly Intervals)') plt.subplot(223) plt.plot(prices_annual['Weighted Price']) plt.ylabel('Price ($)') plt.xlabel('Time (Annual Intervals)') plt.subplot(224) plt.plot(prices_Q['Weighted Price']) plt.ylabel('Price ($)') plt.xlabel('Time (Quarterly Intervals)') plt.show() 
sm.tsa.seasonal_decompose(prices_month['Weighted Price']).plot() plt.show() ``` Non-Stationary Data is data that has constantly changing mean, variance, and covariances over time. (Trends, cycles, random walks (Brownian Motion Approximation) Random Walk= Y[t]=Y[t-1] + E. Variance approaches infinity W/R to time with E being a distributed white noise stoichastic characteristic Random Walk with Drift = Y[t]=Y[t-1] + E + a, a= drift Deterministic Trend = Y[t]= Bt + E + a: Dicky-Fuller: Ho = Data is Non Stationary (there is a unit root). Reject null Hypothesis when P<.05 (Stationary) https://stats.stackexchange.com/questions/29121/intuitive-explanation-of-unit-root Detrending and development of a Stationary Relationship: Y[t]- Y[t-1] = E + a Y[t]- Bt = E + a: BoxCox Tranforms: Creating a Normal Shape for Datum ``` #Normalize the month price data prices_month['Weighted_Price_box'], lmbda = stats.boxcox(prices_month['Weighted Price']) #Determine if this Data is Stationary print("Dickey–Fuller test: p=%f" % sm.tsa.stattools.adfuller(prices_month['Weighted Price'])[1]) #non stationary ## Converting to Stationary Data and then Re-examing with Stationary Test n=12 # 12 month differentiation #Y[t]- Y[t-1] = E (normal) + a prices_month['prices_box_diff'] = prices_month['Weighted_Price_box'] - prices_month.Weighted_Price_box.shift(n) print("Dickey–Fuller test: p=%f" % sm.tsa.stattools.adfuller(prices_month.prices_box_diff[n:])[1]) #stationary #Regular Non Seasonal Differention prices_month['prices_box_diff2'] = prices_month.prices_box_diff - prices_month.prices_box_diff.shift(1) print("Dickey–Fuller test: p=%f" % sm.tsa.stattools.adfuller(prices_month.prices_box_diff2[13:])[1]) ``` AutoCorrelation: Similarity that exists between time series vs. 
lagged time series Ie calculating the relationship between two times series, however these are the same time series where one is lagged HOW MUCH DOES PAST DATA IMPACT FUTURE DATA (Lagged Correlation) - Stock momentum ``` ax = plt.subplot(211) #perform test on normalized and differentiated (stationary) data #AutoCorrelation sm.graphics.tsa.plot_acf(prices_month.prices_box_diff2[13:].values.squeeze(), lags=20, ax=ax) ax = plt.subplot(212) #Partial AutoCorrelation sm.graphics.tsa.plot_pacf(prices_month.prices_box_diff2[13:].values.squeeze(), lags=20, ax=ax) fig = plt.figure(figsize=[30, 10]) from pandas import concat from matplotlib import pyplot from pandas.plotting import scatter_matrix values=prices['Weighted Price'] lags = 8 values = prices['Weighted Price'] #collected prices values columns = [values] #create list for i in range(1,(lags + 1)): #run for loop for 1 to 8 lag iterations/examinations columns.append(values.shift(i)) #shift the data according to each lag then add to the set dataframe = concat(columns, axis=1) #concactinate all these columns columns = ['t+1'] for i in range(1,(lags + 1)): columns.append('t-' + str(i)) #creats columns title t-(lag#) dataframe.columns = columns #set as columns pyplot.figure(1) for i in range(1,(lags + 1)): # plot data ax = pyplot.subplot(240 + i) ax.set_title('t+1 vs t-' + str(i)) pyplot.scatter(x=dataframe['t+1'].values, y=dataframe['t-'+str(i)].values) pyplot.show() ``` - ARIMA:Auto Regressive Integrated Moving Average - AutoRegressive- using past variable of time to predict future (lags of stationarized series) - Integrated- Time series is differenced to be stationarized - Moving Average- lags of forecast error - A stationary series has no trend, its variations around its mean have a constant amplitude, and it wiggles in a consistent fashion - Autocorrelations are the same for stationary data - The ARIMA forecasting equation for a stationary time series is a linear (i.e., regression-type) equation in which the predictors 
consist of lags of the dependent variable and/or lags of the forecast errors - PREDICTED Y= weight of previous values + weight of previous errors Required Prior Knowledge: -Box-Jenkins Model ARIMA(P,D,Q) model: - used on random walk autoregressive models, exponential smoothing P= # of autoregressive terms (lag observations) D= # of nonseasonal differences needed for stationarity: how much differencing is occuring Q= # of lagged forecast errors [moving average window] 1. Identification: Of all the data, select a portion of the Model the best summarizes the data Is the data stationary, and how many differences are needed to make it stationary FOR Time Series: x3 x2 x1 ACF= True Corr of x2 & x1 = (corr(x3,x2)) + PACF(X2,x1) PACF(X2,x1) = ACF- (corr(x3,x2)) ACF- Corr(Observation, Lagged Observation) PACF- Corr(Observation, Lagged Observation) Values that cross the 95% interval are significant AR- if ACF has trail off, hard cutoff PACF (p) MA- if PACF has trail off, hard cutoff ACF (q) Stationary= statistical properties are all constant over time. 
Autocorrelations (prior deviations from mean) are constant
github_jupyter
# Abstract Data Structures: Thinking recursively ## 5.1.1 Identify a situation that requires the use of recursive thinking What is this recursive thing? It's a very practical way of establishing an abstract pattern in our thinking. There are a class of problems that require thinking in a more abstract manner, and trying to identify this pattern of thinking is the subject of this section. First, let's talk about **non-recursive** or **iterative** way of thinking. That's like a procedure, such as brushing your teeth, or the sequence of events on any given day in history. We'll start with this previous function that we wrote that continuously asks the user for valid input ``` // We'll generate 5 integers, finally returning "Y" on the fifth attempt QUEUE = Queue.from_x_characters(4, min="A", max="X") QUEUE.enqueue("Y") sub input(PROMPT) out PROMPT // pop off the stack and if it's a 10, we finally got a yes VALUE = QUEUE.dequeue() if VALUE = 1 then return "Y" else return VALUE end if end sub sub ask_until_valid_nonrecursive(PROMPT, POSSIBLES) // loop infinitely until "break" loop while true RESPONSE = input(PROMPT) if RESPONSE in POSSIBLES then // valid response break // loop stops else // invalid response output RESPONSE, " is not valid!" end if output RESPONSE, " Aahhhhh" return RESPONSE end sub ask_until_valid_nonrecursive("Yes?: ", ["Y"]) ``` This function works just fine. However, there is a way to write this function in a recursive manner. It's a simple example that does not have much use, but we just need to understand what is meant when we say to solve something with recursion. It's basically a function calling itself. Consider this code: ``` def ask_until_valid_recursive(prompt: str, possible_answers: list): response = input(prompt) if response in possible_answers: # valid return response # invalid: recurse! ask_until_valid_recursive(prompt, possible_answers) ask_until_valid_recursive("Get it?", ['Y', 'N']) ``` Basically this function "falls back" on itself. 
Instead of going in a loop, the code continues the same way a loop does, by further calling itself, and repeating this process until something valid is found. Right, so let's discover a limit to this recursion thing by seeing what happens when nothing valid is every found. ``` ask_until_valid_recursive('Nothing you type will be valid: ', []) # infinite loop ``` In the above call, we have not defined anything as being valid, and so this program will go on forever constantly prompting you to type something. Actually … not quite forever. Although it is technically an infinite loop, the fact of the matter is that there are resource limitations, and it will eventually fail. Let's see it fail by typing a recursive function that never ends: ``` def recurse_forever(prompt): recurse_forever(prompt) recurse_forever("Error") ``` Run the above, and it displays `RecursionError: maximum recursion depth exceeded`. This means that the code we ran failed, and in this case it's because of something called a "stack overflow" which is to say that Python doesn't have enough memory to continue keep doing the same thing over and over again, and so fails. But this `ask_until_valid` example is not really an example that requires recursive thinking, because the version that we had worked just fine with an interative approach (and maybe was easier to understand). So what kind of problem does require recursion? It's in a case in which the problem breaks down into an algorthim that depends on previous steps being completed, and depending on **base cases** where the recursion will inevidibly end up. For that, we'll take another loop at making the fibinocci sequence. We need to understand what this sequence of numbers is, which is provided in this formula: ↓ pattern ↓ base cases $F_n = F_{n-1} + F_{n-2}$, where $F_1 = 1$, and $F_0 = 0$. 
That is to say that a number `n` is defined as the "F of (n-1) plus F of (n-2)", but "F of 1 is 1" and F of 0 is 0" The solution to calculate this in a procedural way is this: ``` def fibonnoci_iterative(how_many): """ return the how_many-th number of fibonocci numbers """ # base cases: result = [0, 1] for n in range(2, how_many): # we already have the first two # pattern: new = result[n-2] + result[n-1] result.append(new) return result for result in fibonnoci_iterative(10): print(result, end=" ") def fibonnoci_recurse(n): """ return the fibonocci numbers at index n """ # base cases: if n == 0: return 0 elif n == 1: return 1 # pattern return fibonnoci_recurse(n - 1) + fibonnoci_recurse(n - 2) def fibonnoci_recurse_n_terms(how_many): result = [] for i in range(how_many): value = fibonnoci_recurse(i) result.append(value) return result for item in fibonnoci_recurse_n_terms(10): print(item, end=" ") ``` Okay, so we have some of the theory down. We can see that recusion can be used to solve problems when the problem can be described in terms of itself. We have also seen the case where base cases are required to ensure that we don't have an infinite loop. ## 5.1.2 Identify recursive thinking in a specified problem solution But let's get more specific in understanding the kinds of problems that recursion is great at solving. Above, there were two solutions presented, one that was iterative and the other that was recursive. One could argue that the iterative solution is simpler and more understandable. There is no doubt, however, that the recursive solution is far slower. ``` %%timeit list(fibonnoci_iterative(10)) %%timeit list(fibonnoci_recurse_n_terms(10)) ``` The above times how long it takes for the code to execute. The recursive solution is just painfully slow compared to the iterative approach. So what kind of problem is better solved iteratively? Let's imagine that the school is organizing a day at Sunway Lagoon with the whole school. 
The lead teacher gives his phone number to all of the teachers who attend that day, and they are told to call him whenever something happens.
github_jupyter
# Creating a Sentiment Analysis Web App ## Using PyTorch and SageMaker _Deep Learning Nanodegree Program | Deployment_ --- Now that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review. ## Instructions Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully! In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell. > **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted. ## General Outline Recall the general outline for SageMaker projects using a notebook instance. 1. Download or otherwise retrieve the data. 2. Process / Prepare the data. 3. Upload the processed data to S3. 4. Train a chosen model. 5. Test the trained model (typically using a batch transform job). 6. Deploy the trained model. 7. Use the deployed model. 
For this project, you will be following the steps in the general outline with some modifications. First, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward. In addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app. ## Step 1: Downloading the data As in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/) > Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011. ``` %mkdir ../data !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data ``` ## Step 2: Preparing and Processing the data Also, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set. 
```
import os
import glob

def read_imdb_data(data_dir='../data/aclImdb'):
    """Read the IMDb reviews under data_dir and return (data, labels).

    Both return values are dicts keyed first by split ('train'/'test') and
    then by sentiment ('pos'/'neg'); data holds raw review strings and
    labels holds the aligned 0/1 targets.
    """
    data = {}
    labels = {}

    for data_type in ['train', 'test']:
        data[data_type] = {}
        labels[data_type] = {}

        for sentiment in ['pos', 'neg']:
            data[data_type][sentiment] = []
            labels[data_type][sentiment] = []

            # Reviews live in <data_dir>/<split>/<sentiment>/*.txt, one review per file
            path = os.path.join(data_dir, data_type, sentiment, '*.txt')
            files = glob.glob(path)

            for f in files:
                with open(f) as review:
                    data[data_type][sentiment].append(review.read())
                    # Here we represent a positive review by '1' and a negative review by '0'
                    labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)

            # Sanity check: exactly one label per review
            assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \
                    "{}/{} data size does not match labels size".format(data_type, sentiment)

    return data, labels

data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
            len(data['train']['pos']), len(data['train']['neg']),
            len(data['test']['pos']), len(data['test']['neg'])))
```

Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.
``` from sklearn.utils import shuffle def prepare_imdb_data(data, labels): """Prepare training and test sets from IMDb movie reviews.""" #Combine positive and negative reviews and labels data_train = data['train']['pos'] + data['train']['neg'] data_test = data['test']['pos'] + data['test']['neg'] labels_train = labels['train']['pos'] + labels['train']['neg'] labels_test = labels['test']['pos'] + labels['test']['neg'] #Shuffle reviews and corresponding labels within training and test sets data_train, labels_train = shuffle(data_train, labels_train) data_test, labels_test = shuffle(data_test, labels_test) # Return a unified training data, test data, training labels, test labets return data_train, data_test, labels_train, labels_test train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels) print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X))) ``` Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly. ``` print(train_X[100]) print(train_y[100]) ``` The first step in processing the reviews is to make sure that any html tags that appear should be removed. In addition we wish to tokenize our input, that way words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis. 
``` import nltk from nltk.corpus import stopwords from nltk.stem.porter import * import re from bs4 import BeautifulSoup def review_to_words(review): nltk.download("stopwords", quiet=True) stemmer = PorterStemmer() text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case words = text.split() # Split string into words words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords words = [PorterStemmer().stem(w) for w in words] # stem return words ``` The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set. ``` # TODO: Apply review_to_words to a review (train_X[100] or any other review) review_to_words(train_X[100]) ``` **Question:** Above we mentioned that `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input? **Answer:** 1. remove html elements 2. stemming 3. stopwords removal 4. lower case all the words The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time. 
``` import pickle cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists def preprocess_data(data_train, data_test, labels_train, labels_test, cache_dir=cache_dir, cache_file="preprocessed_data.pkl"): """Convert each review to words; read from cache if available.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = pickle.load(f) print("Read preprocessed data from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Preprocess training and test data to obtain words for each review #words_train = list(map(review_to_words, data_train)) #words_test = list(map(review_to_words, data_test)) words_train = [review_to_words(review) for review in data_train] words_test = [review_to_words(review) for review in data_test] # Write to cache file for future runs if cache_file is not None: cache_data = dict(words_train=words_train, words_test=words_test, labels_train=labels_train, labels_test=labels_test) with open(os.path.join(cache_dir, cache_file), "wb") as f: pickle.dump(cache_data, f) print("Wrote preprocessed data to cache file:", cache_file) else: # Unpack data loaded from cache file words_train, words_test, labels_train, labels_test = (cache_data['words_train'], cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test']) return words_train, words_test, labels_train, labels_test # Preprocess data train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y) ``` ## Transform the data In the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. 
For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`. Since we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews. ### (TODO) Create a word dictionary To begin with, we need to construct a way to map words that appear in the reviews to integers. Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model. > **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'. ``` import numpy as np def build_dict(data, vocab_size = 5000): """Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.""" # TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a # sentence is a list of words. 
word_count = {} # A dict storing the words that appear in the reviews along with how often they occur for review in data: for word in review: if word in word_count: word_count[word] += 1 else: word_count[word] = 1 # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and # sorted_words[-1] is the least frequently appearing word. sorted_dict = sorted(word_count.items(), key = lambda x:x[1], reverse = True) sorted_words = [w for w, v in sorted_dict] sorted_dict = None word_dict = {} # This is what we are building, a dictionary that translates words into integers for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word' word_dict[word] = idx + 2 # 'infrequent' labels return word_dict word_dict = build_dict(train_X) ``` **Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it makes sense that these words appear frequently in the training set? **Answer:**['movi', 'film', 'one', 'like', 'time'] ``` # TODO: Use this space to determine the five most frequently appearing words in the training set. import numpy as np def most_freq(data, vocab_size = 5000): """Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.""" # TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a # sentence is a list of words. word_count = {} # A dict storing the words that appear in the reviews along with how often they occur for review in data: for word in review: if word in word_count: word_count[word] += 1 else: word_count[word] = 1 # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and # sorted_words[-1] is the least frequently appearing word. 
sorted_dict = sorted(word_count.items(), key = lambda x:x[1], reverse = True) sorted_words = [w for w, v in sorted_dict] sorted_dict = None return sorted_words[:5] most_freq(train_X) ``` ### Save `word_dict` Later on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. As such, we will save it to a file now for future use. ``` data_dir = '../data/pytorch' # The folder we will use for storing data if not os.path.exists(data_dir): # Make sure that the folder exists os.makedirs(data_dir) with open(os.path.join(data_dir, 'word_dict.pkl'), "wb") as f: pickle.dump(word_dict, f) ``` ### Transform the reviews Now that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`. ``` def convert_and_pad(word_dict, sentence, pad=500): NOWORD = 0 # We will use 0 to represent the 'no word' category INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict working_sentence = [NOWORD] * pad for word_index, word in enumerate(sentence[:pad]): if word in word_dict: working_sentence[word_index] = word_dict[word] else: working_sentence[word_index] = INFREQ return working_sentence, min(len(sentence), pad) def convert_and_pad_data(word_dict, data, pad=500): result = [] lengths = [] for sentence in data: converted, leng = convert_and_pad(word_dict, sentence, pad) result.append(converted) lengths.append(leng) return np.array(result), np.array(lengths) train_X, train_X_len = convert_and_pad_data(word_dict, train_X) test_X, test_X_len = convert_and_pad_data(word_dict, test_X) ``` As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processeed. 
Does this look reasonable? What is the length of a review in the training set?

```
# Use this cell to examine one of the processed reviews to make sure everything is working as intended.
print(train_X[100], train_X_len[100])
```
**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Why or why not might this be a problem?

**Answer:** It's standard to process both training and testing set in the same way. And only training data is used to built the dictionary.

## Step 3: Upload the data to S3

As in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.

### Save the processed training dataset locally

It is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.

```
import pandas as pd

# Column order matters to the training script: label first, then the review
# length, then the 500 encoded words. No header/index so the CSV is raw rows.
pd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \
        .to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
```
### Uploading the training data

Next, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.

```
import sagemaker

sagemaker_session = sagemaker.Session()

bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker/sentiment_rnn'

role = sagemaker.get_execution_role()

# Uploads everything under data_dir (train.csv AND word_dict.pkl) to
# s3://<bucket>/<prefix>/ and returns the S3 URI for training input.
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
```
**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review.
For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory. ## Step 4: Build and Train the PyTorch Model In the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects - Model Artifacts, - Training Code, and - Inference Code, each of which interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code. We will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. You can see the provided implementation by running the cell below. ``` !pygmentize train/model.py ``` The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise. First we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. However, we can work on a small bit of the data to get a feel for how our training script is behaving. 
``` import torch import torch.utils.data # Read in only the first 250 rows train_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250) # Turn the input pandas dataframe into tensors train_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze() train_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long() # Build the dataset train_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y) # Build the dataloader train_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50) ``` ### (TODO) Writing the training method Next we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later. ``` def train(model, train_loader, epochs, optimizer, loss_fn, device): for epoch in range(1, epochs + 1): model.train() total_loss = 0 for batch in train_loader: batch_X, batch_y = batch batch_X = batch_X.to(device) batch_y = batch_y.to(device) # TODO: Complete this train method to train the model provided. optimizer.zero_grad() out = model.forward(batch_X) loss = loss_fn(out, batch_y) loss.backward() optimizer.step() total_loss += loss.data.item() print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader))) ``` Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose. 
```
import torch.optim as optim
from train.model import LSTMClassifier

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# LSTMClassifier(embedding_dim=32, hidden_dim=100, vocab_size=5000) -- the
# vocab size matches the word_dict built earlier.
model = LSTMClassifier(32, 100, 5000).to(device)
optimizer = optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()

# Quick 5-epoch smoke test on the 250-row sample loaded above.
train(model, train_sample_dl, 5, optimizer, loss_fn, device)
```
In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.

### (TODO) Training the model

When a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.

**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.

The way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.
```
from sagemaker.pytorch import PyTorch

# NOTE(review): train_instance_count/train_instance_type are SageMaker SDK v1
# parameter names -- confirm the SDK version before reusing elsewhere.
estimator = PyTorch(entry_point="train.py",
                    source_dir="train",
                    role=role,
                    framework_version='0.4.0',
                    train_instance_count=1,
                    train_instance_type='ml.p2.xlarge',
                    hyperparameters={
                        'epochs': 10,
                        'hidden_dim': 200,
                    })

# 'training' is the channel name train.py reads its data from.
estimator.fit({'training': input_data})
```
## Step 5: Testing the model

As mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.

## Step 6: Deploy the model for testing

Now that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.

There is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.

**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )

Since we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.

**NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.
This is important to know since the cost of a deployed endpoint depends on how long it has been running for.

In other words **If you are no longer using a deployed endpoint, shut it down!**

**TODO:** Deploy the trained model.

```
# TODO: Deploy the trained model
predictor = estimator.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge')
```
## Step 7 - Use the model for testing

Once deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.

```
# Prepend the review lengths so each row matches the expected input format:
# length, review[500].
test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)

# We split the data into chunks and send each chunk separately, accumulating the results.

def predict(data, rows=512):
    """Send `data` to the endpoint in chunks of ~`rows` rows; return all predictions as one array."""
    # int(n/rows + 1) chunks keeps each request under the endpoint's payload limit.
    split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))
    predictions = np.array([])
    for array in split_array:
        predictions = np.append(predictions, predictor.predict(array))

    return predictions

predictions = predict(test_X.values)
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 labels.
predictions = [round(num) for num in predictions]

from sklearn.metrics import accuracy_score
accuracy_score(test_y, predictions)
```
**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?

**Answer:**It's better precision and it makes sense for the following reasons
1. RNN model will learn from early input and build up prediction power.
2. Sentimental analysis is context dependent project, and RNN suits better for this purpose.
3. The order of text (word) matters.

### (TODO) More testing

We now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string.
For example, suppose we wish to send the following review to our model.

```
test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'
```
The question we now need to answer is, how do we send this review to our model?

Recall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.
 - Removed any html tags and stemmed the input
 - Encoded the review as a sequence of integers using `word_dict`

In order process the review we will need to repeat these two steps.

**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. Remember that our model expects input of the form `review_length, review[500]`.

```
# TODO: Convert test_review into a form usable by the model and save the results in test_data
test_data = review_to_words(test_review)
# convert_and_pad returns (encoded_sentence, length); only element [0] -- the
# encoded words -- is kept here.
# NOTE(review): the length is NOT prepended even though the format above says
# `review_length, review[500]` -- confirm the deployed inference code accepts
# a bare encoded sequence.
test_data = [np.array(convert_and_pad(word_dict, test_data)[0])]
```
Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.

```
predictor.predict(test_data)
```
Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.

### Delete the endpoint

Of course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.

```
estimator.delete_endpoint()
```
## Step 6 (again) - Deploy the model for the web app

Now that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.
As we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code. We will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code. When deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use. - `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model. - `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code. - `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint. - `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete. For the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize. 
### (TODO) Writing inference code

Before writing our custom inference code, we will begin by taking a look at the code which has been provided.

```
!pygmentize serve/predict.py
```
As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.

**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.

### Deploying the model

Now that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.

**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accommodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to send image data.
```
from sagemaker.predictor import RealTimePredictor
from sagemaker.pytorch import PyTorchModel

class StringPredictor(RealTimePredictor):
    """RealTimePredictor that sends plain-text payloads instead of numpy arrays."""

    def __init__(self, endpoint_name, sagemaker_session):
        # content_type='text/plain' tells the endpoint to expect a raw string.
        super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')

# Reuse the artifacts from the earlier training job, but serve them through
# the custom inference code in serve/predict.py.
model = PyTorchModel(model_data=estimator.model_data,
                     role = role,
                     framework_version='0.4.0',
                     entry_point='predict.py',
                     source_dir='serve',
                     predictor_cls=StringPredictor)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
### Testing the model

Now that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.
```
import glob

def test_reviews(data_dir='../data/aclImdb', stop=250):
    """Send up to `stop` raw test reviews per sentiment to the endpoint.

    Returns (ground, results): the true 0/1 labels and the endpoint's
    responses, in matching order.
    """
    results = []
    ground = []

    # We make sure to test both positive and negative reviews
    for sentiment in ['pos', 'neg']:

        path = os.path.join(data_dir, 'test', sentiment, '*.txt')
        files = glob.glob(path)

        files_read = 0

        print('Starting ', sentiment, ' files')

        # Iterate through the files and send them to the predictor
        for f in files:
            with open(f) as review:
                # First, we store the ground truth (was the review positive or negative)
                if sentiment == 'pos':
                    ground.append(1)
                else:
                    ground.append(0)
                # Read in the review and convert to 'utf-8' for transmission via HTTP
                review_input = review.read().encode('utf-8')
                # Send the review to the predictor and store the results
                # NOTE(review): the response is stored as a raw float -- this
                # assumes predict.py already returns a hard 0/1 decision,
                # otherwise accuracy_score below would compare probabilities
                # against integer labels. Confirm against serve/predict.py.
                results.append(float(predictor.predict(review_input)))

            # Sending reviews to our endpoint one at a time takes a while so we
            # only send a small number of reviews
            files_read += 1
            if files_read == stop:
                break

    return ground, results

ground, results = test_reviews()

from sklearn.metrics import accuracy_score
accuracy_score(ground, results)
```
As an additional test, we can try sending the `test_review` that we looked at earlier.

```
predictor.predict(test_review)
```
Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.

## Step 7 (again): Use the model for the web app

> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.

So far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model?
The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.

<img src="Web App Diagram.svg">

The diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.

In the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and receive data from a SageMaker endpoint.

Lastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.

### Setting up a Lambda function

The first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.

#### Part A: Create an IAM Role for the Lambda function

Since we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.
Using the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.

In the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.

Lastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.

#### Part B: Create a Lambda function

Now it is time to actually create the Lambda function.

Using the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. Then, click on **Create Function**.

On the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below.

```python
# We need to use the low-level library to interact with SageMaker since the SageMaker API
# is not available natively through Lambda.
import boto3

def lambda_handler(event, context):
    """Forward the request body to the SageMaker endpoint and return its reply as an HTTP response."""

    # The SageMaker runtime is what allows us to invoke the endpoint that we've created.
    runtime = boto3.Session().client('sagemaker-runtime')

    # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given
    response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created
                                       ContentType = 'text/plain',              # The data format that is expected
                                       Body = event['body'])                    # The actual review

    # The response is an HTTP response whose body contains the result of our inference
    result = response['Body'].read().decode('utf-8')

    # With Lambda proxy integration, API Gateway returns this object verbatim,
    # so it must be a complete HTTP response. The CORS header lets the static
    # web page call the API from any origin.
    return {
        'statusCode' : 200,
        'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },
        'body' : result
    }
```
Once you have copy and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. You can determine the name of the endpoint using the code cell below.

```
predictor.endpoint
```
Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.

### Setting up API Gateway

Now that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.

Using AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.

On the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.

Now we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.

Select the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.

For the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**.
This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway. Type the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created. The last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`. You have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**. ## Step 4: Deploying our web app Now that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier. In the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\*\*REPLACE WITH PUBLIC API URL\*\***. Replace this string with the url that you wrote down in the last step and then save the file. Now, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model. If you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too! 
> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.

**TODO:** Make sure that you include the edited `index.html` file in your project submission.

Now that your web app is working, try playing around with it and see how well it works.

**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?

**Answer:** I tried something simple: 'This is a good movie', and the result is positive. I got another movie review from the internet ---

"The questions are fascinating, the performances are great and the emotions are jacked up to the point of discomfort. This is certainly the best work of Plaza's film career."

And it's positive as well. Looks like the web app is working fine!

### Delete the endpoint

Remember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running so if you forget and leave it on you could end up with an unexpectedly large bill.

```
predictor.delete_endpoint()
```
github_jupyter
\title{Digital Latches with myHDL}
\author{Steven K Armour}
\maketitle

# Refs
@book{brown_vranesic_2014, place={New York, NY}, edition={3}, title={Fundamentals of digital logic with Verilog design}, publisher={McGraw-Hill}, author={Brown, Stephen and Vranesic, Zvonko G}, year={2014} },
@book{lameres_2017, title={Introduction to logic circuits & logic design with Verilog}, publisher={springer}, author={LaMeres, Brock J}, year={2017} }

# Acknowledgments
Author of **myHDL** [Jan Decaluwe](http://www.myhdl.org/users/jandecaluwe.html) and the author of the **myHDL Peeker** [XESS Corp.](https://github.com/xesscorp/myhdlpeek)

[**Draw.io**](https://www.draw.io/)

**Xilinx**

# Python Libraries Utilized
```
import numpy as np
import pandas as pd
from sympy import *
init_printing()
from myhdl import *
from myhdlpeek import *
import random

# python file of convenience tools. Should be located with this notebook
from sympy_myhdl_tools import *
```

# Latches vs Flip-Flops

Latches and flip-flops are both metastable logic circuit topologies in that, once loaded with a state, they hold that state information until that state is upset by a new state or a reset command.
But the diffrance between the two is that Flip-Flops are clock controlled devices built upon Latches where as Latches are not clock dependent # SR-Latch ## Symbol and Internals The Symbol for a SR-Latch and one representation of it's internals is shown below <img style="float: center;" src="SRLatchSymbolInternal.jpg"> ## Definition ## State Diagram ## myHDL SR-Latch Gate and Testing Need Help Getting this Latch via Combo Cirucits working geting AlwayCombError in using out signal as argument in out signals next state out ## myHDL SR-Latch Behavioral and Testing ``` def SRLatch(S_in, rst, Q_out, Qn_out): @always_comb def logic(): if S_in and rst==0: Q_out.next=1 Qn_out.next=0 elif S_in==0 and rst: Q_out.next=0 Qn_out.next=1 elif S_in and rst: Q_out.next=0 Qn_out.next=0 return logic S_in, rst, Q_out, Qn_out=[Signal(bool(0)) for _ in range(4)] Peeker.clear() Peeker(S_in, 'S_in'); Peeker(rst, 'rst') Peeker(Q_out, 'Q_out'); Peeker(Qn_out, 'Qn_out') DUT=SRLatch(S_in=S_in, rst=rst, Q_out=Q_out, Qn_out=Qn_out) inputs=[S_in, rst] sim=Simulation(DUT, Combo_TB(inputs), *Peeker.instances()).run() Peeker.to_wavedrom(start_time=0, stop_time=2*2**len(inputs), tock=True, title='SRLatch Behavioral simulation', caption=f'after clock cycle {2**len(inputs)-1} ->random input') MakeDFfromPeeker(Peeker.to_wavejson(start_time=0, stop_time=2**len(inputs) -1)) ``` ## myHDL SR-Latch Behavioral HDL Synthesis ``` toVerilog(SRLatch, S_in, rst, Q_out, Qn_out) #toVHDL(SRLatch, S_in, rst, Q_out, Qn_out) _=VerilogTextReader('SRLatch') ``` The following shows the **Xilinx**'s _Vivado 2016.1_ RTL generated schematic of our Behaviorla SRLatch from the synthesised verilog code. We can see that the systhizied version is quite apstract from fig lakdfjkaj. 
<img style="float: center;" src="SRLatchBehaviroalRTLSch.PNG"> # Gated SR-Latch ## myHDL SR-Latch Behavioral and Testing ``` def GSRLatch(S_in, rst, ena, Q_out, Qn_out): @always_comb def logic(): if ena: if S_in and rst==0: Q_out.next=1 Qn_out.next=0 elif S_in==0 and rst: Q_out.next=0 Qn_out.next=1 elif S_in and rst: Q_out.next=0 Qn_out.next=0 else: pass return logic S_in, rst, ena, Q_out, Qn_out=[Signal(bool(0)) for _ in range(5)] Peeker.clear() Peeker(S_in, 'S_in'); Peeker(rst, 'rst'); Peeker(ena, 'ena') Peeker(Q_out, 'Q_out'); Peeker(Qn_out, 'Qn_out') DUT=GSRLatch(S_in=S_in, rst=rst, ena=ena, Q_out=Q_out, Qn_out=Qn_out) inputs=[S_in, rst, ena] sim=Simulation(DUT, Combo_TB(inputs), *Peeker.instances()).run() Peeker.to_wavedrom(start_time=0, stop_time=2*2**len(inputs), tock=True, title='GSRLatch Behavioral simulation', caption=f'after clock cycle {2**len(inputs)-1} ->random input') MakeDFfromPeeker(Peeker.to_wavejson(start_time=0, stop_time=2**len(inputs) -1)) ``` ## myHDL SR-Latch Behavioral HDL Synthesis ``` toVerilog(GSRLatch, S_in, rst, ena, Q_out, Qn_out) #toVHDL(GSRLatch, S_in, rst,ena, Q_out, Qn_out) _=VerilogTextReader('GSRLatch') ``` The following shows the **Xilinx**'s _Vivado 2016.1_ RTL generated schematic of our Behaviorla Gated SRLatch from the synthesised verilog code. We can see that the systhizied version is quite apstract from fig lakdfjkaj. 
<img style="float: center;" src="GSRLatchBehaviroalRTLSch.PNG"> # D-Latch ## myHDL Behavioral D-Latch and Testing ``` def DLatch(D_in, ena, Q_out, Qn_out): #Normal Qn_out is not specifed since a not gate is so easily implimented @always_comb def logic(): if ena: Q_out.next=D_in Qn_out.next=not D_in return logic D_in, ena, Q_out, Qn_out=[Signal(bool(0)) for _ in range(4)] Peeker.clear() Peeker(D_in, 'D_in'); Peeker(ena, 'ena') Peeker(Q_out, 'Q_out'); Peeker(Qn_out, 'Qn_out') DUT=DLatch(D_in=D_in, ena=ena, Q_out=Q_out, Qn_out=Qn_out) inputs=[D_in, ena] sim=Simulation(DUT, Combo_TB(inputs), *Peeker.instances()).run() Peeker.to_wavedrom(start_time=0, stop_time=2*2**len(inputs), tock=True, title='DLatch Behavioral simulation', caption=f'after clock cycle {2**len(inputs)-1} ->random input') MakeDFfromPeeker(Peeker.to_wavejson(start_time=0, stop_time=2**len(inputs) -1)) ``` ## myHDL DLatch Behavioral HDL Synthesis ``` toVerilog(DLatch, D_in, ena, Q_out, Qn_out) #toVHDL(DLatch,D_in, ena, Q_out, Qn_out) _=VerilogTextReader('DLatch') ``` The following shows the **Xilinx**'s _Vivado 2016.1_ RTL generated schematic of our myHDL Dlatch with a exsplisit $\bar{Q}$ verilog code. Note that becouse $\bar{Q}$ is not normal declared in HDL code Vivado produced two RTL DLatchs and used a NOT Gate to acount for the negated output <img style="float: center;" src="DLatchBehavioralRTLSch.PNG"> # Examples
github_jupyter
``` %%capture # { display-mode: 'form' } # @title PyTTI-Tools [EzMode]: VQGAN # @markdown ## Setup # @markdown This may take a few minutes. ## 1. Install stuff try: import pytti except ImportError: !pip install kornia pytorch-lightning transformers !pip install jupyter loguru einops PyGLM ftfy regex tqdm hydra-core exrex !pip install seaborn adjustText bunch matplotlib-label-lines !pip install --upgrade gdown !pip install --upgrade git+https://github.com/pytti-tools/AdaBins.git !pip install --upgrade git+https://github.com/pytti-tools/GMA.git !pip install --upgrade git+https://github.com/pytti-tools/taming-transformers.git !pip install --upgrade git+https://github.com/openai/CLIP.git !pip install --upgrade git+https://github.com/pytti-tools/pytti-core.git # These are notebook specific !pip install --upgrade natsort try: import mmc except: # install mmc !git clone https://github.com/dmarx/Multi-Modal-Comparators !pip install poetry !cd Multi-Modal-Comparators; poetry build !cd Multi-Modal-Comparators; pip install dist/mmc*.whl !python Multi-Modal-Comparators/src/mmc/napm_installs/__init__.py from natsort import natsorted from omegaconf import OmegaConf from pathlib import Path import mmc.loaders !python -m pytti.warmup notebook_params = {} def get_output_paths(): outv = [str(p.resolve()) for p in Path('outputs/').glob('**/*.png')] #outv.sort() outv = natsorted(outv) return outv resume = True # @param {type:"boolean"} if resume: inits = get_output_paths() if inits: notebook_params.update({ 'init_image':inits[-1], }) # @markdown ## Basic Settings prompts = "a photograph of albert einstein" # @param {type:"string"} height = 512 # @param {type:"integer"} width = 512 # @param {type:"integer"} cell_params = { "scenes": prompts, "height":height, "width":width, } notebook_params.update(cell_params) # @markdown ## Advanced Settings vqgan_model = "coco" # @param ["coco","sflickr","imagenet","wikiart","openimages"] cell_params = { "vqgan_model": vqgan_model, } 
notebook_params.update(cell_params) invariants = """ ## Invariant settings ## steps_per_frame: 50 steps_per_scene: 500 pixel_size: 1 image_model: VQGAN use_mmc: true mmc_models: - architecture: clip publisher: openai id: ViT-B/16 """ from omegaconf import OmegaConf from pathlib import Path cfg_invariants = OmegaConf.create(invariants) nb_cfg = OmegaConf.create(notebook_params) conf = OmegaConf.merge(cfg_invariants, nb_cfg) with open("config/conf/this_run.yaml", "w") as f: outstr = "# @package _global_\n" outstr += OmegaConf.to_yaml(conf) print(outstr) f.write( outstr ) #Path("config/conf/ezmode/").mkdir(parents=True, exist_ok=True) ## Do the run ! python -m pytti.workhorse conf=this_run # @title Show Outputs from IPython.display import Image, display outputs = list(Path('outputs/').glob('**/*.png')) outputs.sort() im_path = str(outputs[-1]) Image(im_path, height=height, width=width) # @markdown compile images into a video of the generative process from PIL import Image as pilImage from subprocess import Popen, PIPE from tqdm.notebook import tqdm fps = 12 # @param {type:'number'} fpaths = get_output_paths() frames = [] for filename in tqdm(fpaths): frames.append(pilImage.open(filename)) cmd_in = ['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', str(fps), '-i', '-'] cmd_out = ['-vcodec', 'libx264', '-r', str(fps), '-pix_fmt', 'yuv420p', '-crf', '1', '-preset', 'veryslow', f'output.mp4'] cmd = cmd_in + cmd_out p = Popen(cmd, stdin=PIPE) for im in tqdm(frames): im.save(p.stdin, 'PNG') p.stdin.close() print("Encoding video...") p.wait() print("Video saved to output.mp4.") ```
github_jupyter
# Example Script for Logistic Regression for Fishing Detection ``` %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt from patsy import dmatrices from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.model_selection import cross_val_score ``` Load data. This dataset contains coordinates, speeds, and courses for fishing vessels, hand labeled to be fishing or not fishing. ``` data = np.load('/Users/mnksmith/Documents/Oceana_MPA_data/kristina_trawl_Trawlers.measures.labels.npz') df = pd.DataFrame(data['x']) list(df) ``` ## Some preliminary exploration: ``` df.groupby('is_fishing').mean() plt.figure() plt.tight_layout() plt.subplot(1,2,1) plt.xlim(0,20) fishy = df.loc[df.is_fishing==1] unfishy = df.loc[df.is_fishing==0] #plt.hist(fishy.speed, range=(0,20)) plt.hist([fishy['speed'],unfishy['speed']], stacked=True, range = (0,20)) plt.subplot(1,2,2) #measure_speed = 1.0 - min(1, speed-over-ground / 17) plt.hist([fishy['measure_speed'],unfishy['measure_speed']], stacked=True, range = (0,1)) plt.show() ``` The "measure_speed" feature is equivalent to the speed-over-ground, capped at 17 knots. ``` plt.figure() plt.tight_layout() plt.subplot(1,2,1) plt.hist([fishy['course'],unfishy['course']], stacked=True, range = (0,20)) plt.subplot(1,2,2) #measure_course = course-over-ground / 360) plt.hist([fishy['measure_course'],unfishy['measure_course']], stacked=True, range = (0,1)) plt.show() ``` The "measure_course" feature is equivalent to the course-over-ground, divided by 360. 
``` plt.figure() plt.xlim(0,1) plt.hist([fishy['measure_coursestddev_21600'],unfishy['measure_coursestddev_21600']], stacked=True, bins=20, range=(0,1)) plt.show() plt.figure() plt.xlim(0,0.25) plt.hist([fishy['measure_speedstddev_21600'],unfishy['measure_speedstddev_21600']], stacked=True, bins=20, range=(0,0.2)) plt.show() plt.figure() plt.xlim(0,1) plt.hist([fishy['measure_speedavg_21600'].values,unfishy['measure_speedavg_21600'].values], stacked=True, bins=20, range=(0,1)) plt.xlabel('Measure Speed') #plt.show() plt.savefig('example_feature.png', dpi=300) df = fishy[['measure_speedavg_21600']].reset_index() df['measure_speedavg_21600_not_Fishing'] = unfishy['measure_speedavg_21600'] df.head(10) # Create a Pandas Excel writer using XlsxWriter as the engine. writer = pd.ExcelWriter('MPA_feature_example.xlsx', engine='xlsxwriter') # Convert the dataframe to an XlsxWriter Excel object. df.to_excel(writer, sheet_name='Sheet1') # Close the Pandas Excel writer and output the Excel file. writer.save() ``` Now plot a scatter matrix to look for corellations between some select features ``` df_slim = df[['measure_speedavg_21600','measure_speedstddev_21600','measure_coursestddev_21600']].copy() pd.plotting.scatter_matrix(df_slim, c = 'y', figsize = [8, 8], s=150, marker = 'D') ``` ## Logistic Regression Model Each of the key features (average speed, standard deviation of speed, and standard deviation of course) are averaged over 0.5, 1, 3, 6, 12, and 24 hours to fill out the list of features. Powers (up to 6) of each of these features are included to uncover non-linear behavior. For example, from the plots above it is clear that vessels moving very slowly or very quickly are less likely to be fishing, while there's a "sweet spot" in the middle. A linear fit will not capture this behavior. 
``` times = [1800,3600,10800,21600,43200,86400] for p in range(2,7): for time in times: avgspd = 'measure_speedavg_' + str(time) stdspd = 'measure_speedstddev_' + str(time) + '_log' stdcs = 'measure_coursestddev_' + str(time) + '_log' avgspd_p = 'measure_speedavg_' + str(time) + '_' + str(p) stdspd_p = 'measure_speedstddev_' + str(time) + '_log_' + str(p) stdcs_p = 'measure_coursestddev_' + str(time) + '_log_' + str(p) df[avgspd_p] = pow(df[avgspd],p) df[stdspd_p] = pow(df[stdspd],p) df[stdcs_p] = pow(df[stdcs],p) list(df) y, X = dmatrices('is_fishing ~ measure_speedavg_1800 + measure_speedavg_3600 + measure_speedavg_10800 + \ measure_speedavg_21600 + measure_speedavg_43200 + measure_speedavg_86400 + \ measure_speedstddev_1800_log + measure_speedstddev_3600_log + measure_speedstddev_10800_log + \ measure_speedstddev_21600_log + measure_speedstddev_43200_log + measure_speedstddev_86400_log + \ measure_coursestddev_1800_log + measure_coursestddev_3600_log + measure_coursestddev_10800_log + \ measure_coursestddev_21600_log + measure_coursestddev_43200_log + measure_coursestddev_86400_log + \ measure_speedavg_1800_2 + measure_speedavg_3600_2 + measure_speedavg_10800_2 + \ measure_speedavg_21600_2 + measure_speedavg_43200_2 + measure_speedavg_86400_2 + \ measure_speedstddev_1800_log_2 + measure_speedstddev_3600_log_2 + measure_speedstddev_10800_log_2 + \ measure_speedstddev_21600_log_2 + measure_speedstddev_43200_log_2 + measure_speedstddev_86400_log_2 + \ measure_coursestddev_1800_log_2 + measure_coursestddev_3600_log_2 + measure_coursestddev_10800_log_2 + \ measure_coursestddev_21600_log_2 + measure_coursestddev_43200_log_2 + measure_coursestddev_86400_log_2 + \ measure_speedavg_1800_3 + measure_speedavg_3600_3 + measure_speedavg_10800_3 + \ measure_speedavg_21600_3 + measure_speedavg_43200_3 + measure_speedavg_86400_3 + \ measure_speedstddev_1800_log_3 + measure_speedstddev_3600_log_3 + measure_speedstddev_10800_log_3 + \ measure_speedstddev_21600_log_3 + 
measure_speedstddev_43200_log_3 + measure_speedstddev_86400_log_3 + \ measure_coursestddev_1800_log_3 + measure_coursestddev_3600_log_3 + measure_coursestddev_10800_log_3 + \ measure_coursestddev_21600_log_3 + measure_coursestddev_43200_log_3 + measure_coursestddev_86400_log_3 + \ measure_speedavg_1800_4 + measure_speedavg_3600_4 + measure_speedavg_10800_4 + \ measure_speedavg_21600_4 + measure_speedavg_43200_4 + measure_speedavg_86400_4 + \ measure_speedstddev_1800_log_4 + measure_speedstddev_3600_log_4 + measure_speedstddev_10800_log_4 + \ measure_speedstddev_21600_log_4 + measure_speedstddev_43200_log_4 + measure_speedstddev_86400_log_4 + \ measure_coursestddev_1800_log_4 + measure_coursestddev_3600_log_4 + measure_coursestddev_10800_log_4 + \ measure_coursestddev_21600_log_4 + measure_coursestddev_43200_log_4 + measure_coursestddev_86400_log_4 + \ measure_speedavg_1800_5 + measure_speedavg_3600_5 + measure_speedavg_10800_5 + \ measure_speedavg_21600_5 + measure_speedavg_43200_5 + measure_speedavg_86400_5 + \ measure_speedstddev_1800_log_5 + measure_speedstddev_3600_log_5 + measure_speedstddev_10800_log_5 + \ measure_speedstddev_21600_log_5 + measure_speedstddev_43200_log_5 + measure_speedstddev_86400_log_5 + \ measure_coursestddev_1800_log_5 + measure_coursestddev_3600_log_5 + measure_coursestddev_10800_log_5 + \ measure_coursestddev_21600_log_5 + measure_coursestddev_43200_log_5 + measure_coursestddev_86400_log_5 + \ measure_speedavg_1800_6 + measure_speedavg_3600_6 + measure_speedavg_10800_6 + \ measure_speedavg_21600_6 + measure_speedavg_43200_6 + measure_speedavg_86400_6 + \ measure_speedstddev_1800_log_6 + measure_speedstddev_3600_log_6 + measure_speedstddev_10800_log_6 + \ measure_speedstddev_21600_log_6 + measure_speedstddev_43200_log_6 + measure_speedstddev_86400_log_6 + \ measure_coursestddev_1800_log_6 + measure_coursestddev_3600_log_6 + measure_coursestddev_10800_log_6 + \ measure_coursestddev_21600_log_6 + measure_coursestddev_43200_log_6 + 
measure_coursestddev_86400_log_6' , df, return_type="dataframe") print(X.columns) y = np.ravel(y) model = LogisticRegression() model = model.fit(X, y) model.score(X, y) ``` Using the powers of each feature I lifted a 78% accuracy to 88% ``` y.mean() ``` If I guessed "0" every time, I'd get 52% accuracy. Now splitting the whole thing into train and test datasets to evaluate the model: ``` X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=432) model.fit(X_train, y_train) predictions = model.predict(X_test) probas = model.predict_proba(X_test) print(metrics.accuracy_score(y_test, predictions)) print(metrics.roc_auc_score(y_test, probas[:, 1])) ``` The accuracy is still 88% on the validation dataset, the same as when we trained and tested on the same set. ``` print(metrics.confusion_matrix(y_test, predictions)) print(metrics.classification_report(y_test, predictions)) scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10) print(scores.mean()) ``` Over a 10-fold cross-validation, the accuracy dips slightly, to 86% ``` fpr, tpr, _ = metrics.roc_curve(y_test,probas[:,1]) plt.plot(fpr,tpr) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.ylim(0,1) plt.xlim(0,1) #plt.show() plt.savefig('roc_curve.png', dpi=300) ``` Finally, a ROC-curve showing how well the model performs in terms of precision and accuracy.
github_jupyter
<img src="../../images/banners/python-basics.png" width="600"/> # <img src="../../images/logos/python.png" width="23"/> Python Program Lexical Structure You have now covered Python variables, operators, and data types in depth, and you’ve seen quite a bit of example code. Up to now, the code has consisted of short individual statements, simply assigning objects to variables or displaying values. But you want to do more than just define data and display it! Let’s start arranging code into more complex groupings. ## <img src="../../images/logos/toc.png" width="20"/> Table of Contents * [Python Statements](#python_statements) * [Line Continuation](#line_continuation) * [Implicit Line Continuation](#implicit_line_continuation) * [Parentheses](#parentheses) * [Curly Braces](#curly_braces) * [Square Brackets](#square_brackets) * [Explicit Line Continuation](#explicit_line_continuation) * [Multiple Statements Per Line](#multiple_statements_per_line) * [Comments](#comments) * [Whitespace](#whitespace) * [Whitespace as Indentation](#whitespace_as_indentation) * [<img src="../../images/logos/checkmark.png" width="20"/> Conclusion](#<img_src="../../images/logos/checkmark.png"_width="20"/>_conclusion) --- <a class="anchor" id="python_statements"></a> ## Python Statements Statements are the basic units of instruction that the Python interpreter parses and processes. In general, the interpreter executes statements sequentially, one after the next as it encounters them. (You will see in the next tutorial on conditional statements that it is possible to alter this behavior.) In a REPL session, statements are executed as they are typed in, until the interpreter is terminated. When you execute a script file, the interpreter reads statements from the file and executes them until end-of-file is encountered. Python programs are typically organized with one statement per line. 
In other words, each statement occupies a single line, with the end of the statement delimited by the newline character that marks the end of the line. The majority of the examples so far in this tutorial series have followed this pattern: ``` print('Hello, World!') x = [1, 2, 3] print(x[1:2]) ``` <a class="anchor" id="line_continuation"></a> ## Line Continuation Suppose a single statement in your Python code is especially long. For example, you may have an assignment statement with many terms: ``` person1_age = 42 person2_age = 16 person3_age = 71 someone_is_of_working_age = (person1_age >= 18 and person1_age <= 65) or (person2_age >= 18 and person2_age <= 65) or (person3_age >= 18 and person3_age <= 65) someone_is_of_working_age ``` Or perhaps you are defining a lengthy nested list: ``` a = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]] a ``` You’ll notice that these statements are too long to fit in your browser window, and the browser is forced to render the code blocks with horizontal scroll bars. You may find that irritating. (You have our apologies—these examples are presented that way to make the point. It won’t happen again.) It is equally frustrating when lengthy statements like these are contained in a script file. Most editors can be configured to wrap text, so that the ends of long lines are at least visible and don’t disappear out the right edge of the editor window. But the wrapping doesn’t necessarily occur in logical locations that enhance readability: <img src="./images/line-wrap.webp" alt="line-wrap" width=500 align="center" /> Excessively long lines of code are generally considered poor practice. 
In fact, there is an official [Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008) put forth by the Python Software Foundation, and one of its stipulations is that the [maximum line length](https://www.python.org/dev/peps/pep-0008/#maximum-line-length) in Python code should be 79 characters. > **Note:** The **Style Guide for Python Code** is also referred to as **PEP 8**. PEP stands for Python Enhancement Proposal. PEPs are documents that contain details about features, standards, design issues, general guidelines, and information relating to Python. For more information, see the Python Software Foundation [Index of PEPs](https://www.python.org/dev/peps). As code becomes more complex, statements will on occasion unavoidably grow long. To maintain readability, you should break them up into parts across several lines. But you can’t just split a statement whenever and wherever you like. Unless told otherwise, the interpreter assumes that a newline character terminates a statement. If the statement isn’t syntactically correct at that point, an exception is raised: ``` someone_is_of_working_age = person1_age >= 18 and person1_age <= 65 or ``` In Python code, a statement can be continued from one line to the next in two different ways: implicit and explicit line continuation. <a class="anchor" id="implicit_line_continuation"></a> ### Implicit Line Continuation This is the more straightforward technique for line continuation, and the one that is preferred according to PEP 8. Any statement containing opening parentheses (`'('`), brackets (`'['`), or curly braces (`'{'`) is presumed to be incomplete until all matching parentheses, brackets, and braces have been encountered. Until then, the statement can be implicitly continued across lines without raising an error. 
For example, the nested list definition from above can be made much more readable using implicit line continuation because of the open brackets: ``` a = [ [1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25] ] a ``` A long expression can also be continued across multiple lines by wrapping it in grouping parentheses. PEP 8 explicitly advocates using parentheses in this manner when appropriate: ``` someone_is_of_working_age = ( (person1_age >= 18 and person1_age <= 65) or (person2_age >= 18 and person2_age <= 65) or (person3_age >= 18 and person3_age <= 65) ) someone_is_of_working_age ``` If you need to continue a statement across multiple lines, it is usually possible to use implicit line continuation to do so. This is because parentheses, brackets, and curly braces appear so frequently in Python syntax: <a class="anchor" id="parentheses"></a> #### Parentheses - Expression grouping ``` x = ( 1 + 2 + 3 + 4 + 5 + 6 ) x ``` - Function call (functions will be covered later) ``` print( 'foo', 'bar', 'baz' ) ``` - Method call (methods will be covered later) ``` 'abc'.center( 9, '-' ) ``` - Tuple definition ``` t = ( 'a', 'b', 'c', 'd' ) ``` <a class="anchor" id="curly_braces"></a> #### Curly Braces - Dictionary definition ``` d = { 'a': 1, 'b': 2 } ``` - Set definition ``` x1 = { 'foo', 'bar', 'baz' } ``` <a class="anchor" id="square_brackets"></a> #### Square Brackets - List definition ``` a = [ 'foo', 'bar', 'baz', 'qux' ] ``` - Indexing ``` a[ 1 ] ``` - Slicing ``` a[ 1:2 ] ``` - Dictionary key reference ``` d[ 'b' ] ``` > **Note:** Just because something is syntactically allowed, it doesn’t mean you should do it. Some of the examples above would not typically be recommended. Splitting indexing, slicing, or dictionary key reference across lines, in particular, would be unusual. But you can consider it if you can make a good argument that it enhances readability. 
Remember that if there are multiple parentheses, brackets, or curly braces, then implicit line continuation is in effect until they are all closed: ``` a = [ [ ['foo', 'bar'], [1, 2, 3] ], {1, 3, 5}, { 'a': 1, 'b': 2 } ] a ``` Note how line continuation and judicious use of indentation can be used to clarify the nested structure of the list. <a class="anchor" id="explicit_line_continuation"></a> ### Explicit Line Continuation In cases where implicit line continuation is not readily available or practicable, there is another option. This is referred to as explicit line continuation or explicit line joining. Ordinarily, a newline character (which you get when you press _Enter_ on your keyboard) indicates the end of a line. If the statement is not complete by that point, Python will raise a SyntaxError exception: ``` s = x = 1 + 2 + ``` To indicate explicit line continuation, you can specify a backslash (`\`) character as the final character on the line. In that case, Python ignores the following newline, and the statement is effectively continued on next line: ``` s = \ 'Hello, World!' s x = 1 + 2 \ + 3 + 4 \ + 5 + 6 x ``` **Note that the backslash character must be the last character on the line. Not even whitespace is allowed after it:** ``` # You can't see it, but there is a space character following the \ here: s = \ ``` Again, PEP 8 recommends using explicit line continuation only when implicit line continuation is not feasible. <a class="anchor" id="multiple_statements_per_line"></a> ## Multiple Statements Per Line Multiple statements may occur on one line, if they are separated by a semicolon (`;`) character: ``` x = 1; y = 2; z = 3 print(x); print(y); print(z) ``` Stylistically, this is generally frowned upon, and [PEP 8 expressly discourages it](https://www.python.org/dev/peps/pep-0008/?#other-recommendations). There might be situations where it improves readability, but it usually doesn’t. In fact, it often isn’t necessary. 
The following statements are functionally equivalent to the example above, but would be considered more typical Python code: ``` x, y, z = 1, 2, 3 print(x, y, z, sep='\n') ``` > The term **Pythonic** refers to code that adheres to generally accepted common guidelines for readability and “best” use of idiomatic Python. When someone says code is not Pythonic, they are implying that it does not express the programmer’s intent as well as might otherwise be done in Python. Thus, the code is probably not as readable as it could be to someone who is fluent in Python. If you find your code has multiple statements on a line, there is probably a more Pythonic way to write it. But again, if you think it’s appropriate or enhances readability, you should feel free to do it. <a class="anchor" id="comments"></a> ## Comments In Python, the hash character (`#`) signifies a comment. The interpreter will ignore everything from the hash character through the end of that line: ``` a = ['foo', 'bar', 'baz'] # I am a comment. a ``` If the first non-whitespace character on the line is a hash, the entire line is effectively ignored: ``` # I am a comment. # I am too. ``` Naturally, a hash character inside a string literal is protected, and does not indicate a comment: ``` a = 'foobar # I am *not* a comment.' a ``` A comment is just ignored, so what purpose does it serve? Comments give you a way to attach explanatory detail to your code: ``` # Calculate and display the area of a circle. pi = 3.1415926536 r = 12.35 area = pi * (r ** 2) print('The area of a circle with radius', r, 'is', area) ``` Up to now, your Python coding has consisted mostly of short, isolated REPL sessions. In that setting, the need for comments is pretty minimal. Eventually, you will develop larger applications contained across multiple script files, and comments will become increasingly important. 
Good commenting makes the intent of your code clear at a glance when someone else reads it, or even when you yourself read it. Ideally, you should strive to write code that is as clear, concise, and self-explanatory as possible. But there will be times that you will make design or implementation decisions that are not readily obvious from the code itself. That is where commenting comes in. Good code explains how; good comments explain why. Comments can be included within implicit line continuation: ``` x = (1 + 2 # I am a comment. + 3 + 4 # Me too. + 5 + 6) x a = [ 'foo', 'bar', # Me three. 'baz', 'qux' ] a ``` But recall that explicit line continuation requires the backslash character to be the last character on the line. Thus, a comment can’t follow afterward: ``` x = 1 + 2 + \ # I wish to be comment, but I'm not. ``` What if you want to add a comment that is several lines long? Many programming languages provide a syntax for multiline comments (also called block comments). For example, in C and Java, comments are delimited by the tokens `/*` and `*/`. The text contained within those delimiters can span multiple lines: ```c /* [This is not Python!] Initialize the value for radius of circle. Then calculate the area of the circle and display the result to the console. */ ``` Python doesn’t explicitly provide anything analogous to this for creating multiline block comments. To create a block comment, you would usually just begin each line with a hash character: ``` # Initialize value for radius of circle. # # Then calculate the area of the circle # and display the result to the console. pi = 3.1415926536 r = 12.35 area = pi * (r ** 2) print('The area of a circle with radius', r, 'is', area) ``` However, for code in a script file, there is technically an alternative. You saw above that when the interpreter parses code in a script file, it ignores a string literal (or any literal, for that matter) if it appears as statement by itself. 
More precisely, a literal isn’t ignored entirely: the interpreter sees it and parses it, but doesn’t do anything with it. Thus, a string literal on a line by itself can serve as a comment. Since a triple-quoted string can span multiple lines, it can effectively function as a multiline comment. Consider this script file (name it `foo.py` for example): ``` """Initialize value for radius of circle. Then calculate the area of the circle and display the result to the console. """ pi = 3.1415926536 r = 12.35 area = pi * (r ** 2) print('The area of a circle with radius', r, 'is', area) ``` When this script is run, the output appears as follows: ```bash python foo.py The area of a circle with radius 12.35 is 479.163565508706 ``` The triple-quoted string is not displayed and doesn’t change the way the script executes in any way. It effectively constitutes a multiline block comment. Although this works (and was once put forth as a Python programming tip by Guido himself), PEP 8 actually recommends against it. The reason for this appears to be because of a special Python construct called the **docstring**. A docstring is a special comment at the beginning of a user-defined function that documents the function’s behavior. Docstrings are typically specified as triple-quoted string comments, so PEP 8 recommends that other [block comments](https://www.python.org/dev/peps/pep-0008/?#block-comments) in Python code be designated the usual way, with a hash character at the start of each line. However, as you are developing code, if you want a quick and dirty way to comment out as section of code temporarily for experimentation, you may find it convenient to wrap the code in triple quotes. > You will learn more about docstrings in the upcoming tutorial on functions in Python. <a class="anchor" id="whitespace"></a> ## Whitespace When parsing code, the Python interpreter breaks the input up into tokens. 
Informally, tokens are just the language elements that you have seen so far: identifiers, keywords, literals, and operators. Typically, what separates tokens from one another is whitespace: blank characters that provide empty space to improve readability. The most common whitespace characters are the following: |Character| ASCII Code |Literal Expression| |:--|:--|:--| |space| `32` `(0x20)` |`' '`| |tab| `9` `(0x9)` |`'\t'`| |newline| `10` `(0xa)` |`'\n'`| There are other somewhat outdated ASCII whitespace characters such as line feed and form feed, as well as some very esoteric Unicode characters that provide whitespace. But for present purposes, whitespace usually means a space, tab, or newline. ``` x = 3 x=2 ``` Whitespace is mostly ignored, and mostly not required, by the Python interpreter. When it is clear where one token ends and the next one starts, whitespace can be omitted. This is usually the case when special non-alphanumeric characters are involved: ``` x=3;y=12 x+y (x==3)and(x<y) a=['foo','bar','baz'] a d={'foo':3,'bar':4} d x,y,z='foo',14,21.1 (x,y,z) z='foo'"bar"'baz'#Comment z ``` Every one of the statements above has no whitespace at all, and the interpreter handles them all fine. That’s not to say that you should write them that way though. Judicious use of whitespace almost always enhances readability, and your code should typically include some. Compare the following code fragments: ``` value1=100 value2=200 v=(value1>=0)and(value1<value2) value1 = 100 value2 = 200 v = (value1 >= 0) and (value1 < value2) ``` Most people would likely find that the added whitespace in the second example makes it easier to read. On the other hand, you could probably find a few who would prefer the first example. To some extent, it is a matter of personal preference. 
But there are standards for [whitespace in expressions and statements](https://www.python.org/dev/peps/pep-0008/?#whitespace-in-expressions-and-statements) put forth in PEP 8, and you should strongly consider adhering to them as much as possible. ``` x = (1, ) ``` > Note: You can juxtapose string literals, with or without whitespace: > ```python >>> s = "foo"'bar''''baz''' >>> s 'foobarbaz' >>> s = 'foo' "bar" '''baz''' >>> s 'foobarbaz' ``` > The effect is concatenation, exactly as though you had used the + operator. In Python, whitespace is generally only required when it is necessary to distinguish one token from the next. This is most common when one or both tokens are an identifier or keyword. For example, in the following case, whitespace is needed to separate the identifier `s` from the keyword `in`: ``` s = 'bar' s in ['foo', 'bar', 'baz'] sin ['foo', 'bar', 'baz'] ``` Here is an example where whitespace is required to distinguish between the identifier `y` and the numeric constant `20`: ``` y is 20 y is20 ``` In this example, whitespace is needed between two keywords: ``` 'qux' not in ['foo', 'bar', 'baz'] 'qux' notin ['foo', 'bar', 'baz'] ``` Running identifiers or keywords together fools the interpreter into thinking you are referring to a different token than you intended: `sin`, `is20`, and `notin`, in the examples above. All this tends to be rather academic because it isn’t something you’ll likely need to think about much. Instances where whitespace is necessary tend to be intuitive, and you’ll probably just do it by second nature. You should use whitespace where it isn’t strictly necessary as well to enhance readability. Ideally, you should follow the guidelines in PEP 8. 
> **Deep Dive: Fortran and Whitespace** > >The earliest versions of Fortran, one of the first programming languages created, were designed so that all whitespace was completely ignored. Whitespace characters could be optionally included or omitted virtually anywhere—between identifiers and reserved words, and even in the middle of identifiers and reserved words. > >For example, if your Fortran code contained a variable named total, any of the following would be a valid statement to assign it the value 50: > ```fortran total = 50 to tal = 50 t o t a l=5 0 ``` >This was meant as a convenience, but in retrospect it is widely regarded as overkill. It often resulted in code that was difficult to read. Worse yet, it potentially led to code that did not execute correctly. > >Consider this tale from NASA in the 1960s. A Mission Control Center orbit computation program written in Fortran was supposed to contain the following line of code: > ```fortran DO 10 I = 1,100 ``` >In the Fortran dialect used by NASA at that time, the code shown introduces a loop, a construct that executes a body of code repeatedly. (You will learn about loops in Python in two future tutorials on definite and indefinite iteration). > >Unfortunately, this line of code ended up in the program instead: > ```fortran DO 10 I = 1.100 ``` > If you have a difficult time seeing the difference, don’t feel too bad. It took the NASA programmer a couple weeks to notice that there is a period between `1` and `100` instead of a comma. Because the Fortran compiler ignored whitespace, `DO 10 I` was taken to be a variable name, and the statement `DO 10 I = 1.100` resulted in assigning `1.100` to a variable called `DO10I` instead of introducing a loop. > > Some versions of the story claim that a Mercury rocket was lost because of this error, but that is evidently a myth. It did apparently cause inaccurate data for some time, though, before the programmer spotted the error. 
> >Virtually all modern programming languages have chosen not to go this far with ignoring whitespace. <a class="anchor" id="whitespace_as_indentation"></a> ## Whitespace as Indentation There is one more important situation in which whitespace is significant in Python code. Indentation—whitespace that appears to the left of the first token on a line—has very special meaning. In most interpreted languages, leading whitespace before statements is ignored. For example, consider this Windows Command Prompt session: ```bash $ echo foo foo $ echo foo foo ``` > **Note:** In a Command Prompt window, the echo command displays its arguments to the console, like the `print()` function in Python. Similar behavior can be observed from a terminal window in macOS or Linux. In the second statement, four space characters are inserted to the left of the echo command. But the result is the same. The interpreter ignores the leading whitespace and executes the same command, echo foo, just as it does when the leading whitespace is absent. Now try more or less the same thing with the Python interpreter: ```python >>> print('foo') foo >>> print('foo') SyntaxError: unexpected indent ``` > **Note:** Running the above code in jupyter notebook does not raise an error as jupyter notebook ignores the whitespaces at the start of a single line command. Say what? Unexpected indent? The leading whitespace before the second `print()` statement causes a `SyntaxError` exception! In Python, indentation is not ignored. Leading whitespace is used to compute a line’s indentation level, which in turn is used to determine grouping of statements. As yet, you have not needed to group statements, but that will change in the next tutorial with the introduction of control structures. Until then, be aware that leading whitespace matters. <a class="anchor" id="conclusion"></a> ## <img src="../../images/logos/checkmark.png" width="20"/> Conclusion This tutorial introduced you to Python program lexical structure. 
You learned what constitutes a valid Python **statement** and how to use **implicit** and **explicit line continuation** to write a statement that spans multiple lines. You also learned about commenting Python code, and about use of whitespace to enhance readability. Next, you will learn how to group statements into more complex decision-making constructs using **conditional statements**.
github_jupyter
# Load Data From Snowflake ![Snowflake Logo](https://saturn-public-assets.s3.us-east-2.amazonaws.com/example-resources/snowflake.png "doc-image") ## Overview <a href="https://www.snowflake.com/" target='_blank' rel='noopener'>Snowflake</a> is a data platform built for the cloud that allows for fast SQL queries. This example shows how to query data in Snowflake and pull into Saturn Cloud for data science work. We will rely on the <a href="https://docs.snowflake.com/en/user-guide/python-connector.html" target='_blank' rel='noopener'>Snowflake Connector for Python</a> to connect and issue queries from Python code. The images that come with Saturn come with the Snowflake Connector for Python installed. If you are building your own images and want to work with Snowflake, you should include `snowflake-connector-python` in your environment. Before starting this, you should create a Jupyter server resource. See our [quickstart](https://saturncloud.io/docs/start_in_ten/) if you don't know how to do this yet. ## Process ### Add Your Snowflake Credentials to Saturn Cloud Sign in to your Saturn Cloud account and select **Credentials** from the menu on the left. <img src="https://saturn-public-assets.s3.us-east-2.amazonaws.com/example-resources/saturn-credentials-arrow.jpeg" style="width:200px;" alt="Saturn Cloud left menu with arrow pointing to Credentials tab" class="doc-image"> This is where you will add your Snowflake credential information. *This is a secure storage location, and it will not be available to the public or other users without your consent.* At the top right corner of this page, you will find the **New** button. Click here, and you will be taken to the Credentials Creation form. ![Screenshot of Saturn Cloud Create Credentials form](https://saturn-public-assets.s3.us-east-2.amazonaws.com/example-resources/credentials.jpg "doc-image") You will be adding three credentials items: your Snowflake account id, username, and password. 
Complete the form one time for each item. | Credential | Type | Name| Variable Name | |---|---|---|---| | Snowflake account | Environment Variable | `snowflake-account` | `SNOWFLAKE_ACCOUNT` | Snowflake username | Environment Variable |`snowflake-user` | `SNOWFLAKE_USER` | Snowflake user password | Environment Variable |`snowflake-password` | `SNOWFLAKE_PASSWORD` Enter your values into the *Value* section of the credential creation form. The credential names are recommendations; feel free to change them as needed for your workflow. If you are having trouble finding your Snowflake account id, it is the first part of the URL you use to sign into Snowflake. If you use the url `https://AA99999.us-east-2.aws.snowflakecomputing.com/console/login` to login, your account id is `AA99999`. With this complete, your Snowflake credentials will be accessible by Saturn Cloud resources! You will need to restart any Jupyter Server or Dask Clusters for the credentials to populate to those resources. ### Connect to Data From a notebook where you want to connect to Snowflake, you can use the credentials as environment variables and provide any additional arguments, if necessary. ``` import os import snowflake.connector conn_info = { "account": os.environ["SNOWFLAKE_ACCOUNT"], "user": os.environ["SNOWFLAKE_USER"], "password": os.environ["SNOWFLAKE_PASSWORD"], "warehouse": "MY_WAREHOUSE", "database": "MY_DATABASE", "schema": "MY_SCHEMA", } conn = snowflake.connector.connect(**conn_info) ``` If you changed the *variable name* of any of your credentials, simply change them here for them to populate properly. Now you can simply query the database as you would on a local machine.
github_jupyter
``` %matplotlib inline ``` Spatial Transformer Networks Tutorial ===================================== **Author**: `Ghassen HAMROUNI <https://github.com/GHamrouni>`_ .. figure:: /_static/img/stn/FSeq.png In this tutorial, you will learn how to augment your network using a visual attention mechanism called spatial transformer networks. You can read more about the spatial transformer networks in the `DeepMind paper <https://arxiv.org/abs/1506.02025>`__ Spatial transformer networks are a generalization of differentiable attention to any spatial transformation. Spatial transformer networks (STN for short) allow a neural network to learn how to perform spatial transformations on the input image in order to enhance the geometric invariance of the model. For example, it can crop a region of interest, scale and correct the orientation of an image. It can be a useful mechanism because CNNs are not invariant to rotation and scale and more general affine transformations. One of the best things about STN is the ability to simply plug it into any existing CNN with very little modification. ``` # License: BSD # Author: Ghassen Hamrouni from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import datasets, transforms import matplotlib.pyplot as plt import numpy as np plt.ion() # interactive mode ``` Loading the data ---------------- In this post we experiment with the classic MNIST dataset. Using a standard convolutional network augmented with a spatial transformer network. 
``` device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Training dataset train_loader = torch.utils.data.DataLoader( datasets.MNIST(root='data/', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=64, shuffle=True, num_workers=4) # Test dataset test_loader = torch.utils.data.DataLoader( datasets.MNIST(root='data/', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=64, shuffle=True, num_workers=4) ``` Depicting spatial transformer networks -------------------------------------- Spatial transformer networks boils down to three main components : - The localization network is a regular CNN which regresses the transformation parameters. The transformation is never learned explicitly from this dataset, instead the network learns automatically the spatial transformations that enhances the global accuracy. - The grid generator generates a grid of coordinates in the input image corresponding to each pixel from the output image. - The sampler uses the parameters of the transformation and applies it to the input image. .. figure:: /_static/img/stn/stn-arch.png .. Note:: We need the latest version of PyTorch that contains affine_grid and grid_sample modules. 
``` class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) # Spatial transformer localization-network self.localization = nn.Sequential( nn.Conv2d(1, 8, kernel_size=7), nn.MaxPool2d(2, stride=2), nn.ReLU(True), nn.Conv2d(8, 10, kernel_size=5), nn.MaxPool2d(2, stride=2), nn.ReLU(True) ) # Regressor for the 3 * 2 affine matrix self.fc_loc = nn.Sequential( nn.Linear(10 * 3 * 3, 32), nn.ReLU(True), nn.Linear(32, 3 * 2) ) # Initialize the weights/bias with identity transformation self.fc_loc[2].weight.data.zero_() self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float)) # Spatial transformer network forward function def stn(self, x): xs = self.localization(x) xs = xs.view(-1, 10 * 3 * 3) theta = self.fc_loc(xs) theta = theta.view(-1, 2, 3) grid = F.affine_grid(theta, x.size()) x = F.grid_sample(x, grid) return x def forward(self, x): # transform the input x = self.stn(x) # Perform the usual forward pass x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) model = Net().to(device) ``` Training the model ------------------ Now, let's use the SGD algorithm to train the model. The network is learning the classification task in a supervised way. In the same time the model is learning STN automatically in an end-to-end fashion. 
``` optimizer = optim.SGD(model.parameters(), lr=0.01) def train(epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % 500 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) # # A simple test procedure to measure STN the performances on MNIST. # def test(): with torch.no_grad(): model.eval() test_loss = 0 correct = 0 for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) # sum up batch loss test_loss += F.nll_loss(output, target, size_average=False).item() # get the index of the max log-probability pred = output.max(1, keepdim=True)[1] correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n' .format(test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) ``` Visualizing the STN results --------------------------- Now, we will inspect the results of our learned visual attention mechanism. We define a small helper function in order to visualize the transformations while training. ``` def convert_image_np(inp): """Convert a Tensor to numpy image.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) return inp # We want to visualize the output of the spatial transformers layer # after the training, we visualize a batch of input images and # the corresponding transformed batch using STN. 
def visualize_stn(): with torch.no_grad(): # Get a batch of training data data = next(iter(test_loader))[0].to(device) input_tensor = data.cpu() transformed_input_tensor = model.stn(data).cpu() in_grid = convert_image_np( torchvision.utils.make_grid(input_tensor)) out_grid = convert_image_np( torchvision.utils.make_grid(transformed_input_tensor)) # Plot the results side-by-side f, axarr = plt.subplots(1, 2) axarr[0].imshow(in_grid) axarr[0].set_title('Dataset Images') axarr[1].imshow(out_grid) axarr[1].set_title('Transformed Images') for epoch in range(1, 20 + 1): train(epoch) test() # Visualize the STN transformation on some input batch visualize_stn() plt.ioff() plt.show() ```
github_jupyter
``` %matplotlib inline import torch import torch.nn as nn import matplotlib.pyplot as pt import numpy as np device = torch.device('cpu') # if torch.cuda.is_available() > 0: # print("Using GPU") # device = torch.device('cuda') # else: # print("Using CPU") # Sizes of layers and batch size n_in, n_h, n_out = 784, 200, 10 # Load test data import pandas as pd data = pd.read_csv("./train.csv").values # print("Data length: {}".format(len(data))) # training dataset train_size = len(data) train_data = data[0:train_size, 1:] train_label = data[0:train_size,0] # testing data # xtest = data[21000:, 1:] # actual_label = data[21000:, 0] test_index = 0 d = train_data[test_index] d.shape=(28,28) pt.imshow(255-d,cmap='gray') pt.show() ``` ### Training ``` %%time # print(len(trainloader)) # Create model model = nn.Sequential( nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_out), nn.Sigmoid() ) # if torch.cuda.is_available() > 0: model.to(device) # Criterion criterion = torch.nn.MSELoss() # criterion = nn.CrossEntropyLoss() # Optimizer function optimizer = torch.optim.SGD(model.parameters(), lr=0.001) # Cast as double model.double() # Generate correct results correct_results = [] for i in range(train_size): correct = [] for ci in range(10): if train_label[i] == ci: correct.append(1) else: correct.append(0) correct = torch.from_numpy(np.array(correct).astype(float)).to(device) correct_results.append(correct) # Train stats_step = 5000 for epoch in range(20): running_loss = 0.0 print("epoch: {}".format(epoch)) for i, data in enumerate(train_data, 0): # Load input input = torch.from_numpy(data.astype(float)).to(device) # Generate prediction prediction = model(input) # Correct result correct = correct_results[i] # Compute loss loss = criterion(prediction, correct) # Zero the gradients optimizer.zero_grad() # Perform a backward pass loss.backward() # Update parameters optimizer.step() # Print stats running_loss += loss.item() if (i % stats_step == 0) and (i > 0): print("[{}] loss: 
{}".format(i, running_loss / stats_step)) running_loss = 0 # Test model from PIL import Image im = Image.open('test1.png') pix = im.load() w, h = im.size pixel_list = [] for x in range(0, w): for y in range(0, h): pixel_list.append(int(255 - sum(pix[y, x][0:3]) / 3)) for x in range(0, w): print() for y in range(0, h): print(1 if pixel_list[x*28 + y] >= 128 else ' ', end=' ') print() input = torch.from_numpy(np.array(pixel_list).astype(float)).to(device) prediction = model(input) max_v = 0 max_n = 0 for n, v in enumerate(prediction): if v > max_v: max_n = n max_v = v if max_v < 0.001: print("Uncertain") for i in prediction: print("{0:.50f}".format(i)) print("number = {}".format(max_n)) # train_size = len(data) # train_data = data[0:train_size, 1:] # train_label = data[0:train_size,0] certain_correct = 0 certain_count = 0 uncertain_correct = 0 uncertain_count = 0 for i in range(train_size): data = train_data[i] input = torch.from_numpy(data.astype(float)).to(device) prediction = model(input) max_v = 0 max_n = 0 for n, v in enumerate(prediction): if v > max_v: max_n = n max_v = v if (max_v > 0.001): certain_count += 1 if (max_n == train_label[i]): certain_correct += 1 if (max_v < 0.001): uncertain_count += 1 if (max_n == train_label[i]): uncertain_correct += 1 if (i % stats_step == 0) and (i > 0): print("[{}] Certain correct {}".format(i, certain_correct / certain_count * 100)) print("[{}] Uncertain correct {}".format(i, uncertain_correct / uncertain_count * 100)) print("[{}] Correct percentage {}".format(i, (certain_correct + uncertain_correct) / i * 100)) print("Final statistics") print("Certain correct {}".format(certain_correct / certain_count * 100)) print("Uncertain correct {}".format(uncertain_correct / uncertain_count * 100)) print("Correct percentage {}".format((certain_correct + uncertain_correct) / train_size * 100)) print("Certain {}".format(certain_count)) print("Uncertain {}".format(uncertain_count)) ```
github_jupyter
<a href="https://colab.research.google.com/github/PyTorchLightning/lightning-flash/blob/master/flash_notebooks/image_classification.ipynb" target="_parent"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> In this notebook, we'll go over the basics of lightning Flash by finetuning/predicting with an ImageClassifier on [Hymenoptera Dataset](https://www.kaggle.com/ajayrana/hymenoptera-data) containing ants and bees images. # Finetuning Finetuning consists of four steps: - 1. Train a source neural network model on a source dataset. For computer vision, it is traditionally the [ImageNet dataset](http://www.image-net.org/search?q=cat). As training is costly, libraries such as the [Torchvision](https://pytorch.org/docs/stable/torchvision/index.html) library support popular pre-trained model architectures. In this notebook, we will be using their [resnet-18](https://pytorch.org/hub/pytorch_vision_resnet/). - 2. Create a new neural network called the target model. Its architecture replicates the source model and parameters, except the last layer, which is removed. This model without its last layer is traditionally called a backbone. - 3. Add new layers after the backbone where the final output size is the number of target dataset categories. Those new layers, traditionally called the head, will be randomly initialized, while the backbone will preserve its pre-trained weights from ImageNet. - 4. Train the target model on a target dataset, such as Hymenoptera Dataset with ants and bees. However, freezing some layers at training start such as the backbone tends to be more stable. In Flash, it can easily be done with `trainer.finetune(..., strategy="freeze")`. It is also common to `freeze/unfreeze` the backbone. In `Flash`, it can be done with `trainer.finetune(..., strategy="freeze_unfreeze")`. 
If one wants more control on the unfreeze flow, Flash supports `trainer.finetune(..., strategy=MyFinetuningStrategy())` where `MyFinetuningStrategy` is subclassing `pytorch_lightning.callbacks.BaseFinetuning`. --- - Give us a ⭐ [on Github](https://www.github.com/PytorchLightning/pytorch-lightning/) - Check out [Flash documentation](https://lightning-flash.readthedocs.io/en/latest/) - Check out [Lightning documentation](https://pytorch-lightning.readthedocs.io/en/latest/) - Join us [on Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-f6bl2l0l-JYMK3tbAgAmGRrlNr00f1A) ``` %%capture ! pip install git+https://github.com/PyTorchLightning/pytorch-flash.git ``` ### The notebook runtime has to be re-started once Flash is installed. ``` # https://github.com/streamlit/demo-self-driving/issues/17 if 'google.colab' in str(get_ipython()): import os os.kill(os.getpid(), 9) import flash from flash.data.utils import download_data from flash.vision import ImageClassificationData, ImageClassifier ``` ## 1. Download data The data are downloaded from a URL, and save in a 'data' directory. ``` download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", 'data/') ``` <h2>2. Load the data</h2> Flash Tasks have built-in DataModules that you can use to organize your data. Pass in a train, validation and test folders and Flash will take care of the rest. Creates a ImageClassificationData object from folders of images arranged in this way:</h4> train/dog/xxx.png train/dog/xxy.png train/dog/xxz.png train/cat/123.png train/cat/nsdf3.png train/cat/asd932.png Note: Each sub-folder content will be considered as a new class. ``` datamodule = ImageClassificationData.from_folders( train_folder="data/hymenoptera_data/train/", val_folder="data/hymenoptera_data/val/", test_folder="data/hymenoptera_data/test/", ) ``` ### 3. Build the model Create the ImageClassifier task. 
By default, the ImageClassifier task uses a [resnet-18](https://pytorch.org/hub/pytorch_vision_resnet/) backbone to train or finetune your model. For [Hymenoptera Dataset](https://www.kaggle.com/ajayrana/hymenoptera-data) containing ants and bees images, ``datamodule.num_classes`` will be 2. The backbone can easily be changed with `ImageClassifier(backbone="resnet50")` or you could provide your own `ImageClassifier(backbone=my_backbone)` ``` model = ImageClassifier(num_classes=datamodule.num_classes) ``` ### 4. Create the trainer. Run once on data The trainer object can be used for training or fine-tuning tasks on new sets of data. You can pass in parameters to control the training routine - limit the number of epochs, run on GPUs or TPUs, etc. For more details, read the [Trainer Documentation](https://pytorch-lightning.readthedocs.io/en/latest/trainer.html). In this demo, we will limit the fine-tuning to run for just a few epochs using max_epochs=3. ``` trainer = flash.Trainer(max_epochs=3) ``` ### 5. Finetune the model ``` trainer.finetune(model, datamodule=datamodule, strategy="freeze_unfreeze") ``` ### 6. Test the model ``` trainer.test() ``` ### 7. Save it! ``` trainer.save_checkpoint("image_classification_model.pt") ``` # Predicting ### 1. Load the model from a checkpoint ``` model = ImageClassifier.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/image_classification_model.pt") ``` ### 2a. Predict what's on a few images! ants or bees? ``` predictions = model.predict([ "data/hymenoptera_data/val/bees/65038344_52a45d090d.jpg", "data/hymenoptera_data/val/bees/590318879_68cf112861.jpg", "data/hymenoptera_data/val/ants/540543309_ddbb193ee5.jpg", ]) print(predictions) ``` ### 2b. Or generate predictions with a whole folder! 
``` datamodule = ImageClassificationData.from_folders(predict_folder="data/hymenoptera_data/predict/") predictions = flash.Trainer().predict(model, datamodule=datamodule) print(predictions) ``` <code style="color:#792ee5;"> <h1> <strong> Congratulations - Time to Join the Community! </strong> </h1> </code> Congratulations on completing this notebook tutorial! If you enjoyed it and would like to join the Lightning movement, you can do so in the following ways! ### Help us build Flash by adding support for new data-types and new tasks. Flash aims at becoming the first task hub, so anyone can get started to great amazing application using deep learning. If you are interested, please open a PR with your contributions !!! ### Star [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) on GitHub The easiest way to help our community is just by starring the GitHub repos! This helps raise awareness of the cool tools we're building. * Please, star [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) ### Join our [Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-f6bl2l0l-JYMK3tbAgAmGRrlNr00f1A)! The best way to keep up to date on the latest advancements is to join our community! Make sure to introduce yourself and share your interests in `#general` channel ### Interested by SOTA AI models ! Check out [Bolt](https://github.com/PyTorchLightning/lightning-bolts) Bolts has a collection of state-of-the-art models, all implemented in [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) and can be easily integrated within your own projects. * Please, star [Bolt](https://github.com/PyTorchLightning/lightning-bolts) ### Contributions ! The best way to contribute to our community is to become a code contributor! At any time you can go to [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) or [Bolt](https://github.com/PyTorchLightning/lightning-bolts) GitHub Issues page and filter for "good first issue". 
* [Lightning good first issue](https://github.com/PyTorchLightning/pytorch-lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) * [Bolt good first issue](https://github.com/PyTorchLightning/lightning-bolts/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) * You can also contribute your own notebooks with useful examples ! ### Great thanks from the entire Pytorch Lightning Team for your interest ! <img src="https://raw.githubusercontent.com/PyTorchLightning/lightning-flash/18c591747e40a0ad862d4f82943d209b8cc25358/docs/source/_static/images/logo.svg" width="800" height="200" />
github_jupyter
```
# ---------------------------------------------------------------------
# ETL notebook: trim World Bank indicator extracts (GDP per capita, CPI,
# unemployment) to EU countries and years 2005-2019, then write tidy
# (long-format) CSVs for downstream loading.
# ---------------------------------------------------------------------
import pandas as pd
import pathlib

# ----- GDP per capita -----
# Store filepath in a variable
GDP_path = pathlib.Path("../Extraction/GDP_per_capita.csv")
# Read and display the CSV with Pandas
GDP_file_df = pd.read_csv(GDP_path, encoding="ISO-8859-1")
GDP_file_df
# Delete columns: 1960 through 2004, Indicator Name, and Indicator Code
GDP_DropC_df = GDP_file_df.drop(GDP_file_df.iloc[:, 2:49], axis=1)
GDP_DropC_df.head()
# Delete column (2020)
GDP_Drop2020_df = GDP_DropC_df.drop(columns="2020")
GDP_Drop2020_df.head()
# List all the columns in the table
GDP_Drop2020_df.columns

# Load in file
EU_Country_path = "../Extraction/EU_Countries.csv"
# Read and display the CSV with Pandas
EU_file_df = pd.read_csv(EU_Country_path)
GDP_file_df.head()
# Merge two dataframes using a left join
# (left join keeps every EU country even if the indicator file lacks a row)
merge_GDP_df = pd.merge(
    left=EU_file_df,
    right=GDP_Drop2020_df,
    on="Country Code",
    how="left"
)
merge_GDP_df
# List all the columns in the table
merge_GDP_df.columns
# Delete column (Country Name_y)
merge_GDP_df_Clean = (merge_GDP_df
                      .drop(columns="Country Name_y")
                      .round(decimals=0))
merge_GDP_df_Clean.head()
# Rename column (Country Name_x)
Final_GDP_df_Clean = merge_GDP_df_Clean.rename(columns={"Country Name_x": "Country Name"})
Final_GDP_df_Clean

# Unpivot three dataframes.
# melt: wide year columns -> one (Year, GDP) row per country-year
FV_GDP_df = pd.melt(Final_GDP_df_Clean, id_vars=["Country Name", "Country Code"], var_name='Year', value_name='GDP')
FV_GDP_df
FV_GDP_Sort_df = FV_GDP_df.sort_values(['Country Name', 'Year'], ascending=[True, True])
FV_GDP_Sort_df
# Previous year's GDP within each country (first year per country is NaN)
GDP_lag_df = FV_GDP_Sort_df.groupby(['Country Name'])['GDP'].shift(1)
GDP_lag_df
FV_GDP_Sort_df['lastValue'] = ""
FV_GDP_Sort_df
FV_GDP_Sort_df['lastValue'] = GDP_lag_df
FV_GDP_Sort_df
# Backfill: where there is no prior year, fall back to the current GDP
FV_GDP_Sort_df.loc[FV_GDP_Sort_df['lastValue'].isnull(), 'lastValue'] = FV_GDP_Sort_df['GDP']
FV_GDP_Sort_df
FV_GDP_Sort_df.to_csv("Resources/Clean_GDP.csv", index=False)

# ----- CPI -----
# Dependencies
import pandas as pd
import pathlib
# Load in file
CPI_path = "../Extraction/CPI.csv"
# Read and display the CSV with Pandas
CPI_file_df = pd.read_csv(CPI_path)
CPI_file_df.head()
# Delete columns: 1960 through 2004, Indicator Name, and Indicator Code
CPI_DropC_df = CPI_file_df.drop(CPI_file_df.iloc[:, 2:49], axis=1)
CPI_DropC_df.head()
# Delete column (2020)
CPI_Drop2020_df = CPI_DropC_df.drop(columns="2020")
CPI_Drop2020_df.head()
# List all the columns in the table
CPI_Drop2020_df.columns
# Merge two dataframes using a left join
merge_CPI_df = pd.merge(
    left=EU_file_df,
    right=CPI_Drop2020_df,
    on="Country Code",
    how="left"
)
merge_CPI_df
# Delete column (Country Name_y)
merge_CPI_df_Clean = merge_CPI_df.drop(columns="Country Name_y")
merge_CPI_df_Clean.head()
# Rename column (Country Name_x)
Final_CPI_df_Clean = merge_CPI_df_Clean.rename(columns={"Country Name_x": "Country Name"})
Final_CPI_df_Clean
Final_CPI_df_ND = Final_CPI_df_Clean.round(decimals=2)
Final_CPI_df_ND
# Year-over-year percentage change of the index across the year columns;
# the first year (2005) has no prior year, so it is dropped.
Final_CPI_pc_df = (Final_CPI_df_ND
                   .set_index(["Country Name", "Country Code"])
                   .pct_change(axis='columns')
                   .drop(['2005'], axis=1))
Final_CPI_pc_df
Final_CPI_pc_NI_df = (Final_CPI_pc_df
                      .reset_index()
                      .round(decimals=3))
Final_CPI_pc_NI_df
# Convert fractions to percentages in-place for all numeric columns
Final_CPI_pc_NI_df[Final_CPI_pc_NI_df.select_dtypes(include=['number']).columns] *= 100
Final_CPI_pc_NI_df

# Unpivot three dataframes.
FV_CPI_df = pd.melt(Final_CPI_pc_NI_df, id_vars=["Country Name", "Country Code"], var_name='Year', value_name='CPI')
FV_CPI_df
FV_CPI_Sort_df = FV_CPI_df.sort_values(['Country Name', 'Year'], ascending=[True, True])
FV_CPI_Sort_df
FV_CPI_Sort_df.to_csv("Resources/Clean_CPI.csv", index=False)

# ----- Unemployment -----
# Dependencies
import pandas as pd
import pathlib
# Store filepath in a variable
Unemployment_path = pathlib.Path("../Extraction/unemployment.csv")
Unemployment_file = pd.read_csv(Unemployment_path, encoding="ISO-8859-1")
Unemployment_file
# Delete columns: 1960 through 2004, Indicator Name, and Indicator Code
Unemp_DropC_df = Unemployment_file.drop(Unemployment_file.iloc[:, 2:49], axis=1)
Unemp_DropC_df.head()
# Delete column (2020)
Unemp_Drop2020_df = Unemp_DropC_df.drop(columns="2020")
Unemp_Drop2020_df.head()
# List all the columns in the table
Unemp_Drop2020_df.columns
# Merge two dataframes using a left join
merge_Unemp_df = pd.merge(
    left=EU_file_df,
    right=Unemp_Drop2020_df,
    on="Country Code",
    how="left"
)
merge_Unemp_df
# Delete column (Country Name_y)
merge_Unemp_df_Clean = merge_Unemp_df.drop(columns="Country Name_y")
merge_Unemp_df_Clean.head()
# Rename column (Country Name_x)
Final_Unemp_df_Clean = merge_Unemp_df_Clean.rename(columns={"Country Name_x": "Country Name"})
Final_Unemp_df_Clean
FV_Unemp_df_ND = Final_Unemp_df_Clean.round(decimals=2)
FV_Unemp_df_ND
# Unpivot three dataframes.
FV_Unemp_df = pd.melt(FV_Unemp_df_ND, id_vars=["Country Name", "Country Code"], var_name='Year', value_name='Unemployment')
FV_Unemp_df
# NOTE(review): this sort is not assigned back, so the saved CSV below is
# unsorted -- confirm whether sorting was intended to persist.
FV_Unemp_df.sort_values("Country Name")
FV_Unemp_df_ND = FV_Unemp_df.round(decimals=2)
FV_Unemp_df_ND
FV_Unemp_df_ND.to_csv("Resources/Clean_Unemployment.csv", index=False)
```
github_jupyter
# Symbolic Regression This example combines neural differential equations with regularised evolution to discover the equations $\frac{\mathrm{d} x}{\mathrm{d} t}(t) = \frac{y(t)}{1 + y(t)}$ $\frac{\mathrm{d} y}{\mathrm{d} t}(t) = \frac{-x(t)}{1 + x(t)}$ directly from data. **References:** This example appears as an example in: ```bibtex @phdthesis{kidger2021on, title={{O}n {N}eural {D}ifferential {E}quations}, author={Patrick Kidger}, year={2021}, school={University of Oxford}, } ``` Whilst drawing heavy inspiration from: ```bibtex @inproceedings{cranmer2020discovering, title={{D}iscovering {S}ymbolic {M}odels from {D}eep {L}earning with {I}nductive {B}iases}, author={Cranmer, Miles and Sanchez Gonzalez, Alvaro and Battaglia, Peter and Xu, Rui and Cranmer, Kyle and Spergel, David and Ho, Shirley}, booktitle={Advances in Neural Information Processing Systems}, publisher={Curran Associates, Inc.}, year={2020}, } @software{cranmer2020pysr, title={PySR: Fast \& Parallelized Symbolic Regression in Python/Julia}, author={Miles Cranmer}, publisher={Zenodo}, url={http://doi.org/10.5281/zenodo.4041459}, year={2020}, } ``` This example is available as a Jupyter notebook [here](https://github.com/patrick-kidger/diffrax/blob/main/examples/symbolic_regression.ipynb). ``` import tempfile from typing import List import equinox as eqx # https://github.com/patrick-kidger/equinox import jax import jax.numpy as jnp import optax # https://github.com/deepmind/optax import pysr # https://github.com/MilesCranmer/PySR import sympy # Note that PySR, which we use for symbolic regression, uses Julia as a backend. # You'll need to install a recent version of Julia if you don't have one. # (And can get funny errors if you have a too-old version of Julia already.) # You may also need to restart Python after running `pysr.install()` the first time. pysr.silence_julia_warning() pysr.install(quiet=True) ``` Now for a bunch of helpers. We'll use these in a moment; skip over them for now. 
```
def quantise(expr, quantise_to):
    """Round every float literal in *expr* to the nearest multiple of *quantise_to*."""
    if isinstance(expr, sympy.Float):
        return expr.func(round(float(expr) / quantise_to) * quantise_to)
    elif isinstance(expr, sympy.Symbol):
        return expr
    else:
        return expr.func(*[quantise(arg, quantise_to) for arg in expr.args])


class SymbolicFn(eqx.Module):
    """Wraps a PySR-exported JAX callable together with its fitted parameters."""

    fn: callable
    parameters: jnp.ndarray

    def __call__(self, x):
        # Dummy batch/unbatching. PySR assumes its JAX'd symbolic functions act on
        # tensors with a single batch dimension.
        return jnp.squeeze(self.fn(x[None], self.parameters))


class Stack(eqx.Module):
    """Applies several modules to the same input and stacks their outputs on the last axis."""

    modules: List[eqx.Module]

    def __call__(self, x):
        return jnp.stack([module(x) for module in self.modules], axis=-1)


def expr_size(expr):
    """Number of nodes in the sympy expression tree (a complexity measure)."""
    return sum(expr_size(v) for v in expr.args) + 1


def _replace_parameters(expr, parameters, i_ref):
    # i_ref is a single-element list used as a mutable counter shared across recursion.
    if isinstance(expr, sympy.Float):
        i_ref[0] += 1
        return expr.func(parameters[i_ref[0]])
    elif isinstance(expr, sympy.Symbol):
        return expr
    else:
        return expr.func(
            *[_replace_parameters(arg, parameters, i_ref) for arg in expr.args]
        )


def replace_parameters(expr, parameters):
    """Substitute the float literals of *expr* (in traversal order) with *parameters*."""
    i_ref = [-1]
    # Distinctly sketchy approach to making this conversion.
    return _replace_parameters(expr, parameters, i_ref)
```
Okay, let's get started. We start by running the [Neural ODE example](./neural_ode.ipynb). Then we extract the learnt neural vector field, and symbolically regress across this. Finally we fine-tune the resulting symbolic expression.
```
def main(
    symbolic_dataset_size=2000,
    symbolic_num_populations=100,
    symbolic_population_size=20,
    symbolic_migration_steps=4,
    symbolic_mutation_steps=30,
    symbolic_descent_steps=50,
    pareto_coefficient=2,  # NOTE(review): not referenced in this body -- confirm whether it should weight the Pareto selection.
    fine_tuning_steps=500,
    fine_tuning_lr=3e-3,
    quantise_to=0.01,
):
    """Train a neural ODE, symbolically regress its vector field, then fine-tune the expressions."""
    #
    # First obtain a neural approximation to the dynamics.
    # We begin by running the previous example.
    #
    # Runs the Neural ODE example.
    # This defines the variables `ts`, `ys`, `model`.
    print("Training neural differential equation.")
    %run neural_ode.ipynb

    #
    # Now symbolically regress across the learnt vector field, to obtain a Pareto
    # frontier of symbolic equations, that trades loss against complexity of the
    # equation. Select the "best" from this frontier.
    #
    print("Symbolically regressing across the vector field.")
    vector_field = model.func.mlp  # noqa: F821
    dataset_size, length_size, data_size = ys.shape  # noqa: F821
    in_ = ys.reshape(dataset_size * length_size, data_size)  # noqa: F821
    in_ = in_[:symbolic_dataset_size]
    out = jax.vmap(vector_field)(in_)
    with tempfile.TemporaryDirectory() as tempdir:
        symbolic_regressor = pysr.PySRRegressor(
            niterations=symbolic_migration_steps,
            ncyclesperiteration=symbolic_mutation_steps,
            populations=symbolic_num_populations,
            npop=symbolic_population_size,
            optimizer_iterations=symbolic_descent_steps,
            optimizer_nrestarts=1,
            procs=1,
            verbosity=0,
            tempdir=tempdir,
            temp_equation_file=True,
            output_jax_format=True,
        )
        symbolic_regressor.fit(in_, out)
        best_equations = symbolic_regressor.get_best()
        expressions = [b.sympy_format for b in best_equations]
        symbolic_fns = [
            SymbolicFn(b.jax_format["callable"], b.jax_format["parameters"])
            for b in best_equations
        ]

    #
    # Now the constants in this expression have been optimised for regressing across
    # the neural vector field. This was good enough to obtain the symbolic expression,
    # but won't quite be perfect -- some of the constants will be slightly off.
    #
    # To fix this we now plug our symbolic function back into the original dataset
    # and apply gradient descent.
    #
    print("Optimising symbolic expression.")
    symbolic_fn = Stack(symbolic_fns)
    # Swap the learnt MLP out of the model for the stack of symbolic expressions.
    flat, treedef = jax.tree_flatten(
        model, is_leaf=lambda x: x is model.func.mlp  # noqa: F821
    )
    flat = [symbolic_fn if f is model.func.mlp else f for f in flat]  # noqa: F821
    symbolic_model = jax.tree_unflatten(treedef, flat)

    @eqx.filter_grad
    def grad_loss(symbolic_model):
        vmap_model = jax.vmap(symbolic_model, in_axes=(None, 0))
        pred_ys = vmap_model(ts, ys[:, 0])  # noqa: F821
        return jnp.mean((ys - pred_ys) ** 2)  # noqa: F821

    optim = optax.adam(fine_tuning_lr)
    opt_state = optim.init(eqx.filter(symbolic_model, eqx.is_inexact_array))

    @eqx.filter_jit
    def make_step(symbolic_model, opt_state):
        grads = grad_loss(symbolic_model)
        updates, opt_state = optim.update(grads, opt_state)
        symbolic_model = eqx.apply_updates(symbolic_model, updates)
        return symbolic_model, opt_state

    for _ in range(fine_tuning_steps):
        symbolic_model, opt_state = make_step(symbolic_model, opt_state)

    #
    # Finally we round each constant to the nearest multiple of `quantise_to`.
    #
    trained_expressions = []
    for module, expression in zip(symbolic_model.func.mlp.modules, expressions):
        expression = replace_parameters(expression, module.parameters.tolist())
        expression = quantise(expression, quantise_to)
        trained_expressions.append(expression)

    print(f"Expressions found: {trained_expressions}")


main()
```
github_jupyter
```
# Fit a dense autoencoder per patient on normalized heartbeat windows, then
# save the encoded (reduced-dimension) and reconstructed signals to disk.
# %cd /Users/Kunal/Projects/TCH_CardiacSignals_F20/
from numpy.random import seed

seed(1)
import numpy as np
import os
import matplotlib.pyplot as plt
import tensorflow

tensorflow.random.set_seed(2)
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.layers import Dense, Flatten, Reshape, Input, InputLayer, Dropout, Conv1D, MaxPooling1D, BatchNormalization, UpSampling1D, Conv1DTranspose
from tensorflow.keras.models import Sequential, Model
from src.preprocess.dim_reduce.patient_split import *
from src.preprocess.heartbeat_split import heartbeat_split
from sklearn.model_selection import train_test_split


def read_in(file_index, normalized, train, ratio):
    """
    Reads in a file and can toggle between normalized and original files
    :param file_index: patient number as string
    :param normalized: binary that determines whether the files should be normalized or not
    :param train: int that determines whether or not we are reading in data to train the model or for encoding
    :param ratio: ratio to split the files into train and test
    :return: returns npy array of patient data across 4 leads
    """
    # NOTE(review): when normalized == 1 and train is not in {0, 1, 2} this
    # implicitly returns None -- confirm callers only pass 0/1/2.
    # filepath = os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx" + file_index + ".npy")
    # filepath = os.path.join("Working_Data", "1000d", "Normalized_Fixed_Dim_HBs_Idx35.npy")
    filepath = "Working_Data/Training_Subset/Normalized/two_hbs/Normalized_Fixed_Dim_HBs_Idx" + str(file_index) + ".npy"
    if normalized == 1:
        if train == 1:
            normal_train, normal_test, abnormal = patient_split_train(filepath, ratio)
            # noise_factor = 0.5
            # noise_train = normal_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=normal_train.shape)
            return normal_train, normal_test
        elif train == 0:
            training, test, full = patient_split_all(filepath, ratio)
            return training, test, full
        elif train == 2:
            train_, test, full = patient_split_all(filepath, ratio)
            noise_factor = 0.5
            # Additive Gaussian noise for denoising-autoencoder style training
            noise_train = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            return train_, noise_train, test, full
    else:
        data = np.load(os.path.join("Working_Data", "Fixed_Dim_HBs_Idx" + file_index + ".npy"))
        return data


def build_model(sig_shape, encode_size):
    """
    Builds a deterministic autoencoder model, returning both the encoder and decoder models
    :param sig_shape: shape of input signal
    :param encode_size: dimension that we want to reduce to
    :return: encoder, decoder models
    """
    # Encoder: dense funnel 200 -> 125 -> 100 -> 50 -> 25 -> encode_size
    encoder = Sequential()
    encoder.add(InputLayer(sig_shape))
    encoder.add(Flatten())
    encoder.add(Dense(200, activation='tanh', kernel_initializer='glorot_normal'))
    encoder.add(Dense(125, activation='relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(100, activation='relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(50, activation='relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(25, activation='relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(encode_size))
    # Decoder: mirror of the encoder, ending in a linear reshape to the input shape
    decoder = Sequential()
    decoder.add(InputLayer((encode_size,)))
    decoder.add(Dense(25, activation='relu', kernel_initializer='glorot_normal'))
    decoder.add(Dense(50, activation='relu', kernel_initializer='glorot_normal'))
    decoder.add(Dense(100, activation='relu', kernel_initializer='glorot_normal'))
    decoder.add(Dense(125, activation='relu', kernel_initializer='glorot_normal'))
    decoder.add(Dense(200, activation='tanh', kernel_initializer='glorot_normal'))
    decoder.add(Dense(np.prod(sig_shape), activation='linear'))
    decoder.add(Reshape(sig_shape))
    return encoder, decoder


def training_ae(num_epochs, reduced_dim, file_index):
    """
    Training function for deterministic autoencoder model, saves the encoded and reconstructed arrays
    :param num_epochs: number of epochs to use
    :param reduced_dim: goal dimension
    :param file_index: patient number
    :return: None
    """
    # NOTE(review): `all` shadows the Python builtin of the same name.
    normal, abnormal, all = read_in(file_index, 1, 0, 0.3)
    # 85/15 train/validation split of the "normal" heartbeats
    normal_train = normal[:round(len(normal)*.85), :]
    normal_valid = normal[round(len(normal)*.85):, :]
    signal_shape = normal.shape[1:]
    batch_size = round(len(normal) * 0.1)
    encoder, decoder = build_model(signal_shape, reduced_dim)
    # NOTE(review): the next two lines build a throwaway graph; the wiring
    # actually used for the trained model starts at `inp` below.
    encode = encoder(Input(signal_shape))
    reconstruction = decoder(encode)
    inp = Input(signal_shape)
    encode = encoder(inp)
    reconstruction = decoder(encode)
    autoencoder = Model(inp, reconstruction)
    opt = keras.optimizers.Adam(learning_rate=0.0008)
    autoencoder.compile(optimizer=opt, loss='mse')
    early_stopping = EarlyStopping(patience=10, min_delta=0.0001, mode='min')
    # `autoencoder` is rebound here to the History object returned by fit()
    autoencoder = autoencoder.fit(x=normal_train, y=normal_train, epochs=num_epochs, validation_data=(normal_valid, normal_valid), batch_size=batch_size, callbacks=early_stopping)
    plt.plot(autoencoder.history['loss'])
    plt.plot(autoencoder.history['val_loss'])
    plt.title('model loss patient' + str(file_index))
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
    # save out the model
    # filename = 'ae_patient_' + str(file_index) + '_dim' + str(reduced_dim) + '_model'
    # autoencoder.save(filename + '.h5')
    # print('Model saved for ' + 'patient ' + str(file_index))
    # using AE to encode other data
    encoded = encoder.predict(all)
    reconstruction = decoder.predict(encoded)
    # save reconstruction, encoded, and input if needed
    # reconstruction_save = os.path.join("Working_Data", "reconstructed_ae_" + str(reduced_dim) + "d_Idx" + str(file_index) + ".npy")
    # encoded_save = os.path.join("Working_Data", "reduced_ae_" + str(reduced_dim) + "d_Idx" + str(file_index) + ".npy")
    reconstruction_save = "Working_Data/Training_Subset/Model_Output/reconstructed_2hb_ae_" + str(file_index) + ".npy"
    encoded_save = "Working_Data/Training_Subset/Model_Output/encoded_2hb_ae_" + str(file_index) + ".npy"
    np.save(reconstruction_save, reconstruction)
    np.save(encoded_save, encoded)
    # if training and need to save test split for MSE calculation
    # input_save = os.path.join("Working_Data","1000d", "original_data_test_ae" + str(100) + "d_Idx" + str(35) + ".npy")
    # np.save(input_save, test)


def run(num_epochs, encoded_dim):
    """
    Run training autoencoder over all dims in list
    :param num_epochs: number of epochs to train for
    :param encoded_dim: dimension to run on
    :return None, saves arrays for reconstructed and dim reduced arrays
    """
    for patient_ in [1, 16, 4, 11]:  # heartbeat_split.indicies:
        print("Starting on index: " + str(patient_))
        training_ae(num_epochs, encoded_dim, patient_)
        print("Completed " + str(patient_) + " reconstruction and encoding, saved test data to assess performance")


#################### Training to be done for 100 epochs for all dimensions ############################################
run(100, 10)
# run(100,100)
```
github_jupyter
```
# Cluster honeypot connection-log traffic (TCP, then UDP/Other) per source
# IP with KMeans and visualise the resulting clusters.
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.decomposition import PCA  # Principal Component Analysis module
from sklearn.cluster import KMeans  # KMeans clustering
import matplotlib.pyplot as plt  # Python defacto plotting library
import seaborn as sns  # More snazzy plotting library
import pylab as pl

%matplotlib inline

# TCP Conn Logs
# NOTE(review): the loader below is commented out, so `df` must already exist
# in the notebook session -- otherwise df.head() raises NameError.
# import pandas as pd
# import os
# os.chdir("/home/rk9cx/HP/")
# df = pd.read_csv("10_08_conn_logs_tcp.csv", index_col=False, header=None)
# df.columns =["ts","uid","src_ip","src_port","resp_ip","resp_port","duration","orig_bytes","resp_bytes","conn_state",
#              "history","orig_pkts","resp_pkts","tunnel_parents","local"]
# cols_num = ["resp_port","duration","orig_bytes","resp_bytes","orig_pkts","resp_pkts"]
# df[cols_num] = df[cols_num].apply(pd.to_numeric, errors='coerce')
# cols_char = ["src_ip","resp_ip"]
# df[cols_char] = df[cols_char].astype(str)
df.head()

# One row per source IP: distinct ports/hosts contacted plus traffic totals
df_clean = pd.DataFrame(df.groupby('src_ip').agg({'src_port': 'nunique', 'resp_port': 'nunique',
                                                  'resp_ip': 'nunique', 'duration': 'mean',
                                                  'orig_bytes': 'sum', 'resp_bytes': 'sum',
                                                  'orig_pkts': 'sum', 'resp_pkts': 'sum'}))
df_clean.reset_index(inplace=True)
df_clean.head()
df_clean = df_clean.dropna()
df_clean.shape
Y = df_clean.loc[:, df_clean.columns == 'src_ip']
X = df_clean.loc[:, df_clean.columns != 'src_ip']

# Elbow curve: KMeans score for k = 1..19
Nc = range(1, 20)
kmeans = [KMeans(n_clusters=i) for i in Nc]
score = [kmeans[i].fit(X).score(X) for i in range(len(kmeans))]
pl.plot(Nc, score)
pl.xlabel('Number of Clusters')
pl.ylabel('Score')
pl.title('Elbow Curve')
pl.show()

# Scaling
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
X_scaled = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
km = KMeans(n_clusters=3).fit(X_scaled)
df_clean['cluster'] = km.labels_
X_scaled['cluster'] = km.labels_
df_clean["cluster"].unique()
df_clean.groupby('cluster').size()
final_df = df_clean

import numpy as np
import matplotlib.pyplot as plt

plt.scatter(df["orig_bytes"], df["resp_bytes"])
plt.scatter(final_df["orig_pkts"], final_df["resp_pkts"], c=final_df["cluster"])

# removing outliers from pkts (keep rows below the 99th percentile)
p = final_df["orig_pkts"].quantile(0.99)
q = final_df["resp_pkts"].quantile(0.99)
r = final_df["orig_bytes"].quantile(0.99)
s = final_df["resp_bytes"].quantile(0.99)
test = final_df.loc[(final_df['orig_pkts'] < p) & (final_df['resp_pkts'] < q) & (final_df['orig_bytes'] < r) & (final_df['resp_bytes'] < s), ]
test.shape

# Scatter of packet counts coloured by cluster
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('orig_pkts', fontsize=15)
ax.set_ylabel('resp_pkts', fontsize=15)
ax.set_title('Clustering', fontsize=20)
targets = [0, 1, 2]
colors = ['r', 'g', 'y']
for target, color in zip(targets, colors):
    indicesToKeep = test['cluster'] == target
    ax.scatter(test.loc[indicesToKeep, 'orig_pkts'], test.loc[indicesToKeep, 'resp_pkts'], c=color, s=50)
ax.legend(targets)
ax.grid()

# Scatter of byte counts coloured by cluster
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('orig_bytes', fontsize=15)
ax.set_ylabel('resp_bytes', fontsize=15)
ax.set_title('Clustering', fontsize=20)
targets = [0, 1, 2]
colors = ['r', 'g', 'y']
for target, color in zip(targets, colors):
    indicesToKeep = test['cluster'] == target
    ax.scatter(test.loc[indicesToKeep, 'orig_bytes'], test.loc[indicesToKeep, 'resp_bytes'], c=color, s=50)
ax.legend(targets)
ax.grid()

# removing outliers from pkts (90th percentile, on the scaled frame)
p = X_scaled["orig_pkts"].quantile(0.90)
q = X_scaled["resp_pkts"].quantile(0.90)
r = X_scaled["orig_bytes"].quantile(0.90)
s = X_scaled["resp_bytes"].quantile(0.90)
# NOTE(review): 'orig_pkts' is compared against both p and q below; the
# second test presumably should be X_scaled['resp_pkts'] < q -- confirm.
test = X_scaled.loc[(X_scaled['orig_pkts'] < p) & (X_scaled['orig_pkts'] < q) & (X_scaled['orig_bytes'] < r) & (X_scaled['resp_bytes'] < s), ]
test.shape

# plt.scatter(final_df["resp_ip"], final_df["resp_port"] , c=final_df["cluster"])
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('resp_ip', fontsize=15)
ax.set_ylabel('resp_port', fontsize=15)
ax.set_title('Clustering', fontsize=20)
targets = [0, 1, 2]
colors = ['r', 'g', 'y']
for target, color in zip(targets, colors):
    indicesToKeep = test['cluster'] == target
    ax.scatter(test.loc[indicesToKeep, 'resp_ip'], test.loc[indicesToKeep, 'resp_port'], c=color, s=50)
ax.legend(targets)
ax.grid()

# plt.scatter(final_df["resp_ip"], final_df["duration"] , c=final_df["cluster"])
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('resp_ip', fontsize=15)
ax.set_ylabel('duration', fontsize=15)
ax.set_title('Clustering', fontsize=20)
targets = [0, 1, 2]
colors = ['r', 'g', 'y']
for target, color in zip(targets, colors):
    indicesToKeep = test['cluster'] == target
    ax.scatter(test.loc[indicesToKeep, 'resp_ip'], test.loc[indicesToKeep, 'duration'], c=color, s=50)
ax.legend(targets)
ax.grid()

# plt.scatter(final_df["resp_port"], final_df["src_port"] , c=final_df["cluster"])
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('resp_port', fontsize=15)
ax.set_ylabel('src_port', fontsize=15)
ax.set_title('Clustering', fontsize=20)
targets = [0, 1, 2]
colors = ['r', 'g', 'y']
for target, color in zip(targets, colors):
    indicesToKeep = test['cluster'] == target
    ax.scatter(test.loc[indicesToKeep, 'resp_port'], test.loc[indicesToKeep, 'src_port'], c=color, s=50)
ax.legend(targets)
ax.grid()

# plt.scatter(test["resp_pkts"], test["resp_bytes"] , c=test["cluster"])
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('resp_pkts', fontsize=15)
ax.set_ylabel('resp_bytes', fontsize=15)
ax.set_title('Clustering', fontsize=20)
targets = [0, 1, 2]
colors = ['r', 'g', 'y']
for target, color in zip(targets, colors):
    indicesToKeep = test['cluster'] == target
    ax.scatter(test.loc[indicesToKeep, 'resp_pkts'], test.loc[indicesToKeep, 'resp_bytes'], c=color, s=50)
ax.legend(targets)
ax.grid()

# UDP Conn Logs
import pandas as pd
import os

os.chdir("/home/rk9cx/HP/")
df_udp = pd.read_csv("10_08_2018_udp.csv", index_col=False, header=None)
df_udp.columns = ["ts", "uid", "src_ip", "src_port", "resp_ip", "resp_port", "duration", "orig_bytes", "resp_bytes", "conn_state",
                  "history", "orig_pkts", "resp_pkts", "tunnel_parents", "local"]
cols_num = ["resp_port", "duration", "orig_bytes", "resp_bytes", "orig_pkts", "resp_pkts"]
df_udp[cols_num] = df_udp[cols_num].apply(pd.to_numeric, errors='coerce')
cols_char = ["src_ip", "resp_ip"]
df_udp[cols_char] = df_udp[cols_char].astype(str)
df_udp.dtypes

# Other Conn Logs
import pandas as pd
import os

os.chdir("/home/rk9cx/HP/")
df_other = pd.read_csv("10_08_2018_Other.csv", index_col=False, header=None)
df_other.columns = ["ts", "uid", "src_ip", "src_port", "resp_ip", "resp_port", "duration", "orig_bytes", "resp_bytes", "conn_state",
                    "history", "orig_pkts", "resp_pkts", "tunnel_parents", "local"]
cols_num = ["resp_port", "duration", "orig_bytes", "resp_bytes", "orig_pkts", "resp_pkts"]
df_other[cols_num] = df_other[cols_num].apply(pd.to_numeric, errors='coerce')
cols_char = ["src_ip", "resp_ip"]
df_other[cols_char] = df_other[cols_char].astype(str)
df_other.dtypes
```
github_jupyter
# Lecture 3: Birthday Problem, Properties of Probability ## The Birthday Problem Given $k$ people, what is the probability of at least 2 people having the same birthday? First, we need to define the problem: 1. there are 365 days in a year (no leap-years) 1. births can be on any day with equal probability (birthdays are independent of one another) 1. treat people as distinguishable, so that assignments of birthdays to people are distinct outcomes. $k \le 1$ is meaningless, so we will not consider those cases. Now consider the case where you have more people than there are days in a year. By the pigeonhole principle, a match is then certain: $$ P(\text{at least one match}) = 1 \quad \text{for } k > 365$$ Now think about the event of _no matches_. We can compute this probability using the na&iuml;ve definition of probability: $$ P(\text{no match}) = \frac{365 \times 364 \times \cdots \times (365-k+1)}{365^k} $$ Now the event of _at least one match_ is the complement of _no matches_, so \begin{align} P(\text{at least one match}) &= 1 - P(\text{no match}) \\ &= 1 - \frac{365 \times 364 \times \cdots \times (365-k+1)}{365^k} \end{align} ``` def bday_prob(k): def no_match(k): num = 1.0 for n in [(365-e) for e in range(k)]: num *= n return num / 365**k return 1.0 - no_match(k) print("At k=23 people, the probability of a match is {:0f}, already exceeding 0.5.".format(bday_prob(23))) print("And at k=50 people, the probability of a match is {:0f} is very close to 1.0.".format(bday_prob(50))) ``` ## Properties Let's derive some properties using nothing but the 2 axioms stated earlier. 
### Property 1 > _The probability of an event $A$ is 1 minus the probability of that event's inverse (or complement)._ > > \begin\{align\} > P(A^{c}) &= 1 - P(A) \\\\ > \\\\ > \because 1 &= P(S) \\\\ > &= P(A \cup A^{c}) \\\\ > &= P(A) + P(A^{c}) & \quad \text{since } A \cap A^{c} = \emptyset ~~ \blacksquare > \end\{align\} ### Property 2 > _If $A$ is contained within $B$, then the probability of $A$ must be less than or equal to that for $B$._ > > \begin\{align\} > \text{If } A &\subseteq B \text{, then } P(A) \leq P(B) \\\\ > \\\\ > \because B &= A \cup ( B \cap A^{c}) \\\\ > P(B) &= P(A) + P(B \cap A^{c}) \\\\ > \\\\ > \implies P(B) &\geq P(A) \text{, since } P(B \cap A^{c}) \geq 0 & \quad \blacksquare > \end\{align\} ### Property 3, or the Inclusion/Exclusion Principle _The probability of a union of 2 events $A$ and $B$_ > \begin\{align\} > P(A \cup B) &= P(A) + P(B) - P(A \cap B) \\\\ > \\\\ > \text{since } P(A \cup B) &= P(A \cup (B \cap A^{c})) \\\\ > &= P(A) + P(B \cap A^{c}) \\\\ > \\\\ > \text{but note that } P(B) &= P(B \cap A) + P(B \cap A^{c}) \\\\ > \text{and since } P(B) - P(A \cap B) &= P(B \cap A^{c}) \\\\ > \\\\ > \implies P(A \cup B) &= P(A) + P(B) - P(A \cap B) ~~~~ \blacksquare > \end\{align\} ![title](images/L0301.png) This is the simplest case of the [principle of inclusion/exclusion](https://en.wikipedia.org/wiki/Inclusion%E2%80%93exclusion_principle). Considering the 3-event case, we have: > \begin\{align\} > P(A \cup B \cup C) &= P(A) + P(B) + P(C) - P(A \cap B) - P(B \cap C) - P(A \cap C) + P(A \cap B \cap C) > \end\{align\} > > ...where we sum up all of the separate events; > and then subtract each of the pair-wise intersections; > and finally add back in that 3-event intersection since that was subtracted in the previous step. 
![title](images/L0302.png) For the general case, we have: $$ P(A_1 \cup A_2 \cup \cdots \cup A_n) = \sum_{j=1}^n P(A_{j}) - \sum_{i<j} P(A_i \cap A_j) + \sum_{i<j<k} P(A_i \cap A_j \cap A_k) - \cdots + (-1)^{n-1} P(A_1 \cap A_2 \cap \cdots \cap A_n) $$ ... where we - sum up the probabilities of all of the individual events - subtract the sums over all _even-sized_ intersections (pairs, 4-way intersections, ...) - add back the sums over all _odd-sized_ intersections (triples, 5-way intersections, ...) ## de Montmort's problem (1713) Again from a gambling problem, say we have a deck of $n$ cards, labeled 1 through $n$. The deck is thoroughly shuffled. The cards are then flipped over, one at a time. A win is when the card labeled $k$ is the $k^{th}$ card flipped. What is the probability of a win? Let $A_k$ be the event that card $k$ is the $k^{th}$ card flipped. The probability of a win is when _at least one of the $n$ cards_ is in the correct position. Therefore, what we are interested in is $$ P(A_1 \cup A_2 \cup \cdots \cup A_n) $$ Now, consider the following: \begin\{align\} P(A_1) &= \frac{1}{n} & \quad \text{since all outcomes are equally likely} \\\\ P(A_1 \cap A_2) &= \frac{1}{n} \left(\frac{1}{n-1}\right) = \frac{(n-2)!}{n!} \\\\ &\vdots \\\\ P(A_1 \cap A_2 \cap \cdots \cap A_k) &= \frac{(n-k)!}{n!} & \quad \text{by symmetry} \\\\ \end\{align\} Which leads us to: \begin{align} P(A_1 \cup A_2 \cup \cdots \cup A_n) &= \binom{n}{1}\frac{1}{n} - \binom{n}{2}\frac{1}{n(n-1)} + \binom{n}{3}\frac{1}{n(n-1)(n-2)} - \cdots \\\\ &= n \frac{1}{n} - \left(\frac{n(n-1)}{2!}\right)\frac{1}{n(n-1)} + \left(\frac{n(n-1)(n-2)}{3!}\right)\frac{1}{n(n-1)(n-2)} - \cdots \\\\ &= 1 - \frac{1}{2!} + \frac{1}{3!} - \frac{1}{4!} + \cdots + (-1)^{n-1}\frac{1}{n!} \\\\ &= \sum_{k=1}^{n} \frac{(-1)^{k-1}}{k!} \\\\ &\approx 1 - \frac{1}{e} \quad \text{for large } n \\\\ \\\\ \text{ since } e^{-1} &= \frac{(-1)^0}{0!} + \frac{(-1)^1}{1!} + \frac{(-1)^2}{2!} + \frac{(-1)^3}{3!} + \cdots ~~~~ \text{ from the Taylor expansion of } e^{x} \text{ evaluated at } x = -1 \end{align}
github_jupyter
# Web Data Extraction (1) by Dr Liang Jin - Step 1: access crawler.idx files from SEC EDGAR - Step 2: re-write crawler data to csv files - Step 3: retrieve 10K filing information including URLs - Step 4: read text from html ## Step 0: Setup... ``` # import packages as usual import os, requests, csv, webbrowser from urllib.request import urlopen, urlretrieve from bs4 import BeautifulSoup # define some global variables such as sample periods beg_yr = 2016 end_yr = 2017 ``` ## Step 1: Access Crawler.idx Files... SEC stores tons of filings in its archives and fortunately they provide index files. We can access to the index files using following url as an example: [https://www.sec.gov/Archives/edgar/full-index/](https://www.sec.gov/Archives/edgar/full-index/) And individual crawler.idx files are stored in a structured way: `https://www.sec.gov/Archives/edgar/full-index/{}/{}/crawler.idx` where `{ }/{ }` are year and quarter ``` # create a list containning all the URLs for .idx file idx_urls = [] for year in range(beg_yr, end_yr+1): for qtr in ['QTR1', 'QTR2', 'QTR3', 'QTR4']: idx_url = 'https://www.sec.gov/Archives/edgar/full-index/{}/{}/crawler.idx'.format(year, qtr) idx_urls.append(idx_url) # check on our URLs idx_urls # let's try downloading one of the files urlretrieve(idx_urls[0], './example.idx'); ``` ### Task 1: Have a look at the downloaded file? ## Step 2: Rewrite Crawler data into CSV files... 
The original Crawler.idx files come with extra information: - **Company Name**: hmmm...not really useful - **Form Type**: i.e., 10K, 10Q and others - **CIK**: Central Index Key, claimed to be unique key to identify entities in SEC universe - **Date Filed**: the exact filing date, NOTE, it is not necessary to be the reporting date - **URL**: filing page address which contains the link to the actual filing in HTML format - **Meta-data** on the crawler.idx itself - **Other information** including headers and seperators ### Retrieve the data inside the .idx file ``` # Ok, let's get cracking url = idx_urls[0] # use requests package to access the contents r = requests.get(url) # then focus on the text data only and split the whole file into lines lines = r.text.splitlines() ``` ### Raw data processing ``` # Let's peek the contents lines[:10] # identify the location of the header row # its the eighth row, so in Python the index is 7 header_loc = 7 # double check lines[header_loc] # retrieve the location of individual columns name_loc = lines[header_loc].find('Company Name') type_loc = lines[header_loc].find('Form Type') cik_loc = lines[header_loc].find('CIK') date_loc = lines[header_loc].find('Date Filed') url_loc = lines[header_loc].find('URL') ``` ### Re-organise the data ``` # identify the location of the first row # its NO.10 row, so in Python the index is 9 firstdata_loc = 9 # double check lines[firstdata_loc] # create an empty list rows = [] # loop through lines in .idx file for line in lines[firstdata_loc:]: # collect the data from the begining until the char before 485BPOS (Form Type) # then strip the string, i.e., removing the heading and trailing white spaces company_name = line[:type_loc].strip() form_type = line[type_loc:cik_loc].strip() cik = line[cik_loc:date_loc].strip() date_filed = line[date_loc:url_loc].strip() page_url = line[url_loc:].strip() # store these collected data to a row (tuple) row = (company_name, form_type, cik, date_filed, page_url) # then 
append this row to the empty list rows rows.append(row) ``` ### Task 2: Can you update the codes to store 10-K file only? ``` # peek again rows[:5] ``` ### Write to CSV file ``` # where to write? # define directory to store data csv_dir = './CSV/' # recommend to put this on top # a future-proof way to create directory # only create the folder when there is no existing one if not os.path.isdir(csv_dir): os.mkdir(csv_dir) # But file names? since we will have multiple files to process eventually # create file name based on the original idx file _ = url.split('/') _ ``` How about create a sensible naming scheme can be easily refered to? How about something like **2017Q4**? ``` # get year from idx URL file_yr = url.split('/')[-3] # get quarter from idx URL file_qtr = url.split('/')[-2][-1] # Combine year, quarter, and extension to create file name file_name = file_yr + "Q" + file_qtr + '.csv' # then create a path so that we can write the data to local drive file_path = os.path.join(csv_dir, file_name) # Check on the path file_path # create and write to csv file with open(file_path, 'w') as wf: writer = csv.writer(wf, delimiter = ',') writer.writerows(rows) ``` ### Task 3: Can you loop through idx files from 2016 to 2017?
github_jupyter
``` import pandas as pd import numpy as np from numpy import linalg import matplotlib.pyplot as plt import seaborn as sbn from scipy.stats import invgamma import logging from notebookutils import root_dir; root_dir() from model.utils import read_clean_kv17, read_testdata1, read_party_keys, matrix, vector, party_name_from_key, generate_Tau_trace_df from model.distributionplotter import DistributionPlotter from model.traceplotter import TracePlotter from model.parameterframe import ParameterFrame from model.parameters import Parameters np.random.seed(100) data = read_clean_kv17(drop_party_key = True) labels = read_party_keys() data = matrix(data) #making testing different data set simple sigma_array = np.array([np.random.normal()**2 for _ in range(15)]) F = np.matrix([np.random.normal(size=3) for _ in range(1215)]) Beta = np.matrix(np.random.normal(size=(15,3))) Ystar = np.matrix(np.random.normal(size=(1215,15))) tau = [- np.inf] + [-2, -0.5, 0.5, 2] + [np.inf] Tau = dict() for i in range(15): Tau[i] = tau import numpy as np from numpy import linalg from scipy.stats import invgamma from scipy.stats import truncnorm import logging from model.utils import vector class OrderedProbitGibbsSampler(): logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) def __init__(self, n_factors, data, n_answers): self.y = data self.k = n_factors self.p = data.shape[1] self.T = data.shape[0] self.I_k = np.identity(self.k) self.I_p = np.identity(self.p) self.n_answers = n_answers self.v = 0.5 self.s_sq = 2 self.C0 = 2 self.mu0 = 1 self.Beta_list = list() self.Sigma_list = list() self.F_list = list() self.Tau_list = list() self.Ystar_list = list() print('number of variables:', self.p, ' number of observations:', self.T) def f_t(self, Beta, Sigma, t): """Posterior of f_t""" y_t = self.Ystar[[t]].T S_inv = linalg.inv(Sigma * self.I_p) scale = linalg.inv(self.I_k + np.dot(np.dot(Beta.T, S_inv), Beta)) loc = vector(np.dot(np.dot(np.dot(scale, Beta.T), 
S_inv), y_t)) return np.random.multivariate_normal(loc, scale) def Beta_i(self, Sigma, F, i): if i < self.k: C_i = self.C_i(F, Sigma, i) m_i = self.m_i(C_i, F, Sigma, i) B_i = np.random.multivariate_normal(m_i, C_i) if B_i[i] < 0: # print(B_i[i]) # possible bug #B_i = np.random.multivariate_normal(m_i, C_i) B_i[i] = 0 if i < self.k: B_i = np.append(B_i, np.zeros(self.k - i - 1)) elif i >= self.k: C_k = self.C_k(F, Sigma, i) m_k = self.m_k(C_k, F, Sigma, i) B_i = np.random.multivariate_normal(m_k, C_k) else: raise ValueError('k is {0}, i is {1} - Beta_i probs'.format(self.k, i)) return vector(B_i) def C_i(self, F, Sigma, i): """If i <= k """ F_i = F.T[:i + 1].T sigma_i = Sigma[i] identity_i = np.identity(i + 1) return linalg.inv((1 / self.C0) * identity_i + (1 / sigma_i) * np.dot(F_i.T, F_i)) def C_k(self, F, Sigma, i): """if i > k""" sigma_i = Sigma[i] return linalg.inv((1 / self.C0) * self.I_k + (1 / sigma_i) * np.dot(F.T, F)) def m_i(self, C_i, F, Sigma, i): """If i <= k """ F_i = F[:, :i + 1] # 2000 X i sigma_i = Sigma[i] # 1 x 1 ones_i = np.matrix(np.ones(i + 1)).T y_i = self.Ystar[:, [i]] #changed to Ystar tmp = (1 / self.C0) * self.mu0 * ones_i + (1 / sigma_i) * np.dot(F_i.T, y_i) return vector(np.dot(C_i, tmp)) def m_k(self, C_k, F, Sigma, i): """if i > k""" sigma_i = Sigma[i] # 1 x 1 ones_k = np.matrix(np.ones(self.k)).T y_i = self.Ystar[:, [i]] #changed to Ystar tmp = (1 / self.C0) * self.mu0 * ones_k + (1 / sigma_i) * np.dot(F.T, y_i) return vector(np.dot(C_k, tmp)) def calc_Beta(self): B = np.matrix([self.Beta_i(self.Sigma, self.F, i) for i in range(self.p)]) self.Beta_list.append(B) return B def calc_F(self): F = np.matrix([self.f_t(self.Beta, self.Sigma, t) for t in range(self.T)]) self.add('F', F) return F def calc_Ystar(self): #defined here to not make multiple call of dot product (specially for Psi) Y = self.y Psi = self.Psi Ystar = np.matrix([self.calc_Ystar_i(Psi[i], Y[i]) for i in range(self.T)]) self.add('Ystar', Ystar) return Ystar def 
calc_Ystar_i(self, Psi_i, Y_i): Y_star_i = list() for question in range(self.p): psi_i_j = Psi_i[0,question] y_i_j = self.get_y_i_j(Y_i,question) lb, ub = self.get_bounds(y_i_j, question) Y_star_i.append(self.calc_Ystar_i_j(lb, ub, psi_i_j)) return Y_star_i def calc_Ystar_i_j(self, lower_bound, upper_bound, mean): #weird implementation of python required following: ub = upper_bound - mean lb = lower_bound - mean return truncnorm.rvs(a = lb, b = ub, loc = mean, scale = 1) def get_bounds(self, y_i_j, question): tau = self.Tau[question] upper_bound = tau[y_i_j] lower_bound = tau[y_i_j - 1] return lower_bound, upper_bound @staticmethod def get_y_i_j(Y_i, question): """To handle unfortunate 0'es""" res = Y_i[0, question] if res == 0: res = 3 return res def calc_Tau(self): Tau = dict() for question in range(self.p): Tau[question] = self.calc_Tau_p(question) self.add('Tau', Tau) return Tau def calc_Tau_p(self, p): """Cutpoints/answers for given quiestion p""" _tau = [self.calc_tau_l_p(p, l) for l in range(1, self.n_answers)] return [- np.inf] + _tau + [np.inf] def calc_tau_l_p(self, p, l): _ , Ystar_max = self.get_Ystar_min_max(p, l) Ystar_min, _ = self.get_Ystar_min_max(p, l + 1) l_plus = self.get_tau_l_p(p, l+1) l_minus = self.get_tau_l_p(p, l-1) _min = min(Ystar_min, l_plus) _max = max(Ystar_max, l_minus) #print('tau', 'p=',p,', l=',l) #print(l_plus, Ystar_min) #print(l_minus, Ystar_max) try: return np.random.uniform(_min, _max) except OverflowError as e: print(_min, _max) raise e def get_Ystar_min_max(self, question, answer): """ answer is the answer of question (1, 2, 3, 4, 5)""" Y_l = self.y[:,question] Ystar_l = self.Ystar[:,question] #Ystar observation where y_l == 0 Ystar_legal = [] for i in range(self.T): if Y_l[i] == answer : Ystar_legal.append(Ystar_l[i]) return float(min(Ystar_legal)), float(max(Ystar_legal)) def get_tau_l_p(self, p, l): return self.Tau[p][l] @property def Beta(self): return self.Beta_list[-1] @property def F(self): return self.F_list[-1] 
@property def Sigma(self): #return self.Sigma_list[-1] return [1 for _ in range(self.p)] @property def Psi(self): return np.dot(self.F ,self.Beta.T) @property def Tau(self): return self.Tau_list[-1] @property def Ystar(self): return self.Ystar_list[-1] def add(self, param, value): """ add to Sigma_list, Beta_list or F_list Parameters ========== param: (str) string that should be of {'Sigma', 'F', 'Beta'} value: (obj) appropriate object for given list """ if param == 'Sigma': self.Sigma_list.append(value) elif param == 'Tau': self.Tau_list.append(value) elif param == 'Ystar': self.Ystar_list.append(value) elif param == 'F': self.F_list.append(value) elif param == 'Beta': self.Beta_list.append(value) else: raise ValueError("Param must be in {'F', 'Sigma', 'Beta', 'Ystar', 'Tau'}") def sampler(self, n_iterations): logging.info("Sampling begins") for i in range(n_iterations): self.calc_Ystar() self.calc_Tau() self.calc_F() self.calc_Beta() if (i % 5 == 0): logging.info("run {0} simulations".format(i)) gs = OrderedProbitGibbsSampler(3, data, n_answers = 5) gs.add('Sigma',sigma_array) gs.add('F', F) gs.add('Beta',Beta) gs.add('Tau', Tau) gs.sampler(3) beta_params = ParameterFrame(gs.Beta_list, 'beta') beta_trace_estimation = beta_params.get_trace_df() beta_trace_estimation.to_pickle('data//probit_v2_estimation_beta_trace_df.pkl') factor = ParameterFrame(gs.F_list, 'factor_estimation') factor_trace_estimation = factor.get_trace_df() factor_trace_estimation.to_pickle('data//probit_v2_estimation_factor_trace_df.pkl') ystar = ParameterFrame(gs.Ystar_list, 'ystar') ystar_trace_estimation = ystar.get_trace_df() ystar_trace_estimation.to_pickle('data//probit_v2_ystar_trace_df.pkl') tau_trace = generate_Tau_trace_df(gs.Tau_list) tau_trace.to_pickle('data//probit_v2_tau_trace_df.pkl') ```
github_jupyter
``` %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt from astropy import units as u from astroduet.bbmag import bb_abmag_fluence from astroduet.image_utils import construct_image, find from astroduet.config import Telescope from astroduet.background import background_pixel_rate from astroduet.diff_image import py_zogy from astroduet.image_utils import estimate_background duet = Telescope() duet.info() [bgd_band1, bgd_band2] = background_pixel_rate(duet, low_zodi = True, diag=True) read_noise = duet.read_noise # Define image simulation parameters exposure = 300 * u.s frame = np.array([30,30]) # Dimensions of the image I'm simulating in DUET pixels (30x30 ~ 3x3 arcmin) # Define source bbtemp = 20000 * u.K swiftmag = 18 * u.ABmag src_fluence1, src_fluence2 = bb_abmag_fluence(bbtemp=bbtemp, swiftmag=swiftmag, duet=duet) print("Source fluences: {}, {}".format(src_fluence1,src_fluence2)) src_rate1 = duet.trans_eff * duet.eff_area * src_fluence1 print("Source rate (band 1): {}".format(src_rate1)) # Define galaxy galaxy = 'dwarf' gal_params = None # Construct the simulated image image = construct_image(frame, exposure, source=src_rate1, gal_type=galaxy, gal_params=gal_params, sky_rate=bgd_band1, duet=duet) plt.figure(figsize=[8,6]) plt.imshow(image.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() # Single-exposure point source detection # Run DAOPhot-like Find command print("DAOPhot find:") psf_fwhm_pix = duet.psf_fwhm / duet.pixel star_tbl, bkg_image, threshold = find(image,psf_fwhm_pix.value,method='daophot',frame='single') plt.figure(figsize=[16,6]) plt.subplot(121) plt.title('DAOPhot Find') plt.imshow(image.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() plt.scatter(star_tbl['x'],star_tbl['y'],marker='o',s=1000,facecolors='none',edgecolors='r',lw=1) # Run find_peaks command print("\nFind peaks:") star_tbl, bkg_image, threshold = find(image,psf_fwhm_pix.value,method='peaks',frame='single') 
plt.subplot(122) plt.title('Find Peaks') plt.imshow((image-bkg_image).value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() plt.scatter(star_tbl['x'],star_tbl['y'],marker='o',s=1000,facecolors='none',edgecolors='r',lw=1) # Single-exposure photometry print("Real source count rate: {}".format(src_rate1)) # Convert to count rate image_rate = image / exposure from astroduet.image_utils import run_daophot, ap_phot # Run aperture photometry result, apertures, annulus_apertures = ap_phot(image_rate,star_tbl,read_noise,exposure) print(result['xcenter','ycenter','aperture_sum','aper_sum_bkgsub','aperture_sum_err']) print("\n") # Run PSF-fitting photometry result, residual_image = run_daophot(image,threshold,star_tbl,niters=1) print(result['x_fit','y_fit','flux_fit','flux_unc']) # Plots plt.figure(figsize=[16,12]) plt.subplot(221) plt.title('Aperture Photometry') plt.imshow(image_rate.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() apertures.plot() annulus_apertures.plot() plt.subplot(223) plt.title('DAOPhot PSF-fitting') plt.imshow(image_rate.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() plt.scatter(result['x_fit'],result['y_fit'],marker='o',s=1000,facecolors='none',edgecolors='r',lw=1) plt.subplot(224) plt.title('DAOPhot Residual Image') plt.imshow(residual_image.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() # Part 2, simulate reference image, without source, 5 exposures # Currently a perfect co-add n_exp = 5 ref_image = construct_image(frame, exposure, \ gal_type=galaxy, gal_params=gal_params, source=None, sky_rate=bgd_band1, n_exp=n_exp) ref_image_rate = ref_image / (n_exp * exposure) plt.figure(figsize=[8,6]) plt.imshow(ref_image_rate.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() # Part 3, make a difference image # Make a 2D array containing the PSF (oversample then bin up for more accurate PSF) oversample = 5 pixel_size_init = duet.pixel / oversample psf_model = 
duet.psf_model(pixel_size=pixel_size_init, x_size=25, y_size=25) psf_os = psf_model.array #psf_os = gaussian_psf(psf_fwhm,(25,25),pixel_size_init) shape = (5, 5, 5, 5) psf_array = psf_os.reshape(shape).sum(-1).sum(1) # Use ZOGY algorithm to create difference image image_bkg, image_bkg_rms_median = estimate_background(image_rate, sigma=2, method='1D') ref_bkg, ref_bkg_rms_median = estimate_background(ref_image_rate, sigma=2, method='1D') image_rate_bkgsub, ref_rate_bkgsub = image_rate - image_bkg, ref_image_rate - ref_bkg s_n, s_r = np.sqrt(image_rate), np.sqrt(ref_image_rate) # 2D uncertainty (sigma) - that is, noise on the background sn, sr = np.mean(s_n), np.mean(s_r) # Average uncertainty (sigma) dx, dy = 0.1, 0.01 # Astrometric uncertainty (sigma) diff_image, d_psf, s_corr = py_zogy(image_rate_bkgsub.value, ref_rate_bkgsub.value, psf_array,psf_array, s_n.value,s_r.value, sn.value,sr.value,dx,dy) diff_image *= image_rate_bkgsub.unit plt.imshow(diff_image.value) plt.colorbar() plt.show() # Part 4, find and photometry on the difference image print("Real source count rate: {}".format(src_rate1)) # Run find star_tbl, bkg_image, threshold = find(diff_image,psf_fwhm_pix.value,method='peaks') print('threshold = ', threshold) # Run aperture photometry result, apertures, annulus_apertures = ap_phot(diff_image,star_tbl,read_noise,exposure) result['percent_error'] = result['aperture_sum_err'] / result['aper_sum_bkgsub'] * 100 print(result['xcenter','ycenter','aperture_sum','aper_sum_bkgsub','aperture_sum_err','percent_error']) print("\n") # Run PSF-fitting photometry result, residual_image = run_daophot(diff_image,threshold,star_tbl,niters=1) result['percent_error'] = result['flux_unc'] / result['flux_fit'] * 100 print(result['id','flux_fit','flux_unc','percent_error']) # Plots plt.figure(figsize=[16,12]) plt.subplot(221) plt.title('Aperture Photometry') plt.imshow(diff_image.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() apertures.plot() 
annulus_apertures.plot() plt.subplot(223) plt.title('DAOPhot PSF-fitting') plt.imshow(diff_image.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() plt.scatter(result['x_fit'],result['y_fit'],marker='o',s=1000,facecolors='none',edgecolors='r',lw=1) plt.subplot(224) plt.title('DAOPhot Residual Image') plt.imshow(residual_image.value, cmap='viridis', aspect=1, origin='lower') plt.colorbar() ```
github_jupyter
# Reinforcement learning In this Python notebook, we will have you implement a simple reinforcement learning agent for the AI gym mountain car problem. Please first have a look at the description of the task here: <A HREF="https://github.com/openai/gym/wiki/MountainCar-v0" TARGET="_blank">Description</A> ## Heuristic agent Before we dive into the use of reinforcement learning of the mountain car task, you will first make a strategy by hand yourself. There are two very good reasons for doing so. First, making such a "heuristic" agent will give you a good idea of how difficult the task is, given the observations and actions of the agent. In the case of the mountain car task, the agent has to reach a goal position (located at position $0.5$), by choosing from discrete actions $\{0,1,2\}$ corresponding to $\{$ push left, no acceleration, push right $\}$. The starting position is in the middle, at position $-0.5$. The left end of the environment has position $-1.2$. The velocity of the agent is limited to the interval $[-0.07, 0.07]$. Second, making a heuristic agent gives you a good idea of the difficulty of the task, and what kind of solution you might expect the machine learning method to find (reinforcement learning in the current notebook). We will experiment with the original formulation of the car mountain car task. The class we will use is `CMC_original`, which is the same as the normal AI gym version, but with an adapted render-function in order to be able to show the graphics in a Binder notebook. The full code, imported with `import run_cart` in the code block below. You can find it <A HREF="https://github.com/guidoAI/RL_cart_notebook/blob/master/run_cart.py" TARGET="_blank">here</A>. <FONT COLOR="red">Exercise 1.</FONT> <OL> <LI>First run the following block, in which an agent is run that takes random actions. Does it succeed in performing the task? Why so? </LI> <LI>If we want the agent to move to the right, why then not just drive there? 
Try this strategy out by replacing the random action in the `act` function with 2, which means to just accelerate to the right. What do you observe, and why?</LI> <LI>Write a heuristic method to solve the task. Only make use of the position and velocity in the current time step, as this is what we are going to give to the reinforcement learning agent. Are you able to solve the task? Is your solution optimal you think? Do you think that this will be easy to learn with reinforcement learning?</LI> </OL> ``` %matplotlib inline import os import matplotlib from matplotlib import pyplot as plt import run_cart import gym import numpy as np import random # definition of the agent class class heuristic_agent(object): def act(self, observation, reward, done): """ - observation[0] = position - observation[1] = velocity - reward = the reward received from the environment - done = whether the agent reached the goal """ position = observation[0] velocity = observation[1] # REPLACE THIS CODE WITH A FIXED ACTION OR YOUR HEURISTIC: action = random.randint(0,2) return action # create the agent: agent = heuristic_agent() # apply the agent to the task reward, rewards = run_cart.run_cart_discrete(agent, env=run_cart.CMC_original(), graphics=True) print('Reward = ' + str(reward)) ``` ## Q-learning For this notebook, you will implement a very elementary reinforcement learning method: Q-learning. Please first read up on this method <A HREF="https://en.wikipedia.org/wiki/Q-learning" TARGET="_blank">here</A>. In particular, we will investigate the use of _tabular_ Q-learning, in which the algorithm finds the values for all state-action pairs in the matrix. Since our state is two-dimensional (position and velocity), the state-action space can be discretized without leading to an overly large table. In the code block below, we create the table in the `__init__` function of `Q_learning_agent`, using 10 grid cells per state variable. Also have a look at the start of the `act` function. 
When we get the two-dimensional state (observation), we transform it to a single state number (row index in the Q state-action table). The table has three columns for the three actions. <FONT COLOR="red">Exercise 2.</FONT> <OL> <LI>Fill in the Q-update function in the `act` function. Remember that variables of the agent object have to be reffered to with `self` like `self.alpha` and `self.Q`.</LI> <LI>Write the code to take actions under an $\epsilon$-greedy policy, where $\epsilon$ is `p_explore` in the code. </LI> <LI>Run the code to have the agent learn. Is it able to do the task (to check this, run the code block two blocks below in order to run the agent without exploratory actions)? Why / why not? If it does not work, can you then change the code at the bottom to make the agent learn the task properly?</LI> </OL> ``` class Q_learning_agent(object): """Simple Q-learning agent for the MountainCarv0 task https://en.wikipedia.org/wiki/Q-learning """ n_actions = 3 def __init__(self, min_speed, max_speed, min_position, max_position, alpha = 0.1, gamma = 0.9, p_explore = 0.1): # number of grids per state variable self.n_grid = 10 self.min_speed = min_speed self.max_speed = max_speed self.speed_step = (max_speed - min_speed) / self.n_grid self.min_position = min_position self.max_position = max_position self.position_step = (max_position - min_position) / self.n_grid # discretizing the 2-variable state results in this number of states: self.n_states = int(self.n_grid**2) # make an empty Q-matrix self.Q = np.zeros([self.n_states, self.n_actions]) # initialize previous state and action self.previous_state = 0 self.previous_action = 0 # learning rate self.alpha = alpha # discount factor: self.gamma = gamma # e-greedy, p_explore results in a random action: self.p_explore = p_explore def act(self, observation, reward, done, verbose = False): # Determine the new state: pos = observation[0] if(pos > self.max_position): pos = self.max_position elif(pos < self.min_position): 
pos = self.min_position obs_pos = int((pos - self.min_position) // self.position_step) vel = observation[1] if(vel > self.max_speed): vel = self.max_speed elif(vel < self.min_speed): vel = self.min_speed obs_vel = int((vel - self.min_speed) // self.speed_step) new_state = obs_pos * self.n_grid + obs_vel if(verbose): print(f'Velocity {observation[1]}, position {observation[0]}, (grid {self.speed_step}, \ {self.position_step}), state = {new_state}') # ************************************************ # FILL IN YOUR CODE HERE FOR THE Q-UPDATE FUNCTION # ************************************************ # Update the Q-matrix: # self.Q[..., ...] = ... # determine the new action: # FILL IN YOUR CODE FOR SELECTING AN ACTION # update previous state and action self.previous_state = new_state self.previous_action = action # return the action return action env=run_cart.CMC_original() # set up off-policy learning with p_explore = 1 max_velocity = env.max_speed min_velocity = -max_velocity agent = Q_learning_agent(min_velocity, max_velocity, env.min_position, env.max_position, \ alpha = 0.20, gamma = 0.95, p_explore = 1.0) n_episodes = 1000 reward, rewards = run_cart.run_cart_discrete(agent, env=env, graphics=False, n_episodes=n_episodes) print('Reward per episode = ' + str(reward / n_episodes)) n_episodes = 1 agent.p_explore = 0 agent.alpha = 0 reward, rewards = run_cart.run_cart_discrete(agent, env=env, graphics=True, n_episodes=n_episodes) print(f'Reward trained agent {reward}') ``` ## Answers <FONT COLOR="red">Exercise 1.</FONT> <OL> <LI>It typically does not succeed. For example, just when it speeds up to the right it then randomly accelerates to the left, and will not reach the goal.</LI> <LI>The agent just never gets high enough on the up-slope. The exerted force is not sufficient by itself to counter gravity. 
The agent will likely have to swing up and down to gain more and more speed, which will finally bring it over the right hill.</LI> <LI>The following code block has a solution to the task consisting of a few if / else statements. It always succeeds when starting at the start location. Whether it is the optimal solution is not clear, but it is clear that the state space can be separated quite easily and linked to the appropriate actions. It seems that the task is relatively easy to perform and learning algorithms should be able to do it.</LI> </OL> ``` class heuristic_agent(object): """Guido's heuristic agent""" def act(self, observation, reward, done): position = observation[0] velocity = observation[1] if position < -0.5: if velocity < -0.01: action = 0 else: action = 2 else: if velocity > 0.01: action = 2 else: action = 0 return action agent = heuristic_agent() reward, rewards = run_cart.run_cart_discrete(agent, env=run_cart.CMC_original(), graphics=True) print('Reward = ' + str(reward)) ``` <FONT COLOR="red">Exercise 2.</FONT> <OL> <LI>The code for the update equation is as follows: `self.Q[self.previous_state, self.previous_action] += self.alpha * (reward + self.gamma * max(self.Q[new_state, :]) - self.Q[self.previous_state, self.previous_action])`</LI> <LI>A possible implementation is: `if(random.random() $\lt$ self.p_explore): action = random.randint(0, self.n_actions-1) else: action = np.argmax(self.Q[new_state, :])`</LI> <LI>In order to solve it, I made a learning scheme in three parts: off-policy learning with random actions, on-policy learning with less exploration, and on-policy learning with a lower learning rate and even less exploration.</LI> </OL> ``` class Q_learning_agent(object): """Simple Q-learning agent for the MountainCarv0 task https://en.wikipedia.org/wiki/Q-learning """ n_actions = 3 def __init__(self, min_speed, max_speed, min_position, max_position, alpha = 0.1, gamma = 0.9, p_explore = 0.1): # number of grids per state variable 
self.n_grid = 10 self.min_speed = min_speed self.max_speed = max_speed self.speed_step = (max_speed - min_speed) / self.n_grid self.min_position = min_position self.max_position = max_position self.position_step = (max_position - min_position) / self.n_grid # discretizing the 2-variable state results in this number of states: self.n_states = int(self.n_grid**2) # make an empty Q-matrix self.Q = np.zeros([self.n_states, self.n_actions]) # initialize previous state and action self.previous_state = 0 self.previous_action = 0 # learning rate self.alpha = alpha # discount factor: self.gamma = gamma # e-greedy, p_explore results in a random action: self.p_explore = p_explore def act(self, observation, reward, done, verbose = False): # Determine the new state: pos = observation[0] if(pos > self.max_position): pos = self.max_position elif(pos < self.min_position): pos = self.min_position obs_pos = int((pos - self.min_position) // self.position_step) vel = observation[1] if(vel > self.max_speed): vel = self.max_speed elif(vel < self.min_speed): vel = self.min_speed obs_vel = int((vel - self.min_speed) // self.speed_step) new_state = obs_pos * self.n_grid + obs_vel if(verbose): print(f'Velocity {observation[1]}, position {observation[0]}, (grid {self.speed_step}, \ {self.position_step}), state = {new_state}') # Update the Q-matrix: self.Q[self.previous_state, self.previous_action] += self.alpha * \ (reward + self.gamma * max(self.Q[new_state, :]) - self.Q[self.previous_state, self.previous_action]) # determine the new action: if(random.random() < self.p_explore): action = random.randint(0, self.n_actions-1) else: action = np.argmax(self.Q[new_state, :]) # update previous state and action self.previous_state = new_state self.previous_action = action # return the action return action env=run_cart.CMC_original() # set up off-policy learning with p_explore = 1 max_velocity = env.max_speed min_velocity = -max_velocity agent = Q_learning_agent(min_velocity, max_velocity, 
env.min_position, env.max_position, \ alpha = 0.20, gamma = 0.95, p_explore = 1.0) n_episodes = 1000 reward, rewards = run_cart.run_cart_discrete(agent, env=env, graphics=False, n_episodes=n_episodes) print('Reward per episode = ' + str(reward / n_episodes)) # on-policy now with e-greedy agent.p_explore = 0.05 reward, rewards = run_cart.run_cart_discrete(agent, env=env, graphics=False, n_episodes=n_episodes) print('Reward per episode = ' + str(reward / n_episodes)) n_episodes = 100 agent.alpha = 0.05 agent.p_explore = 0.02 reward, rewards = run_cart.run_cart_discrete(agent, env=env, graphics=False, n_episodes=n_episodes) print('Reward per episode = ' + str(reward / n_episodes)) ```
github_jupyter
``` # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Vertex client library: Custom training text binary classification model with custom container for online prediction <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/community/gapic/custom/showcase_custom_text_binary_classification_online_container.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/community/gapic/custom/showcase_custom_text_binary_classification_online_container.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> ## Overview This tutorial demonstrates how to use the Vertex client library for Python to train using a custom container and deploy a custom text binary classification model for online prediction. ### Dataset The dataset used for this tutorial is the [IMDB Movie Reviews](https://www.tensorflow.org/datasets/catalog/imdb_reviews) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. 
The trained model predicts whether a review is positive or negative in sentiment. ### Objective In this tutorial, you create a custom model from a Python script in a custom Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using `gcloud` command-line tool or online using Google Cloud Console. The steps performed include: - Create a Vertex custom job for training a model. - Train a TensorFlow model using a custom container. - Retrieve and load the model artifacts. - View the model evaluation. - Upload the model as a Vertex `Model` resource. - Deploy the `Model` resource to a serving `Endpoint` resource. - Make a prediction. - Undeploy the `Model` resource. ### Costs This tutorial uses billable components of Google Cloud (GCP): * Vertex AI * Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ## Installation Install the latest version of Vertex client library. ``` import os import sys # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install -U google-cloud-aiplatform $USER_FLAG ``` Install the latest GA version of *google-cloud-storage* library as well. ``` ! pip3 install -U google-cloud-storage $USER_FLAG ``` ### Restart the kernel Once you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. 
``` if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you. 
- Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. **Click Create service account**. In the **Service account name** field, enter a name, and click **Create**. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. ``` # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. 
This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** When you submit a custom training job using the Vertex client library, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex runs the code from this package. In this tutorial, Vertex also saves the trained model that results from your job in the same bucket. You can then create an `Endpoint` resource based on this output in order to serve online predictions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. ``` BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION $BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al $BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants #### Import Vertex client library Import the Vertex client library into our Python environment. 
``` import time from google.cloud.aiplatform import gapic as aip from google.protobuf import json_format from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Struct, Value ``` #### Vertex constants Setup up the following constants for Vertex: - `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services. - `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### Hardware Accelerators Set the hardware accelerators (e.g., GPU), if any, for training and prediction. Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100 Otherwise specify `(None, None)` to use a container image to run on a CPU. *Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. 
``` if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU")), ) else: TRAIN_GPU, TRAIN_NGPU = (None, None) if os.getenv("IS_TESTING_DEPOLY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPOLY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) ``` #### Container (Docker) image Next, we will set the Docker container images for prediction - Set the variable `TF` to the TensorFlow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15. The following list shows some of the pre-built images available: - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest` - XGBoost - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest` - Scikit-learn - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest` For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) ``` if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2-1" if TF[0] == "2": if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: 
DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU) ``` #### Machine Type Next, set the machine type to use for training and prediction. - Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] *Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. ``` if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) ``` # Tutorial Now you are ready to start creating your own custom model and training for IMDB Movie Reviews. ## Set up clients The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. - Model Service for `Model` resources. - Endpoint Service for deployment. - Job Service for batch jobs and custom training. - Prediction Service for serving. 
``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_job_client(): client = aip.JobServiceClient(client_options=client_options) return client def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_endpoint_client(): client = aip.EndpointServiceClient(client_options=client_options) return client def create_prediction_client(): client = aip.PredictionServiceClient(client_options=client_options) return client clients = {} clients["job"] = create_job_client() clients["model"] = create_model_client() clients["endpoint"] = create_endpoint_client() clients["prediction"] = create_prediction_client() for client in clients.items(): print(client) ``` ## Train a model There are two ways you can train a custom model using a container image: - **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model. - **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. ### Create a Docker file In this tutorial, you train a IMDB Movie Reviews model using your own custom container. To use your own custom container, you build a Docker file. First, you will create a directory for the container components. ### Examine the training package #### Package layout Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. - PKG-INFO - README.md - setup.cfg - setup.py - trainer - \_\_init\_\_.py - task.py The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image. 
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). #### Package Assembly In the following cells, you will assemble the training package. ``` # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: IMDB Movie Reviews text binary classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py ``` #### Task.py contents In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary: - Gets the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`. - Loads IMDB Movie Reviews dataset from TF Datasets (tfds). - Builds a simple RNN model using TF.Keras model API. - Compiles the model (`compile()`). - Sets a training distribution strategy according to the argument `args.distribute`. - Trains the model (`fit()`) with epochs specified by `args.epochs`. - Saves the trained model (`save(args.model_dir)`) to the specified model directory. 
``` %%writefile custom/trainer/task.py # Single, Mirror and Multi-Machine Distributed Training for IMDB import tensorflow_datasets as tfds import tensorflow as tf from tensorflow.python.client import device_lib import argparse import os import sys tfds.disable_progress_bar() parser = argparse.ArgumentParser() parser.add_argument('--model-dir', dest='model_dir', default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.') parser.add_argument('--lr', dest='lr', default=1e-4, type=float, help='Learning rate.') parser.add_argument('--epochs', dest='epochs', default=20, type=int, help='Number of epochs.') parser.add_argument('--steps', dest='steps', default=100, type=int, help='Number of steps per epoch.') parser.add_argument('--distribute', dest='distribute', type=str, default='single', help='distributed training strategy') args = parser.parse_args() print('Python Version = {}'.format(sys.version)) print('TensorFlow Version = {}'.format(tf.__version__)) print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found'))) print(device_lib.list_local_devices()) # Single Machine, single compute device if args.distribute == 'single': if tf.test.is_gpu_available(): strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") else: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") # Single Machine, multiple compute device elif args.distribute == 'mirror': strategy = tf.distribute.MirroredStrategy() # Multiple Machine, multiple compute device elif args.distribute == 'multi': strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # Multi-worker configuration print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync)) # Preparing dataset BUFFER_SIZE = 10000 BATCH_SIZE = 64 def make_datasets(): dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True) train_dataset, test_dataset = dataset['train'], dataset['test'] encoder = info.features['text'].encoder padded_shapes = ([None],()) return 
train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE, padded_shapes), encoder train_dataset, encoder = make_datasets() # Build the Keras model def build_and_compile_rnn_model(encoder): model = tf.keras.Sequential([ tf.keras.layers.Embedding(encoder.vocab_size, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(args.lr), metrics=['accuracy']) return model with strategy.scope(): # Creation of dataset, and model building/compiling need to be within # `strategy.scope()`. model = build_and_compile_rnn_model(encoder) # Train the model model.fit(train_dataset, epochs=args.epochs, steps_per_epoch=args.steps) model.save(args.model_dir) ``` #### Write the Docker file contents Your first step in containerizing your code is to create a Docker file. In your Docker you’ll include all the commands needed to run your container image. It’ll install all the libraries you’re using and set up the entry point for your training code. 1. Install a pre-defined container image from TensorFlow repository for deep learning images. 2. Copies in the Python training code, to be shown subsequently. 3. Sets the entry into the Python training script as `trainer/task.py`. Note, the `.py` is dropped in the ENTRYPOINT command, as it is implied. ``` %%writefile custom/Dockerfile FROM gcr.io/deeplearning-platform-release/tf2-cpu.2-1 WORKDIR /root WORKDIR / # Copies the trainer code to the docker image. COPY trainer /trainer # Sets up the entry point to invoke the trainer. ENTRYPOINT ["python", "-m", "trainer.task"] ``` #### Build the container locally Next, you will provide a name for your customer container that you will use when you submit it to the Google Container Registry. ``` TRAIN_IMAGE = "gcr.io/" + PROJECT_ID + "/imdb:v1" ``` Next, build the container. ``` ! 
docker build custom -t $TRAIN_IMAGE ``` #### Test the container locally Run the container within your notebook instance to ensure it’s working correctly. You will run it for 5 epochs. ``` ! docker run $TRAIN_IMAGE --epochs=5 ``` #### Register the custom container When you’ve finished running the container locally, push it to Google Container Registry. ``` ! docker push $TRAIN_IMAGE ``` #### Store training script on your Cloud Storage bucket Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket. ``` ! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_imdb.tar.gz ``` ## Prepare your custom job specification Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following: - `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed) - `container_spec` : The specification of the custom container. ### Prepare your machine specification Now define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators. ``` if TRAIN_GPU: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": TRAIN_NGPU, } else: machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} ``` ### Prepare your disk specification (optional) Now define the disk specification for your custom training job. 
This tells Vertex what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB. ``` DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard] DISK_SIZE = 200 # GB disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE} ``` ### Prepare your container specification Now define the container specification for your custom training container: - `image_uri`: The custom container image. - `args`: The command-line arguments to pass to the executable that is set as the entry point into the container. - `--model-dir` : For our demonstrations, we use this command-line argument to specify where to store the model artifacts. - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps per epoch. ``` JOB_NAME = "_custom_container" + TIMESTAMP MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME) EPOCHS = 20 STEPS = 100 DIRECT = True if DIRECT: CMDARGS = [ "--model-dir=" + MODEL_DIR, "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), ] else: CMDARGS = [ "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), ] container_spec = { "image_uri": TRAIN_IMAGE, "args": CMDARGS, } ``` ### Define the worker pool specification Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following: - `replica_count`: The number of instances to provision of this machine type. - `machine_spec`: The hardware specification. 
- `disk_spec` : (optional) The disk storage specification. - `container_spec`: The Docker container to install on the VM instance(s). ``` worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "container_spec": container_spec, "disk_spec": disk_spec, } ] ``` ### Assemble a job specification Now assemble the complete description for the custom job specification: - `display_name`: The human readable name you assign to this custom job. - `job_spec`: The specification for the custom job. - `worker_pool_specs`: The specification for the machine VM instances. - `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form: <output_uri_prefix>/model ``` if DIRECT: job_spec = {"worker_pool_specs": worker_pool_spec} else: job_spec = { "worker_pool_specs": worker_pool_spec, "base_output_directory": {"output_uri_prefix": MODEL_DIR}, } custom_job = {"display_name": JOB_NAME, "job_spec": job_spec} ``` ### Train the model Now start the training of your custom training job on Vertex. Use this helper function `create_custom_job`, which takes the following parameter: -`custom_job`: The specification for the custom job. The helper function calls job client service's `create_custom_job` method, with the following parameters: -`parent`: The Vertex location path to `Dataset`, `Model` and `Endpoint` resources. -`custom_job`: The specification for the custom job. You will display a handful of the fields returned in `response` object, with the two that are of most interest are: `response.name`: The Vertex fully qualified identifier assigned to this custom training job. You save this identifier for using in subsequent steps. `response.state`: The current state of the custom training job. 
``` def create_custom_job(custom_job): response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job) print("name:", response.name) print("display_name:", response.display_name) print("state:", response.state) print("create_time:", response.create_time) print("update_time:", response.update_time) return response response = create_custom_job(custom_job) ``` Now get the unique identifier for the custom job you created. ``` # The full unique ID for the custom job job_id = response.name # The short numeric ID for the custom job job_short_id = job_id.split("/")[-1] print(job_id) ``` ### Get information on a custom job Next, use this helper function `get_custom_job`, which takes the following parameter: - `name`: The Vertex fully qualified identifier for the custom job. The helper function calls the job client service's`get_custom_job` method, with the following parameter: - `name`: The Vertex fully qualified identifier for the custom job. If you recall, you got the Vertex fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`. ``` def get_custom_job(name, silent=False): response = clients["job"].get_custom_job(name=name) if silent: return response print("name:", response.name) print("display_name:", response.display_name) print("state:", response.state) print("create_time:", response.create_time) print("update_time:", response.update_time) return response response = get_custom_job(job_id) ``` # Deployment Training the above model may take upwards of 20 minutes time. Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, we will need to know the location of the saved model, which the Python script saved in your local Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`. 
```
# Poll the training job once a minute until it reaches a terminal state
# (succeeded or failed). `True` suppresses per-poll status printing.
while True:
    response = get_custom_job(job_id, True)
    if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("Training job has not completed:", response.state)
        model_path_to_deploy = None
        # A failed job is terminal too -- stop polling.
        if response.state == aip.JobState.JOB_STATE_FAILED:
            break
    else:
        # Success: when the service chose the output location (indirect
        # mode), the artifacts live under the "/model" subdirectory.
        if not DIRECT:
            MODEL_DIR = MODEL_DIR + "/model"
        model_path_to_deploy = MODEL_DIR
        print("Training Time:", response.update_time - response.create_time)
        break
    time.sleep(60)

print("model_to_deploy:", model_path_to_deploy)
```

## Load the saved model

Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.

To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.

```
import tensorflow as tf

# Reload the trained SavedModel directly from the Cloud Storage path.
model = tf.keras.models.load_model(MODEL_DIR)
```

## Evaluate the model

Now let's find out how good the model is.

### Load evaluation data

You will load the IMDB Movie Review test (holdout) data from `tfds.datasets`, using the method `load()`. This will return the dataset as a tuple of two elements. The first element is the dataset and the second is information on the dataset, which will contain the predefined vocabulary encoder. The encoder will convert words into a numerical embedding, which was pretrained and used in the custom training script.

When you trained the model, you needed to set a fix input length for your text. For forward feeding batches, the `padded_batch()` property of the corresponding `tf.dataset` was set to pad each input sequence into the same shape for a batch.

For the test data, you also need to set the `padded_batch()` property accordingly.
``` import tensorflow_datasets as tfds dataset, info = tfds.load("imdb_reviews/subwords8k", with_info=True, as_supervised=True) test_dataset = dataset["test"] encoder = info.features["text"].encoder BATCH_SIZE = 64 padded_shapes = ([None], ()) test_dataset = test_dataset.padded_batch(BATCH_SIZE, padded_shapes) ``` ### Perform the model evaluation Now evaluate how well the model in the custom job did. ``` model.evaluate(test_dataset) ``` ## Upload the model for serving Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. ### How does the serving function work When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`. The serving function consists of two parts: - `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc. - `post-processing function`: - Converts the model output to format expected by the receiving application -- e.q., compresses the output. - Packages the output for the the receiving application -- e.g., add headings, make JSON object, etc. Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. 
The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content. One thing you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. ## Get the serving function signature You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer. When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request. ``` loaded = tf.saved_model.load(model_path_to_deploy) serving_input = list( loaded.signatures["serving_default"].structured_input_signature[1].keys() )[0] print("Serving function input:", serving_input) ``` ### Upload the model Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions. The helper function takes the following parameters: - `display_name`: A human readable name for the `Model` resource. - `image_uri`: The container image for the model deployment. - `model_uri`: The Cloud Storage path to our SavedModel artifact.
For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`. The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters: - `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources. - `model`: The specification for the Vertex `Model` resource instance. Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields: - `display_name`: A human readable name for the `Model` resource. - `metadata_schema_uri`: Since your model was built without a Vertex `Dataset` resource, you will leave this blank (`''`). - `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format. - `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. Uploading a model into a Vertex `Model` resource returns a long running operation, since it may take a few moments. You call `response.result()`, which is a synchronous call and will return when the Vertex `Model` resource is ready. The helper function returns the Vertex fully qualified identifier for the corresponding Vertex `Model` instance `upload_model_response.model`. You will save the identifier for subsequent steps in the variable `model_to_deploy_id`.
``` IMAGE_URI = DEPLOY_IMAGE def upload_model(display_name, image_uri, model_uri): model = { "display_name": display_name, "metadata_schema_uri": "", "artifact_uri": model_uri, "container_spec": { "image_uri": image_uri, "command": [], "args": [], "env": [{"name": "env_name", "value": "env_value"}], "ports": [{"container_port": 8080}], "predict_route": "", "health_route": "", }, } response = clients["model"].upload_model(parent=PARENT, model=model) print("Long running operation:", response.operation.name) upload_model_response = response.result(timeout=180) print("upload_model_response") print(" model:", upload_model_response.model) return upload_model_response.model model_to_deploy_id = upload_model("imdb-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy) ``` ### Get `Model` resource information Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter: - `name`: The Vertex unique identifier for the `Model` resource. This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter: - `name`: The Vertex unique identifier for the `Model` resource. ``` def get_model(name): response = clients["model"].get_model(name=name) print(response) get_model(model_to_deploy_id) ``` ## Deploy the `Model` resource Now deploy the trained Vertex custom `Model` resource. This requires two steps: 1. Create an `Endpoint` resource for deploying the `Model` resource to. 2. Deploy the `Model` resource to the `Endpoint` resource. ### Create an `Endpoint` resource Use this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter: - `display_name`: A human readable name for the `Endpoint` resource. The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter: - `display_name`: A human readable name for the `Endpoint` resource. 
Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`.
```
ENDPOINT_NAME = "imdb_endpoint-" + TIMESTAMP

def create_endpoint(display_name):
    # Minimal Endpoint specification -- only a display name is required.
    endpoint = {"display_name": display_name}
    response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
    print("Long running operation:", response.operation.name)
    # Block until the Endpoint resource is provisioned (up to 5 minutes).
    result = response.result(timeout=300)
    print("result")
    print(" name:", result.name)
    print(" display_name:", result.display_name)
    print(" description:", result.description)
    print(" labels:", result.labels)
    print(" create_time:", result.create_time)
    print(" update_time:", result.update_time)
    return result

result = create_endpoint(ENDPOINT_NAME)
```
Now get the unique identifier for the `Endpoint` resource you created.
```
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]

print(endpoint_id)
```
### Compute instance scaling You have several choices on scaling the compute instances for handling your online prediction requests: - Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. - Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.
- Auto Scaling: The online prediction requests are split across a scalable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions. The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request. ``` MIN_NODES = 1 MAX_NODES = 1 ``` ### Deploy `Model` resource to the `Endpoint` resource Use this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters: - `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline. - `deployed_model_display_name`: A human readable name for the deployed model. - `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to. The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters: - `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to. - `deployed_model`: The requirements specification for deploying the model. - `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100.
Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields: - `model`: The Vertex fully qualified model identifier of the (upload) model to deploy. - `display_name`: A human readable name for the deployed model. - `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production. - `dedicated_resources`: This refers to how many compute instances (replicas) that are scaled for serving prediction requests. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. #### Traffic Split Let's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain: you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance. Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only gets, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. #### Response The method returns a long running operation `response`.
We will wait synchronously for the operation to complete by calling the `response.result()`, which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources. ``` DEPLOYED_NAME = "imdb_deployed-" + TIMESTAMP def deploy_model( model, deployed_model_display_name, endpoint, traffic_split={"0": 100} ): if DEPLOY_GPU: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_type": DEPLOY_GPU, "accelerator_count": DEPLOY_NGPU, } else: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_count": 0, } deployed_model = { "model": model, "display_name": deployed_model_display_name, "dedicated_resources": { "min_replica_count": MIN_NODES, "max_replica_count": MAX_NODES, "machine_spec": machine_spec, }, "disable_container_logging": False, } response = clients["endpoint"].deploy_model( endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split ) print("Long running operation:", response.operation.name) result = response.result() print("result") deployed_model = result.deployed_model print(" deployed_model") print(" id:", deployed_model.id) print(" model:", deployed_model.model) print(" display_name:", deployed_model.display_name) print(" create_time:", deployed_model.create_time) return deployed_model.id deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id) ``` ## Make an online prediction request Now do an online prediction to your deployed model. ### Prepare the request content Since the dataset is a `tf.dataset`, which acts as a generator, we must use it as an iterator to access the data items in the test data. We do the following to get a single data item from the test data: - Set the property for the number of batches to draw per iteration to one using the method `take(1)`. - Iterate once through the test data -- i.e., we do a break within the for loop.
- In the single iteration, we save the data item which is in the form of a tuple. - The data item will be the first element of the tuple, which you then will convert from an tensor to a numpy array -- `data[0].numpy()`. ``` import tensorflow_datasets as tfds dataset, info = tfds.load("imdb_reviews/subwords8k", with_info=True, as_supervised=True) test_dataset = dataset["test"] test_dataset.take(1) for data in test_dataset: print(data) break test_item = data[0].numpy() ``` ### Send the prediction request Ok, now you have a test data item. Use this helper function `predict_data`, which takes the following parameters: - `data`: The test data item is a 64 padded numpy 1D array. - `endpoint`: The Vertex AI fully qualified identifier for the endpoint where the model was deployed. - `parameters_dict`: Additional parameters for serving. This function uses the prediction client service and calls the `predict` method with the following parameters: - `endpoint`: The Vertex AI fully qualified identifier for the endpoint where the model was deployed. - `instances`: A list of instances (data items) to predict. - `parameters`: Additional parameters for serving. To pass the test data to the prediction service, you must package it for transmission to the serving binary as follows: 1. Convert the data item from a 1D numpy array to a 1D Python list. 2. Convert the prediction request to a serialized Google protobuf (`json_format.ParseDict()`) Each instance in the prediction request is a dictionary entry of the form: {input_name: content} - `input_name`: the name of the input layer of the underlying model. - `content`: The data item as a 1D Python list. Since the `predict()` service can take multiple data items (instances), you will send your single data item as a list of one data item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service. 
The `response` object returns a list, where each element in the list corresponds to the corresponding data item in the request. You will see in the output for each prediction: - `predictions` -- the predicted binary sentiment between 0 (negative) and 1 (positive). ``` def predict_data(data, endpoint, parameters_dict): parameters = json_format.ParseDict(parameters_dict, Value()) # The format of each instance should conform to the deployed model's prediction input schema. instances_list = [{serving_input: data.tolist()}] instances = [json_format.ParseDict(s, Value()) for s in instances_list] response = clients["prediction"].predict( endpoint=endpoint, instances=instances, parameters=parameters ) print("response") print(" deployed_model_id:", response.deployed_model_id) predictions = response.predictions print("predictions") for prediction in predictions: print(" prediction:", prediction) predict_data(test_item, endpoint_id, None) ``` ## Undeploy the `Model` resource Now undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters: - `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to. - `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to. This function calls the endpoint client service's method `undeploy_model`, with the following parameters: - `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed. - `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed. - `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource. Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}.
``` def undeploy_model(deployed_model_id, endpoint): response = clients["endpoint"].undeploy_model( endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={} ) print(response) undeploy_model(deployed_model_id, endpoint_id) ``` # Cleaning up To clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the Vertex fully qualified identifier for the dataset try: if delete_dataset and "dataset_id" in globals(): clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the Vertex fully qualified identifier for the pipeline try: if delete_pipeline and "pipeline_id" in globals(): clients["pipeline"].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the Vertex fully qualified identifier for the model try: if delete_model and "model_to_deploy_id" in globals(): clients["model"].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the Vertex fully qualified identifier for the endpoint try: if delete_endpoint and "endpoint_id" in globals(): clients["endpoint"].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the Vertex fully qualified identifier for the batch job try: if delete_batchjob and "batch_job_id" in globals(): clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete 
the custom job using the Vertex fully qualified identifier for the custom job try: if delete_customjob and "job_id" in globals(): clients["job"].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and "hpt_job_id" in globals(): clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME ```
github_jupyter
# Parsing Inputs In the chapter on [Grammars](Grammars.ipynb), we discussed how grammars can be used to represent various languages. We also saw how grammars can be used to generate strings of the corresponding language. Grammars can also perform the reverse. That is, given a string, one can decompose the string into its constituent parts that correspond to the parts of grammar used to generate it – the _derivation tree_ of that string. These parts (and parts from other similar strings) can later be recombined using the same grammar to produce new strings. In this chapter, we use grammars to parse and decompose a given set of valid seed inputs into their corresponding derivation trees. This structural representation allows us to mutate, crossover, and recombine their parts in order to generate new valid, slightly changed inputs (i.e., fuzz) ``` from bookutils import YouTubeVideo YouTubeVideo('k39i9de0L54') ``` **Prerequisites** * You should have read the [chapter on grammars](Grammars.ipynb). * An understanding of derivation trees from the [chapter on grammar fuzzer](GrammarFuzzer.ipynb) is also required. ## Synopsis <!-- Automatically generated. Do not edit. --> To [use the code provided in this chapter](Importing.ipynb), write ```python >>> from fuzzingbook.Parser import <identifier> ``` and then make use of the following features. This chapter introduces `Parser` classes, parsing a string into a _derivation tree_ as introduced in the [chapter on efficient grammar fuzzing](GrammarFuzzer.ipynb). Two important parser classes are provided: * [Parsing Expression Grammar parsers](#Parsing-Expression-Grammars) (`PEGParser`). These are very efficient, but limited to specific grammar structure. Notably, the alternatives represent *ordered choice*. That is, rather than choosing all rules that can potentially match, we stop at the first match that succeed. * [Earley parsers](#Parsing-Context-Free-Grammars) (`EarleyParser`). 
These accept any kind of context-free grammars, and explore all parsing alternatives (if any). Using any of these is fairly easy, though. First, instantiate them with a grammar: ```python >>> from Grammars import US_PHONE_GRAMMAR >>> us_phone_parser = EarleyParser(US_PHONE_GRAMMAR) ``` Then, use the `parse()` method to retrieve a list of possible derivation trees: ```python >>> trees = us_phone_parser.parse("(555)987-6543") >>> tree = list(trees)[0] >>> display_tree(tree) ``` These derivation trees can then be used for test generation, notably for mutating and recombining existing inputs. ``` import bookutils from typing import Dict, List, Tuple, Collection, Set, Iterable, Generator, cast from Fuzzer import Fuzzer # minor dependendcy from Grammars import EXPR_GRAMMAR, START_SYMBOL, RE_NONTERMINAL from Grammars import is_valid_grammar, syntax_diagram, Grammar from GrammarFuzzer import GrammarFuzzer, display_tree, tree_to_string, dot_escape from GrammarFuzzer import DerivationTree from ExpectError import ExpectError from IPython.display import display from Timer import Timer ``` ## Why Parsing for Fuzzing? Why would one want to parse existing inputs in order to fuzz? Let us illustrate the problem with an example. Here is a simple program that accepts a CSV file of vehicle details and processes this information. ``` def process_inventory(inventory): res = [] for vehicle in inventory.split('\n'): ret = process_vehicle(vehicle) res.extend(ret) return '\n'.join(res) ``` The CSV file contains details of one vehicle per line. Each row is processed in `process_vehicle()`. ``` def process_vehicle(vehicle): year, kind, company, model, *_ = vehicle.split(',') if kind == 'van': return process_van(year, company, model) elif kind == 'car': return process_car(year, company, model) else: raise Exception('Invalid entry') ``` Depending on the kind of vehicle, the processing changes. ``` def process_van(year, company, model): res = ["We have a %s %s van from %s vintage." 
% (company, model, year)] iyear = int(year) if iyear > 2010: res.append("It is a recent model!") else: res.append("It is an old but reliable model!") return res def process_car(year, company, model): res = ["We have a %s %s car from %s vintage." % (company, model, year)] iyear = int(year) if iyear > 2016: res.append("It is a recent model!") else: res.append("It is an old but reliable model!") return res ``` Here is a sample of inputs that the `process_inventory()` accepts. ``` mystring = """\ 1997,van,Ford,E350 2000,car,Mercury,Cougar\ """ print(process_inventory(mystring)) ``` Let us try to fuzz this program. Given that the `process_inventory()` takes a CSV file, we can write a simple grammar for generating comma separated values, and generate the required CSV rows. For convenience, we fuzz `process_vehicle()` directly. ``` import string CSV_GRAMMAR: Grammar = { '<start>': ['<csvline>'], '<csvline>': ['<items>'], '<items>': ['<item>,<items>', '<item>'], '<item>': ['<letters>'], '<letters>': ['<letter><letters>', '<letter>'], '<letter>': list(string.ascii_letters + string.digits + string.punctuation + ' \t\n') } ``` We need some infrastructure first for viewing the grammar. ``` syntax_diagram(CSV_GRAMMAR) ``` We generate `1000` values, and evaluate the `process_vehicle()` with each. ``` gf = GrammarFuzzer(CSV_GRAMMAR, min_nonterminals=4) trials = 1000 valid: List[str] = [] time = 0 for i in range(trials): with Timer() as t: vehicle_info = gf.fuzz() try: process_vehicle(vehicle_info) valid.append(vehicle_info) except: pass time += t.elapsed_time() print("%d valid strings, that is GrammarFuzzer generated %f%% valid entries from %d inputs" % (len(valid), len(valid) * 100.0 / trials, trials)) print("Total time of %f seconds" % time) ``` This is obviously not working. But why? 
``` gf = GrammarFuzzer(CSV_GRAMMAR, min_nonterminals=4) trials = 10 time = 0 for i in range(trials): vehicle_info = gf.fuzz() try: print(repr(vehicle_info), end="") process_vehicle(vehicle_info) except Exception as e: print("\t", e) else: print() ``` None of the entries will get through unless the fuzzer can produce either `van` or `car`. Indeed, the reason is that the grammar itself does not capture the complete information about the format. So here is another idea. We modify the `GrammarFuzzer` to know a bit about our format. ``` import copy import random class PooledGrammarFuzzer(GrammarFuzzer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._node_cache = {} def update_cache(self, key, values): self._node_cache[key] = values def expand_node_randomly(self, node): (symbol, children) = node assert children is None if symbol in self._node_cache: if random.randint(0, 1) == 1: return super().expand_node_randomly(node) return copy.deepcopy(random.choice(self._node_cache[symbol])) return super().expand_node_randomly(node) ``` Let us try again! ``` gf = PooledGrammarFuzzer(CSV_GRAMMAR, min_nonterminals=4) gf.update_cache('<item>', [ ('<item>', [('car', [])]), ('<item>', [('van', [])]), ]) trials = 10 time = 0 for i in range(trials): vehicle_info = gf.fuzz() try: print(repr(vehicle_info), end="") process_vehicle(vehicle_info) except Exception as e: print("\t", e) else: print() ``` At least we are getting somewhere! It would be really nice if _we could incorporate what we know about the sample data in our fuzzer._ In fact, it would be nice if we could _extract_ the template and valid values from samples, and use them in our fuzzing. How do we do that? The quick answer to this question is: Use a *parser*. ## Using a Parser Generally speaking, a _parser_ is the part of a program that processes (structured) input.
The parsers we discuss in this chapter transform an input string into a _derivation tree_ (discussed in the [chapter on efficient grammar fuzzing](GrammarFuzzer.ipynb)). From a user's perspective, all it takes to parse an input is two steps: 1. Initialize the parser with a grammar, as in ``` parser = Parser(grammar) ``` 2. Use the parser to retrieve a list of derivation trees: ```python trees = parser.parse(input) ``` Once we have parsed a tree, we can use it just as the derivation trees produced from grammar fuzzing. We discuss a number of such parsers, in particular * [parsing expression grammar parsers](#Parsing-Expression-Grammars) (`PEGParser`), which are very efficient, but limited to specific grammar structure; and * [Earley parsers](#Parsing-Context-Free-Grammars) (`EarleyParser`), which accept any kind of context-free grammars. If you just want to _use_ parsers (say, because your main focus is testing), you can just stop here and move on [to the next chapter](LangFuzzer.ipynb), where we learn how to make use of parsed inputs to mutate and recombine them. If you want to _understand_ how parsers work, though, this chapter is right for you. ## An Ad Hoc Parser As we saw in the previous section, programmers often have to extract parts of data that obey certain rules. For example, for *CSV* files, each element in a row is separated by *commas*, and multiple rows are used to store the data. To extract the information, we write an ad hoc parser `simple_parse_csv()`. ``` def simple_parse_csv(mystring: str) -> DerivationTree: children: List[DerivationTree] = [] tree = (START_SYMBOL, children) for i, line in enumerate(mystring.split('\n')): children.append(("record %d" % i, [(cell, []) for cell in line.split(',')])) return tree ``` We also change the default orientation of the graph to *left to right* rather than *top to bottom* for easier viewing using `lr_graph()`.
```
def lr_graph(dot):
    # Render derivation trees left-to-right instead of top-down.
    dot.attr('node', shape='plain')
    dot.graph_attr['rankdir'] = 'LR'
```
The `display_tree()` shows the structure of our CSV file after parsing.
```
tree = simple_parse_csv(mystring)
display_tree(tree, graph_attr=lr_graph)
```
This is of course simple. What if we encounter slightly more complexity? Again, another example from the Wikipedia.
```
mystring = '''\
1997,Ford,E350,"ac, abs, moon",3000.00\
'''
print(mystring)
```
We define a new annotation method `highlight_node()` to mark the nodes that are interesting.
```
def highlight_node(predicate):
    # Build a node-annotation function for display_tree() that draws a node
    # in red whenever predicate(dot, nid, symbol, ann) holds.
    def hl_node(dot, nid, symbol, ann):
        if predicate(dot, nid, symbol, ann):
            dot.node(repr(nid), dot_escape(symbol), fontcolor='red')
        else:
            dot.node(repr(nid), dot_escape(symbol))
    return hl_node
```
Using `highlight_node()` we can highlight particular nodes that we were wrongly parsed.
```
tree = simple_parse_csv(mystring)
bad_nodes = {5, 6, 7, 12, 13, 20, 22, 23, 24, 25}

def hl_predicate(_d, nid, _s, _a):
    # Highlight exactly the node ids listed in bad_nodes.
    return nid in bad_nodes

highlight_err_node = highlight_node(hl_predicate)
display_tree(tree, log=False, node_attr=highlight_err_node, graph_attr=lr_graph)
```
The marked nodes indicate where our parsing went wrong. We can of course extend our parser to understand quotes. First we define some of the helper functions `parse_quote()`, `find_comma()` and `comma_split()`
```
def parse_quote(string, i):
    # string[i] is an opening quote; return the index of the next '"'
    # after it (the closing quote), or -1 if the quote is unterminated.
    v = string[i + 1:].find('"')
    return v + i + 1 if v >= 0 else -1

def find_comma(string, i):
    # Return the index of the next comma at or after i that is not inside
    # a quoted field, or -1 if there is none (or a quote is unterminated).
    slen = len(string)
    while i < slen:
        if string[i] == '"':
            # Skip over the quoted region; i lands on the closing quote.
            i = parse_quote(string, i)
            if i == -1:
                return -1
        if string[i] == ',':
            return i
        i += 1
    return -1

def comma_split(string):
    # Generate the comma-separated fields of `string`, keeping commas that
    # appear inside double quotes as part of their field.
    slen = len(string)
    i = 0
    while i < slen:
        c = find_comma(string, i)
        if c == -1:
            yield string[i:]
            return
        else:
            yield string[i:c]
            i = c + 1
```
We can update our `parse_csv()` procedure to use our advanced quote parser.
``` def parse_csv(mystring): children = [] tree = (START_SYMBOL, children) for i, line in enumerate(mystring.split('\n')): children.append(("record %d" % i, [(cell, []) for cell in comma_split(line)])) return tree ``` Our new `parse_csv()` can now handle quotes correctly. ``` tree = parse_csv(mystring) display_tree(tree, graph_attr=lr_graph) ``` That of course does not survive long: ``` mystring = '''\ 1999,Chevy,"Venture \\"Extended Edition, Very Large\\"",,5000.00\ ''' print(mystring) ``` A few embedded quotes are sufficient to confuse our parser again. ``` tree = parse_csv(mystring) bad_nodes = {4, 5} display_tree(tree, node_attr=highlight_err_node, graph_attr=lr_graph) ``` Here is another record from that CSV file: ``` mystring = '''\ 1996,Jeep,Grand Cherokee,"MUST SELL! air, moon roof, loaded",4799.00 ''' print(mystring) tree = parse_csv(mystring) bad_nodes = {5, 6, 7, 8, 9, 10} display_tree(tree, node_attr=highlight_err_node, graph_attr=lr_graph) ``` Fixing this would require modifying both inner `parse_quote()` and the outer `parse_csv()` procedures. We note that each of these features actually documented in the CSV [RFC 4180](https://tools.ietf.org/html/rfc4180) Indeed, each additional improvement falls apart even with a little extra complexity. The problem becomes severe when one encounters recursive expressions. For example, JSON is a common alternative to CSV files for saving data. Similarly, one may have to parse data from an HTML table instead of a CSV file if one is getting the data from the web. One might be tempted to fix it with a little more ad hoc parsing, with a bit of *regular expressions* thrown in. However, that is the [path to insanity](https://stackoverflow.com/a/1732454). It is here that _formal parsers_ shine. The main idea is that, any given set of strings belong to a language, and these languages can be specified by their grammars (as we saw in the [chapter on grammars](Grammars.ipynb)). 
The great thing about grammars is that they can be _composed_. That is, one can introduce finer and finer details into an internal structure without affecting the external structure, and similarly, one can change the external structure without much impact on the internal structure. ## Grammars in Parsing We briefly describe grammars in the context of parsing. ### Excursion: Grammars and Derivation Trees A grammar, as you have read from the [chapter on grammars](Grammars.ipynb) is a set of _rules_ that explain how the start symbol can be expanded. Each rule has a name, also called a _nonterminal_, and a set of _alternative choices_ in how the nonterminal can be expanded. ``` A1_GRAMMAR: Grammar = { "<start>": ["<expr>"], "<expr>": ["<expr>+<expr>", "<expr>-<expr>", "<integer>"], "<integer>": ["<digit><integer>", "<digit>"], "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] } syntax_diagram(A1_GRAMMAR) ``` In the above expression, the rule `<expr> : [<expr>+<expr>,<expr>-<expr>,<integer>]` corresponds to how the nonterminal `<expr>` might be expanded. The expression `<expr>+<expr>` corresponds to one of the alternative choices. We call this an _alternative_ expansion for the nonterminal `<expr>`. Finally, in an expression `<expr>+<expr>`, each of `<expr>`, `+`, and `<expr>` are _symbols_ in that expansion. A symbol could be either a nonterminal or a terminal symbol based on whether its expansion is available in the grammar. 
Here is a string that represents an arithmetic expression that we would like to parse, which is specified by the grammar above: ``` mystring = '1+2' ``` The _derivation tree_ for our expression from this grammar is given by: ``` tree = ('<start>', [('<expr>', [('<expr>', [('<integer>', [('<digit>', [('1', [])])])]), ('+', []), ('<expr>', [('<integer>', [('<digit>', [('2', [])])])])])]) assert mystring == tree_to_string(tree) display_tree(tree) ``` While a grammar can be used to specify a given language, there could be multiple grammars that correspond to the same language. For example, here is another grammar to describe the same addition expression. ``` A2_GRAMMAR: Grammar = { "<start>": ["<expr>"], "<expr>": ["<integer><expr_>"], "<expr_>": ["+<expr>", "-<expr>", ""], "<integer>": ["<digit><integer_>"], "<integer_>": ["<integer>", ""], "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] } syntax_diagram(A2_GRAMMAR) ``` The corresponding derivation tree is given by: ``` tree = ('<start>', [('<expr>', [('<integer>', [('<digit>', [('1', [])]), ('<integer_>', [])]), ('<expr_>', [('+', []), ('<expr>', [('<integer>', [('<digit>', [('2', [])]), ('<integer_>', [])]), ('<expr_>', [])])])])]) assert mystring == tree_to_string(tree) display_tree(tree) ``` Indeed, there could be different classes of grammars that describe the same language. For example, the first grammar `A1_GRAMMAR` is a grammar that sports both _right_ and _left_ recursion, while the second grammar `A2_GRAMMAR` does not have left recursion in the nonterminals in any of its productions, but contains _epsilon_ productions. (An epsilon production is a production that has empty string in its right hand side.) ### End of Excursion ### Excursion: Recursion You would have noticed that we reuse the term `<expr>` in its own definition. Using the same nonterminal in its own definition is called *recursion*. 
There are two specific kinds of recursion one should be aware of in parsing, as we see in the next section. #### Recursion A grammar is _left recursive_ if any of its nonterminals are left recursive, and a nonterminal is directly left-recursive if the left-most symbol of any of its productions is itself. ``` LR_GRAMMAR: Grammar = { '<start>': ['<A>'], '<A>': ['<A>a', ''], } syntax_diagram(LR_GRAMMAR) mystring = 'aaaaaa' display_tree( ('<start>', [('<A>', [('<A>', [('<A>', []), ('a', [])]), ('a', [])]), ('a', [])])) ``` A grammar is indirectly left-recursive if any of the left-most symbols can be expanded using their definitions to produce the nonterminal as the left-most symbol of the expansion. The left recursion is called a _hidden-left-recursion_ if during the series of expansions of a nonterminal, one reaches a rule where the rule contains the same nonterminal after a prefix of other symbols, and these symbols can derive the empty string. For example, in `A1_GRAMMAR`, `<integer>` will be considered hidden-left recursive if `<digit>` could derive an empty string. Right recursive grammars are defined similarly. Below is the derivation tree for the right recursive grammar that represents the same language as that of `LR_GRAMMAR`. ``` RR_GRAMMAR: Grammar = { '<start>': ['<A>'], '<A>': ['a<A>', ''], } syntax_diagram(RR_GRAMMAR) display_tree(('<start>', [('<A>', [ ('a', []), ('<A>', [('a', []), ('<A>', [('a', []), ('<A>', [])])])])] )) ``` #### Ambiguity To complicate matters further, there could be multiple derivation trees – also called _parses_ – corresponding to the same string from the same grammar. 
For example, a string `1+2+3` can be parsed in two ways as we see below using the `A1_GRAMMAR` ``` mystring = '1+2+3' tree = ('<start>', [('<expr>', [('<expr>', [('<expr>', [('<integer>', [('<digit>', [('1', [])])])]), ('+', []), ('<expr>', [('<integer>', [('<digit>', [('2', [])])])])]), ('+', []), ('<expr>', [('<integer>', [('<digit>', [('3', [])])])])])]) assert mystring == tree_to_string(tree) display_tree(tree) tree = ('<start>', [('<expr>', [('<expr>', [('<integer>', [('<digit>', [('1', [])])])]), ('+', []), ('<expr>', [('<expr>', [('<integer>', [('<digit>', [('2', [])])])]), ('+', []), ('<expr>', [('<integer>', [('<digit>', [('3', [])])])])])])]) assert tree_to_string(tree) == mystring display_tree(tree) ``` There are many ways to resolve ambiguities. One approach taken by *Parsing Expression Grammars* explained in the next section is to specify a particular order of resolution, and choose the first one. Another approach is to simply return all possible derivation trees, which is the approach taken by *Earley parser* we develop later. ### End of Excursion ## A Parser Class Next, we develop different parsers. To do that, we define a minimal interface for parsing that is obeyed by all parsers. There are two approaches to parsing a string using a grammar. 1. The traditional approach is to use a *lexer* (also called a *tokenizer* or a *scanner*) to first tokenize the incoming string, and feed the grammar one token at a time. The lexer is typically a smaller parser that accepts a *regular language*. The advantage of this approach is that the grammar used by the parser can eschew the details of tokenization. Further, one gets a shallow derivation tree at the end of the parsing which can be directly used for generating the *Abstract Syntax Tree*. 2. The second approach is to use a tree pruner after the complete parse. With this approach, one uses a grammar that incorporates complete details of the syntax. 
Next, the nodes corresponding to tokens are pruned and replaced with their corresponding strings as leaf nodes. The utility of this approach is that the parser is more powerful, and further there is no artificial distinction between *lexing* and *parsing*. In this chapter, we use the second approach. This approach is implemented in the `prune_tree` method. The *Parser* class we define below provides the minimal interface. The main methods that need to be implemented by the classes implementing this interface are `parse_prefix` and `parse`. The `parse_prefix` returns a tuple, which contains the index until which parsing was completed successfully, and the parse forest until that index. The method `parse` returns a list of derivation trees if the parse was successful. ``` class Parser: """Base class for parsing.""" def __init__(self, grammar: Grammar, *, start_symbol: str = START_SYMBOL, log: bool = False, coalesce: bool = True, tokens: Set[str] = set()) -> None: """Constructor. `grammar` is the grammar to be used for parsing. Keyword arguments: `start_symbol` is the start symbol (default: '<start>'). `log` enables logging (default: False). `coalesce` defines if tokens should be coalesced (default: True). `tokens`, if set, is a set of tokens to be used.""" self._grammar = grammar self._start_symbol = start_symbol self.log = log self.coalesce_tokens = coalesce self.tokens = tokens def grammar(self) -> Grammar: """Return the grammar of this parser.""" return self._grammar def start_symbol(self) -> str: """Return the start symbol of this parser.""" return self._start_symbol def parse_prefix(self, text: str) -> Tuple[int, Iterable[DerivationTree]]: """Return pair (cursor, forest) for longest prefix of text. To be defined in subclasses.""" raise NotImplementedError def parse(self, text: str) -> Iterable[DerivationTree]: """Parse `text` using the grammar. 
Return an iterable of parse trees.""" cursor, forest = self.parse_prefix(text) if cursor < len(text): raise SyntaxError("at " + repr(text[cursor:])) return [self.prune_tree(tree) for tree in forest] def parse_on(self, text: str, start_symbol: str) -> Generator: old_start = self._start_symbol try: self._start_symbol = start_symbol yield from self.parse(text) finally: self._start_symbol = old_start def coalesce(self, children: List[DerivationTree]) -> List[DerivationTree]: last = '' new_lst: List[DerivationTree] = [] for cn, cc in children: if cn not in self._grammar: last += cn else: if last: new_lst.append((last, [])) last = '' new_lst.append((cn, cc)) if last: new_lst.append((last, [])) return new_lst def prune_tree(self, tree: DerivationTree) -> DerivationTree: name, children = tree assert isinstance(children, list) if self.coalesce_tokens: children = self.coalesce(cast(List[DerivationTree], children)) if name in self.tokens: return (name, [(tree_to_string(tree), [])]) else: return (name, [self.prune_tree(c) for c in children]) ``` ### Excursion: Canonical Grammars The `EXPR_GRAMMAR` we import from the [chapter on grammars](Grammars.ipynb) is oriented towards generation. In particular, the production rules are stored as strings. We need to massage this representation a little to conform to a _canonical representation_ where each token in a rule is represented separately. The `canonical` format uses separate tokens to represent each symbol in an expansion. 
``` CanonicalGrammar = Dict[str, List[List[str]]] import re def single_char_tokens(grammar: Grammar) -> Dict[str, List[List[Collection[str]]]]: g_ = {} for key in grammar: rules_ = [] for rule in grammar[key]: rule_ = [] for token in rule: if token in grammar: rule_.append(token) else: rule_.extend(token) rules_.append(rule_) g_[key] = rules_ return g_ def canonical(grammar: Grammar) -> CanonicalGrammar: def split(expansion): if isinstance(expansion, tuple): expansion = expansion[0] return [token for token in re.split( RE_NONTERMINAL, expansion) if token] return { k: [split(expression) for expression in alternatives] for k, alternatives in grammar.items() } CE_GRAMMAR: CanonicalGrammar = canonical(EXPR_GRAMMAR) CE_GRAMMAR ``` We also provide a convenience method for easier display of canonical grammars. ``` def recurse_grammar(grammar, key, order): rules = sorted(grammar[key]) old_len = len(order) for rule in rules: for token in rule: if token not in grammar: continue if token not in order: order.append(token) new = order[old_len:] for ckey in new: recurse_grammar(grammar, ckey, order) def show_grammar(grammar, start_symbol=START_SYMBOL): order = [start_symbol] recurse_grammar(grammar, start_symbol, order) return {k: sorted(grammar[k]) for k in order} show_grammar(CE_GRAMMAR) ``` We provide a way to revert a canonical expression. ``` def non_canonical(grammar): new_grammar = {} for k in grammar: rules = grammar[k] new_rules = [] for rule in rules: new_rules.append(''.join(rule)) new_grammar[k] = new_rules return new_grammar non_canonical(CE_GRAMMAR) ``` It is easier to work with the `canonical` representation during parsing. Hence, we update our parser class to store the `canonical` representation also. 
``` class Parser(Parser): def __init__(self, grammar, **kwargs): self._start_symbol = kwargs.get('start_symbol', START_SYMBOL) self.log = kwargs.get('log', False) self.tokens = kwargs.get('tokens', set()) self.coalesce_tokens = kwargs.get('coalesce', True) canonical_grammar = kwargs.get('canonical', False) if canonical_grammar: self.cgrammar = single_char_tokens(grammar) self._grammar = non_canonical(grammar) else: self._grammar = dict(grammar) self.cgrammar = single_char_tokens(canonical(grammar)) # we do not require a single rule for the start symbol if len(grammar.get(self._start_symbol, [])) != 1: self.cgrammar['<>'] = [[self._start_symbol]] ``` We update the `prune_tree()` to account for the phony start symbol if it was insserted. ``` class Parser(Parser): def prune_tree(self, tree): name, children = tree if name == '<>': assert len(children) == 1 return self.prune_tree(children[0]) if self.coalesce_tokens: children = self.coalesce(children) if name in self.tokens: return (name, [(tree_to_string(tree), [])]) else: return (name, [self.prune_tree(c) for c in children]) ``` ### End of Excursion ## Parsing Expression Grammars A _[Parsing Expression Grammar](http://bford.info/pub/lang/peg)_ (*PEG*) \cite{Ford2004} is a type of _recognition based formal grammar_ that specifies the sequence of steps to take to parse a given string. A _parsing expression grammar_ is very similar to a _context-free grammar_ (*CFG*) such as the ones we saw in the [chapter on grammars](Grammars.ipynb). As in a CFG, a parsing expression grammar is represented by a set of nonterminals and corresponding alternatives representing how to match each. For example, here is a PEG that matches `a` or `b`. ``` PEG1 = { '<start>': ['a', 'b'] } ``` However, unlike the _CFG_, the alternatives represent *ordered choice*. That is, rather than choosing all rules that can potentially match, we stop at the first match that succeed. 
For example, the below _PEG_ can match `ab` but not `abc` unlike a _CFG_ which will match both. (We call the sequence of ordered choice expressions *choice expressions* rather than alternatives to make the distinction from _CFG_ clear.) ``` PEG2 = { '<start>': ['ab', 'abc'] } ``` Each choice in a _choice expression_ represents a rule on how to satisfy that particular choice. The choice is a sequence of symbols (terminals and nonterminals) that are matched against a given text as in a _CFG_. Beyond the syntax of grammar definitions we have seen so far, a _PEG_ can also contain a few additional elements. See the exercises at the end of the chapter for additional information. The PEGs model the typical practice in handwritten recursive descent parsers, and hence it may be considered more intuitive to understand. ### The Packrat Parser for Predicate Expression Grammars Short of hand rolling a parser, _Packrat_ parsing is one of the simplest parsing techniques, and is one of the techniques for parsing PEGs. The _Packrat_ parser is so named because it tries to cache all results from simpler problems in the hope that these solutions can be used to avoid re-computation later. We develop a minimal _Packrat_ parser next. We derive from the `Parser` base class first, and we accept the text to be parsed in the `parse()` method, which in turn calls `unify_key()` with the `start_symbol`. __Note.__ While our PEG parser can produce only a single unambiguous parse tree, other parsers can produce multiple parses for ambiguous grammars. Hence, we return a list of trees (in this case with a single element). ``` class PEGParser(Parser): def parse_prefix(self, text): cursor, tree = self.unify_key(self.start_symbol(), text, 0) return cursor, [tree] ``` ### Excursion: Implementing `PEGParser` #### Unify Key The `unify_key()` algorithm is simple. If given a terminal symbol, it tries to match the symbol with the current position in the text. 
If the symbol and text match, it returns successfully with the new parse index `at`. If on the other hand, it was given a nonterminal, it retrieves the choice expression corresponding to the key, and tries to match each choice *in order* using `unify_rule()`. If **any** of the rules succeed in being unified with the given text, the parse is considered a success, and we return with the new parse index returned by `unify_rule()`. ``` class PEGParser(PEGParser): """Packrat parser for Parsing Expression Grammars (PEGs).""" def unify_key(self, key, text, at=0): if self.log: print("unify_key: %s with %s" % (repr(key), repr(text[at:]))) if key not in self.cgrammar: if text[at:].startswith(key): return at + len(key), (key, []) else: return at, None for rule in self.cgrammar[key]: to, res = self.unify_rule(rule, text, at) if res is not None: return (to, (key, res)) return 0, None mystring = "1" peg = PEGParser(EXPR_GRAMMAR, log=True) peg.unify_key('1', mystring) mystring = "2" peg.unify_key('1', mystring) ``` #### Unify Rule The `unify_rule()` method is similar. It retrieves the tokens corresponding to the rule that it needs to unify with the text, and calls `unify_key()` on them in sequence. If **all** tokens are successfully unified with the text, the parse is a success. 
``` class PEGParser(PEGParser): def unify_rule(self, rule, text, at): if self.log: print('unify_rule: %s with %s' % (repr(rule), repr(text[at:]))) results = [] for token in rule: at, res = self.unify_key(token, text, at) if res is None: return at, None results.append(res) return at, results mystring = "0" peg = PEGParser(EXPR_GRAMMAR, log=True) peg.unify_rule(peg.cgrammar['<digit>'][0], mystring, 0) mystring = "12" peg.unify_rule(peg.cgrammar['<integer>'][0], mystring, 0) mystring = "1 + 2" peg = PEGParser(EXPR_GRAMMAR, log=False) peg.parse(mystring) ``` The two methods are mutually recursive, and given that `unify_key()` tries each alternative until it succeeds, `unify_key` can be called multiple times with the same arguments. Hence, it is important to memoize the results of `unify_key`. Python provides a simple decorator `lru_cache` for memoizing any function call that has hashable arguments. We add that to our implementation so that repeated calls to `unify_key()` with the same argument get cached results. This memoization gives the algorithm its name – _Packrat_. ``` from functools import lru_cache class PEGParser(PEGParser): @lru_cache(maxsize=None) def unify_key(self, key, text, at=0): if key not in self.cgrammar: if text[at:].startswith(key): return at + len(key), (key, []) else: return at, None for rule in self.cgrammar[key]: to, res = self.unify_rule(rule, text, at) if res is not None: return (to, (key, res)) return 0, None ``` We wrap initialization and calling of `PEGParser` in a method `parse()` already implemented in the `Parser` base class that accepts the text to be parsed along with the grammar. ### End of Excursion Here are a few examples of our parser in action. 
``` mystring = "1 + (2 * 3)" peg = PEGParser(EXPR_GRAMMAR) for tree in peg.parse(mystring): assert tree_to_string(tree) == mystring display(display_tree(tree)) mystring = "1 * (2 + 3.35)" for tree in peg.parse(mystring): assert tree_to_string(tree) == mystring display(display_tree(tree)) ``` One should be aware that while the grammar looks like a *CFG*, the language described by a *PEG* may be different. Indeed, only *LL(1)* grammars are guaranteed to represent the same language for both PEGs and other parsers. Behavior of PEGs for other classes of grammars could be surprising \cite{redziejowski2008}. ## Parsing Context-Free Grammars ### Problems with PEG While _PEGs_ are simple at first sight, their behavior in some cases might be a bit unintuitive. For example, here is an example \cite{redziejowski2008}: ``` PEG_SURPRISE: Grammar = { "<A>": ["a<A>a", "aa"] } ``` When interpreted as a *CFG* and used as a string generator, it will produce strings of the form `aa, aaaa, aaaaaa` that is, it produces strings where the number of `a` is $ 2*n $ where $ n > 0 $. ``` strings = [] for nn in range(4): f = GrammarFuzzer(PEG_SURPRISE, start_symbol='<A>') tree = ('<A>', None) for _ in range(nn): tree = f.expand_tree_once(tree) tree = f.expand_tree_with_strategy(tree, f.expand_node_min_cost) strings.append(tree_to_string(tree)) display_tree(tree) strings ``` However, the _PEG_ parser can only recognize strings of the form $2^n$ ``` peg = PEGParser(PEG_SURPRISE, start_symbol='<A>') for s in strings: with ExpectError(): for tree in peg.parse(s): display_tree(tree) print(s) ``` This is not the only problem with _Parsing Expression Grammars_. While *PEGs* are expressive and the *packrat* parser for parsing them is simple and intuitive, *PEGs* suffer from a major deficiency for our purposes. *PEGs* are oriented towards language recognition, and it is not clear how to translate an arbitrary *PEG* to a *CFG*. 
As we mentioned earlier, a naive re-interpretation of a *PEG* as a *CFG* does not work very well. Further, it is not clear what is the exact relation between the class of languages represented by *PEG* and the class of languages represented by *CFG*. Since our primary focus is *fuzzing* – that is _generation_ of strings – , we next look at _parsers that can accept context-free grammars_. The general idea of *CFG* parser is the following: Peek at the input text for the allowed number of characters, and use these, and our parser state to determine which rules can be applied to complete parsing. We next look at a typical *CFG* parsing algorithm, the Earley Parser. ### The Earley Parser The Earley parser is a general parser that is able to parse any arbitrary *CFG*. It was invented by Jay Earley \cite{Earley1970} for use in computational linguistics. While its computational complexity is $O(n^3)$ for parsing strings with arbitrary grammars, it can parse strings with unambiguous grammars in $O(n^2)$ time, and all *[LR(k)](https://en.wikipedia.org/wiki/LR_parser)* grammars in linear time ($O(n)$ \cite{Leo1991}). Further improvements – notably handling epsilon rules – were invented by Aycock et al. \cite{Aycock2002}. Note that one restriction of our implementation is that the start symbol can have only one alternative in its alternative expressions. This is not a restriction in practice because any grammar with multiple alternatives for its start symbol can be extended with a new start symbol that has the original start symbol as its only choice. That is, given a grammar as below, ``` grammar = { '<start>': ['<A>', '<B>'], ... } ``` one may rewrite it as below to conform to the *single-alternative* rule. ``` grammar = { '<start>': ['<start_>'], '<start_>': ['<A>', '<B>'], ... } ``` Let us implement a class `EarleyParser`, again derived from `Parser` which implements an Earley parser. 
### Excursion: Implementing `EarleyParser` We first implement a simpler parser that is a parser for nearly all *CFGs*, but not quite. In particular, our parser does not understand _epsilon rules_ – rules that derive empty string. We show later how the parser can be extended to handle these. We use the following grammar in our examples below. ``` SAMPLE_GRAMMAR: Grammar = { '<start>': ['<A><B>'], '<A>': ['a<B>c', 'a<A>'], '<B>': ['b<C>', '<D>'], '<C>': ['c'], '<D>': ['d'] } C_SAMPLE_GRAMMAR = canonical(SAMPLE_GRAMMAR) syntax_diagram(SAMPLE_GRAMMAR) ``` The basic idea of Earley parsing is the following: * Start with the alternative expressions corresponding to the START_SYMBOL. These represent the possible ways to parse the string from a high level. Essentially each expression represents a parsing path. Queue each expression in our set of possible parses of the string. The parsed index of an expression is the part of expression that has already been recognized. In the beginning of parse, the parsed index of all expressions is at the beginning. Further, each letter gets a queue of expressions that recognizes that letter at that point in our parse. * Examine our queue of possible parses and check if any of them start with a nonterminal. If it does, then that nonterminal needs to be recognized from the input before the given rule can be parsed. Hence, add the alternative expressions corresponding to the nonterminal to the queue. Do this recursively. * At this point, we are ready to advance. Examine the current letter in the input, and select all expressions that have that particular letter at the parsed index. These expressions can now advance one step. Advance these selected expressions by incrementing their parsed index and add them to the queue of expressions in line for recognizing the next input letter. 
* If while doing these things, we find that any of the expressions have finished parsing, we fetch its corresponding nonterminal, and advance all expressions that have that nonterminal at their parsed index. * Continue this procedure recursively until all expressions that we have queued for the current letter have been processed. Then start processing the queue for the next letter. We explain each step in detail with examples in the coming sections. The parser uses dynamic programming to generate a table containing a _forest of possible parses_ at each letter index – the table contains as many columns as there are letters in the input, and each column contains different parsing rules at various stages of the parse. For example, given an input `adcd`, the Column 0 would contain the following: ``` <start> : ● <A> <B> ``` which is the starting rule that indicates that we are currently parsing the rule `<start>`, and the parsing state is just before identifying the symbol `<A>`. It would also contain the following which are two alternative paths it could take to complete the parsing. ``` <A> : ● a <B> c <A> : ● a <A> ``` Column 1 would contain the following, which represents the possible completion after reading `a`. ``` <A> : a ● <B> c <A> : a ● <A> <B> : ● b <C> <B> : ● <D> <A> : ● a <B> c <A> : ● a <A> <D> : ● d ``` Column 2 would contain the following after reading `d` ``` <D> : d ● <B> : <D> ● <A> : a <B> ● c ``` Similarly, Column 3 would contain the following after reading `c` ``` <A> : a <B> c ● <start> : <A> ● <B> <B> : ● b <C> <B> : ● <D> <D> : ● d ``` Finally, Column 4 would contain the following after reading `d`, with the `●` at the end of the `<start>` rule indicating that the parse was successful. ``` <D> : d ● <B> : <D> ● <start> : <A> <B> ● ``` As you can see from above, we are essentially filling a table (a table is also called a **chart**) of entries based on each letter we read, and the grammar rules that can be applied. 
This chart gives the parser its other name -- Chart parsing. #### Columns We define the `Column` first. The `Column` is initialized by its own `index` in the input string, and the `letter` at that index. Internally, we also keep track of the states that are added to the column as the parsing progresses. ``` class Column: def __init__(self, index, letter): self.index, self.letter = index, letter self.states, self._unique = [], {} def __str__(self): return "%s chart[%d]\n%s" % (self.letter, self.index, "\n".join( str(state) for state in self.states if state.finished())) ``` The `Column` only stores unique `states`. Hence, when a new `state` is `added` to our `Column`, we check whether it is already known. ``` class Column(Column): def add(self, state): if state in self._unique: return self._unique[state] self._unique[state] = state self.states.append(state) state.e_col = self return self._unique[state] ``` #### Items An item represents a _parse in progress for a specific rule._ Hence the item contains the name of the nonterminal, and the corresponding alternative expression (`expr`) which together form the rule, and the current position of parsing in this expression -- `dot`. **Note.** If you are familiar with [LR parsing](https://en.wikipedia.org/wiki/LR_parser), you will notice that an item is simply an `LR0` item. ``` class Item: def __init__(self, name, expr, dot): self.name, self.expr, self.dot = name, expr, dot ``` We also provide a few convenience methods. The method `finished()` checks if the `dot` has moved beyond the last element in `expr`. The method `advance()` produces a new `Item` with the `dot` advanced one token, and represents an advance of the parsing. The method `at_dot()` returns the current symbol being parsed. 
``` class Item(Item): def finished(self): return self.dot >= len(self.expr) def advance(self): return Item(self.name, self.expr, self.dot + 1) def at_dot(self): return self.expr[self.dot] if self.dot < len(self.expr) else None ``` Here is how an item could be used. We first define our item ``` item_name = '<B>' item_expr = C_SAMPLE_GRAMMAR[item_name][1] an_item = Item(item_name, tuple(item_expr), 0) ``` To determine where the status of parsing, we use `at_dot()` ``` an_item.at_dot() ``` That is, the next symbol to be parsed is `<D>` If we advance the item, we get another item that represents the finished parsing rule `<B>`. ``` another_item = an_item.advance() another_item.finished() ``` #### States For `Earley` parsing, the state of the parsing is simply one `Item` along with some meta information such as the starting `s_col` and ending column `e_col` for each state. Hence we inherit from `Item` to create a `State`. Since we are interested in comparing states, we define `hash()` and `eq()` with the corresponding methods. ``` class State(Item): def __init__(self, name, expr, dot, s_col, e_col=None): super().__init__(name, expr, dot) self.s_col, self.e_col = s_col, e_col def __str__(self): def idx(var): return var.index if var else -1 return self.name + ':= ' + ' '.join([ str(p) for p in [*self.expr[:self.dot], '|', *self.expr[self.dot:]] ]) + "(%d,%d)" % (idx(self.s_col), idx(self.e_col)) def copy(self): return State(self.name, self.expr, self.dot, self.s_col, self.e_col) def _t(self): return (self.name, self.expr, self.dot, self.s_col.index) def __hash__(self): return hash(self._t()) def __eq__(self, other): return self._t() == other._t() def advance(self): return State(self.name, self.expr, self.dot + 1, self.s_col) ``` The usage of `State` is similar to that of `Item`. The only difference is that it is used along with the `Column` to track the parsing state. 
For example, we initialize the first column as follows: ``` col_0 = Column(0, None) item_tuple = tuple(*C_SAMPLE_GRAMMAR[START_SYMBOL]) start_state = State(START_SYMBOL, item_tuple, 0, col_0) col_0.add(start_state) start_state.at_dot() ``` The first column is then updated by using `add()` method of `Column` ``` sym = start_state.at_dot() for alt in C_SAMPLE_GRAMMAR[sym]: col_0.add(State(sym, tuple(alt), 0, col_0)) for s in col_0.states: print(s) ``` #### The Parsing Algorithm The _Earley_ algorithm starts by initializing the chart with columns (as many as there are letters in the input). We also seed the first column with a state representing the expression corresponding to the start symbol. In our case, the state corresponds to the start symbol with the `dot` at `0` is represented as below. The `●` symbol represents the parsing status. In this case, we have not parsed anything. ``` <start>: ● <A> <B> ``` We pass this partial chart to a method for filling the rest of the parse chart. Before starting to parse, we seed the chart with the state representing the ongoing parse of the start symbol. ``` class EarleyParser(Parser): """Earley Parser. This parser can parse any context-free grammar.""" def __init__(self, grammar: Grammar, **kwargs) -> None: super().__init__(grammar, **kwargs) self.chart: List = [] # for type checking def chart_parse(self, words, start): alt = tuple(*self.cgrammar[start]) chart = [Column(i, tok) for i, tok in enumerate([None, *words])] chart[0].add(State(start, alt, 0, chart[0])) return self.fill_chart(chart) ``` The main parsing loop in `fill_chart()` has three fundamental operations. `predict()`, `scan()`, and `complete()`. We discuss `predict` next. #### Predicting States We have already seeded `chart[0]` with a state `[<A>,<B>]` with `dot` at `0`. Next, given that `<A>` is a nonterminal, we `predict` the possible parse continuations of this state. That is, it could be either `a <B> c` or `A <A>`. 
The general idea of `predict()` is as follows: Say you have a state with name `<A>` from the above grammar, and expression containing `[a,<B>,c]`. Imagine that you have seen `a` already, which means that the `dot` will be on `<B>`. Below, is a representation of our parse status. The left hand side of ● represents the portion already parsed (`a`), and the right hand side represents the portion yet to be parsed (`<B> c`). ``` <A>: a ● <B> c ``` To recognize `<B>`, we look at the definition of `<B>`, which has different alternative expressions. The `predict()` step adds each of these alternatives to the set of states, with `dot` at `0`. ``` <A>: a ● <B> c <B>: ● b c <B>: ● <D> ``` In essence, the `predict()` method, when called with the current nonterminal, fetches the alternative expressions corresponding to this nonterminal, and adds these as predicted _child_ states to the _current_ column. ``` class EarleyParser(EarleyParser): def predict(self, col, sym, state): for alt in self.cgrammar[sym]: col.add(State(sym, tuple(alt), 0, col)) ``` To see how to use `predict`, we first construct the 0th column as before, and we assign the constructed column to an instance of the EarleyParser. ``` col_0 = Column(0, None) col_0.add(start_state) ep = EarleyParser(SAMPLE_GRAMMAR) ep.chart = [col_0] ``` It should contain a single state -- `<start> at 0` ``` for s in ep.chart[0].states: print(s) ``` We apply predict to fill out the 0th column, and the column should contain the possible parse paths. ``` ep.predict(col_0, '<A>', s) for s in ep.chart[0].states: print(s) ``` #### Scanning Tokens What if rather than a nonterminal, the state contained a terminal symbol such as a letter? In that case, we are ready to make some progress. For example, consider the second state: ``` <B>: ● b c ``` We `scan` the next column's letter. Say the next token is `b`. If the letter matches what we have, then create a new state by advancing the current state by one letter. 
``` <B>: b ● c ``` This new state is added to the next column (i.e the column that has the matched letter). ``` class EarleyParser(EarleyParser): def scan(self, col, state, letter): if letter == col.letter: col.add(state.advance()) ``` As before, we construct the partial parse first, this time adding a new column so that we can observe the effects of `scan()` ``` ep = EarleyParser(SAMPLE_GRAMMAR) col_1 = Column(1, 'a') ep.chart = [col_0, col_1] new_state = ep.chart[0].states[1] print(new_state) ep.scan(col_1, new_state, 'a') for s in ep.chart[1].states: print(s) ``` #### Completing Processing When we advance, what if we actually `complete()` the processing of the current rule? If so, we want to update not just this state, but also all the _parent_ states from which this state was derived. For example, say we have states as below. ``` <A>: a ● <B> c <B>: b c ● ``` The state `<B>: b c ●` is now complete. So, we need to advance `<A>: a ● <B> c` one step forward. How do we determine the parent states? Note from `predict` that we added the predicted child states to the _same_ column as that of the inspected state. Hence, we look at the starting column of the current state, with the same symbol `at_dot` as that of the name of the completed state. For each such parent found, we advance that parent (because we have just finished parsing that non terminal for their `at_dot`) and add the new states to the current column. ``` class EarleyParser(EarleyParser): def complete(self, col, state): return self.earley_complete(col, state) def earley_complete(self, col, state): parent_states = [ st for st in state.s_col.states if st.at_dot() == state.name ] for st in parent_states: col.add(st.advance()) ``` Here is an example of completed processing. 
First we populate Column 0 using `predict()`
``` class EarleyParser(EarleyParser): def fill_chart(self, chart): for i, col in enumerate(chart): for state in col.states: if state.finished(): self.complete(col, state) else: sym = state.at_dot() if sym in self.cgrammar: self.predict(col, sym, state) else: if i + 1 >= len(chart): continue self.scan(chart[i + 1], state, sym) if self.log: print(col, '\n') return chart ``` We now can recognize a given string as belonging to a language represented by a grammar. ``` ep = EarleyParser(SAMPLE_GRAMMAR, log=True) columns = ep.chart_parse('adcd', START_SYMBOL) ``` The chart we printed above only shows completed entries at each index. The parenthesized expression indicates the column just before the first character was recognized, and the ending column. Notice how the `<start>` nonterminal shows fully parsed status. ``` last_col = columns[-1] for state in last_col.states: if state.name == '<start>': print(state) ``` Since `chart_parse()` returns the completed table, we now need to extract the derivation trees. #### The Parse Method For determining how far we have managed to parse, we simply look for the last index from `chart_parse()` where the `start_symbol` was found. ``` class EarleyParser(EarleyParser): def parse_prefix(self, text): self.table = self.chart_parse(text, self.start_symbol()) for col in reversed(self.table): states = [ st for st in col.states if st.name == self.start_symbol() ] if states: return col.index, states return -1, [] ``` Here is the `parse_prefix()` in action. ``` ep = EarleyParser(SAMPLE_GRAMMAR) cursor, last_states = ep.parse_prefix('adcd') print(cursor, [str(s) for s in last_states]) ``` The following is adapted from the excellent reference on Earley parsing by [Loup Vaillant](http://loup-vaillant.fr/tutorials/earley-parsing/). Our `parse()` method is as follows. It depends on two methods `parse_forest()` and `extract_trees()` that will be defined next. 
``` class EarleyParser(EarleyParser): def parse(self, text): cursor, states = self.parse_prefix(text) start = next((s for s in states if s.finished()), None) if cursor < len(text) or not start: raise SyntaxError("at " + repr(text[cursor:])) forest = self.parse_forest(self.table, start) for tree in self.extract_trees(forest): yield self.prune_tree(tree) ``` #### Parsing Paths The `parse_paths()` method tries to unify the given expression in `named_expr` with the parsed string. For that, it extracts the last symbol in `named_expr` and checks if it is a terminal symbol. If it is, then it checks the chart at `til` to see if the letter corresponding to the position matches the terminal symbol. If it does, extend our start index by the length of the symbol. If the symbol was a nonterminal symbol, then we retrieve the parsed states at the current end column index (`til`) that correspond to the nonterminal symbol, and collect the start index. These are the end column indexes for the remaining expression. Given our list of start indexes, we obtain the parse paths from the remaining expression. If we can obtain any, then we return the parse paths. If not, we return an empty list. 
``` class EarleyParser(EarleyParser): def parse_paths(self, named_expr, chart, frm, til): def paths(state, start, k, e): if not e: return [[(state, k)]] if start == frm else [] else: return [[(state, k)] + r for r in self.parse_paths(e, chart, frm, start)] *expr, var = named_expr starts = None if var not in self.cgrammar: starts = ([(var, til - len(var), 't')] if til > 0 and chart[til].letter == var else []) else: starts = [(s, s.s_col.index, 'n') for s in chart[til].states if s.finished() and s.name == var] return [p for s, start, k in starts for p in paths(s, start, k, expr)] ``` Here is the `parse_paths()` in action ``` print(SAMPLE_GRAMMAR['<start>']) ep = EarleyParser(SAMPLE_GRAMMAR) completed_start = last_states[0] paths = ep.parse_paths(completed_start.expr, columns, 0, 4) for path in paths: print([list(str(s_) for s_ in s) for s in path]) ``` That is, the parse path for `<start>` given the input `adcd` included recognizing the expression `<A><B>`. This was recognized by the two states: `<A>` from input(0) to input(2) which further involved recognizing the rule `a<B>c`, and the next state `<B>` from input(3) which involved recognizing the rule `<D>`. #### Parsing Forests The `parse_forest()` method takes the state which represents the completed parse, and determines the possible ways that its expressions corresponded to the parsed expression. For example, say we are parsing `1+2+3`, and the state has `[<expr>,+,<expr>]` in `expr`. It could have been parsed as either `[{<expr>:1+2},+,{<expr>:3}]` or `[{<expr>:1},+,{<expr>:2+3}]`. 
```
class EarleyParser(EarleyParser):
    def forest(self, s, kind, chart):
        # A forest node of kind 'n' (nonterminal) expands recursively into a
        # subforest; anything else is a terminal and becomes a leaf.
        return self.parse_forest(chart, s) if kind == 'n' else (s, [])

    def parse_forest(self, chart, state):
        pathexprs = self.parse_paths(state.expr, chart, state.s_col.index,
                                     state.e_col.index) if state.expr else []
        return state.name, [[(v, k, chart) for v, k in reversed(pathexpr)]
                            for pathexpr in pathexprs]

ep = EarleyParser(SAMPLE_GRAMMAR)
result = ep.parse_forest(columns, last_states[0])
result
```

#### Extracting Trees

What we have from `parse_forest()` is a forest of trees. We need to extract a single tree from that forest. That is accomplished as follows.

(For now, we return the first available derivation tree. To do that, we need to extract the parse forest from the state corresponding to `start`.)

```
class EarleyParser(EarleyParser):
    def extract_a_tree(self, forest_node):
        name, paths = forest_node
        if not paths:
            return (name, [])
        return (name, [self.extract_a_tree(self.forest(*p)) for p in paths[0]])

    def extract_trees(self, forest):
        yield self.extract_a_tree(forest)
```

We now verify that our parser can parse a given expression.

```
A3_GRAMMAR: Grammar = {
    "<start>": ["<bexpr>"],
    "<bexpr>": [
        "<aexpr><gt><aexpr>", "<aexpr><lt><aexpr>", "<aexpr>=<aexpr>",
        "<bexpr>=<bexpr>", "<bexpr>&<bexpr>", "<bexpr>|<bexpr>",
        "(<bexpr>)"  # fixed typo: was "(<bexrp>)", an undefined nonterminal
    ],
    "<aexpr>":
    ["<aexpr>+<aexpr>", "<aexpr>-<aexpr>", "(<aexpr>)", "<integer>"],
    "<integer>": ["<digit><integer>", "<digit>"],
    "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
    "<lt>": ['<'],
    "<gt>": ['>']
}

syntax_diagram(A3_GRAMMAR)

mystring = '(1+24)=33'
parser = EarleyParser(A3_GRAMMAR)
for tree in parser.parse(mystring):
    assert tree_to_string(tree) == mystring
    display_tree(tree)
```

We now have a complete parser that can parse almost arbitrary *CFG*. There remains a small corner to fix -- the case of epsilon rules as we will see later.

#### Ambiguous Parsing

Ambiguous grammars are grammars that can produce multiple derivation trees for some given string.
For example, the `A3_GRAMMAR` can parse `1+2+3` in two different ways – `[1+2]+3` and `1+[2+3]`. Extracting a single tree might be reasonable for unambiguous parses. However, what if the given grammar produces ambiguity when given a string? We need to extract all derivation trees in that case. We enhance our `extract_trees()` method to extract multiple derivation trees. ``` import itertools as I class EarleyParser(EarleyParser): def extract_trees(self, forest_node): name, paths = forest_node if not paths: yield (name, []) for path in paths: ptrees = [self.extract_trees(self.forest(*p)) for p in path] for p in I.product(*ptrees): yield (name, p) ``` As before, we verify that everything works. ``` mystring = '1+2' parser = EarleyParser(A1_GRAMMAR) for tree in parser.parse(mystring): assert mystring == tree_to_string(tree) display_tree(tree) ``` One can also use a `GrammarFuzzer` to verify that everything works. ``` gf = GrammarFuzzer(A1_GRAMMAR) for i in range(5): s = gf.fuzz() print(i, s) for tree in parser.parse(s): assert tree_to_string(tree) == s ``` #### The Aycock Epsilon Fix While parsing, one often requires to know whether a given nonterminal can derive an empty string. For example, in the following grammar A can derive an empty string, while B can't. The nonterminals that can derive an empty string are called _nullable_ nonterminals. For example, in the below grammar `E_GRAMMAR_1`, `<A>` is _nullable_, and since `<A>` is one of the alternatives of `<start>`, `<start>` is also _nullable_. But `<B>` is not _nullable_. ``` E_GRAMMAR_1: Grammar = { '<start>': ['<A>', '<B>'], '<A>': ['a', ''], '<B>': ['b'] } ``` One of the problems with the original Earley implementation is that it does not handle rules that can derive empty strings very well. 
For example, the given grammar should match `a` ``` EPSILON = '' E_GRAMMAR: Grammar = { '<start>': ['<S>'], '<S>': ['<A><A><A><A>'], '<A>': ['a', '<E>'], '<E>': [EPSILON] } syntax_diagram(E_GRAMMAR) mystring = 'a' parser = EarleyParser(E_GRAMMAR) with ExpectError(): trees = parser.parse(mystring) ``` Aycock et al.\cite{Aycock2002} suggests a simple fix. Their idea is to pre-compute the `nullable` set and use it to advance the `nullable` states. However, before we do that, we need to compute the `nullable` set. The `nullable` set consists of all nonterminals that can derive an empty string. Computing the `nullable` set requires expanding each production rule in the grammar iteratively and inspecting whether a given rule can derive the empty string. Each iteration needs to take into account new terminals that have been found to be `nullable`. The procedure stops when we obtain a stable result. This procedure can be abstracted into a more general method `fixpoint`. ##### Fixpoint A `fixpoint` of a function is an element in the function's domain such that it is mapped to itself. For example, 1 is a `fixpoint` of square root because `squareroot(1) == 1`. (We use `str` rather than `hash` to check for equality in `fixpoint` because the data structure `set`, which we would like to use as an argument has a good string representation but is not hashable). ``` def fixpoint(f): def helper(arg): while True: sarg = str(arg) arg_ = f(arg) if str(arg_) == sarg: return arg arg = arg_ return helper ``` Remember `my_sqrt()` from [the first chapter](Intro_Testing.ipynb)? We can define `my_sqrt()` using fixpoint. ``` def my_sqrt(x): @fixpoint def _my_sqrt(approx): return (approx + x / approx) / 2 return _my_sqrt(1) my_sqrt(2) ``` ##### Nullable Similarly, we can define `nullable` using `fixpoint`. We essentially provide the definition of a single intermediate step. 
That is, assuming that `nullables` contain the current `nullable` nonterminals, we iterate over the grammar looking for productions which are `nullable` -- that is, productions where the entire sequence can yield an empty string on some expansion. We need to iterate over the different alternative expressions and their corresponding nonterminals. Hence we define a `rules()` method converts our dictionary representation to this pair format. ``` def rules(grammar): return [(key, choice) for key, choices in grammar.items() for choice in choices] ``` The `terminals()` method extracts all terminal symbols from a `canonical` grammar representation. ``` def terminals(grammar): return set(token for key, choice in rules(grammar) for token in choice if token not in grammar) def nullable_expr(expr, nullables): return all(token in nullables for token in expr) def nullable(grammar): productions = rules(grammar) @fixpoint def nullable_(nullables): for A, expr in productions: if nullable_expr(expr, nullables): nullables |= {A} return (nullables) return nullable_({EPSILON}) for key, grammar in { 'E_GRAMMAR': E_GRAMMAR, 'E_GRAMMAR_1': E_GRAMMAR_1 }.items(): print(key, nullable(canonical(grammar))) ``` So, once we have the `nullable` set, all that we need to do is, after we have called `predict` on a state corresponding to a nonterminal, check if it is `nullable` and if it is, advance and add the state to the current column. ``` class EarleyParser(EarleyParser): def __init__(self, grammar, **kwargs): super().__init__(grammar, **kwargs) self.epsilon = nullable(self.cgrammar) def predict(self, col, sym, state): for alt in self.cgrammar[sym]: col.add(State(sym, tuple(alt), 0, col)) if sym in self.epsilon: col.add(state.advance()) mystring = 'a' parser = EarleyParser(E_GRAMMAR) for tree in parser.parse(mystring): display_tree(tree) ``` To ensure that our parser does parse all kinds of grammars, let us try two more test cases. 
``` DIRECTLY_SELF_REFERRING: Grammar = { '<start>': ['<query>'], '<query>': ['select <expr> from a'], "<expr>": ["<expr>", "a"], } INDIRECTLY_SELF_REFERRING: Grammar = { '<start>': ['<query>'], '<query>': ['select <expr> from a'], "<expr>": ["<aexpr>", "a"], "<aexpr>": ["<expr>"], } mystring = 'select a from a' for grammar in [DIRECTLY_SELF_REFERRING, INDIRECTLY_SELF_REFERRING]: forest = EarleyParser(grammar).parse(mystring) print('recognized', mystring) try: for tree in forest: print(tree_to_string(tree)) except RecursionError as e: print("Recursion error", e) ``` Why do we get recursion error here? The reason is that, our implementation of `extract_trees()` is eager. That is, it attempts to extract _all_ inner parse trees before it can construct the outer parse tree. When there is a self reference, this results in recursion. Here is a simple extractor that avoids this problem. The idea here is that we randomly and lazily choose a node to expand, which avoids the infinite recursion. #### Tree Extractor As you saw above, one of the problems with attempting to extract all trees is that the parse forest can consist of an infinite number of trees. So, here, we solve that problem by extracting one tree at a time. 
```
class SimpleExtractor:
    """Extract one random derivation tree at a time from a parse forest.

    Lazily and randomly chooses which forest alternative to expand, which
    avoids the infinite recursion of the eager `extract_trees()` on
    self-referential grammars.
    """

    def __init__(self, parser, text):
        self.parser = parser
        cursor, states = parser.parse_prefix(text)
        start = next((s for s in states if s.finished()), None)
        if cursor < len(text) or not start:
            # Report the unparsed remainder of the input, consistent with
            # EarleyParser.parse(); previously this printed the bare index.
            raise SyntaxError("at " + repr(text[cursor:]))
        self.my_forest = parser.parse_forest(parser.table, start)

    def extract_a_node(self, forest_node):
        name, paths = forest_node
        if not paths:
            return ((name, 0, 1), []), (name, [])
        cur_path, i, length = self.choose_path(paths)
        child_nodes = []
        pos_nodes = []
        for s, kind, chart in cur_path:
            f = self.parser.forest(s, kind, chart)
            postree, ntree = self.extract_a_node(f)
            child_nodes.append(ntree)
            pos_nodes.append(postree)
        return ((name, i, length), pos_nodes), (name, child_nodes)

    def choose_path(self, arr):
        # Random choice among the alternatives of this forest node.
        length = len(arr)
        i = random.randrange(length)
        return arr[i], i, length

    def extract_a_tree(self):
        pos_tree, parse_tree = self.extract_a_node(self.my_forest)
        return self.parser.prune_tree(parse_tree)
```

Using it is as follows:

```
de = SimpleExtractor(EarleyParser(DIRECTLY_SELF_REFERRING), mystring)

for i in range(5):
    tree = de.extract_a_tree()
    print(tree_to_string(tree))
```

On the indirect reference:

```
ie = SimpleExtractor(EarleyParser(INDIRECTLY_SELF_REFERRING), mystring)

for i in range(5):
    tree = ie.extract_a_tree()
    print(tree_to_string(tree))
```

Note that the `SimpleExtractor` gives no guarantee of the uniqueness of the returned trees. This can however be fixed by keeping track of the particular nodes that were expanded from the `pos_tree` variable, and hence, avoiding exploration of the same paths.

For implementing this, we extract the random stream passing into the `SimpleExtractor`, and use it to control which nodes are explored. Different exploration paths can then form a tree of nodes.

We start with the node definition for a single choice. The `self._chosen` is the current choice made, `self.next` holds the next choice done using `self._chosen`.
The `self._total` holds the total number of choices that one can have in this node.

```
class ChoiceNode:
    """A node in the tree of exploration choices."""

    def __init__(self, parent, total):
        self._p, self._chosen = parent, 0
        self._total, self.next = total, None

    def chosen(self):
        assert not self.finished()
        return self._chosen

    def __str__(self):
        # Fixed: previously referenced the undefined attribute self._i,
        # which raised AttributeError whenever the node was printed.
        return '(%s/%s %s)' % (str(self._chosen), str(self._total),
                               str(self.next))

    def __repr__(self):
        return repr((self._chosen, self._total))

    def increment(self):
        # as soon as we increment, next becomes invalid
        self.next = None
        self._chosen += 1
        if self.finished():
            if self._p is None:
                return None
            return self._p.increment()
        return self

    def finished(self):
        return self._chosen >= self._total
```

Now we come to the enhanced `EnhancedExtractor()`.

```
class EnhancedExtractor(SimpleExtractor):
    def __init__(self, parser, text):
        super().__init__(parser, text)
        self.choices = ChoiceNode(None, 1)
```

First we define `choose_path()` that given an array and a choice node, returns the element in array corresponding to the next choice node if it exists, or produces a new choice node, and returns that element.

```
class EnhancedExtractor(EnhancedExtractor):
    def choose_path(self, arr, choices):
        arr_len = len(arr)
        if choices.next is not None:
            if choices.next.finished():
                return None, None, None, choices.next
        else:
            choices.next = ChoiceNode(choices, arr_len)
        next_choice = choices.next.chosen()
        choices = choices.next
        return arr[next_choice], next_choice, arr_len, choices
```

We define `extract_a_node()` here. While extracting, we have a choice. Should we allow infinite forests, or should we have a finite number of trees with no direct recursion? A direct recursion is when there exists a parent node with the same nonterminal that parsed the same span. We choose here not to extract such trees. They can be added back after parsing.

This is a recursive procedure that inspects a node, extracts the path required to complete that node.
A single path (corresponding to a nonterminal) may again be composed of a sequence of smaller paths. Such paths are again extracted using another call to `extract_a_node()` recursively. What happens when we hit on one of the node recursions we want to avoid? In that case, we return the current choice node, which bubbles up to `extract_a_tree()`. That procedure increments the last choice, which in turn increments up the parents until we reach a choice node that still has options to explore. What if we hit the end of choices for a particular choice node(i.e, we have exhausted paths that can be taken from a node)? In this case also, we return the current choice node, which bubbles up to `extract_a_tree()`. That procedure increments the last choice, which bubbles up to the next choice that has some unexplored paths. ``` class EnhancedExtractor(EnhancedExtractor): def extract_a_node(self, forest_node, seen, choices): name, paths = forest_node if not paths: return (name, []), choices cur_path, _i, _l, new_choices = self.choose_path(paths, choices) if cur_path is None: return None, new_choices child_nodes = [] for s, kind, chart in cur_path: if kind == 't': child_nodes.append((s, [])) continue nid = (s.name, s.s_col.index, s.e_col.index) if nid in seen: return None, new_choices f = self.parser.forest(s, kind, chart) ntree, newer_choices = self.extract_a_node(f, seen | {nid}, new_choices) if ntree is None: return None, newer_choices child_nodes.append(ntree) new_choices = newer_choices return (name, child_nodes), new_choices ``` The `extract_a_tree()` is a depth first extractor of a single tree. It tries to extract a tree, and if the extraction returns `None`, it means that a particular choice was exhausted, or we hit on a recursion. In that case, we increment the choice, and explore a new path. 
``` class EnhancedExtractor(EnhancedExtractor): def extract_a_tree(self): while not self.choices.finished(): parse_tree, choices = self.extract_a_node(self.my_forest, set(), self.choices) choices.increment() if parse_tree is not None: return self.parser.prune_tree(parse_tree) return None ``` Note that the `EnhancedExtractor` only extracts nodes that are not directly recursive. That is, if it finds a node with a nonterminal that covers the same span as that of a parent node with the same nonterminal, it skips the node. ``` ee = EnhancedExtractor(EarleyParser(INDIRECTLY_SELF_REFERRING), mystring) i = 0 while True: i += 1 t = ee.extract_a_tree() if t is None: break print(i, t) s = tree_to_string(t) assert s == mystring istring = '1+2+3+4' ee = EnhancedExtractor(EarleyParser(A1_GRAMMAR), istring) i = 0 while True: i += 1 t = ee.extract_a_tree() if t is None: break print(i, t) s = tree_to_string(t) assert s == istring ``` #### More Earley Parsing A number of other optimizations exist for Earley parsers. A fast industrial strength Earley parser implementation is the [Marpa parser](https://jeffreykegler.github.io/Marpa-web-site/). Further, Earley parsing need not be restricted to character data. One may also parse streams (audio and video streams) \cite{qi2018generalized} using a generalized Earley parser. ### End of Excursion Here are a few examples of the Earley parser in action. ``` mystring = "1 + (2 * 3)" earley = EarleyParser(EXPR_GRAMMAR) for tree in earley.parse(mystring): assert tree_to_string(tree) == mystring display(display_tree(tree)) mystring = "1 * (2 + 3.35)" for tree in earley.parse(mystring): assert tree_to_string(tree) == mystring display(display_tree(tree)) ``` In contrast to the `PEGParser`, above, the `EarleyParser` can handle arbitrary context-free grammars. ### Excursion: Testing the Parsers While we have defined two parser variants, it would be nice to have some confirmation that our parses work well. 
While it is possible to formally prove that they work, it is much more satisfying to generate random grammars, their corresponding strings, and parse them using the same grammar. ``` def prod_line_grammar(nonterminals, terminals): g = { '<start>': ['<symbols>'], '<symbols>': ['<symbol><symbols>', '<symbol>'], '<symbol>': ['<nonterminals>', '<terminals>'], '<nonterminals>': ['<lt><alpha><gt>'], '<lt>': ['<'], '<gt>': ['>'], '<alpha>': nonterminals, '<terminals>': terminals } if not nonterminals: g['<nonterminals>'] = [''] del g['<lt>'] del g['<alpha>'] del g['<gt>'] return g syntax_diagram(prod_line_grammar(["A", "B", "C"], ["1", "2", "3"])) def make_rule(nonterminals, terminals, num_alts): prod_grammar = prod_line_grammar(nonterminals, terminals) gf = GrammarFuzzer(prod_grammar, min_nonterminals=3, max_nonterminals=5) name = "<%s>" % ''.join(random.choices(string.ascii_uppercase, k=3)) return (name, [gf.fuzz() for _ in range(num_alts)]) make_rule(["A", "B", "C"], ["1", "2", "3"], 3) from Grammars import unreachable_nonterminals def make_grammar(num_symbols=3, num_alts=3): terminals = list(string.ascii_lowercase) grammar = {} name = None for _ in range(num_symbols): nonterminals = [k[1:-1] for k in grammar.keys()] name, expansions = \ make_rule(nonterminals, terminals, num_alts) grammar[name] = expansions grammar[START_SYMBOL] = [name] # Remove unused parts for nonterminal in unreachable_nonterminals(grammar): del grammar[nonterminal] assert is_valid_grammar(grammar) return grammar make_grammar() ``` Now we verify if our arbitrary grammars can be used by the Earley parser. 
``` for i in range(5): my_grammar = make_grammar() print(my_grammar) parser = EarleyParser(my_grammar) mygf = GrammarFuzzer(my_grammar) s = mygf.fuzz() print(s) for tree in parser.parse(s): assert tree_to_string(tree) == s display_tree(tree) ``` With this, we have completed both implementation and testing of *arbitrary* CFG, which can now be used along with `LangFuzzer` to generate better fuzzing inputs. ### End of Excursion ## Background Numerous parsing techniques exist that can parse a given string using a given grammar, and produce corresponding derivation tree or trees. However, some of these techniques work only on specific classes of grammars. These classes of grammars are named after the specific kind of parser that can accept grammars of that category. That is, the upper bound for the capabilities of the parser defines the grammar class named after that parser. The *LL* and *LR* parsing are the main traditions in parsing. Here, *LL* means left-to-right, leftmost derivation, and it represents a top-down approach. On the other hand, and LR (left-to-right, rightmost derivation) represents a bottom-up approach. Another way to look at it is that LL parsers compute the derivation tree incrementally in *pre-order* while LR parsers compute the derivation tree in *post-order* \cite{pingali2015graphical}). Different classes of grammars differ in the features that are available to the user for writing a grammar of that class. That is, the corresponding kind of parser will be unable to parse a grammar that makes use of more features than allowed. For example, the `A2_GRAMMAR` is an *LL* grammar because it lacks left recursion, while `A1_GRAMMAR` is not an *LL* grammar. This is because an *LL* parser parses its input from left to right, and constructs the leftmost derivation of its input by expanding the nonterminals it encounters. If there is a left recursion in one of these rules, an *LL* parser will enter an infinite loop. 
Similarly, a grammar is LL(k) if it can be parsed by an LL parser with k lookahead token, and LR(k) grammar can only be parsed with LR parser with at least k lookahead tokens. These grammars are interesting because both LL(k) and LR(k) grammars have $O(n)$ parsers, and can be used with relatively restricted computational budget compared to other grammars. The languages for which one can provide an *LL(k)* grammar is called *LL(k)* languages (where k is the minimum lookahead required). Similarly, *LR(k)* is defined as the set of languages that have an *LR(k)* grammar. In terms of languages, LL(k) $\subset$ LL(k+1) and LL(k) $\subset$ LR(k), and *LR(k)* $=$ *LR(1)*. All deterministic *CFLs* have an *LR(1)* grammar. However, there exist *CFLs* that are inherently ambiguous \cite{ogden1968helpful}, and for these, one can't provide an *LR(1)* grammar. The other main parsing algorithms for *CFGs* are GLL \cite{scott2010gll}, GLR \cite{tomita1987efficient,tomita2012generalized}, and CYK \cite{grune2008parsing}. The ALL(\*) (used by ANTLR) on the other hand is a grammar representation that uses *Regular Expression* like predicates (similar to advanced PEGs – see [Exercise](#Exercise-3:-PEG-Predicates)) rather than a fixed lookahead. Hence, ALL(\*) can accept a larger class of grammars than CFGs. In terms of computational limits of parsing, the main CFG parsers have a complexity of $O(n^3)$ for arbitrary grammars. However, parsing with arbitrary *CFG* is reducible to boolean matrix multiplication \cite{Valiant1975} (and the reverse \cite{Lee2002}). This is at present bounded by $O(2^{23728639}$) \cite{LeGall2014}. Hence, worse case complexity for parsing arbitrary CFG is likely to remain close to cubic. Regarding PEGs, the actual class of languages that is expressible in *PEG* is currently unknown. In particular, we know that *PEGs* can express certain languages such as $a^n b^n c^n$. However, we do not know if there exist *CFLs* that are not expressible with *PEGs*. 
In Section 2.3, we provided an instance of a counter-intuitive PEG grammar. While important for our purposes (we use grammars for generation of inputs) this is not a criticism of parsing with PEGs. PEG focuses on writing grammars for recognizing a given language, and not necessarily in interpreting what language an arbitrary PEG might yield. Given a Context-Free Language to parse, it is almost always possible to write a grammar for it in PEG, and given that 1) a PEG can parse any string in $O(n)$ time, and 2) at present we know of no CFL that can't be expressed as a PEG, and 3) compared with *LR* grammars, a PEG is often more intuitive because it allows top-down interpretation, when writing a parser for a language, PEGs should be under serious consideration. ## Synopsis This chapter introduces `Parser` classes, parsing a string into a _derivation tree_ as introduced in the [chapter on efficient grammar fuzzing](GrammarFuzzer.ipynb). Two important parser classes are provided: * [Parsing Expression Grammar parsers](#Parsing-Expression-Grammars) (`PEGParser`). These are very efficient, but limited to specific grammar structure. Notably, the alternatives represent *ordered choice*. That is, rather than choosing all rules that can potentially match, we stop at the first match that succeed. * [Earley parsers](#Parsing-Context-Free-Grammars) (`EarleyParser`). These accept any kind of context-free grammars, and explore all parsing alternatives (if any). Using any of these is fairly easy, though. First, instantiate them with a grammar: ``` from Grammars import US_PHONE_GRAMMAR us_phone_parser = EarleyParser(US_PHONE_GRAMMAR) ``` Then, use the `parse()` method to retrieve a list of possible derivation trees: ``` trees = us_phone_parser.parse("(555)987-6543") tree = list(trees)[0] display_tree(tree) ``` These derivation trees can then be used for test generation, notably for mutating and recombining existing inputs. 
```
# ignore
from ClassDiagram import display_class_hierarchy

# ignore
display_class_hierarchy([PEGParser, EarleyParser],
                        public_methods=[
                            Parser.parse,
                            Parser.__init__,
                            Parser.grammar,
                            Parser.start_symbol
                        ],
                        types={
                            'DerivationTree': DerivationTree,
                            'Grammar': Grammar
                        },
                        project='fuzzingbook')
```
## Lessons Learned

* Grammars can be used to generate derivation trees for a given string.
* Parsing Expression Grammars are intuitive, and easy to implement, but require care to write.
* Earley Parsers can parse arbitrary Context Free Grammars.

## Next Steps

* Use parsed inputs to [recombine existing inputs](LangFuzzer.ipynb)

## Exercises

### Exercise 1: An Alternative Packrat

In the _Packrat_ parser, we showed how one could implement a simple _PEG_ parser. That parser kept track of the current location in the text using an index. Can you modify the parser so that it simply uses the current substring rather than tracking the index? That is, it should no longer have the `at` parameter.

**Solution.** Here is a possible solution:

```
class PackratParser(Parser):
    """A PEG parser that threads the *remaining substring* through the
    recursion instead of an index into the full text."""

    def parse_prefix(self, text):
        """Parse the longest matching prefix of `text`.
        Returns (number of unconsumed characters, [derivation tree])."""
        txt, res = self.unify_key(self.start_symbol(), text)
        return len(txt), [res]

    def parse(self, text):
        """Parse `text` completely; raise SyntaxError if input remains.
        NOTE(review): `res` is a list here, so `"at " + res` would raise
        TypeError on the error path — confirm intended error message."""
        remain, res = self.parse_prefix(text)
        if remain:
            raise SyntaxError("at " + res)
        return res

    def unify_rule(self, rule, text):
        """Match each token of `rule` against `text` in sequence.
        Returns (remaining text, list of subtrees), or (text, None) on failure."""
        results = []
        for token in rule:
            text, res = self.unify_key(token, text)
            if res is None:
                return text, None
            results.append(res)
        return text, results

    def unify_key(self, key, text):
        """Match a single `key` at the start of `text`.
        Terminals match literally; nonterminals try each alternative in
        order and commit to the first success (PEG ordered choice)."""
        if key not in self.cgrammar:
            if text.startswith(key):
                return text[len(key):], (key, [])
            else:
                return text, None
        for rule in self.cgrammar[key]:
            text_, res = self.unify_rule(rule, text)
            if res:
                return (text_, (key, res))
        return text, None

mystring = "1 + (2 * 3)"
for tree in PackratParser(EXPR_GRAMMAR).parse(mystring):
    assert tree_to_string(tree) == mystring
    display_tree(tree)
```
### Exercise 2: More PEG Syntax

The _PEG_ syntax provides a few notational conveniences reminiscent of regular
expressions. For example, it supports the following operators (letters `T` and `A` represent tokens that can be either terminal or nonterminal. `ε` is an empty string, and `/` is the ordered choice operator similar to the non-ordered choice operator `|`):

* `T?` represents an optional greedy match of T and `A := T?` is equivalent to `A := T/ε`.
* `T*` represents zero or more greedy matches of `T` and `A := T*` is equivalent to `A := T A/ε`.
* `T+` represents one or more greedy matches – equivalent to `TT*`

If you look at the three notations above, each can be represented in the grammar in terms of basic syntax. Remember the exercise from [the chapter on grammars](Grammars.ipynb) that developed `define_ex_grammar()` that can represent grammars as Python code? Extend `define_ex_grammar()` to `define_peg()` to support the above notational conveniences. The decorator should rewrite a given grammar that contains these notations to an equivalent grammar in basic syntax.

### Exercise 3: PEG Predicates

Beyond these notational conveniences, it also supports two predicates that can provide a powerful lookahead facility that does not consume any input.

* `T&A` represents an _And-predicate_ that matches `T` if `T` is matched, and it is immediately followed by `A`
* `T!A` represents a _Not-predicate_ that matches `T` if `T` is matched, and it is *not* immediately followed by `A`

Implement these predicates in our _PEG_ parser.

### Exercise 4: Earley Fill Chart

In the `Earley Parser`, `Column` class, we keep the states both as a `list` and also as a `dict` even though `dict` is ordered. Can you explain why?

**Hint**: see the `fill_chart` method.

**Solution.** Python allows us to append to a list in flight, while a dict, even though it is ordered, does not allow that facility.
That is, the following will work

```python
values = [1]
for v in values:
    values.append(v*2)
```
However, the following will result in an error

```python
values = {1:1}
for v in values:
    values[v*2] = v*2
```
In the `fill_chart`, we make use of this facility to modify the set of states we are iterating on, on the fly.

### Exercise 5: Leo Parser

One of the problems with the original Earley parser is that while it can parse strings using arbitrary _Context Free Grammars_, its performance on right-recursive grammars is quadratic. That is, it takes $O(n^2)$ runtime and space for parsing with right-recursive grammars. For example, consider the parsing of the following string by two different grammars `LR_GRAMMAR` and `RR_GRAMMAR`.

```
mystring = 'aaaaaa'
```
To see the problem, we need to enable logging. Here is the logged version of parsing with the `LR_GRAMMAR`

```
result = EarleyParser(LR_GRAMMAR, log=True).parse(mystring)
for _ in result: pass # consume the generator so that we can see the logs
```
Compare that to the parsing of `RR_GRAMMAR` as seen below:

```
result = EarleyParser(RR_GRAMMAR, log=True).parse(mystring)
for _ in result: pass
```
As can be seen from the parsing log for each letter, the number of states with representation `<A>: a <A> ● (i, j)` increases at each stage, and these are simply left over from the previous letter. They do not contribute anything more to the parse other than to simply complete these entries. However, they take up space, and require resources for inspection, contributing a factor of `n` in analysis.

Joop Leo \cite{Leo1991} found that this inefficiency can be avoided by detecting right recursion. The idea is that before starting the `completion` step, check whether the current item has a _deterministic reduction path_. If such a path exists, add a copy of the topmost element of the _deterministic reduction path_ to the current column, and return. If not, perform the original `completion` step.
**Definition 2.1**: An item is said to be on the deterministic reduction path above $[A \rightarrow \gamma., i]$ if it is $[B \rightarrow \alpha A ., k]$ with $[B \rightarrow \alpha . A, k]$ being the only item in $ I_i $ with the dot in front of A, or if it is on the deterministic reduction path above $[B \rightarrow \alpha A ., k]$. An item on such a path is called *topmost* one if there is no item on the deterministic reduction path above it\cite{Leo1991}. Finding a _deterministic reduction path_ is as follows: Given a complete state, represented by `<A> : seq_1 ● (s, e)` where `s` is the starting column for this rule, and `e` the current column, there is a _deterministic reduction path_ **above** it if two constraints are satisfied. 1. There exist a *single* item in the form `<B> : seq_2 ● <A> (k, s)` in column `s`. 2. That should be the *single* item in s with dot in front of `<A>` The resulting item is of the form `<B> : seq_2 <A> ● (k, e)`, which is simply item from (1) advanced, and is considered above `<A>:.. (s, e)` in the deterministic reduction path. The `seq_1` and `seq_2` are arbitrary symbol sequences. This forms the following chain of links, with `<A>:.. (s_1, e)` being the child of `<B>:.. (s_2, e)` etc. Here is one way to visualize the chain: ``` <C> : seq_3 <B> ● (s_3, e) | constraints satisfied by <C> : seq_3 ● <B> (s_3, s_2) <B> : seq_2 <A> ● (s_2, e) | constraints satisfied by <B> : seq_2 ● <A> (s_2, s_1) <A> : seq_1 ● (s_1, e) ``` Essentially, what we want to do is to identify potential deterministic right recursion candidates, perform completion on them, and *throw away the result*. We do this until we reach the top. See Grune et al.~\cite{grune2008parsing} for further information. 
Note that the completions are in the same column (`e`), with each candidates with constraints satisfied in further and further earlier columns (as shown below): ``` <C> : seq_3 ● <B> (s_3, s_2) --> <C> : seq_3 <B> ● (s_3, e) | <B> : seq_2 ● <A> (s_2, s_1) --> <B> : seq_2 <A> ● (s_2, e) | <A> : seq_1 ● (s_1, e) ``` Following this chain, the topmost item is the item `<C>:.. (s_3, e)` that does not have a parent. The topmost item needs to be saved is called a *transitive* item by Leo, and it is associated with the non-terminal symbol that started the lookup. The transitive item needs to be added to each column we inspect. Here is the skeleton for the parser `LeoParser`. ``` class LeoParser(EarleyParser): def complete(self, col, state): return self.leo_complete(col, state) def leo_complete(self, col, state): detred = self.deterministic_reduction(state) if detred: col.add(detred.copy()) else: self.earley_complete(col, state) def deterministic_reduction(self, state): raise NotImplementedError ``` Can you implement the `deterministic_reduction()` method to obtain the topmost element? **Solution.** Here is a possible solution: First, we update our `Column` class with the ability to add transitive items. Note that, while Leo asks the transitive to be added to the set $ I_k $ there is no actual requirement for the transitive states to be added to the `states` list. The transitive items are only intended for memoization and not for the `fill_chart()` method. Hence, we track them separately. ``` class Column(Column): def __init__(self, index, letter): self.index, self.letter = index, letter self.states, self._unique, self.transitives = [], {}, {} def add_transitive(self, key, state): assert key not in self.transitives self.transitives[key] = state return self.transitives[key] ``` Remember the picture we drew of the deterministic path? 
```
<C> : seq_3 <B> ● (s_3, e)
             |  constraints satisfied by <C> : seq_3 ● <B> (s_3, s_2)
<B> : seq_2 <A> ● (s_2, e)
             |  constraints satisfied by <B> : seq_2 ● <A> (s_2, s_1)
<A> : seq_1 ● (s_1, e)
```
We define a function `uniq_postdot()` that given the item `<A> := seq_1 ● (s_1, e)`, returns a `<B> : seq_2 ● <A> (s_2, s_1)` that satisfies the constraints mentioned in the above picture.

```
class LeoParser(LeoParser):
    def uniq_postdot(self, st_A):
        """Return the unique parent state `<B> : seq_2 ● <A> (s_2, s_1)`
        in st_A's start column whose dot stands immediately before <A>
        and whose <A> is the last symbol of its expression; None if no
        such unique state exists (no deterministic reduction step)."""
        col_s1 = st_A.s_col
        # All states in the start column whose dot is in front of <A>.
        parent_states = [
            s for s in col_s1.states if s.expr and s.at_dot() == st_A.name
        ]
        if len(parent_states) > 1:
            # Constraint (1) violated: the parent must be unique.
            return None
        # Constraint (2): <A> must be the final symbol of the parent rule.
        matching_st_B = [s for s in parent_states if s.dot == len(s.expr) - 1]
        return matching_st_B[0] if matching_st_B else None

lp = LeoParser(RR_GRAMMAR)
# NOTE(review): relies on `columns` computed by an earlier/later cell
# (see `lp.chart_parse(...)` below) — verify notebook cell order.
[(str(s), str(lp.uniq_postdot(s))) for s in columns[-1].states]
```
We next define the function `get_top()` that is the core of deterministic reduction which gets the topmost state above the current state (`A`).

```
class LeoParser(LeoParser):
    def get_top(self, state_A):
        """Return the topmost item on the deterministic reduction path
        above `state_A`, memoizing it as a transitive item in each
        column visited; None if there is no deterministic reduction."""
        st_B_inc = self.uniq_postdot(state_A)
        if not st_B_inc:
            return None
        t_name = st_B_inc.name
        if t_name in st_B_inc.e_col.transitives:
            # Already computed for this nonterminal in this column.
            return st_B_inc.e_col.transitives[t_name]
        st_B = st_B_inc.advance()          # complete: move the dot past <A>
        top = self.get_top(st_B) or st_B   # climb the chain; st_B if topmost
        return st_B_inc.e_col.add_transitive(t_name, top)
```
Once we have the machinery in place, `deterministic_reduction()` itself is simply a wrapper to call `get_top()`

```
class LeoParser(LeoParser):
    def deterministic_reduction(self, state):
        """Return the topmost transitive item above `state`, if any."""
        return self.get_top(state)

lp = LeoParser(RR_GRAMMAR)
columns = lp.chart_parse(mystring, lp.start_symbol())
[(str(s), str(lp.get_top(s))) for s in columns[-1].states]
```
Now, both LR and RR grammars should work within $O(n)$ bounds.

```
result = LeoParser(RR_GRAMMAR, log=True).parse(mystring)
for _ in result: pass
```
We verify the Leo parser with a few more right recursive grammars.
``` RR_GRAMMAR2 = { '<start>': ['<A>'], '<A>': ['ab<A>', ''], } mystring2 = 'ababababab' result = LeoParser(RR_GRAMMAR2, log=True).parse(mystring2) for _ in result: pass RR_GRAMMAR3 = { '<start>': ['c<A>'], '<A>': ['ab<A>', ''], } mystring3 = 'cababababab' result = LeoParser(RR_GRAMMAR3, log=True).parse(mystring3) for _ in result: pass RR_GRAMMAR4 = { '<start>': ['<A>c'], '<A>': ['ab<A>', ''], } mystring4 = 'ababababc' result = LeoParser(RR_GRAMMAR4, log=True).parse(mystring4) for _ in result: pass RR_GRAMMAR5 = { '<start>': ['<A>'], '<A>': ['ab<B>', ''], '<B>': ['<A>'], } mystring5 = 'abababab' result = LeoParser(RR_GRAMMAR5, log=True).parse(mystring5) for _ in result: pass RR_GRAMMAR6 = { '<start>': ['<A>'], '<A>': ['a<B>', ''], '<B>': ['b<A>'], } mystring6 = 'abababab' result = LeoParser(RR_GRAMMAR6, log=True).parse(mystring6) for _ in result: pass RR_GRAMMAR7 = { '<start>': ['<A>'], '<A>': ['a<A>', 'a'], } mystring7 = 'aaaaaaaa' result = LeoParser(RR_GRAMMAR7, log=True).parse(mystring7) for _ in result: pass ``` We verify that our parser works correctly on `LR_GRAMMAR` too. ``` result = LeoParser(LR_GRAMMAR, log=True).parse(mystring) for _ in result: pass ``` __Advanced:__ We have fixed the complexity bounds. However, because we are saving only the topmost item of a right recursion, we need to fix our parser to be aware of our fix while extracting parse trees. Can you fix it? __Hint:__ Leo suggests simply transforming the Leo item sets to normal Earley sets, with the results from deterministic reduction expanded to their originals. For that, keep in mind the picture of constraint chain we drew earlier. **Solution.** Here is a possible solution. We first change the definition of `add_transitive()` so that results of deterministic reduction can be identified later. 
``` class Column(Column): def add_transitive(self, key, state): assert key not in self.transitives self.transitives[key] = TState(state.name, state.expr, state.dot, state.s_col, state.e_col) return self.transitives[key] ``` We also need a `back()` method to create the constraints. ``` class State(State): def back(self): return TState(self.name, self.expr, self.dot - 1, self.s_col, self.e_col) ``` We update `copy()` to make `TState` items instead. ``` class TState(State): def copy(self): return TState(self.name, self.expr, self.dot, self.s_col, self.e_col) ``` We now modify the `LeoParser` to keep track of the chain of constrains that we mentioned earlier. ``` class LeoParser(LeoParser): def __init__(self, grammar, **kwargs): super().__init__(grammar, **kwargs) self._postdots = {} ``` Next, we update the `uniq_postdot()` so that it tracks the chain of links. ``` class LeoParser(LeoParser): def uniq_postdot(self, st_A): col_s1 = st_A.s_col parent_states = [ s for s in col_s1.states if s.expr and s.at_dot() == st_A.name ] if len(parent_states) > 1: return None matching_st_B = [s for s in parent_states if s.dot == len(s.expr) - 1] if matching_st_B: self._postdots[matching_st_B[0]._t()] = st_A return matching_st_B[0] return None ``` We next define a method `expand_tstate()` that, when given a `TState`, generates all the intermediate links that we threw away earlier for a given end column. ``` class LeoParser(LeoParser): def expand_tstate(self, state, e): if state._t() not in self._postdots: return c_C = self._postdots[state._t()] e.add(c_C.advance()) self.expand_tstate(c_C.back(), e) ``` We define a `rearrange()` method to generate a reversed table where each column contains states that start at that column. ``` class LeoParser(LeoParser): def rearrange(self, table): f_table = [Column(c.index, c.letter) for c in table] for col in table: for s in col.states: f_table[s.s_col.index].states.append(s) return f_table ``` Here is the rearranged table. 
(Can you explain why the Column 0 has a large number of `<start>` items?) ``` ep = LeoParser(RR_GRAMMAR) columns = ep.chart_parse(mystring, ep.start_symbol()) r_table = ep.rearrange(columns) for col in r_table: print(col, "\n") ``` We save the result of rearrange before going into `parse_forest()`. ``` class LeoParser(LeoParser): def parse(self, text): cursor, states = self.parse_prefix(text) start = next((s for s in states if s.finished()), None) if cursor < len(text) or not start: raise SyntaxError("at " + repr(text[cursor:])) self.r_table = self.rearrange(self.table) forest = self.extract_trees(self.parse_forest(self.table, start)) for tree in forest: yield self.prune_tree(tree) ``` Finally, during `parse_forest()`, we first check to see if it is a transitive state, and if it is, expand it to the original sequence of states using `traverse_constraints()`. ``` class LeoParser(LeoParser): def parse_forest(self, chart, state): if isinstance(state, TState): self.expand_tstate(state.back(), state.e_col) return super().parse_forest(chart, state) ``` This completes our implementation of `LeoParser`. We check whether the previously defined right recursive grammars parse and return the correct parse trees. 
``` result = LeoParser(RR_GRAMMAR).parse(mystring) for tree in result: assert mystring == tree_to_string(tree) result = LeoParser(RR_GRAMMAR2).parse(mystring2) for tree in result: assert mystring2 == tree_to_string(tree) result = LeoParser(RR_GRAMMAR3).parse(mystring3) for tree in result: assert mystring3 == tree_to_string(tree) result = LeoParser(RR_GRAMMAR4).parse(mystring4) for tree in result: assert mystring4 == tree_to_string(tree) result = LeoParser(RR_GRAMMAR5).parse(mystring5) for tree in result: assert mystring5 == tree_to_string(tree) result = LeoParser(RR_GRAMMAR6).parse(mystring6) for tree in result: assert mystring6 == tree_to_string(tree) result = LeoParser(RR_GRAMMAR7).parse(mystring7) for tree in result: assert mystring7 == tree_to_string(tree) result = LeoParser(LR_GRAMMAR).parse(mystring) for tree in result: assert mystring == tree_to_string(tree) RR_GRAMMAR8 = { '<start>': ['<A>'], '<A>': ['a<A>', 'a'] } mystring8 = 'aa' RR_GRAMMAR9 = { '<start>': ['<A>'], '<A>': ['<B><A>', '<B>'], '<B>': ['b'] } mystring9 = 'bbbbbbb' result = LeoParser(RR_GRAMMAR8).parse(mystring8) for tree in result: print(repr(tree_to_string(tree))) assert mystring8 == tree_to_string(tree) result = LeoParser(RR_GRAMMAR9).parse(mystring9) for tree in result: print(repr(tree_to_string(tree))) assert mystring9 == tree_to_string(tree) ``` ### Exercise 6: Filtered Earley Parser One of the problems with our Earley and Leo Parsers is that it can get stuck in infinite loops when parsing with grammars that contain token repetitions in alternatives. For example, consider the grammar below. ``` RECURSION_GRAMMAR: Grammar = { "<start>": ["<A>"], "<A>": ["<A>", "<A>aa", "AA", "<B>"], "<B>": ["<C>", "<C>cc", "CC"], "<C>": ["<B>", "<B>bb", "BB"] } ``` With this grammar, one can produce an infinite chain of derivations of `<A>`, (direct recursion) or an infinite chain of derivations of `<B> -> <C> -> <B> ...` (indirect recursion). 
The problem is that, our implementation can get stuck trying to derive one of these infinite chains. One possibility is to use the `LazyExtractor`. Another, is to simply avoid generating such chains. ``` from ExpectError import ExpectTimeout with ExpectTimeout(1, print_traceback=False): mystring = 'AA' parser = LeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) ``` Can you implement a solution such that any tree that contains such a chain is discarded? **Solution.** Here is a possible solution. ``` class FilteredLeoParser(LeoParser): def forest(self, s, kind, seen, chart): return self.parse_forest(chart, s, seen) if kind == 'n' else (s, []) def parse_forest(self, chart, state, seen=None): if isinstance(state, TState): self.expand_tstate(state.back(), state.e_col) def was_seen(chain, s): if isinstance(s, str): return False if len(s.expr) > 1: return False return s in chain if len(state.expr) > 1: # things get reset if we have a non loop seen = set() elif seen is None: # initialization seen = {state} pathexprs = self.parse_paths(state.expr, chart, state.s_col.index, state.e_col.index) if state.expr else [] return state.name, [[(s, k, seen | {s}, chart) for s, k in reversed(pathexpr) if not was_seen(seen, s)] for pathexpr in pathexprs] ``` With the `FilteredLeoParser`, we should be able to recover minimal parse trees in reasonable time. 
``` mystring = 'AA' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) mystring = 'AAaa' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) mystring = 'AAaaaa' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) mystring = 'CC' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) mystring = 'BBcc' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) mystring = 'BB' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) mystring = 'BBccbb' parser = FilteredLeoParser(RECURSION_GRAMMAR) tree, *_ = parser.parse(mystring) assert tree_to_string(tree) == mystring display_tree(tree) ``` As can be seen, we are able to recover minimal parse trees without hitting on infinite chains. ### Exercise 7: Iterative Earley Parser Recursive algorithms are quite handy in some cases but sometimes we might want to have iteration instead of recursion due to memory or speed problems. Can you implement an iterative version of the `EarleyParser`? __Hint:__ In general, you can use a stack to replace a recursive algorithm with an iterative one. An easy way to do this is pushing the parameters onto a stack instead of passing them to the recursive function. **Solution.** Here is a possible solution. First, we define `parse_paths()` that extract paths from a parsed expression, which is very similar to the original. 
``` class IterativeEarleyParser(EarleyParser): def parse_paths(self, named_expr_, chart, frm, til_): return_paths = [] path_build_stack = [(named_expr_, til_, [])] def iter_paths(path_prefix, path, start, k, e): x = path_prefix + [(path, k)] if not e: return_paths.extend([x] if start == frm else []) else: path_build_stack.append((e, start, x)) while path_build_stack: named_expr, til, path_prefix = path_build_stack.pop() *expr, var = named_expr starts = None if var not in self.cgrammar: starts = ([(var, til - len(var), 't')] if til > 0 and chart[til].letter == var else []) else: starts = [(s, s.s_col.index, 'n') for s in chart[til].states if s.finished() and s.name == var] for s, start, k in starts: iter_paths(path_prefix, s, start, k, expr) return return_paths ``` Next we used these paths to recover the forest data structure using `parse_forest()`. Since `parse_forest()` does not recurse, we reuse the original definition. Next, we define `extract_a_tree()` Now we are ready to extract trees from the forest using `extract_a_tree()` ``` class IterativeEarleyParser(IterativeEarleyParser): def choose_a_node_to_explore(self, node_paths, level_count): first, *rest = node_paths return first def extract_a_tree(self, forest_node_): start_node = (forest_node_[0], []) tree_build_stack = [(forest_node_, start_node[-1], 0)] while tree_build_stack: forest_node, tree, level_count = tree_build_stack.pop() name, paths = forest_node if not paths: tree.append((name, [])) else: new_tree = [] current_node = self.choose_a_node_to_explore(paths, level_count) for p in reversed(current_node): new_forest_node = self.forest(*p) tree_build_stack.append((new_forest_node, new_tree, level_count + 1)) tree.append((name, new_tree)) return start_node ``` For now, we simply extract the first tree found. ``` class IterativeEarleyParser(IterativeEarleyParser): def extract_trees(self, forest): yield self.extract_a_tree(forest) ``` Let's see if it works with some of the grammars we have seen so far. 
```
test_cases: List[Tuple[Grammar, str]] = [
    (A1_GRAMMAR, '1-2-3+4-5'),
    (A2_GRAMMAR, '1+2'),
    (A3_GRAMMAR, '1+2+3-6=6-1-2-3'),
    (LR_GRAMMAR, 'aaaaa'),
    (RR_GRAMMAR, 'aa'),
    (DIRECTLY_SELF_REFERRING, 'select a from a'),
    (INDIRECTLY_SELF_REFERRING, 'select a from a'),
    (RECURSION_GRAMMAR, 'AA'),
    (RECURSION_GRAMMAR, 'AAaaaa'),
    (RECURSION_GRAMMAR, 'BBccbb')
]

for i, (grammar, text) in enumerate(test_cases):
    print(i, text)
    tree, *_ = IterativeEarleyParser(grammar).parse(text)
    assert text == tree_to_string(tree)
```
As can be seen, our `IterativeEarleyParser` is able to handle recursive grammars. However, it can only extract the first tree found. What should one do to get all possible parses? What we can do is to keep track of options to explore at each `choose_a_node_to_explore()`. Next, capture the nodes explored in a tree data structure, adding new paths each time a new leaf is expanded. See the `TraceTree` datastructure in the [chapter on Concolic fuzzing](ConcolicFuzzer.ipynb) for an example.

### Exercise 8: First Set of a Nonterminal

We previously gave a way to extract the `nullable` (epsilon) set, which is often used for parsing. Along with `nullable`, parsing algorithms often use two other sets [`first` and `follow`](https://en.wikipedia.org/wiki/Canonical_LR_parser#FIRST_and_FOLLOW_sets). The first set of a terminal symbol is itself, and the first set of a nonterminal is composed of terminal symbols that can come at the beginning of any derivation of that nonterminal. The first set of any nonterminal that can derive the empty string should contain `EPSILON`. For example, using our `A1_GRAMMAR`, the first set of both `<expr>` and `<start>` is `{0,1,2,3,4,5,6,7,8,9}`.

Extracting the first set of any non-self-recursive nonterminal is simple enough. One simply has to recursively compute the first set of the first element of its choice expressions. The computation of the `first` set for a self-recursive nonterminal is tricky.
One has to recursively compute the first set until one is sure that no more terminals can be added to the first set. Can you implement the `first` set using our `fixpoint()` decorator?

**Solution.** The first set of all terminals is the set containing just themselves. So we initialize that first. Then we update the first set with rules that derive empty strings.

```
def firstset(grammar, nullable):
    """Compute the FIRST set of every symbol in `grammar`.
    `nullable` is the set of nonterminals that can derive the empty string."""
    # Terminals: FIRST(t) = {t}.
    first = {i: {i} for i in terminals(grammar)}
    # Nonterminals: seed with {EPSILON} if nullable, else empty.
    for k in grammar:
        first[k] = {EPSILON} if k in nullable else set()
    # Iterate to fixpoint; [1] extracts the `first` dict from the tuple.
    return firstset_((rules(grammar), first, nullable))[1]
```
Finally, we rely on the `fixpoint` to update the first set with the contents of the current first set until the first set stops changing.

```
def first_expr(expr, first, nullable):
    """FIRST set of a symbol sequence: union FIRST of each symbol,
    stopping at the first non-nullable one."""
    tokens = set()
    for token in expr:
        tokens |= first[token]
        if token not in nullable:
            break
    return tokens

@fixpoint
def firstset_(arg):
    """One propagation round: fold each rule's FIRST into its head's FIRST.
    `fixpoint` reruns this until the sets stop changing."""
    (rules, first, epsilon) = arg
    for A, expression in rules:
        first[A] |= first_expr(expression, first, epsilon)
    return (rules, first, epsilon)

# NOTE(review): passes EPSILON (a string) where a set of nullable
# nonterminals appears intended — `k in EPSILON` is always False, so no
# nonterminal is seeded with {EPSILON}; confirm against nullable(grammar).
firstset(canonical(A1_GRAMMAR), EPSILON)
```
### Exercise 9: Follow Set of a Nonterminal

The follow set definition is similar to the first set. The follow set of a nonterminal is the set of terminals that can occur just after that nonterminal is used in any derivation. The follow set of the start symbol is `EOF`, and the follow set of any nonterminal is the super set of first sets of all symbols that come after it in any choice expression.

For example, the follow set of `<expr>` in `A1_GRAMMAR` is the set `{EOF, +, -}`.

As in the previous exercise, implement the `followset()` using the `fixpoint()` decorator.

**Solution.** The implementation of `followset()` is similar to `firstset()`. We first initialize the follow set with `EOF`, get the epsilon and first sets, and use the `fixpoint()` decorator to iteratively compute the follow set until nothing changes.
```
EOF = '\0'

def followset(grammar, start):
    follow = {i: set() for i in grammar}
    follow[start] = {EOF}

    epsilon = nullable(grammar)
    first = firstset(grammar, epsilon)
    return followset_((grammar, epsilon, first, follow))[-1]
```
Given the current follow set, one can update the follow set as follows:

```
@fixpoint
def followset_(arg):
    grammar, epsilon, first, follow = arg
    for A, expression in rules(grammar):
        f_B = follow[A]
        for t in reversed(expression):
            if t in grammar:
                follow[t] |= f_B
            f_B = f_B | first[t] if t in epsilon else (first[t] - {EPSILON})

    return (grammar, epsilon, first, follow)

followset(canonical(A1_GRAMMAR), START_SYMBOL)
```
### Exercise 10: A LL(1) Parser

As we mentioned previously, there exist other kinds of parsers that operate left-to-right with right most derivation (*LR(k)*) or left-to-right with left most derivation (*LL(k)*) with _k_ signifying the amount of lookahead the parser is permitted to use.

What should one do with the lookahead? That lookahead can be used to determine which rule to apply. In the case of an *LL(1)* parser, the rule to apply is determined by looking at the _first_ set of the different rules. We previously implemented `first_expr()` that takes an expression, the set of `nullables`, and computes the first set of that rule.

If a rule can derive an empty set, then that rule may also be applicable if it sees the `follow()` set of the corresponding nonterminal.

#### Part 1: A LL(1) Parsing Table

The first part of this exercise is to implement the _parse table_ that describes what action to take for an *LL(1)* parser on seeing a terminal symbol on lookahead. The table should be in the form of a _dictionary_ such that the keys represent the nonterminal symbol, and the value should contain another dictionary with keys as terminal symbols and the particular rule to continue parsing as the value.

Let us illustrate this table with an example.
The `parse_table()` method populates a `self.table` data structure that should conform to the following requirements: ``` class LL1Parser(Parser): def parse_table(self): self.my_rules = rules(self.cgrammar) self.table = ... # fill in here to produce def rules(self): for i, rule in enumerate(self.my_rules): print(i, rule) def show_table(self): ts = list(sorted(terminals(self.cgrammar))) print('Rule Name\t| %s' % ' | '.join(t for t in ts)) for k in self.table: pr = self.table[k] actions = list(str(pr[t]) if t in pr else ' ' for t in ts) print('%s \t| %s' % (k, ' | '.join(actions))) ``` On invocation of `LL1Parser(A2_GRAMMAR).show_table()` It should result in the following table: ``` for i, r in enumerate(rules(canonical(A2_GRAMMAR))): print("%d\t %s := %s" % (i, r[0], r[1])) ``` |Rule Name || + | - | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9| |-----------||---|---|---|---|---|---|---|---|---|---|---|--| |start || | | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0| |expr || | | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1| |expr_ || 2 | 3 | | | | | | | | | | | |integer || | | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5| |integer_ || 7 | 7 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6| |digit || | | 8 | 9 |10 |11 |12 |13 |14 |15 |16 |17| **Solution.** We define `predict()` as we explained before. Then we use the predicted rules to populate the parse table. ``` class LL1Parser(LL1Parser): def predict(self, rulepair, first, follow, epsilon): A, rule = rulepair rf = first_expr(rule, first, epsilon) if nullable_expr(rule, epsilon): rf |= follow[A] return rf def parse_table(self): self.my_rules = rules(self.cgrammar) epsilon = nullable(self.cgrammar) first = firstset(self.cgrammar, epsilon) # inefficient, can combine the three. 
follow = followset(self.cgrammar, self.start_symbol()) ptable = [(i, self.predict(rule, first, follow, epsilon)) for i, rule in enumerate(self.my_rules)] parse_tbl = {k: {} for k in self.cgrammar} for i, pvals in ptable: (k, expr) = self.my_rules[i] parse_tbl[k].update({v: i for v in pvals}) self.table = parse_tbl ll1parser = LL1Parser(A2_GRAMMAR) ll1parser.parse_table() ll1parser.show_table() ``` #### Part 2: The Parser Once we have the parse table, implementing the parser is as follows: Consider the first item from the sequence of tokens to parse, and seed the stack with the start symbol. While the stack is not empty, extract the first symbol from the stack, and if the symbol is a terminal, verify that the symbol matches the item from the input stream. If the symbol is a nonterminal, use the symbol and input item to lookup the next rule from the parse table. Insert the rule thus found to the top of the stack. Keep track of the expressions being parsed to build up the parse table. Use the parse table defined previously to implement the complete LL(1) parser. 
**Solution.** Here is the complete parser:

```
class LL1Parser(LL1Parser):
    def parse_helper(self, stack, inplst):
        """Run the table-driven LL(1) parse of token list `inplst`.

        `stack` initially holds the start symbol.  `exprs` records the
        parse linearly; (symbol, arity) tuples mark where a rule's
        right-hand side ends, so the tree can be rebuilt afterwards.
        """
        # Take the first lookahead token; `inplst` keeps the rest.
        inp, *inplst = inplst
        exprs = []
        while stack:
            val, *stack = stack
            if isinstance(val, tuple):
                # Completion marker for a previously expanded nonterminal.
                exprs.append(val)
            elif val not in self.cgrammar:  # terminal
                # Terminal on the stack must match the current lookahead.
                assert val == inp
                exprs.append(val)
                # Advance the lookahead; None signals end of input.
                inp, *inplst = inplst or [None]
            else:
                if inp is not None:
                    # Nonterminal: the parse table picks the rule for this
                    # (nonterminal, lookahead) pair; push its right-hand
                    # side followed by a (symbol, arity) completion marker.
                    i = self.table[val][inp]
                    _, rhs = self.my_rules[i]
                    stack = rhs + [(val, len(rhs))] + stack
        return self.linear_to_tree(exprs)

    def parse(self, inp):
        """Parse input `inp`, seeding the stack with the start symbol."""
        self.parse_table()
        k, _ = self.my_rules[0]
        stack = [k]
        return self.parse_helper(stack, inp)

    def linear_to_tree(self, arr):
        """Rebuild the derivation tree from the linear parse record."""
        stack = []
        while arr:
            elt = arr.pop(0)
            if not isinstance(elt, tuple):
                # Terminal: becomes a leaf node.
                stack.append((elt, []))
            else:
                # get the last n
                sym, n = elt
                elts = stack[-n:] if n > 0 else []
                stack = stack[0:len(stack) - n]
                stack.append((sym, elts))
        # A complete parse collapses to a single root node.
        assert len(stack) == 1
        return stack[0]

ll1parser = LL1Parser(A2_GRAMMAR)
tree = ll1parser.parse('1+2')
display_tree(tree)
```
github_jupyter
<a href="https://colab.research.google.com/github/txusser/Master_IA_Sanidad/blob/main/Modulo_2/2_3_3_Extraccion_de_caracteristicas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Extracción de características ## Análisis de la componente principal (PCA) ``` from sklearn.datasets import load_breast_cancer import pandas as pd # Cargamos los datos cancer_data = load_breast_cancer() df = pd.DataFrame(data=cancer_data.data, columns=cancer_data.feature_names) # Y mostramos algunas variables por pantalla print(df.head()) print(df.describe()) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # Rescalamos los datos teniendo en cuenta la media y desviación estándar de cada variable scaler.fit(df.values) X_scaled = scaler.transform(df.values) print("X_scaled:\n", X_scaled) # Vamos a utilizar las funciones de Sci-kit learn para análisis PCA from sklearn.decomposition import PCA # Para evaluar los resultados, utilizaremos el conjunto completo de variables pca = PCA(n_components=30, random_state=2020) pca.fit(X_scaled) X_pca = pca.transform(X_scaled) # La variable anterior almacena los valores de los (30) componentes principales print("X_pca:\n", X_pca) # Puesto que seleccionamos el conjunto completo de variables las componenete # seleccionadas deben dar cuenta del 100% de la varianza en los datos print("\n => Varianza explicada por las componentes:", sum(pca.explained_variance_ratio_ * 100)) # Si representamos la varianza en función del número de componentes podemos observar # cuál es el mínimo número de componenetes que necesitaremos para explicar un cierto # porcentaje de la varianza import matplotlib.pyplot as plt import numpy as np plt.plot(np.cumsum(pca.explained_variance_ratio_ * 100)) plt.xlabel("Número de componenetes") plt.ylabel("Porcentaje de varianza explicado") # Vemos que con solo un tercio de las variables podemos explicar el 95% de la variaza n_var = 
np.cumsum(pca.explained_variance_ratio_ * 100)[9] print("Varianza 10 primeras componenetes:", n_var) # Alternativamente, podemos construir el conjunto que acomode el 95% de la variaza # del siguiente modo pca_95 = PCA(n_components=0.95, random_state=2020) pca_95.fit(X_scaled) X_pca_95 = pca_95.transform(X_scaled) # Una buena práctica es visualizar la relación de las principales componentes import seaborn as sns sns.scatterplot(X_pca_95[:, 0], X_pca_95[:, 1], hue=cancer_data.target) # Finalmente podemos crear un nuevo marco de datos con el resultado del análisis PCA cols = ['PCA' + str(i) for i in range(10)] df_pca = pd.DataFrame(X_pca_95, columns=cols) print("Datos (PCA - 95%):\n", df_pca) ``` ## Análisis de Componentes Independientes (ICA) ``` # Utilizaremos datos de fMRI para nuestro ejemplo con ICA # Para ello, comenzamos instalando la librería nilearn !python -m pip install nilearn from nilearn import datasets # Descargamos un sujeto del estudio con RM funcional dataset = datasets.fetch_development_fmri(n_subjects=1) file_name = dataset.func[0] # Preprocesado de la imagen from nilearn.input_data import NiftiMasker # Aplicamos una máscara para extraer el fondo de la imagen (vóxeles no cerebrales) masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1, mask_strategy='epi', standardize=True) data_masked = masker.fit_transform(file_name) from sklearn.decomposition import FastICA import numpy as np # Seleccionamos 10 componentes ica = FastICA(n_components=10, random_state=42) components_masked = ica.fit_transform(data_masked.T).T # Aplicamos un corte (80% señal) en los datos después de normalizar según # la media y desviación estándar de los datos components_masked -= components_masked.mean(axis=0) components_masked /= components_masked.std(axis=0) components_masked[np.abs(components_masked) < .8] = 0 # Invertimos la transformación para recuperar la estructura 3D component_img = masker.inverse_transform(components_masked) # Finalmete, 
visualizamos el resultado de las operaciones de reducción from nilearn import image from nilearn.plotting import plot_stat_map, show mean_img = image.mean_img(func_filename) plot_stat_map(image.index_img(component_img, 0), mean_img) plot_stat_map(image.index_img(component_img, 1), mean_img) ```
github_jupyter
# Multi-linear regression: how many variables? [![Latest release](https://badgen.net/github/release/Naereen/Strapdown.js)](https://github.com/eabarnes1010/course_objective_analysis/tree/main/code) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/eabarnes1010/course_objective_analysis/blob/main/code/minimum_corr_for_added_value.ipynb) If I have two predictors $x_1$ and $x_2$, under what circumstances is the second one useful for predicting $y$? ``` #............................................. # IMPORT STATEMENTS #............................................. import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import importlib from sklearn import linear_model from sklearn import metrics mpl.rcParams['figure.facecolor'] = 'white' mpl.rcParams['figure.dpi']= 150 dpiFig = 300. np.random.seed(300) ``` Let's start by creating two predictors, x1 and x2, and predictand y. x1 will be totally random, and the others will build upon that. ``` x1 = np.random.normal(0.,1.,size=100,) print(np.shape(x1)) ``` Now we create x2. ``` a = 0.8 b = np.sqrt(1. - a**2) x2 = [] # create red-noise time series iteratively for it in np.arange(0,100,1): x2.append(a*x1[it] + b*np.random.normal(size=1)) x2 = np.asarray(x2)[:,0] print(np.shape(x2)) ``` Now let's make $y$, which is composed of pieces of x1, x2 and noise. ``` a = 0.3 b = np.sqrt(1. - a**2) y = [] # create red-noise time series iteratively for it in np.arange(0,100,1): y.append(a*x1[it] + (.05)*x2[it] + b*np.random.normal(size=1)) y = np.asarray(y)[:,0] print(np.shape(y)) ``` We can calculate the correlations of the predictors and predictands just to confirm that they all have some relationship with one another. 
``` c12 = np.corrcoef(x1,x2)[0,1] c1y = np.corrcoef(x1,y)[0,1] c2y = np.corrcoef(y,x2)[0,1] print('corr(x1,x2) = ' + str(np.round(c12,3))) print('corr(x1,y) = ' + str(np.round(c1y,3))) print('corr(x2,y) = ' + str(np.round(c2y,3))) ``` ### Theory Based on theory, the minimum useful correlation of c2y is the following (from theory)... ``` minUseful = np.abs(c1y*c12) print('minimum useful corr(x2,y) = ' + str(np.round(minUseful,3))) ``` Furthermore, we can show analytically that the variance explained between using x1 versus x1 and x2 is practically identical since x2 doesn't appear to add additional information (i.e. |c2y| < minUseful). ``` #just using x1 R2 = c1y**2 print('theory: y variance explained by x1 = ' + str(np.round(R2,3))) #using x1 and x2 R2 = (c1y**2 + c2y**2 - 2*c1y*c2y*c12)/(1-c12**2) print('theory: y variance explained by x1 & x2 = ' + str(np.round(R2,3))) ``` ### Actual fits We can confirm the theory now through some fun examples where we actually fit y using x1 and x2. In fact, we see that the fits indeed give us exactly what is expected by theory. ``` # only x1 predictor X = np.swapaxes([x1],1,0) Y = np.swapaxes([y],1,0) # with sklearn regr = linear_model.LinearRegression() regr.fit(X, Y) R2_x1 = metrics.r2_score(Y,regr.predict(X)) print('y variance explained by x1 fit = ' + str(np.round(R2_x1,5))) #--------------------------------------------- # both x1 and x2 predictors X = np.swapaxes([x1,x2],1,0) Y = np.swapaxes([y],1,0) # with sklearn regr = linear_model.LinearRegression() regr.fit(X, Y) R2_x12 = metrics.r2_score(Y,regr.predict(X)) print('y variance explained by x1 & x2 fit = ' + str(np.round(R2_x12,5))) ``` But what is going on here? Why is the $R^2$ slightly higher when we added x2? I thought theory said it shouldn't improve my variance explained _at all_? ## What about more predictors? 
(aka _overfitting_) ``` X = np.random.normal(0.,1.,size=(100,40)) Y = np.random.normal(0.,1.,size=100,) rval = [] for n in np.arange(0,np.shape(X)[1]): # with sklearn regr = linear_model.LinearRegression() regr.fit(X[:,0:n+1], Y) R2 = metrics.r2_score(Y,regr.predict(X[:,0:n+1])) rval.append(R2) plt.figure(figsize=(8,6)) plt.plot(np.arange(0,np.shape(X)[1]),rval,'o-') plt.xlabel('number of random predictors') plt.ylabel('fraction variance explained') plt.title('Variance Explained') plt.show() ``` ### Adjusted R$^2$ There is a great solution to this - known as the _adjusted $R^2$_. It is a measure of explained variance, but you are penalized (the number decreases) when too many predictors are used. The adjusted $R^2$ increases only if the new term improves the model more than would be expected by chance. ``` def adjustRsquared(r2,n,p): adjustR2 = 1 - (1-r2)*(n-1)/(n-p-1) return adjustR2 # only fitting with x1 p=1 n = len(x1) adjustR2 = adjustRsquared(R2_x1,n,p) print('fit with x1 only') print(' R-squared = ' + str(np.round(R2_x1,3)) + ', Adjusted R-squared = ' + str(np.round(adjustR2,3))) # fitting with x1 and x2 p = 2 n = len(x1) adjustR2 = adjustRsquared(R2_x12,n,p) print('fit with x1 and x2 only') print(' R-squared = ' + str(np.round(R2_x12,3)) + ', Adjusted R-squared = ' + str(np.round(adjustR2,3))) ``` In our silly example above with 40 predictors, the adjusted R2 is the following... 
``` n = len(Y) p = np.arange(0,np.shape(X)[1]) + 1 adjustR2 = adjustRsquared(np.asarray(rval),n,p) plt.figure(figsize=(8,6)) plt.axhline(y=0,color='gray') plt.plot(np.arange(1,np.shape(X)[1]+1),rval,'o-', label='R2') plt.plot(np.arange(1,np.shape(X)[1]+1),adjustR2,'o-',color='red', label='adjusted R2') plt.xlabel('number of predictors') plt.ylabel('fraction variance explained') plt.legend() plt.title('Adjusted R-squared') plt.show() ``` ### Significance of Adjusted $R^2$ To end, let's compute the adjusted R-squared many times for a lot of random data to get a feeling of the spread of possible adjusted R-squared values by chance alone. ``` rVec = np.zeros(shape=(40,500)) for nvar in (np.arange(1,np.shape(rVec)[0]+1)): r = [] for n in np.arange(0,500): X = np.random.normal(0.,1.,size=(100,nvar)) Y = np.random.normal(0.,1.,size=100,) # with sklearn regr = linear_model.LinearRegression() regr.fit(X[:,0:n+1], Y) R2 = metrics.r2_score(Y,regr.predict(X[:,0:n+1])) r.append(R2) rVec[nvar-1,:] = adjustRsquared(np.asarray(r),100,nvar) pTop = np.percentile(rVec,97.5,axis=1) pBot = np.percentile(rVec,2.5,axis=1) plt.figure(figsize=(8,6)) plt.axhline(y=0,color='gray') plt.plot(np.arange(1,np.shape(X)[1]+1),adjustR2,'o-',color='red', label='adjusted R2') plt.fill_between(np.arange(1,len(p)+1), pBot, pTop,color='lightgray', label='confidence bounds') plt.xlabel('number of predictors') plt.ylabel('fraction variance explained') plt.legend() plt.title('Adjusted R2') plt.ylim(-1,1) plt.show() ```
github_jupyter
## Keras implementation of https://phillipi.github.io/pix2pix ``` import os os.environ['KERAS_BACKEND']='theano' # can choose theano, tensorflow, cntk os.environ['THEANO_FLAGS']='floatX=float32,device=cuda,optimizer=fast_run,dnn.library_path=/usr/lib' #os.environ['THEANO_FLAGS']='floatX=float32,device=cuda,optimizer=fast_compile,dnn.library_path=/usr/lib' import keras.backend as K if os.environ['KERAS_BACKEND'] =='theano': channel_axis=1 K.set_image_data_format('channels_first') channel_first = True else: K.set_image_data_format('channels_last') channel_axis=-1 channel_first = False from keras.models import Sequential, Model from keras.layers import Conv2D, ZeroPadding2D, BatchNormalization, Input, Dropout from keras.layers import Conv2DTranspose, Reshape, Activation, Cropping2D, Flatten from keras.layers import Concatenate from keras.layers.advanced_activations import LeakyReLU from keras.activations import relu from keras.initializers import RandomNormal # Weights initializations # bias are initailized as 0 def __conv_init(a): print("conv_init", a) k = RandomNormal(0, 0.02)(a) # for convolution kernel k.conv_weight = True return k conv_init = RandomNormal(0, 0.02) gamma_init = RandomNormal(1., 0.02) # for batch normalization # HACK speed up theano if K._BACKEND == 'theano': import keras.backend.theano_backend as theano_backend def _preprocess_conv2d_kernel(kernel, data_format): #return kernel if hasattr(kernel, "original"): print("use original") return kernel.original elif hasattr(kernel, '_keras_shape'): s = kernel._keras_shape print("use reshape",s) kernel = kernel.reshape((s[3], s[2],s[0], s[1])) else: kernel = kernel.dimshuffle((3, 2, 0, 1)) return kernel theano_backend._preprocess_conv2d_kernel = _preprocess_conv2d_kernel # Basic discriminator def conv2d(f, *a, **k): return Conv2D(f, kernel_initializer = conv_init, *a, **k) def batchnorm(): return BatchNormalization(momentum=0.9, axis=channel_axis, epsilon=1.01e-5, gamma_initializer = gamma_init) def 
BASIC_D(nc_in, ndf, max_layers=3, use_sigmoid=True): """DCGAN_D(nc, ndf, max_layers=3) nc: channels ndf: filters of the first layer max_layers: max hidden layers """ if channel_first: input_a = Input(shape=(nc_in, None, None)) else: input_a = Input(shape=(None, None, nc_in)) _ = input_a _ = conv2d(ndf, kernel_size=4, strides=2, padding="same", name = 'First') (_) _ = LeakyReLU(alpha=0.2)(_) for layer in range(1, max_layers): out_feat = ndf * min(2**layer, 8) _ = conv2d(out_feat, kernel_size=4, strides=2, padding="same", use_bias=False, name = 'pyramid.{0}'.format(layer) ) (_) _ = batchnorm()(_, training=1) _ = LeakyReLU(alpha=0.2)(_) out_feat = ndf*min(2**max_layers, 8) _ = ZeroPadding2D(1)(_) _ = conv2d(out_feat, kernel_size=4, use_bias=False, name = 'pyramid_last') (_) _ = batchnorm()(_, training=1) _ = LeakyReLU(alpha=0.2)(_) # final layer _ = ZeroPadding2D(1)(_) _ = conv2d(1, kernel_size=4, name = 'final'.format(out_feat, 1), activation = "sigmoid" if use_sigmoid else None) (_) return Model(inputs=[input_a], outputs=_) def UNET_G(isize, nc_in=3, nc_out=3, ngf=64, fixed_input_size=True): max_nf = 8*ngf def block(x, s, nf_in, use_batchnorm=True, nf_out=None, nf_next=None): # print("block",x,s,nf_in, use_batchnorm, nf_out, nf_next) assert s>=2 and s%2==0 if nf_next is None: nf_next = min(nf_in*2, max_nf) if nf_out is None: nf_out = nf_in x = conv2d(nf_next, kernel_size=4, strides=2, use_bias=(not (use_batchnorm and s>2)), padding="same", name = 'conv_{0}'.format(s)) (x) if s>2: if use_batchnorm: x = batchnorm()(x, training=1) x2 = LeakyReLU(alpha=0.2)(x) x2 = block(x2, s//2, nf_next) x = Concatenate(axis=channel_axis)([x, x2]) x = Activation("relu")(x) x = Conv2DTranspose(nf_out, kernel_size=4, strides=2, use_bias=not use_batchnorm, kernel_initializer = conv_init, name = 'convt.{0}'.format(s))(x) x = Cropping2D(1)(x) if use_batchnorm: x = batchnorm()(x, training=1) if s <=8: x = Dropout(0.5)(x, training=1) return x s = isize if fixed_input_size else None if 
channel_first: _ = inputs = Input(shape=(nc_in, s, s)) else: _ = inputs = Input(shape=(s, s, nc_in)) _ = block(_, isize, nc_in, False, nf_out=nc_out, nf_next=ngf) _ = Activation('tanh')(_) return Model(inputs=inputs, outputs=[_]) nc_in = 3 nc_out = 3 ngf = 64 ndf = 64 use_lsgan = True λ = 10 if use_lsgan else 100 loadSize = 143 imageSize = 128 batchSize = 1 lrD = 2e-4 lrG = 2e-4 netDA = BASIC_D(nc_in, ndf, use_sigmoid = not use_lsgan) netDB = BASIC_D(nc_out, ndf, use_sigmoid = not use_lsgan) netDA.summary() from IPython.display import SVG from keras.utils.vis_utils import model_to_dot netGB = UNET_G(imageSize, nc_in, nc_out, ngf) netGA = UNET_G(imageSize, nc_out, nc_in, ngf) #SVG(model_to_dot(netG, show_shapes=True).create(prog='dot', format='svg')) netGA.summary() from keras.optimizers import RMSprop, SGD, Adam if use_lsgan: loss_fn = lambda output, target : K.mean(K.abs(K.square(output-target))) else: loss_fn = lambda output, target : -K.mean(K.log(output+1e-12)*target+K.log(1-output+1e-12)*(1-target)) def cycle_variables(netG1, netG2): real_input = netG1.inputs[0] fake_output = netG1.outputs[0] rec_input = netG2([fake_output]) fn_generate = K.function([real_input], [fake_output, rec_input]) return real_input, fake_output, rec_input, fn_generate real_A, fake_B, rec_A, cycleA_generate = cycle_variables(netGB, netGA) real_B, fake_A, rec_B, cycleB_generate = cycle_variables(netGA, netGB) def D_loss(netD, real, fake, rec): output_real = netD([real]) output_fake = netD([fake]) loss_D_real = loss_fn(output_real, K.ones_like(output_real)) loss_D_fake = loss_fn(output_fake, K.zeros_like(output_fake)) loss_G = loss_fn(output_fake, K.ones_like(output_fake)) loss_D = loss_D_real+loss_D_fake loss_cyc = K.mean(K.abs(rec-real)) return loss_D, loss_G, loss_cyc loss_DA, loss_GA, loss_cycA = D_loss(netDA, real_A, fake_A, rec_A) loss_DB, loss_GB, loss_cycB = D_loss(netDB, real_B, fake_B, rec_B) loss_cyc = loss_cycA+loss_cycB loss_G = loss_GA+loss_GB+λ*loss_cyc loss_D = 
loss_DA+loss_DB weightsD = netDA.trainable_weights + netDB.trainable_weights weightsG = netGA.trainable_weights + netGB.trainable_weights training_updates = Adam(lr=lrD, beta_1=0.5).get_updates(weightsD,[],loss_D) netD_train = K.function([real_A, real_B],[loss_DA/2, loss_DB/2], training_updates) training_updates = Adam(lr=lrG, beta_1=0.5).get_updates(weightsG,[], loss_G) netG_train = K.function([real_A, real_B], [loss_GA, loss_GB, loss_cyc], training_updates) from PIL import Image import numpy as np import glob from random import randint, shuffle def load_data(file_pattern): return glob.glob(file_pattern) def read_image(fn): im = Image.open(fn).convert('RGB') im = im.resize( (loadSize, loadSize), Image.BILINEAR ) arr = np.array(im)/255*2-1 w1,w2 = (loadSize-imageSize)//2,(loadSize+imageSize)//2 h1,h2 = w1,w2 img = arr[h1:h2, w1:w2, :] if randint(0,1): img=img[:,::-1] if channel_first: img = np.moveaxis(img, 2, 0) return img #data = "edges2shoes" data = "horse2zebra" train_A = load_data('CycleGAN/{}/trainA/*.jpg'.format(data)) train_B = load_data('CycleGAN/{}/trainB/*.jpg'.format(data)) assert len(train_A) and len(train_B) def minibatch(data, batchsize): length = len(data) epoch = i = 0 tmpsize = None while True: size = tmpsize if tmpsize else batchsize if i+size > length: shuffle(data) i = 0 epoch+=1 rtn = [read_image(data[j]) for j in range(i,i+size)] i+=size tmpsize = yield epoch, np.float32(rtn) def minibatchAB(dataA, dataB, batchsize): batchA=minibatch(dataA, batchsize) batchB=minibatch(dataB, batchsize) tmpsize = None while True: ep1, A = batchA.send(tmpsize) ep2, B = batchB.send(tmpsize) tmpsize = yield max(ep1, ep2), A, B from IPython.display import display def showX(X, rows=1): assert X.shape[0]%rows == 0 int_X = ( (X+1)/2*255).clip(0,255).astype('uint8') if channel_first: int_X = np.moveaxis(int_X.reshape(-1,3,imageSize,imageSize), 1, 3) else: int_X = int_X.reshape(-1,imageSize,imageSize, 3) int_X = int_X.reshape(rows, -1, imageSize, 
imageSize,3).swapaxes(1,2).reshape(rows*imageSize,-1, 3) display(Image.fromarray(int_X)) train_batch = minibatchAB(train_A, train_B, 6) _, A, B = next(train_batch) showX(A) showX(B) del train_batch, A, B def showG(A,B): assert A.shape==B.shape def G(fn_generate, X): r = np.array([fn_generate([X[i:i+1]]) for i in range(X.shape[0])]) return r.swapaxes(0,1)[:,:,0] rA = G(cycleA_generate, A) rB = G(cycleB_generate, B) arr = np.concatenate([A,B,rA[0],rB[0],rA[1],rB[1]]) showX(arr, 3) import time from IPython.display import clear_output t0 = time.time() niter = 150 gen_iterations = 0 epoch = 0 errCyc_sum = errGA_sum = errGB_sum = errDA_sum = errDB_sum = 0 display_iters = 50 #val_batch = minibatch(valAB, 6, direction) train_batch = minibatchAB(train_A, train_B, batchSize) while epoch < niter: epoch, A, B = next(train_batch) errDA, errDB = netD_train([A, B]) errDA_sum +=errDA errDB_sum +=errDB # epoch, trainA, trainB = next(train_batch) errGA, errGB, errCyc = netG_train([A, B]) errGA_sum += errGA errGB_sum += errGB errCyc_sum += errCyc gen_iterations+=1 if gen_iterations%display_iters==0: #if gen_iterations%(5*display_iters)==0: clear_output() print('[%d/%d][%d] Loss_D: %f %f Loss_G: %f %f loss_cyc %f' % (epoch, niter, gen_iterations, errDA_sum/display_iters, errDB_sum/display_iters, errGA_sum/display_iters, errGB_sum/display_iters, errCyc_sum/display_iters), time.time()-t0) _, A, B = train_batch.send(4) showG(A,B) errCyc_sum = errGA_sum = errGB_sum = errDA_sum = errDB_sum = 0 ```
github_jupyter
# Practical session 1 - Some Python basics Course: [SDIA-Python](https://github.com/guilgautier/sdia-python) Dates: 09/21/2021-09/22/2021 Instructor: [Guillaume Gautier](https://guilgautier.github.io/) Students (pair): - [Hadrien SALEM]([link](https://github.com/SnowHawkeye)) - [Emilie SALEM]([link](https://github.com/EmilieSalem)) ``` %load_ext autoreload %autoreload 2 ``` ## Documentation To display the documentation associated to a Python function in your notebook (launched in VSCode), you can either - run `?functionname` or `help(my_adder)` - place your mouse pointer on the function name - write the name of the function and start opening the brackets ## Zen of Python ``` import this ``` ## Reserved keywords ``` import keyword print(keyword.kwlist) import keyword print(keyword.kwlist) ``` ## Arithmetic Compute 4 raised to the power 8 ``` pow(4,8) ``` Compute the quotient and the remainder of the euclidean division of 17 by 3. ``` 17 // 3 17 % 3 ``` Create two integer variables `a, b` of your choice and swap their content. ``` a = 1 b = 2 a,b = b,a print ("a = " + str(a)) print("b = " + str(b)) ``` Modify `a` inplace, so that the new value of `a` is twice its previous value. ``` a = 2*a print(a) ``` Define a complex number `z` and print it as `real=real-part, imag=imaginary-part` ``` z = complex(3,1) print("real = " + str(z.real)) print("imag = " + str(z.imag)) z2 = 3 + 1j # different notation ``` ## Strings Propose two ways to define an empty string ``` string1 = "" string2 = str() print("string1 --->" + string1 + "<---") print("string2 --->" + string2 + "<---") ``` Define a string made of the 26 letters of the alphabet (lower case), then - What is the 12th letter of the alphabet - What are the 10 last letters of the alphabet - What are the 5th to the 10th letters of the alphabet - Create a new string where the 26 letters are converted to upper case. 
``` alphabet = "abcdefghijklmnopqrstuvwxyz" print("12th letter: " + alphabet[11]) print("Last 10 letters: " + alphabet[-10:]) print("5th to 10th letter: " + alphabet[4:10]) alphabetUpperCase = str.upper(alphabet) print("Upper case: " + alphabetUpperCase) ``` Extract the first 4 digits of the number `x = 2341324530045968` (answer is `2341`). ``` x = 2341324530045968 print(str(x)[0:4]) # different method print(x // 10**12) ``` ## Lists Propose two ways to define an empty list. ``` list1 = [] list2 = list() print("list1 --->" + str(list1) + "<---") print("list2 --->" + str(list2) + "<---") ``` Create the list of consecutive integers from 5 to 11. ``` integerList = [i for i in range(5,12)] print(integerList) ``` Add `100` to this list. ``` integerList.append(100) print(integerList) ``` Given the following list, Change the `"sdia"` to its reverse (`"aids"`). ``` x = [10, [3, 4], [5, [100, 200, ["sdia"]], 23, 11], 1, 7] x[2][1][2][0] = x[2][1][2][0][::-1] print(x) ``` Given the following list, 1. extract the first two items 2. extract the last two items 3. find the index of the first occurence of `9` 4. extract the items located at odd indices 5. create a new list which corresponds to the reversed version of the originial list 6. create a new list where each item of the original list is raised to the power 3 ``` x = [3, 7, 5, 3, 8, 6, 8, 8, 0, 7, 3, 9, 3, 4, 2, 7, 0, 9, 5, 0, 0, 9, 0, 9, 3, 1, 4, 0, 5, 5, 5, 8, 9, 9, 5, 5, 4, 9, 5, 6, 2, 8, 5, 2, 4, 9, 2, 2, 3, 1] print("first two items : " + str(x[0:2])) print("last two items : " + str(x[-2::])) print("index of tthe first occurence of 9 : " + str(x.index(9))) print("items located at odd indices : " + str(x[1::2])) reversedList = x.copy()[::-1] print("reversed version of original list : " + str(reversedList)) power3 = [pow(y,3) for y in x] print("list where each item of the original list is raised to the power 3 : " + str(power3)) ``` ## Tuples Propose two ways to define an empty tuple. 
```
tuple1 = ()
tuple2 = tuple()
print("tuple1 --->" + str(tuple1) + "<---")
print("tuple2 --->" + str(tuple2) + "<---")
```

Given the following tuple
- try to replace the second item (`4`) to `8`.
- modify the list so that the second element of the first item is raised to the power 5

x = ([1, 2, 3], 4)

```
x = ([1, 2, 3], 4)

try:
    x[1] = 8
except TypeError:
    print("An exception was thrown because tuples are immutable.")

x = ([1, 2, 3], 4)
# The tuple itself is immutable, but its first element is a mutable list,
# so that list's contents can be changed in place.  The exercise asks for
# the power 5 (the original solution used 2 by mistake).
x[0][1] = pow(x[0][1], 5)
print(x)
```

What is the main difference between a list and a tuple?

> Tuples are immutable, whereas lists are mutable.

## Sets

Create an empty set

```
# Note: `{}` creates an empty *dict*, not a set -- use set() (or set([])).
newSet = set()
print("set --->" + str(newSet) + "<---")
```

Compute the intersection between $A = \{1, 2, 3\}$ and $B = \{2, 4, 5, 6\}$.

```
A = {1,2,3}
B = {2,4,5,6}
A.intersection(B)
```

From the following set
- add the numbers 4, 5, 6
- remove the numbers 1, 2, 3
- check if the resulting set is contained in `{1, 2, 4, 5, 6, 10}`

```
x = {0, 1, 2, 3, 6, 7, 8, 9, 10}

x.add(4)
x.add(5)
x.add(6)

x.remove(1)
x.remove(2)
x.remove(3)

compared = {1, 2, 4, 5, 6, 10}

print(x)
print("x is contained in the set >> " + str(x.issubset(compared)))
```

## Dictionaries

Propose two ways to define an empty dictionary

```
dict1 = {}
dict2 = dict()
print("dict1 --->" + str(dict1) + "<---")
print("dict2 --->" + str(dict2) + "<---")
```

Propose at least 3 ways to define a dictionary with
- keys `("a", "b", "c", "d")`
- values `(1, 3, 5 , 7)`

```
dictDirect = {
    "a" : 1,
    "b" : 3,
    "c" : 5,
    "d" : 7,
}

keys = ["a", "b", "c", "d"]
values = [1, 3, 5 , 7]
dictFromLists = dict(zip(keys, values))

dictAdd = dict()
dictAdd["a"] = 1
dictAdd["b"] = 3
dictAdd["c"] = 5
dictAdd["d"] = 7

print("dictDirect --->" + str(dictDirect))
print("dictFromLists --->" + str(dictFromLists))
print("dictAdd --->" + str(dictAdd))
```

Given the following dictionary, use indexing to get to the word `"sdia"`

```
d = {
    "outer": [
        1,
        2,
        3,
        {"inner": ["this", "is", "inception",
{"inner_inner": [1, 2, 3, "sdia"]}]}, ] } print(d["outer"][3]["inner"][3]["inner_inner"][3]) ``` From the following dictionary, - Create the list of keys - Create the list of values - Check if the key `"abc"` is present, if not, return `123`. - Merge the initial dictionary with `{"e": -1, 4: True}` ``` d1 = dict(zip([(0, 1), "e", 1, None], ["abc", 4, None, 1 + 2j])) keys = d1.keys() values = d1.values() print(keys) print(values) ``` ## Conditions Propose two ways to construct the list of odd values ranging from 0 to 30 From the following list, create another list which contains - `"fiz"` if the item is a multiple of 5, - `"buz"` if the item is a multiple of 7, - `"fizbuz"` if the item is a both a multiple of 5 and 7, - the index of the item otherwise. ``` x = [2, 1, 3, 31, 35, 20, 70, 132, 144, 49] new_list = [] for i in range(0,len(x)) : if (x[i]%5 == 0 and x[i]%7 == 0) : new_list.append("fizbuz") elif x[i]%5 == 0 : new_list.append("fiz") elif x[i]%7 == 0 : new_list.append("buz") else : new_list.append(i) print(new_list) ``` Given a string variable `course`, construct a `if/elif/else` statement that prints: - `"That is very useful!"` if `course` is `"maths"` (any kind of capitalization) - `"That is very useful!"` if `course` is `"python"` (any kind of capitalization) - `"How nice!"` if `course` is `"meditation"` (any kind of capitalization) - `"You're not at Hogwarts"` if `course` is `"magic"` (any kind of capitalization) - otherwise `"What is this COURSE?"` if `course` is anything else. 
``` course = "python" if(course.lower() == "maths" or course.lower() == "python") : print("That is very useful!") elif(course.lower() == "meditation") : print("How nice!") elif(course.lower() == "magic") : print("You're not at Hogwarts") else : print("What is this COURSE?") ``` Given the following variables, create and print the string ``quantity`` ``fruit``(s) cost(s) $``price`` such that - there is an `"s"` at `"cost"` when there is more only one fruit, - there is an `"s"` at `"fruit"` when there is more that one fruit, - the price is displayed as ...,xxx,xxx.yy where yy corresponds rounded cents value ``` quantity = 3 fruit = "lemon" price = 1.8912392e4 pluralNoun = "s" if quantity > 1 else "" pluralVerb = "s" if quantity == 1 else "" formattedPrice = f"{price:,}" print(str(quantity) + " " + fruit + pluralNoun + " cost" + pluralVerb + " $" + formattedPrice) ``` ## Functions - Create your function in `src/sdia_python/lab1/functions.py` files - Import them using `from sdia_python.lab1.functions import MYFUNCTION` ``` from sdia_python.lab1.functions import * printMyString() ``` Define the function `is_unique(x)` which takes a list of integers `x` as input and returns `True` if the there is no duplicate items otherwise return `False`. ``` from sdia_python.lab1.functions import * x1 = ["3", "3", "4"] x2 = ["3", "4"] print(is_unique(x1)) print(is_unique(x2)) ``` Define a function `triangle_shape(height)`, that returns - `""` if `height=0`, - otherwise it returns a string that forms a triangle with height prescribed by `height`. **Examples** height=1 ``` x ``` height=2 ``` x xxx ``` height=3 ``` x xxx xxxxx ``` height=4 ``` x xxx xxxxx xxxxxxx ``` height=5 ``` x xxx xxxxx xxxxxxx xxxxxxxxx ``` height=6 ``` x xxx xxxxx xxxxxxx xxxxxxxxx xxxxxxxxxxx ``` ``` from sdia_python.lab1.functions import * print("height = 0\n" + triangle_shape(0)) print("height = 1\n" + triangle_shape(1)) print("height = 2\n" + triangle_shape(2)) print("height = 3\n" + triangle_shape(3)) ```
github_jupyter
``` import gc import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import torch import torch.nn as nn import torchvision # To view tensorboard metrics # tensorboard --logdir=logs --port=6006 --bind_all from torch.utils.tensorboard import SummaryWriter from functools import partial from evolver import CrossoverType, MutationType, VectorEvolver, InitType from unet import UNet from dataset_utils import PartitionType from cuda_utils import maybe_get_cuda_device, clear_cuda from landcover_dataloader import get_landcover_dataloaders from ignite.contrib.handlers.tensorboard_logger import * from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU from ignite.handlers import ModelCheckpoint from ignite.utils import setup_logger from ignite.engine import Engine import sys # Define directories for data, logging and model saving. base_dir = os.getcwd() dataset_name = "landcover_large" dataset_dir = os.path.join(base_dir, "data/" + dataset_name) experiment_name = "backprop_single_point_finetuning_test_test" model_name = "best_model_30_validation_accuracy=0.9409.pt" model_path = os.path.join(base_dir, "logs/" + dataset_name + "/" + model_name) log_dir = os.path.join(base_dir, "logs/" + dataset_name + "_" + experiment_name) # Create DataLoaders for each partition of Landcover data. dataloader_params = { 'batch_size': 1, 'shuffle': True, 'num_workers': 6, 'pin_memory': True} partition_types = [PartitionType.TRAIN, PartitionType.VALIDATION, PartitionType.FINETUNING, PartitionType.TEST] data_loaders = get_landcover_dataloaders(dataset_dir, partition_types, dataloader_params, force_create_dataset=True) finetuning_loader = data_loaders[2] test_loader = data_loaders[3] # Get GPU device if available. device = maybe_get_cuda_device() # Determine model and training params. 
params = { 'max_epochs': 10, 'n_classes': 4, 'in_channels': 4, 'depth': 5, 'learning_rate': 0.01, 'log_steps': 1, 'save_top_n_models': 4 } clear_cuda() model = UNet(in_channels = params['in_channels'], n_classes = params['n_classes'], depth = params['depth']) model.load_state_dict(torch.load(model_path)) model criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=params['learning_rate']) # Determine metrics for evaluation. train_metrics = { "accuracy": Accuracy(), "loss": Loss(criterion), "mean_iou": mIoU(ConfusionMatrix(num_classes = params['n_classes'])), } validation_metrics = { "accuracy": Accuracy(), "loss": Loss(criterion), "mean_iou": mIoU(ConfusionMatrix(num_classes = params['n_classes'])), } import matplotlib.pyplot as plt import seaborn as sns for batch in finetuning_loader: batch_x = batch[0] _ = model(batch_x) break drop_out_layers = model.get_dropout_layers() def mask_from_vec(vec, matrix_size): mask = np.ones(matrix_size) for i in range(len(vec)): if vec[i] == 0: mask[i, :, :] = 0 elif vec[i] == 1: mask[i, :, :] = 1 return mask for layer in drop_out_layers: layer_name = layer.name size = layer.x_size[1:] sizes = [size] clear_cuda() model = UNet(in_channels = params['in_channels'], n_classes = params['n_classes'], depth = params['depth']) model.load_state_dict(torch.load(model_path)) model.eval() criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=params['learning_rate']) num_channels = size[0] evolver = VectorEvolver(num_channels, CrossoverType.UNIFORM, MutationType.FLIP_BIT, InitType.BINOMIAL, flip_bit_prob=None, flip_bit_decay=1.0, binomial_prob=0.8) print("LAYER", layer_name, size) with torch.no_grad(): batch_x, batch_y = batch loss = sys.float_info.max child_mask_prev = None for i in range(300): child_vec = evolver.spawn_child() child_mask = mask_from_vec(child_vec, size) model.set_dropout_masks({layer_name: torch.tensor(child_mask, dtype=torch.float32)}) outputs = model(batch_x) 
current_loss = criterion(outputs[:, :, 127:128,127:128], batch_y[:,127:128,127:128]).item() evolver.add_child(child_mask, 1.0 / current_loss) print("Current", current_loss) loss = min(loss, current_loss) if current_loss == 0.0: current_loss = sys.float_info.max else: current_loss = 1.0 / current_loss evolver.add_child(child_vec, current_loss) priority, best_child = evolver.get_best_child() best_mask = mask_from_vec(best_child, size) model.set_dropout_masks({layer_name: torch.tensor(best_mask, dtype=torch.float32).to(device)}) # f, ax = plt.subplots(1, 3, figsize=(20, 8)) # ax[0].imshow((np.array(batch_y.detach().numpy()[0, :, :]))) # ax[1].imshow(np.argmax(np.moveaxis(np.array(outputs.detach().numpy()[0, :, :, :]), [0],[ 2]), axis=2)) # ax[2].imshow(child_mask[0, :, :]) # child_mask_prev = child_mask # plt.show() print("best_loss", 1.0/evolver.get_best_child()[0]) ```
github_jupyter
The $k$-Means Algorithm
====================

Again, we start by generating some artificial data:

```
import numpy as np
import matplotlib.pyplot as plt
#from IPython.display import HTML

# This line tells the notebook to show plots inside of the notebook
%matplotlib inline
plt.jet() # set the color map. When your colors are lost, re-run this.

import sklearn.datasets as datasets
X, Y = datasets.make_blobs(centers=4, cluster_std=0.5, random_state=0)
```
As always, we first *plot* the data to get a feeling of what we're dealing with:

```
plt.scatter(X[:,0], X[:,1]);
```
The data looks like it may contain four different "types" of data point. In fact, this is how it was created above. We can plot this information as well, using color:

```
plt.scatter(X[:,0], X[:,1], c=Y);
```
Normally, you do not know the information in `Y`, however. You could try to recover it from the data alone. This is what the kMeans algorithm does.

```
from sklearn.cluster import KMeans
kmeans = KMeans(4, random_state=8)
Y_hat = kmeans.fit(X).labels_
```
Now the label assignments should be quite similar to `Y`, up to a different ordering of the colors:

```
plt.scatter(X[:,0], X[:,1], c=Y_hat);
```
Often, you're not so much interested in the assignments to the means. You'll want to have a closer look at the means $\mu$. The means in $\mu$ can be seen as *representatives* of their respective cluster.

```
plt.scatter(X[:,0], X[:,1], c=Y_hat, alpha=0.4)
mu = kmeans.cluster_centers_
plt.scatter(mu[:,0], mu[:,1], s=100, c=np.unique(Y_hat))
# print is a function in Python 3 ("print mu" is Python 2 syntax).
print(mu)
```
## $k$-Means on Images

In this final example, we use the $k$-Means algorithm on the classical MNIST dataset. The MNIST dataset contains images of hand-written digits.
Let's first fetch the dataset from the internet (which may take a while, note the asterisk [*]):

```
# fetch_mldata was removed from scikit-learn; fetch_openml is its replacement.
from sklearn.datasets import fetch_openml
from sklearn.cluster import KMeans
from sklearn.utils import shuffle

X_digits, Y_digits = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)  # fetch dataset from internet
X_digits, Y_digits = shuffle(X_digits, Y_digits)  # shuffle dataset (which is ordered!)
X_digits = X_digits[-5000:]                       # take only the last instances, to shorten runtime of KMeans
```
Let's have a look at some of the instances in the dataset we just loaded:

```
plt.rc("image", cmap="binary")  # use black/white palette for plotting
for i in range(10):             # range, not Python 2's xrange
    plt.subplot(2, 5, i + 1)
    plt.imshow(X_digits[i].reshape(28, 28))
    plt.xticks(())
    plt.yticks(())
plt.tight_layout()
```
**Warning**: This takes quite a few seconds, so be patient until the asterisk [*] disappears!

```
kmeans = KMeans(20)
mu_digits = kmeans.fit(X_digits).cluster_centers_
```
Let's have a closer look at the means. Even though there are 10 digits, some of them are over/under-represented. Do you understand why?

```
plt.figure(figsize=(16, 6))
# "//" (integer division) is required in Python 3: plt.subplot and range
# both need integers, and "/" would produce a float here.
for i in range(2 * (mu_digits.shape[0] // 2)):  # loop over all means
    plt.subplot(2, mu_digits.shape[0] // 2, i + 1)
    plt.imshow(mu_digits[i].reshape(28, 28))
    plt.xticks(())
    plt.yticks(())
plt.tight_layout()
```
## Playing with $k$-Means

You can try other datasets to explore k-means : http://archive.ics.uci.edu/ml/

Try to get a feeling of how the algorithm proceeds.

Homework
==========================

Try to see what happens when you

- Increase the standard deviation of the clusters in this notebook
- Choose a "wrong" number of clusters by:
  1. changing the number of clusters generated
  2. changing the number of clusters used by KMeans
- What happens to the result of the $k$-Means algorithm when you have multiplied one axis of the matrix $X$ with a large value? For example, the 0-th axis with 100: `X[:,0] *= 100` Why does the result change?
- Combine the $k$-Means algorithm with the PCA algorithm
github_jupyter
# Exploring Data with Python

A significant part of a data scientist's role is to explore, analyze, and visualize data. There's a wide range of tools and programming languages that they can use to do this; and one of the most popular approaches is to use Jupyter notebooks (like this one) and Python.

Python is a flexible programming language that is used in a wide range of scenarios; from web applications to device programming. It's extremely popular in the data science and machine learning community because of the many packages it supports for data analysis and visualization.

In this notebook, we'll explore some of these packages, and apply basic techniques to analyze data. This is not intended to be a comprehensive Python programming exercise; or even a deep dive into data analysis. Rather, it's intended as a crash course in some of the common ways in which data scientists can use Python to work with data.

> **Note**: If you've never used the Jupyter Notebooks environment before, there are a few things you should be aware of:
>
> - Notebooks are made up of *cells*. Some cells (like this one) contain *markdown* text, while others (like the one beneath this one) contain code.
> - The notebook is connected to a Python *kernel* (you can see which one at the top right of the page - if you're running this notebook in an Azure Machine Learning compute instance it should be connected to the **Python 3.6 - AzureML** kernel). If you stop the kernel or disconnect from the server (for example, by closing and reopening the notebook, or ending and resuming your session), the output from cells that have been run will still be displayed; but any variables or functions defined in those cells will have been lost - you must rerun the cells before running any subsequent cells that depend on them.
> - You can run each code cell by using the **&#9658; Run** button.
The **&#9711;** symbol next to the kernel name at the top right will briefly turn to **&#9899;** while the cell runs before turning back to **&#9711;**.
> - The output from each code cell will be displayed immediately below the cell.
> - Even though the code cells can be run individually, some variables used in the code are global to the notebook. That means that you should run all of the code cells <u>**in order**</u>. There may be dependencies between code cells, so if you skip a cell, subsequent cells might not run correctly.

## Exploring data arrays with NumPy

Let's start by looking at some simple data.

Suppose a college takes a sample of student grades for a data science class.

Run the code in the cell below by clicking the **&#9658; Run** button to see the data.

```
import sys
print(sys.version)
data = [50,50,47,97,49,3,53,42,26,74,82,62,37,15,70,27,36,35,48,52,63,64]
print(data)
```
The data has been loaded into a Python **list** structure, which is a good data type for general data manipulation, but not optimized for numeric analysis. For that, we're going to use the **NumPy** package, which includes specific data types and functions for working with *Num*bers in *Py*thon.

Run the cell below to load the data into a NumPy **array**.

```
import numpy as np

grades = np.array(data)
print(grades)
```
Just in case you're wondering about the differences between a **list** and a NumPy **array**, let's compare how these data types behave when we use them in an expression that multiplies them by 2.

```
print (type(data),'x 2:', data * 2)
print('---')
print (type(grades),'x 2:', grades * 2)
```
Note that multiplying a list by 2 creates a new list of twice the length with the original sequence of list elements repeated. Multiplying a NumPy array on the other hand performs an element-wise calculation in which the array behaves like a *vector*, so we end up with an array of the same size in which each element has been multiplied by 2.
The key takeaway from this is that NumPy arrays are specifically designed to support mathematical operations on numeric data - which makes them more useful for data analysis than a generic list. You might have spotted that the class type for the numpy array above is a **numpy.ndarray**. The **nd** indicates that this is a structure that can consists of multiple *dimensions* (it can have *n* dimensions). Our specific instance has a single dimension of student grades. Run the cell below to view the **shape** of the array. ``` grades.shape ``` The shape confirms that this array has only one dimension, which contains 22 elements (there are 22 grades in the original list). You can access the individual elements in the array by their zer0-based ordinal position. Let's get the first element (the one in position 0). ``` grades[0] ``` Alright, now you know your way around a NumPy array, it's time to perform some analysis of the grades data. You can apply aggregations across the elements in the array, so let's find the simple average grade (in other words, the *mean* grade value). ``` grades.mean() ``` So the mean grade is just around 50 - more or less in the middle of the possible range from 0 to 100. Let's add a second set of data for the same students, this time recording the typical number of hours per week they devoted to studying. ``` # Define an array of study hours study_hours = [10.0,11.5,9.0,16.0,9.25,1.0,11.5,9.0,8.5,14.5,15.5, 13.75,9.0,8.0,15.5,8.0,9.0,6.0,10.0,12.0,12.5,12.0] # Create a 2D array (an array of arrays) student_data = np.array([study_hours, grades]) # display the array student_data ``` Now the data consists of a 2-dimensional array - an array of arrays. Let's look at its shape. ``` # Show shape of 2D array student_data.shape ``` The **student_data** array contains two elements, each of which is an array containing 22 elements. To navigate this structure, you need to specify the position of each element in the hierarchy. 
So to find the first value in the first array (which contains the study hours data), you can use the following code. ``` # Show the first element of the first element student_data[0][0] ``` Now you have a multidimensional array containing both the student's study time and grade information, which you can use to compare data. For example, how does the mean study time compare to the mean grade? ``` # Get the mean value of each sub-array avg_study = student_data[0].mean() avg_grade = student_data[1].mean() print('Average study hours: {:.2f}\nAverage grade: {:.2f}'.format(avg_study, avg_grade)) ``` ## Exploring tabular data with Pandas While NumPy provides a lot of the functionality you need to work with numbers, and specifically arrays of numeric values; when you start to deal with two-dimensional tables of data, the **Pandas** package offers a more convenient structure to work with - the **DataFrame**. Run the following cell to import the Pandas library and create a DataFrame with three columns. The first column is a list of student names, and the second and third columns are the NumPy arrays containing the study time and grade data. ``` import pandas as pd df_students = pd.DataFrame({'Name': ['Dan', 'Joann', 'Pedro', 'Rosie', 'Ethan', 'Vicky', 'Frederic', 'Jimmie', 'Rhonda', 'Giovanni', 'Francesca', 'Rajab', 'Naiyana', 'Kian', 'Jenny', 'Jakeem','Helena','Ismat','Anila','Skye','Daniel','Aisha'], 'StudyHours':student_data[0], 'Grade':student_data[1]}) df_students ``` Note that in addition to the columns you specified, the DataFrame includes an *index* to unique identify each row. We could have specified the index explicitly, and assigned any kind of appropriate value (for example, an email address); but because we didn't specify an index, one has been created with a unique integer value for each row. ### Finding and filtering data in a DataFrame You can use the DataFrame's **loc** method to retrieve data for a specific index value, like this. 
``` # Get the data for index value 5 df_students.loc[5] ``` You can also get the data at a range of index values, like this: ``` # Get the rows with index values from 0 to 5 df_students.loc[0:5] ``` In addition to being able to use the **loc** method to find rows based on the index, you can use the **iloc** method to find rows based on their ordinal position in the DataFrame (regardless of the index): ``` # Get data in the first five rows df_students.iloc[0:5] ``` Look carefully at the `iloc[0:5]` results, and compare them to the `loc[0:5]` results you obtained previously. Can you spot the difference? The **loc** method returned rows with index *label* in the list of values from *0* to *5* - which includes *0*, *1*, *2*, *3*, *4*, and *5* (six rows). However, the **iloc** method returns the rows in the *positions* included in the range 0 to 5, and since integer ranges don't include the upper-bound value, this includes positions *0*, *1*, *2*, *3*, and *4* (five rows). **iloc** identifies data values in a DataFrame by *position*, which extends beyond rows to columns. So for example, you can use it to find the values for the columns in positions 1 and 2 in row 0, like this: ``` df_students.iloc[0,[1,2]] ``` Let's return to the **loc** method, and see how it works with columns. Remember that **loc** is used to locate data items based on index values rather than positions. In the absence of an explicit index column, the rows in our dataframe are indexed as integer values, but the columns are identified by name: ``` df_students.loc[0,'Grade'] ``` Here's another useful trick. 
You can use the **loc** method to find indexed rows based on a filtering expression that references named columns other than the index, like this:

```
df_students.loc[df_students['Name']=='Aisha']
```
Actually, you don't need to explicitly use the **loc** method to do this - you can simply apply a DataFrame filtering expression, like this:

```
df_students[df_students['Name']=='Aisha']
```
And for good measure, you can achieve the same results by using the DataFrame's **query** method, like this:

```
df_students.query('Name=="Aisha"')
```
The three previous examples underline an occasionally confusing truth about working with Pandas. Often, there are multiple ways to achieve the same results. Another example of this is the way you refer to a DataFrame column name. You can specify the column name as a named index value (as in the `df_students['Name']` examples we've seen so far), or you can use the column as a property of the DataFrame, like this:

```
df_students[df_students.Name == 'Aisha']
```
### Loading a DataFrame from a file

We constructed the DataFrame from some existing arrays. However, in many real-world scenarios, data is loaded from sources such as files. Let's replace the student grades DataFrame with the contents of a text file.

```
df_students = pd.read_csv('data/grades.csv',delimiter=',',header='infer')
df_students.head()
```
The DataFrame's **read_csv** method is used to load data from text files. As you can see in the example code, you can specify options such as the column delimiter and which row (if any) contains column headers (in this case, the delimiter is a comma and the first row contains the column names - these are the default settings, so the parameters could have been omitted).

### Handling missing values

One of the most common issues data scientists need to deal with is incomplete or missing data. So how would we know that the DataFrame contains missing values?
You can use the **isnull** method to identify which individual values are null, like this: ``` df_students.isnull() ``` Of course, with a larger DataFrame, it would be inefficient to review all of the rows and columns individually; so we can get the sum of missing values for each column, like this: ``` df_students.isnull().sum() ``` So now we know that there's one missing **StudyHours** value, and two missing **Grade** values. To see them in context, we can filter the dataframe to include only rows where any of the columns (axis 1 of the DataFrame) are null. ``` df_students[df_students.isnull().any(axis=1)] ``` When the DataFrame is retrieved, the missing numeric values show up as **NaN** (*not a number*). So now that we've found the null values, what can we do about them? One common approach is to *impute* replacement values. For example, if the number of study hours is missing, we could just assume that the student studied for an average amount of time and replace the missing value with the mean study hours. To do this, we can use the **fillna** method, like this: ``` df_students.StudyHours = df_students.StudyHours.fillna(df_students.StudyHours.mean()) df_students ``` Alternatively, it might be important to ensure that you only use data you know to be absolutely correct; so you can drop rows or columns that contains null values by using the **dropna** method. In this case, we'll remove rows (axis 0 of the DataFrame) where any of the columns contain null values. ``` df_students = df_students.dropna(axis=0, how='any') df_students ``` ### Explore data in the DataFrame Now that we've cleaned up the missing values, we're ready to explore the data in the DataFrame. Let's start by comparing the mean study hours and grades. ``` # Get the mean study hours using to column name as an index mean_study = df_students['StudyHours'].mean() # Get the mean grade using the column name as a property (just to make the point!) 
mean_grade = df_students.Grade.mean() # Print the mean study hours and mean grade print('Average weekly study hours: {:.2f}\nAverage grade: {:.2f}'.format(mean_study, mean_grade)) ``` OK, let's filter the DataFrame to find only the students who studied for more than the average amount of time. ``` # Get students who studied for the mean or more hours df_students[df_students.StudyHours > mean_study] ``` Note that the filtered result is itself a DataFrame, so you can work with its columns just like any other DataFrame. For example, let's find the average grade for students who undertook more than the average amount of study time. ``` # What was their mean grade? df_students[df_students.StudyHours > mean_study].Grade.mean() ``` Let's assume that the passing grade for the course is 60. We can use that information to add a new column to the DataFrame, indicating whether or not each student passed. First, we'll create a Pandas **Series** containing the pass/fail indicator (True or False), and then we'll concatenate that series as a new column (axis 1) in the DataFrame. ``` passes = pd.Series(df_students['Grade'] >= 60) df_students = pd.concat([df_students, passes.rename("Pass")], axis=1) df_students ``` DataFrames are designed for tabular data, and you can use them to perform many of the kinds of data analytics operation you can do in a relational database; such as grouping and aggregating tables of data. For example, you can use the **groupby** method to group the student data into groups based on the **Pass** column you added previously, and count the number of names in each group - in other words, you can determine how many students passed and failed. ``` print(df_students.groupby(df_students.Pass).Name.count()) ``` You can aggregate multiple fields in a group using any available aggregation function. For example, you can find the mean study time and grade for the groups of students who passed and failed the course. 
```
# Select the numeric columns with a list (double brackets): indexing a
# GroupBy with a bare tuple of labels was deprecated and is removed in
# modern pandas.
print(df_students.groupby(df_students.Pass)[['StudyHours', 'Grade']].mean())
```
DataFrames are amazingly versatile, and make it easy to manipulate data. Many DataFrame operations return a new copy of the DataFrame; so if you want to modify a DataFrame but keep the existing variable, you need to assign the result of the operation to the existing variable. For example, the following code sorts the student data into descending order of Grade, and assigns the resulting sorted DataFrame to the original **df_students** variable.

```
# Create a DataFrame with the data sorted by Grade (descending)
df_students = df_students.sort_values('Grade', ascending=False)

# Show the DataFrame
df_students
```
## Visualizing data with Matplotlib

DataFrames provide a great way to explore and analyze tabular data, but sometimes a picture is worth a thousand rows and columns. The **Matplotlib** library provides the foundation for plotting data visualizations that can greatly enhance your ability to analyze the data.

Let's start with a simple bar chart that shows the grade of each student.

```
# Ensure plots are displayed inline in the notebook
%matplotlib inline

from matplotlib import pyplot as plt

# Create a bar plot of name vs grade
plt.bar(x=df_students.Name, height=df_students.Grade)

# Display the plot
plt.show()
```
Well, that worked; but the chart could use some improvements to make it clearer what we're looking at.

Note that you used the **pyplot** class from Matplotlib to plot the chart. This class provides a whole bunch of ways to improve the visual elements of the plot. For example, the following code:

- Specifies the color of the bar chart.
- Adds a title to the chart (so we know what it represents) - Adds labels to the X and Y (so we know which axis shows which data) - Adds a grid (to make it easier to determine the values for the bars) - Rotates the X markers (so we can read them) ``` # Create a bar plot of name vs grade plt.bar(x=df_students.Name, height=df_students.Grade, color='orange') # Customize the chart plt.title('Student Grades') plt.xlabel('Student') plt.ylabel('Grade') plt.grid(color='#95a5a6', linestyle='--', linewidth=2, axis='y', alpha=0.7) plt.xticks(rotation=90) # Display the plot plt.show() ``` A plot is technically contained with a **Figure**. In the previous examples, the figure was created implicitly for you; but you can create it explicitly. For example, the following code creates a figure with a specific size. ``` # Create a Figure fig = plt.figure(figsize=(8,3)) # Create a bar plot of name vs grade plt.bar(x=df_students.Name, height=df_students.Grade, color='orange') # Customize the chart plt.title('Student Grades') plt.xlabel('Student') plt.ylabel('Grade') plt.grid(color='#95a5a6', linestyle='--', linewidth=2, axis='y', alpha=0.7) plt.xticks(rotation=90) # Show the figure plt.show() ``` A figure can contain multiple subplots, each on its own *axis*. For example, the following code creates a figure with two subplots - one is a bar chart showing student grades, and the other is a pie chart comparing the number of passing grades to non-passing grades. 
``` # Create a figure for 2 subplots (1 row, 2 columns) fig, ax = plt.subplots(1, 2, figsize = (10,4)) # Create a bar plot of name vs grade on the first axis ax[0].bar(x=df_students.Name, height=df_students.Grade, color='orange') ax[0].set_title('Grades') ax[0].set_xticklabels(df_students.Name, rotation=90) # Create a pie chart of pass counts on the second axis pass_counts = df_students['Pass'].value_counts() ax[1].pie(pass_counts, labels=pass_counts) ax[1].set_title('Passing Grades') ax[1].legend(pass_counts.keys().tolist()) # Add a title to the Figure fig.suptitle('Student Data') # Show the figure fig.show() ``` Until now, you've used methods of the Matplotlib.pyplot object to plot charts. However, Matplotlib is so foundational to graphics in Python that many packages, including Pandas, provide methods that abstract the underlying Matplotlib functions and simplify plotting. For example, the DataFrame provides its own methods for plotting data, as shown in the following example to plot a bar chart of study hours. ``` df_students.plot.bar(x='Name', y='StudyHours', color='teal', figsize=(6,4)) ``` ## Getting started with statistical analysis Now that you know how to use Python to manipulate and visualize data, you can start analyzing it. A lot of data science is rooted in *statistics*, so we'll explore some basic statistical techniques. > **Note**: This is <u>not</u> intended to teach you statistics - that's much too big a topic for this notebook. It will however introduce you to some statistical concepts and techniques that data scientists use as they explore data in preparation for machine learning modeling. ### Descriptive statistics and data distribution When examining a *variable* (for example a sample of student grades), data scientists are particularly interested in its *distribution* (in other words, how are all the different grade values spread across the sample). 
The starting point for this exploration is often to visualize the data as a histogram, and see how frequently each value for the variable occurs. ``` # Get the variable to examine var_data = df_students['Grade'] # Create a Figure fig = plt.figure(figsize=(10,4)) # Plot a histogram plt.hist(var_data) # Add titles and labels plt.title('Data Distribution') plt.xlabel('Value') plt.ylabel('Frequency') # Show the figure fig.show() ``` The histogram for grades is a symmetric shape, where the most frequently occuring grades tend to be in the middle of the range (around 50), with fewer grades at the extreme ends of the scale. #### Measures of central tendency To understand the distribution better, we can examine so-called *measures of central tendency*; which is a fancy way of describing statistics that represent the "middle" of the data. The goal of this is to try to find a "typical" value. Common ways to define the middle of the data include: - The *mean*: A simple average based on adding together all of the values in the sample set, and then dividing the total by the number of samples. - The *median*: The value in the middle of the range of all of the sample values. - The *mode*: The most commonly occuring value in the sample set<sup>\*</sup>. Let's calculate these values, along with the minimum and maximum values for comparison, and show them on the histogram. > <sup>\*</sup>Of course, in some sample sets , there may be a tie for the most common value - in which case the dataset is described as *bimodal* or even *multimodal*. 
``` # Get the variable to examine var = df_students['Grade'] # Get statistics min_val = var.min() max_val = var.max() mean_val = var.mean() med_val = var.median() mod_val = var.mode()[0] print('Minimum:{:.2f}\nMean:{:.2f}\nMedian:{:.2f}\nMode:{:.2f}\nMaximum:{:.2f}\n'.format(min_val, mean_val, med_val, mod_val, max_val)) # Create a Figure fig = plt.figure(figsize=(10,4)) # Plot a histogram plt.hist(var) # Add lines for the statistics plt.axvline(x=min_val, color = 'gray', linestyle='dashed', linewidth = 2) plt.axvline(x=mean_val, color = 'cyan', linestyle='dashed', linewidth = 2) plt.axvline(x=med_val, color = 'red', linestyle='dashed', linewidth = 2) plt.axvline(x=mod_val, color = 'yellow', linestyle='dashed', linewidth = 2) plt.axvline(x=max_val, color = 'gray', linestyle='dashed', linewidth = 2) # Add titles and labels plt.title('Data Distribution') plt.xlabel('Value') plt.ylabel('Frequency') # Show the figure fig.show() ``` For the grade data, the mean, median, and mode all seem to be more or less in the middle of the minimum and maximum, at around 50. Another way to visualize the distribution of a variable is to use a *box* plot (sometimes called a *box-and-whiskers* plot). Let's create one for the grade data. ``` # Get the variable to examine var = df_students['Grade'] # Create a Figure fig = plt.figure(figsize=(10,4)) # Plot a histogram plt.boxplot(var) # Add titles and labels plt.title('Data Distribution') # Show the figure fig.show() ``` The box plot shows the distribution of the grade values in a different format to the histogram. The *box* part of the plot shows where the inner two *quartiles* of the data reside - so in this case, half of the grades are between approximately 36 and 63. The *whiskers* extending from the box show the outer two quartiles; so the other half of the grades in this case are between 0 and 36 or 63 and 100. The line in the box indicates the *median* value. 
It's often useful to combine histograms and box plots, with the box plot's orientation changed to align it with the histogram (in some ways, it can be helpful to think of the histogram as a "front elevation" view of the distribution, and the box plot as a "plan" view of the distribution from above.) ``` # Create a function that we can re-use def show_distribution(var_data): from matplotlib import pyplot as plt # Get statistics min_val = var_data.min() max_val = var_data.max() mean_val = var_data.mean() med_val = var_data.median() mod_val = var_data.mode()[0] print('Minimum:{:.2f}\nMean:{:.2f}\nMedian:{:.2f}\nMode:{:.2f}\nMaximum:{:.2f}\n'.format(min_val, mean_val, med_val, mod_val, max_val)) # Create a figure for 2 subplots (2 rows, 1 column) fig, ax = plt.subplots(2, 1, figsize = (10,4)) # Plot the histogram ax[0].hist(var_data) ax[0].set_ylabel('Frequency') # Add lines for the mean, median, and mode ax[0].axvline(x=min_val, color = 'gray', linestyle='dashed', linewidth = 2) ax[0].axvline(x=mean_val, color = 'cyan', linestyle='dashed', linewidth = 2) ax[0].axvline(x=med_val, color = 'red', linestyle='dashed', linewidth = 2) ax[0].axvline(x=mod_val, color = 'yellow', linestyle='dashed', linewidth = 2) ax[0].axvline(x=max_val, color = 'gray', linestyle='dashed', linewidth = 2) # Plot the boxplot ax[1].boxplot(var_data, vert=False) ax[1].set_xlabel('Value') # Add a title to the Figure fig.suptitle('Data Distribution') # Show the figure fig.show() # Get the variable to examine col = df_students['Grade'] # Call the function show_distribution(col) ``` All of the measurements of central tendency are right in the middle of the data distribution, which is symmetric with values becoming progressively lower in both directions from the middle. To explore this distribution in more detail, you need to understand that statistics is fundamentally about taking *samples* of data and using probability functions to extrapolate information about the full *population* of data. 
For example, the student data consists of 22 samples, and for each sample there is a grade value. You can think of each sample grade as a variable that's been randomly selected from the set of all grades awarded for this course. With enough of these random variables, you can calculate something called a *probability density function*, which estimates the distribution of grades for the full population. The Pandas DataFrame class provides a helpful plot function to show this density.

```
def show_density(var_data):
    """Plot the estimated probability density of a numeric pandas Series.

    Draws the density curve for ``var_data`` and marks the mean (cyan),
    median (red), and mode (yellow) with dashed vertical lines.
    """
    from matplotlib import pyplot as plt

    fig = plt.figure(figsize=(10,4))

    # Plot density
    var_data.plot.density()

    # Add titles and labels
    plt.title('Data Density')

    # Show the mean, median, and mode
    plt.axvline(x=var_data.mean(), color = 'cyan', linestyle='dashed', linewidth = 2)
    plt.axvline(x=var_data.median(), color = 'red', linestyle='dashed', linewidth = 2)
    plt.axvline(x=var_data.mode()[0], color = 'yellow', linestyle='dashed', linewidth = 2)

    # Show the figure
    plt.show()

# Get the density of Grade
col = df_students['Grade']
show_density(col)
```

As expected from the histogram of the sample, the density shows the characteristic 'bell curve" of what statisticians call a *normal* distribution with the mean and mode at the center and symmetric tails. Now let's take a look at the distribution of the study hours data.

```
# Get the variable to examine
col = df_students['StudyHours']

# Call the function
show_distribution(col)
```

The distribution of the study time data is significantly different from that of the grades. Note that the whiskers of the box plot only extend to around 6.0, indicating that the vast majority of the first quarter of the data is above this value. The minimum is marked with an **o**, indicating that it is statistically an *outlier* - a value that lies significantly outside the range of the rest of the distribution. Outliers can occur for many reasons.
Maybe a student meant to record "10" hours of study time, but entered "1" and missed the "0". Or maybe the student was abnormally lazy when it comes to studying! Either way, it's a statistical anomaly that doesn't represent a typical student. Let's see what the distribution looks like without it. ``` # Get the variable to examine col = df_students[df_students.StudyHours>1]['StudyHours'] # Call the function show_distribution(col) ``` In this example, the dataset is small enough to clearly see that the value **1** is an outlier for the **StudyHours** column, so you can exclude it explicitly. In most real-world cases, it's easier to consider outliers as being values that fall below or above percentiles within which most of the data lie. For example, the following code uses the Pandas **quantile** function to exclude observations below the 0.01 quantile (the value above which 99% of the data reside). ``` q01 = df_students.StudyHours.quantile(0.01) # Get the variable to examine col = df_students[df_students.StudyHours>q01]['StudyHours'] # Call the function show_distribution(col) ``` > **Tip**: You can also eliminate outliers at the upper end of the distribution by defining a threshold at a high percentile value - for example, you could use the **quantile** function to find the 0.99 quantile below which 99% of the data reside. With the outliers removed, the box plot shows all data within the four quartiles. Note that the distribution is not symmetric like it is for the grade data though - there are some students with very high study times of around 16 hours, but the bulk of the data is between 7 and 13 hours; The few extremely high values pull the mean towards the higher end of the scale. Let's look at the density for this distribution. ``` # Get the density of StudyHours show_density(col) ``` This kind of distribution is called *right skewed*. 
The mass of the data is on the left side of the distribution, creating a long tail to the right because of the values at the extreme high end; which pull the mean to the right. #### Measures of variance So now we have a good idea where the middle of the grade and study hours data distributions are. However, there's another aspect of the distributions we should examine: how much variability is there in the data? Typical statistics that measure variability in the data include: - **Range**: The difference between the maximum and minimum. There's no built-in function for this, but it's easy to calculate using the **min** and **max** functions. - **Variance**: The average of the squared difference from the mean. You can use the built-in **var** function to find this. - **Standard Deviation**: The square root of the variance. You can use the built-in **std** function to find this. ``` for col_name in ['Grade','StudyHours']: col = df_students[col_name] rng = col.max() - col.min() var = col.var() std = col.std() print('\n{}:\n - Range: {:.2f}\n - Variance: {:.2f}\n - Std.Dev: {:.2f}'.format(col_name, rng, var, std)) ``` Of these statistics, the standard deviation is generally the most useful. It provides a measure of variance in the data on the same scale as the data itself (so grade points for the Grade distribution and hours for the StudyHours distribution). The higher the standard deviation, the more variance there is when comparing values in the distribution to the distribution mean - in other words, the data is more spread out. When working with a *normal* distribution, the standard deviation works with the particular characteristics of a normal distribution to provide even greater insight. Run the cell below to see the relationship between standard deviations and the data in the normal distribution. 
``` import scipy.stats as stats # Get the Grade column col = df_students['Grade'] # get the density density = stats.gaussian_kde(col) # Plot the density col.plot.density() # Get the mean and standard deviation s = col.std() m = col.mean() # Annotate 1 stdev x1 = [m-s, m+s] y1 = density(x1) plt.plot(x1,y1, color='magenta') plt.annotate('1 std (68.26%)', (x1[1],y1[1])) # Annotate 2 stdevs x2 = [m-(s*2), m+(s*2)] y2 = density(x2) plt.plot(x2,y2, color='green') plt.annotate('2 std (95.45%)', (x2[1],y2[1])) # Annotate 3 stdevs x3 = [m-(s*3), m+(s*3)] y3 = density(x3) plt.plot(x3,y3, color='orange') plt.annotate('3 std (99.73%)', (x3[1],y3[1])) # Show the location of the mean plt.axvline(col.mean(), color='cyan', linestyle='dashed', linewidth=1) plt.axis('off') plt.show() ``` The horizontal lines show the percentage of data within 1, 2, and 3 standard deviations of the mean (plus or minus). In any normal distribution: - Approximately 68.26% of values fall within one standard deviation from the mean. - Approximately 95.45% of values fall within two standard deviations from the mean. - Approximately 99.73% of values fall within three standard deviations from the mean. So, since we know that the mean grade is 49.8, the standard deviation is 21.47, and distribution of grades is approximately normal; we can calculate that 68.26% of students should achieve a grade between 28.33 and 71.27. The descriptive statistics we've used to understand the distribution of the student data variables are the basis of statistical analysis; and because they're such an important part of exploring your data, there's a built-in **Describe** method of the DataFrame object that returns the main descriptive statistics for all numeric columns. ``` df_students.describe() ``` ## Comparing data Now that you know something about the statistical distribution of the data in your dataset, you're ready to examine your data to identify any apparent relationships between variables. 
First of all, let's get rid of any rows that contain outliers so that we have a sample that is representative of a typical class of students. We identified that the StudyHours column contains some outliers with extremely low values, so we'll remove those rows. ``` df_sample = df_students[df_students['StudyHours']>1] df_sample ``` ### Comparing numeric and categorical variables The data includes two *numeric* variables (**StudyHours** and **Grade**) and two *categorical* variables (**Name** and **Pass**). Let's start by comparing the numeric **StudyHours** column to the categorical **Pass** column to see if there's an apparent relationship between the number of hours studied and a passing grade. To make this comparison, let's create box plots showing the distribution of StudyHours for each possible Pass value (true and false). ``` df_sample.boxplot(column='StudyHours', by='Pass', figsize=(8,5)) ``` Comparing the StudyHours distributions, it's immediately apparent (if not particularly surprising) that students who passed the course tended to study for more hours than students who didn't. So if you wanted to predict whether or not a student is likely to pass the course, the amount of time they spend studying may be a good predictive feature. ### Comparing numeric variables Now let's compare two numeric variables. We'll start by creating a bar chart that shows both grade and study hours. ``` # Create a bar plot of name vs grade and study hours df_sample.plot(x='Name', y=['Grade','StudyHours'], kind='bar', figsize=(8,5)) ``` The chart shows bars for both grade and study hours for each student; but it's not easy to compare because the values are on different scales. Grades are measured in grade points, and range from 3 to 97; while study time is measured in hours and ranges from 1 to 16. 
A common technique when dealing with numeric data in different scales is to *normalize* the data so that the values retain their proportional distribution, but are measured on the same scale. To accomplish this, we'll use a technique called *MinMax* scaling that distributes the values proportionally on a scale of 0 to 1. You could write the code to apply this transformation; but the **Scikit-Learn** library provides a scaler to do it for you. ``` from sklearn.preprocessing import MinMaxScaler # Get a scaler object scaler = MinMaxScaler() # Create a new dataframe for the scaled values df_normalized = df_sample[['Name', 'Grade', 'StudyHours']].copy() # Normalize the numeric columns df_normalized[['Grade','StudyHours']] = scaler.fit_transform(df_normalized[['Grade','StudyHours']]) # Plot the normalized values df_normalized.plot(x='Name', y=['Grade','StudyHours'], kind='bar', figsize=(8,5)) ``` With the data normalized, it's easier to see an apparent relationship between grade and study time. It's not an exact match, but it definitely seems like students with higher grades tend to have studied more. So there seems to be a correlation between study time and grade; and in fact, there's a statistical *correlation* measurement we can use to quantify the relationship between these columns. ``` df_normalized.Grade.corr(df_normalized.StudyHours) ``` The correlation statistic is a value between -1 and 1 that indicates the strength of a relationship. Values above 0 indicate a *positive* correlation (high values of one variable tend to coincide with high values of the other), while values below 0 indicate a *negative* correlation (high values of one variable tend to coincide with low values of the other). In this case, the correlation value is close to 1; showing a strongly positive correlation between study time and grade. > **Note**: Data scientists often quote the maxim "*correlation* is not *causation*". 
In other words, as tempting as it might be, you shouldn't interpret the statistical correlation as explaining *why* one of the values is high. In the case of the student data, the statistics demonstrate that students with high grades tend to also have high amounts of study time; but this is not the same as proving that they achieved high grades *because* they studied a lot. The statistic could equally be used as evidence to support the nonsensical conclusion that the students studied a lot *because* their grades were going to be high. Another way to visualise the apparent correlation between two numeric columns is to use a *scatter* plot. ``` # Create a scatter plot df_sample.plot.scatter(title='Study Time vs Grade', x='StudyHours', y='Grade') ``` Again, it looks like there's a discernible pattern in which the students who studied the most hours are also the students who got the highest grades. We can see this more clearly by adding a *regression* line (or a *line of best fit*) to the plot that shows the general trend in the data. To do this, we'll use a statistical technique called *least squares regression*. > **Warning - Math Ahead!** > > Cast your mind back to when you were learning how to solve linear equations in school, and recall that the *slope-intercept* form of a linear equation looks like this: > > \begin{equation}y = mx + b\end{equation} > > In this equation, *y* and *x* are the coordinate variables, *m* is the slope of the line, and *b* is the y-intercept (where the line goes through the Y axis). > > In the case of our scatter plot for our student data, we already have our values for *x* (*StudyHours*) and *y* (*Grade*), so we just need to calculate the intercept and slope of the straight line that lies closest to those points. 
Then we can form a linear equation that calculates a new *y* value on that line for each of our *x* (*StudyHours*) values - to avoid confusion, we'll call this new *y* value *f(x)* (because it's the output from a linear equation ***f***unction based on *x*). The difference between the original *y* (*Grade*) value and the *f(x)* value is the *error* between our regression line and the actual *Grade* achieved by the student. Our goal is to calculate the slope and intercept for a line with the lowest overall error. > > Specifically, we define the overall error by taking the error for each point, squaring it, and adding all the squared errors together. The line of best fit is the line that gives us the lowest value for the sum of the squared errors - hence the name *least squares regression*. Fortunately, you don't need to code the regression calculation yourself - the **SciPy** package includes a **stats** class that provides a **linregress** method to do the hard work for you. This returns (among other things) the coefficients you need for the slope equation - slope (*m*) and intercept (*b*) based on a given pair of variable samples you want to compare. 
``` from scipy import stats # df_regression = df_sample[['Grade', 'StudyHours']].copy() # Get the regression slope and intercept m, b, r, p, se = stats.linregress(df_regression['StudyHours'], df_regression['Grade']) print('slope: {:.4f}\ny-intercept: {:.4f}'.format(m,b)) print('so...\n f(x) = {:.4f}x + {:.4f}'.format(m,b)) # Use the function (mx + b) to calculate f(x) for each x (StudyHours) value df_regression['fx'] = (m * df_regression['StudyHours']) + b # Calculate the error between f(x) and the actual y (Grade) value df_regression['error'] = df_regression['fx'] - df_regression['Grade'] # Create a scatter plot of StudyHours vs Grade df_regression.plot.scatter(x='StudyHours', y='Grade') # Plot the regression line plt.plot(df_regression['StudyHours'],df_regression['fx'], color='cyan') # Display the plot plt.show() ``` Note that this time, the code plotted two distinct things - the scatter plot of the sample study hours and grades is plotted as before, and then a line of best fit based on the least squares regression coefficients is plotted. The slope and intercept coefficients calculated for the regression line are shown above the plot. The line is based on the ***f*(x)** values calculated for each **StudyHours** value. Run the following cell to see a table that includes the following values: - The **StudyHours** for each student. - The **Grade** achieved by each student. - The ***f(x)*** value calculated using the regression line coefficients. - The *error* between the calculated ***f(x)*** value and the actual **Grade** value. Some of the errors, particularly at the extreme ends, are quite large (up to over 17.5 grade points); but in general, the line is pretty close to the actual grades. 
```
# Show the original x,y values, the f(x) value, and the error
df_regression[['StudyHours', 'Grade', 'fx', 'error']]
```

### Using the regression coefficients for prediction

Now that you have the regression coefficients for the study time and grade relationship, you can use them in a function to estimate the expected grade for a given amount of study.

```
# Define a function based on our regression coefficients
def f(x):
    """Predict a grade from ``x`` weekly study hours using the slope and
    intercept previously fitted with least squares regression."""
    m = 6.3134
    b = -17.9164
    return m*x + b

study_time = 14

# Get f(x) for study time
prediction = f(study_time)

# Grade can't be less than 0 or more than 100
expected_grade = max(0,min(100,prediction))

#Print the estimated grade
print ('Studying for {} hours per week may result in a grade of {:.0f}'.format(study_time, expected_grade))
```

So by applying statistics to sample data, you've determined a relationship between study time and grade; and encapsulated that relationship in a general function that can be used to predict a grade for a given amount of study time. This technique is in fact the basic premise of machine learning. You can take a set of sample data that includes one or more *features* (in this case, the number of hours studied) and a known *label* value (in this case, the grade achieved) and use the sample data to derive a function that calculates predicted label values for any given set of features. ## Further Reading To learn more about the Python packages you explored in this notebook, see the following documentation: - [NumPy](https://numpy.org/doc/stable/) - [Pandas](https://pandas.pydata.org/pandas-docs/stable/) - [Matplotlib](https://matplotlib.org/contents.html) ## Challenge: Analyze Flight Data If this notebook has inspired you to try exploring data for yourself, why not take on the challenge of a real-world dataset containing flight records from the US Department of Transportation? You'll find the challenge in the [/challenges/01 - Flights Challenge.ipynb](./challenges/01%20-%20Flights%20Challenge.ipynb) notebook! 
> **Note**: The time to complete this optional challenge is not included in the estimated time for this exercise - you can spend as little or as much time on it as you like!
github_jupyter
<a href="https://colab.research.google.com/github/mohd-faizy/CAREER-TRACK-Data-Scientist-with-Python/blob/main/Police_Activity_data_for_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> --- <strong> <h1 align='center'>Preparing the Police Activity data for analysis (Part - 1) </h1> </strong> --- ``` !git clone https://github.com/mohd-faizy/CAREER-TRACK-Data-Scientist-with-Python.git ``` __Change the current working directory__ ``` # import os module import os # to specified path os.chdir('/content/CAREER-TRACK-Data-Scientist-with-Python/21_Analyzing Police Activity with pandas/_dataset') # verify the path using getcwd() cwd = os.getcwd() # print the current directory print("Current working directory is:", cwd) ls ``` ## $\color{green}{\textbf{Dataset_1_police.csv:}}$ [Stanford Open Policing Project dataset](https://openpolicing.stanford.edu/) On a typical day in the United States, police officers make more than 50,000 traffic stops. __THE STANFORD OPEN POLICING PROJECT__ gathers, analyzes, and releases the records from millions of traffic stops by law enforcement agencies across the __US__. <p align='center'> <a href="#"> <img src='https://policylab.stanford.edu/images/icons/stanford-open-policing-project.png' width=500px height=300px alt=""> </a> </p> ## __01 Examining the dataset__ Before beginning your analysis, it's important that you familiarize yourself with the dataset. In this exercise, we'll read the dataset into pandas, examine the first few rows, and then count the number of missing values. ``` # Import the pandas library as pd import pandas as pd # Read 'police.csv' into a DataFrame named ri ri = pd.read_csv('police.csv') # Examine the head of the DataFrame ri.head() ri.isna().sum() ri.isnull() # Count the number of missing values in each column ri.isnull().sum() ``` ## __02 Dropping columns__ Often, a DataFrame will contain columns that are not useful to our analysis. 
Such columns should be dropped from the DataFrame, to make it easier for us to focus on the remaining columns. In this exercise, we'll drop the `'county_name'` column because it only contains missing values, and we'll drop the `'state'` column because all of the traffic stops took place in one state (__Rhode Island__). Thus, these columns can be dropped because **they contain no useful information**. ``` # Examine the shape of the DataFrame print(ri.shape) # Drop the 'county_name' and 'state' columns ri.drop(['county_name', 'state'], axis='columns', inplace=True) # Examine the shape of the DataFrame (again) print(ri.shape) ``` ## __03 Dropping rows__ When we know that a **specific column** will be **critical to our analysis**, and only a small fraction of rows are missing a value in that column, *it often makes sense to remove those rows from the dataset.* the `'driver_gender'` column will be critical to many of our analyses. Because only a small fraction of rows are missing `'driver_gender'`, we'll drop those rows from the dataset. ``` # Count the number of missing values in each column print(ri.isnull().sum()) # Drop all rows that are missing 'driver_gender' ri.dropna(subset=['driver_gender'], inplace=True) # Count the number of missing values in each column (again) print(ri.isnull().sum()) # Examine the shape of the DataFrame print(ri.shape) ``` ## __04 Finding an incorrect data type__ ``` ri.dtypes ``` $\color{green}{\textbf{Note:}} $ $\Rightarrow$ `is_arrested` should have a data type of __bool__ ## __05 Fixing a data type__ - `is_arrested column` currently has the __object__ data type. - we have to change the data type to __bool__, which is the most suitable type for a column containing **True** and **False** values. >Fixing the data type will enable us to use __mathematical operations__ on the `is_arrested` column that would not be possible otherwise. 
``` # Examine the head of the 'is_arrested' column print(ri.is_arrested.head()) # Change the data type of 'is_arrested' to 'bool' ri['is_arrested'] = ri.is_arrested.astype('bool') # Check the data type of 'is_arrested' print(ri.is_arrested.dtype) ``` ## __06 Combining object columns (datetime format)__ - Currently, the date and time of each traffic stop are stored in separate object columns: **stop_date** and **stop_time**. - we have to **combine** these two columns into a **single column**, and then convert it to **datetime format**. - This will be beneficial because unlike object columns, datetime columns provide date-based attributes that will make our analysis easier. ``` # Concatenate 'stop_date' and 'stop_time' (separated by a space) combined = ri.stop_date.str.cat(ri.stop_time, sep=' ') # Convert 'combined' to datetime format ri['stop_datetime'] = pd.to_datetime(combined) # Examine the data types of the DataFrame print(ri.dtypes) ``` ## __07 Setting the index__ The last step is to set the `stop_datetime` column as the DataFrame's **index**. By **replacing** the **default index** with a **DatetimeIndex**, this will make it easier to analyze the dataset by date and time, which will come in handy later. ``` # Set 'stop_datetime' as the index ri.set_index('stop_datetime', inplace=True) # Examine the index ri.index # Examine the columns ri.columns ri.head() ``` --- <strong> <h1 align='center'>Exploring the relationship between gender and policing (Part - 2) </h1> </strong> --- ## __08 Examining traffic violations__ Before comparing the violations being committed by each gender, we should examine the **violations** committed by all drivers to get a baseline understanding of the data. In this exercise, we'll count the **unique values** in the `violation` column, and then separately express those counts as **proportions**. 
``` ri['violation'].value_counts() # dot method # Count the unique values in 'violation' ri.violation.value_counts() # Counting unique values (2) print(ri.violation.value_counts().sum()) print(ri.shape) 48423/86536 # Speeding `55.95%` # Express the counts as proportions ri.violation.value_counts(normalize=True) ``` More than half of all violations are for **speeding**, followed by other moving violations and equipment violations. ## __09 Comparing violations by gender__ The question we're trying to answer is whether male and female drivers tend to commit different types of traffic violations. In this exercise, we'll first create a DataFrame for each gender, and then analyze the violations in each DataFrame separately. ``` # Create a DataFrame of male drivers male = ri[ri.driver_gender == 'M'] # Create a DataFrame of female drivers female = ri[ri.driver_gender == 'F'] # Compute the violations by male drivers (as proportions) print(male.violation.value_counts(normalize=True)) # Compute the violations by female drivers (as proportions) print(female.violation.value_counts(normalize=True)) ``` ## __10 Filtering by multiple conditions__ Which one of these commands would filter the `ri` DataFrame to only include female drivers **who were stopped for a speeding violation**? ``` female_and_speeding = ri[(ri.driver_gender == 'F') & (ri.violation == 'Speeding')] female_and_speeding ``` ## __11 Comparing speeding outcomes by gender__ When a driver is pulled over for `speeding`, **many people believe that gender has an impact on whether the driver will receive a ticket or a warning**. Can you find evidence of this in the dataset? First, you'll create two DataFrames of drivers who were stopped for speeding: one containing females and the other containing males. Then, for each gender, you'll use the `stop_outcome` column to calculate what percentage of stops resulted in a "Citation" (meaning a ticket) versus a "Warning". 
``` # Create a DataFrame of female drivers stopped for speeding female_and_speeding = ri[(ri.driver_gender == 'F') & (ri.violation == 'Speeding')] # Compute the stop outcomes for female drivers (as proportions) print(female_and_speeding.stop_outcome.value_counts(normalize=True)) # Create a DataFrame of male drivers stopped for speeding male_and_speeding = ri[(ri.driver_gender == 'M') & (ri.violation == 'Speeding')] # Compute the stop outcomes for male drivers (as proportions) print(male_and_speeding.stop_outcome.value_counts(normalize=True)) ``` $\color{red}{\textbf{Interpretation:}}$ >The numbers are similar for **males** and **females**: about **95%** of stops for speeding result in a ticket. Thus, __the data fails to show that gender has an impact on who gets a ticket for speeding__. ``` # Filtering by multiple conditions (1) female = ri[ri.driver_gender == 'F'] female.shape # Filtering by multiple conditions (2) # Only includes female drivers who were arrested female_and_arrested = ri[(ri.driver_gender == 'F') &(ri.is_arrested == True)] female_and_arrested.shape # Filtering by multiple conditions (3) female_or_arrested = ri[(ri.driver_gender == 'F') | (ri.is_arrested == True)] female_or_arrested.shape ``` - Includes all females - Includes all drivers who were arrested ## __12 Comparing stop outcomes for two groups__ ``` # driver race --> White white = ri[ri.driver_race == 'White'] white.stop_outcome.value_counts(normalize=True) # driver race --> Black black = ri[ri.driver_race =='Black'] black.stop_outcome.value_counts(normalize=True) # driver race --> Asian asian = ri[ri.driver_race =='Asian'] asian.stop_outcome.value_counts(normalize=True) ``` ## __13 Does gender affect whose vehicle is searched?__ **Mean** of **Boolean Series** represents percentage of True values ``` ri.isnull().sum() # Taking the mean of a Boolean Series print(ri.is_arrested.value_counts(normalize=True)) print(ri.is_arrested.mean()) print(ri.is_arrested.dtype) ``` __Comparing groups using 
groupby (1)__ ``` # Study the arrest rate by police district print(ri.district.unique()) # Mean print(ri[ri.district == 'Zone K1'].is_arrested.mean()) ``` __Comparing groups using groupby (2)__ ``` ri[ri.district == 'Zone K2'].is_arrested.mean() ri.groupby('district').is_arrested.mean() ``` __Grouping by multiple categories__ ``` ri.groupby(['district', 'driver_gender']).is_arrested.mean() ri.groupby(['driver_gender', 'district']).is_arrested.mean() ``` ## __14 Calculating the search rate__ During a traffic stop, the police officer sometimes conducts a search of the vehicle. In this exercise, you'll calculate the percentage of all stops in the ri DataFrame that result in a vehicle search, also known as the search rate. ``` # Check the data type of 'search_conducted' print(ri.search_conducted.dtype) # Calculate the search rate by counting the values print(ri.search_conducted.value_counts(normalize=True)) # Calculate the search rate by taking the mean print(ri.search_conducted.mean()) ``` $\color{red}{\textbf{Interpretation:}}$ >It looks like the search rate is about __3.8%__. ### __Comparing search rates by gender__ Remember that the vehicle **search rate **across all stops is about __3.8%.__ First, we'll filter the DataFrame by gender and calculate the **search rate** for each group separately. Then, you'll perform the same calculation for both genders at once using a `.groupby()`. __Instructions:__ - Filter the DataFrame to only include female drivers, and then calculate the search rate by taking the mean of search_conducted. - Filter the DataFrame to only include male drivers, and then repeat the search rate calculation. - Group by driver gender to calculate the search rate for both groups simultaneously. (It should match the previous results.) 
``` # Calculate the search rate for female drivers print(ri[ri.driver_gender == 'F'].search_conducted.mean()) # Calculate the search rate for male drivers print(ri[ri.driver_gender == 'M'].search_conducted.mean()) # Calculate the search rate for both groups simultaneously print(ri.groupby('driver_gender').search_conducted.mean()) ``` $\color{red}{\textbf{Interpretation:}}$ >Male drivers are searched more than twice as often as female drivers. Why might this be? ## __15 Adding a second factor to the analysis__ Even though the **search rate** for **males is much higher than for females**, *it's possible that the difference is mostly due to a second factor.* >For example, we might **hypothesize** that **the search rate varies by violation type**, and the difference in search rate between males and females is because they tend to commit different violations. we can test this hypothesis by examining the **search rate** for **each combination of gender and violation**. If the hypothesis was true, you would find that males and females are searched at about the same rate for each violation. Find out below if that's the case! __Instructions__ - Use a `.groupby()` to calculate the search rate for each combination of gender and violation. Are males and females searched at about the same rate for each violation? - Reverse the ordering to group by violation before gender. The results may be easier to compare when presented this way. 
``` # Calculate the search rate for each combination of gender and violation ri.groupby(['driver_gender', 'violation']).search_conducted.mean() # Reverse the ordering to group by violation before gender ri.groupby(['violation', 'driver_gender']).search_conducted.mean() ``` $\color{red}{\textbf{Interpretation:}}$ >For all types of violations, the search rate is higher for males than for females, disproving our hypothesis ## __16 Does gender affect who is frisked during a search?__ ``` ri.search_conducted.value_counts() ``` `.value_counts()` __excludes missing values by default__ ``` ri.search_type.value_counts(dropna=False) ``` - `dropna=False` **displays missing values** **Examining the search types** ``` ri.search_type.value_counts() ``` - Multiple values are separated by commas. - 219 searches in which **"Inventory"** was the only search type. - Locate **"Inventory"** among multiple search types. __Searching for a string (1)__ ``` ri['inventory'] = ri.search_type.str.contains('Inventory', na=False) ``` - `str.contains()` returns - True if string is found - False if not found. - `na=False` returns `False` when it ,finds a missing value __Searching for a string (2)__ ``` ri.inventory.dtype ``` **True** means inventory was done, **False** means it was not ``` ri.inventory.sum() ``` __Calculating the inventory rate__ ``` ri.inventory.mean() ``` **0.5%** of all traffic stops resulted in an inventory. ``` searched = ri[ri.search_conducted == True] searched.inventory.mean() ``` __13.3% of searches included an inventory__ ## __17 Counting protective frisks__ During a vehicle search, the police officer may pat down the driver to check if they have a weapon. This is known as a "protective frisk." In this exercise, you'll first check to see how many times "Protective Frisk" was the only search type. Then, you'll use a string method to locate all instances in which the driver was frisked. 
__Instructions__ - Count the `search_type` values in the `ri` DataFrame to see how many times "Protective Frisk" was the only search type. - Create a new column, `frisk`, that is `True` if search_type contains the string "Protective Frisk" and `False` otherwise. - Check the data type of `frisk` to confirm that it's a Boolean Series. - Take the sum of `frisk` to count the total number of frisks. ``` # Count the 'search_type' values print(ri.search_type.value_counts()) # Check if 'search_type' contains the string 'Protective Frisk' ri['frisk'] = ri.search_type.str.contains('Protective Frisk', na=False) # Check the data type of 'frisk' print(ri['frisk'].dtype) # Take the sum of 'frisk' print(ri['frisk'].sum()) ``` $\color{red}{\textbf{Interpretation:}}$ >It looks like there were **303 drivers** who were **frisked**. Next, you'll examine whether gender affects who is frisked. ## __18 Comparing frisk rates by gender__ In this exercise, we'll compare the rates at which **female** and **male** drivers are **frisked during a search**. >Are males frisked more often than females, perhaps because police officers consider them to be higher risk? Before doing any calculations, it's important to filter the DataFrame to only include the relevant subset of data, namely stops in which a search was conducted. __Instructions__ - Create a DataFrame, **searched**, that only contains rows in which `search_conducted` is `True`. - Take the mean of the `frisk` column to find out what percentage of searches included a frisk. - Calculate the frisk rate for each gender using a `.groupby()`. 
``` # Create a DataFrame of stops in which a search was conducted searched = ri[ri.search_conducted == True] # Calculate the overall frisk rate by taking the mean of 'frisk' print(searched.frisk.mean()) # Calculate the frisk rate for each gender print(searched.groupby('driver_gender').frisk.mean()) ``` $\color{red}{\textbf{Interpretation:}}$ >The **frisk rate** is **higher for males than for females**, though we **can't** conclude that this difference is caused by the driver's gender. --- <strong> <h1 align='center'>Does time of day affect arrest rate?(Part - 3) </h1> </strong> --- ## __19 Calculating the hourly arrest rate__ When a police officer stops a driver, a small percentage of those stops ends in an arrest. This is known as the arrest rate. In this exercise, you'll find out whether the arrest rate varies by time of day. First, you'll calculate the arrest rate across all stops in the **ri** DataFrame. Then, you'll calculate the hourly arrest rate by using the **hour** attribute of the index. The **hour** ranges from 0 to 23, in which: - *0 = midnight* - *12 = noon* - *23 = 11 PM* __Instructions__ - Take the mean of the `is_arrested` column to calculate the overall arrest rate. - Group by the `hour` attribute of the DataFrame index to calculate the hourly arrest rate. - Save the hourly arrest rate Series as a new object, `hourly_arrest_rate`. ``` # Calculate the overall arrest rate print(ri.is_arrested.mean()) # Calculate the hourly arrest rate print(ri.groupby(ri.index.hour).is_arrested.mean()) # Save the hourly arrest rate hourly_arrest_rate = ri.groupby(ri.index.hour).is_arrested.mean() ``` ## __20 Plotting the hourly arrest rate__ In this exercise, we'll create a line plot from the `hourly_arrest_rate` object. A line plot is appropriate in this case because you're showing how a quantity changes over time. This plot should help you to spot some trends that may not have been obvious when examining the raw numbers! 
``` # Import matplotlib.pyplot as plt import matplotlib.pyplot as plt # Create a line plot of 'hourly_arrest_rate' hourly_arrest_rate.plot() # Add the xlabel, ylabel, and title plt.xlabel('Hour') plt.ylabel('Arrest Rate') plt.title('Arrest Rate by Time of Day') # Display the plot plt.show() ``` ## __21 Plotting drug-related stops__ __Are drug-related stops on the rise?__ In a small portion of traffic stops, drugs are found in the vehicle during a search. In this exercise, you'll assess whether these drug-related stops are becoming more common over time. The Boolean column `drugs_related_stop` indicates whether drugs were found during a given stop. You'll calculate the annual drug rate by resampling this column, and then you'll use a line plot to visualize how the rate has changed over time. __Instructions__ - Calculate the annual rate of drug-related stops by resampling the `drugs_related_stop` column (on the `'A'` frequency) and taking the mean. - Save the annual drug rate Series as a new object, `annual_drug_rate`. - Create a line plot of `annual_drug_rate` using the `.plot()` method. - Display the plot using the `.show()` function. ``` # Calculate the annual rate of drug-related stops # resampling `drugs_related_stop` represented by 'A' for Annual rate # & chain with mean at end print(ri.drugs_related_stop.resample('A').mean()) # Save the annual rate of drug-related stops annual_drug_rate = ri.drugs_related_stop.resample('A').mean() # Create a line plot of 'annual_drug_rate' annual_drug_rate.plot() # Display the plot plt.xlabel('year') plt.ylabel('Annual drug rate') plt.title('drugs_related_stop') plt.show() ``` **Resampling** Resampling is when we change the frequency of our __time-series__ observations. 
The most commonly used time series frequencies are – - **W** : weekly frequency - **M** : month end frequency - **SM** : semi-month end frequency (15th and end of month) - **Q** : quarter end frequency - **A** : Annual A time series is a series of data points indexed (or listed or graphed) in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. `.resample()` is a convenience method for **frequency conversion** and resampling of **time series**. The object must have a datetime-like index (**DatetimeIndex**, **PeriodIndex**, or **TimedeltaIndex**), or pass datetime-like values to the `on` or `level` keyword. ## __22 Comparing drug and search rates__ >From the above plot it's evident that the rate of `drug-related` stops increased significantly between **2005** and **2015**. We might **hypothesize** that the **rate of vehicle searches** was also **increasing**, which would have led to an **increase** in **drug-related stops** even if more drivers were not carrying drugs. You can test this **hypothesis** by calculating the **annual search rate**, and then plotting it against the **annual drug rate**. 
*If the hypothesis is true, then you'll see both rates increasing over time.* ``` # Calculate and save the annual search rate annual_search_rate = ri.search_conducted.resample('A').mean() # Concatenate 'annual_drug_rate' and 'annual_search_rate' annual = pd.concat([annual_drug_rate, annual_search_rate], axis='columns') # Create subplots from 'annual' annual.plot(subplots=True) # Display the subplots plt.show() ``` $\color{red}{\textbf{Interpretation:}}$ >The rate of **drug-related** stops **increased** even though the **search rate decreased**, disproving our hypothesis ## __23 What violations are caught in each district?__ ``` # Computing a frequency table table = pd.crosstab(ri.driver_race, ri.driver_gender) table # Driver Asian and Gender is Female ri[(ri.driver_race == 'Asian') & (ri.driver_gender == 'F')].shape # Selecting a DataFrame slice table1 = table.loc['Asian':'Hispanic'] table1 # Line Plot table1.plot() plt.show() ``` **line plot** is not appropriate in this case because it implies a change in time along the **x-axis**, whereas the **x-axis** actually represents **three** distinct categories. ``` # Creating a bar plot table.plot(kind='bar') plt.show() # Creating a bar plot table1.plot(kind='bar') plt.show() # Stacking the bars for table1 table1.plot(kind='bar', stacked=True) plt.show() ``` __Tallying violations by district__ The state of Rhode Island is broken into **six police districts**, also known as zones. How do the zones compare in terms of what violations are caught by police? In this exercise, we'll create a **frequency table** to determine **how many violations of each type took place in each of the six zones**. Then, you'll filter the table to focus on the `"K" zones`, which you'll examine further in the next exercise. __Instructions:__ - Create a frequency table from the `ri` DataFrame's `district` and `violation` columns using the `pd.crosstab()` function. - Save the frequency table as a new object, `all_zones`. 
- Select rows `'Zone K1'` through `'Zone K3'` from `all_zones` using the `.loc[]` accessor. Save the smaller table as a new object, `k_zones`. ``` # Create a frequency table of districts and violations print(pd.crosstab(ri.district, ri.violation)) # Save the frequency table as 'all_zones' all_zones = pd.crosstab(ri.district, ri.violation) # Select rows 'Zone K1' through 'Zone K3' print(all_zones.loc['Zone K1':'Zone K3']) # Save the smaller table as 'k_zones' k_zones = all_zones.loc['Zone K1':'Zone K3'] ``` ## __24 Plotting violations by district__ we've created a frequency table focused on the `"K"` zones, **visualize** the data to help you compare what **violations** are being caught in each zone. >**First** we'll create a **bar plot**, which is an appropriate plot type since we're **comparing categorical data**. >Then we'll create a **stacked bar** plot in order to get a slightly different look at the data to find which plot is more insightful? ``` # Creating a bar plot k_zones.plot(kind='bar', figsize=(12, 7)) # Display the plot plt.show() # Create a stacked bar plot of 'k_zones' k_zones.plot(kind='bar', stacked=True, figsize=(12, 7)) # Display the plot plt.show() ``` The vast majority of traffic stops in **Zone K1** are for speeding, and **Zones K2** and **K3** are remarkably similar to one another in terms of violations. ## __25 Converting stop durations to numbers__ In the traffic stops dataset, the `stop_duration` column tells you approximately how long the driver was detained by the officer. Unfortunately, the durations are stored as strings, such as `'0-15 Min'`. How can you make this data easier to analyze? In this exercise, you'll convert the stop durations to integers. 
Because the precise durations are not available, you'll have to estimate the numbers using reasonable values: - Convert `'0-15 Min'` to `8` - Convert `'16-30 Min'` to `23` - Convert `'30+ Min'` to `45` $\color{red}{\textbf{Note:}}$ >`astype()` method to convert strings to numbers or Booleans. However, `astype()` only works when pandas can infer how the conversion should be done, and that's not the case here. ``` # Print the unique values in 'stop_duration' print(ri.stop_duration.unique()) # Create a dictionary that maps strings to integers mapping = {'0-15 Min':8, '16-30 Min':23, '30+ Min':45 } # Convert the 'stop_duration' strings to integers using the 'mapping' ri['stop_minutes'] = ri.stop_duration.map(mapping) # Print the unique values in 'stop_minutes' print(ri.stop_minutes.unique()) ``` ## __26 Plotting stop length__ >If we were **stopped** for a particular violation, how long might we expect to be detained? In this exercise, we'll visualize the average length of time drivers are stopped for each type of violation. Rather than using the `violation` column in this exercise, you'll use `violation_raw` since it contains more detailed descriptions of the violations. __Instructions__ - For each value in the ri DataFrame's **violation_raw** column, calculate the mean number of **stop_minutes** that a driver is detained. - Save the resulting Series as a new object, **stop_length**. - Sort **stop_length** by its values, and then visualize it using a horizontal bar plot. - Display the plot. 
``` # Calculate the mean 'stop_minutes' for each value in 'violation_raw' print(ri.groupby('violation_raw').stop_minutes.mean()) # Save the resulting Series as 'stop_length' stop_length = ri.groupby('violation_raw').stop_minutes.mean() # Sort 'stop_length' by its values and create a horizontal bar plot stop_length.sort_values().plot(kind='barh') # Display the plot plt.show() ``` __Calculating the search rate__ - **Visualizing** how often searches were done after each **violation** type ``` search_rate = ri.groupby('violation').search_conducted.mean() search_rate.sort_values().plot(kind='barh') plt.show() ``` --- <strong> <h1 align='center'>Analyzing the effect of weather on policing(Part - 4) </h1> </strong> --- ``` ls ``` ## $\color{green}{\textbf{Dataset_2_weather.csv:}}$ [National Centers for Environmental Information](https://www.ncei.noaa.gov/) <p align=''center> <a href='#'><img src='https://www.climatecommunication.org/wp-content/uploads/2013/04/Screen-Shot-2016-01-29-at-10.52.21-AM.png'></a> </p> ``` # Import the pandas library as pd import pandas as pd # Read 'weather.csv' into a DataFrame named 'weather' weather = pd.read_csv('weather.csv') # Examine the head of the DataFrame weather.head() ``` - __TAVG , TMIN , TMAX__ : Temperature - __AWND , WSF2 :__ Wind speed - __WT01 ... WT22 :__ Bad weather conditions __The difference between isnull () and isna ()?__ `.isnull()` and `isna()` are the same functions (an alias), so we can choose either one. `df.isnull().sum()` or `df.isna().sum()`. Finding which index (or row number) contains missing values can be done analogously to the previous example, simply by adding axis=1. 
``` # Count the number of missing values in each column print(weather.isnull().sum()) # Columns print(weather.columns) # Shape print(weather.shape) ``` ## __27 Plotting the temperature__ ``` # Describe the temperature columns print(weather[['TMIN', 'TAVG', 'TMAX']].describe()) # Create a box plot of the temperature columns weather[['TMIN', 'TAVG', 'TMAX']].plot(kind='box') # Display the plot plt.show() ``` __Examining the wind speed__ ``` print(weather[['AWND', 'WSF2']].head()) print(weather[['AWND', 'WSF2']].describe()) # Creating a box plot weather[['AWND', 'WSF2']].plot(kind='box', figsize=(7, 7)) plt.figure() plt.show() # Creating a histogram (1) weather['WDIFF'] = weather.WSF2 - weather.AWND weather.WDIFF.plot(kind='hist') plt.show() # Creating a histogram (2) weather.WDIFF.plot(kind='hist', bins=20) plt.show() ``` ## __28 Plotting the temperature difference__ ``` # Create a 'TDIFF' column that represents temperature difference weather['TDIFF'] = weather.TMAX - weather.TMIN # Describe the 'TDIFF' column print(weather.TDIFF.describe()) # Create a histogram with 20 bins to visualize 'TDIFF' weather.TDIFF.plot(kind='hist', bins=20) # Display the plot plt.show() ``` ## __29 Categorizing the weather__ ``` # Printing the shape and columns print(weather.shape) print(weather.columns) # Selecting a DataFrame slice temp = weather.loc[:, 'TAVG':'TMAX'] print(temp.shape) print(temp.columns) ``` __Mapping one set of values to another__ ``` print(ri.stop_duration.unique()) mapping = {'0-15 Min':'short', '16-30 Min':'medium', '30+ Min':'long' } ri['stop_length'] = ri.stop_duration.map(mapping) print(ri.stop_length.dtype) print(ri.stop_length.unique()) ``` - Category type stores the data more efficiently. - Allows you to specify a logical order for the categories. ``` ri.stop_length.memory_usage(deep=True) ``` `pandas.DataFrame.memory_usage` __function__ Return the **memory usage** of each column in bytes. 
- The **memory usage** can optionally include the contribution of the **index** and **elements** of object dtype. - `deep` bool, __default False__ If **True**, introspect the data deeply by interrogating object dtypes for system-level memory consumption, and include it in the returned values. __Changing data type from object to category__ ``` from pandas.api.types import CategoricalDtype # Changing data type from object to category cats = ['short', 'medium', 'long'] cat_type = CategoricalDtype(categories=cats, ordered=True) ri['stop_length'] = ri['stop_length'].astype(cat_type) print(ri.stop_length.memory_usage(deep=True)) # Using ordered categories print(ri.stop_length.value_counts()) ri[ri.stop_length > 'short'].shape ri.groupby('stop_length').is_arrested.mean() ``` ## __30 Counting bad weather conditions__ The `weather` DataFrame contains 20 columns that start with `'WT'`, each of which represents a bad weather condition. For example: - `WT05` indicates "Hail" - `WT11` indicates "High or damaging winds" - `WT17` indicates "Freezing rain" For every row in the dataset, each `WT` column contains either a 1 (meaning the condition was present that day) or `NaN` (meaning the condition was not present). In this exercise, you'll quantify "how bad" the weather was each day by counting the number of `1` values in each row. ``` # Copy 'WT01' through 'WT22' to a new DataFrame WT = weather.loc[:, 'WT01':'WT22'] # Calculate the sum of each row in 'WT' weather['bad_conditions'] = WT.sum(axis='columns') # Replace missing values in 'bad_conditions' with '0' weather['bad_conditions'] = weather.bad_conditions.fillna(0).astype('int') # Create a histogram to visualize 'bad_conditions' weather.bad_conditions.plot(kind='hist') # Display the plot plt.show() ``` $\color{red}{\textbf{Interpretation:}}$ >It looks like many days didn't have any bad weather conditions, and only a small portion of days had more than four bad weather conditions. 
## __31 Rating the weather conditions__ In the previous exercise, we have counted the number of bad weather conditions each day. In this exercise, we'll use the counts to create a rating system for the weather. The counts range from 0 to 9, and should be converted to ratings as follows: - Convert 0 to `'good'` - Convert 1 through 4 to `'bad'` - Convert 5 through 9 to `'worse'` ``` # Count the unique values in 'bad_conditions' and sort the index print(weather.bad_conditions.value_counts().sort_index()) # Create a dictionary that maps integers to strings mapping = {0:'good', 1:'bad', 2:'bad', 3:'bad', 4:'bad', 5:'worse', 6:'worse', 7:'worse', 8:'worse', 9:'worse'} # Convert the 'bad_conditions' integers to strings using the 'mapping' weather['rating'] = weather.bad_conditions.map(mapping) # Count the unique values in 'rating' print(weather.rating.value_counts()) ``` This rating system should make the weather condition data easier to understand. ## __32 Changing the data type to category__ Since the `rating` column only has a few possible values, we have to change its **data type to category** in order to store the data more efficiently & then specify a logical order for the categories. ``` from pandas.api.types import CategoricalDtype # Create a list of weather ratings in logical order cats = ['good', 'bad', 'worse'] # Change the data type of 'rating' to category cat_types= CategoricalDtype(categories=cats, ordered=True) weather['rating'] = weather['rating'].astype(cat_types) # Examine the head of 'rating' print(weather.rating.head()) print(weather.rating.memory_usage(deep=True)) # Using ordered categories print(weather.rating.value_counts()) weather.rating.value_counts().plot(kind='bar') plt.show() ``` ## __33 Merging datasets(Preparing the DataFrames)__ In this exercise, we'll prepare the traffic stop and weather rating DataFrames so that they're ready to be merged: 1. 
With the `ri` DataFrame, you'll move the `stop_datetime` index to a column since the index will be lost during the merge. 2. With the `weather` DataFrame, you'll select the `DATE` and `rating` columns and put them in a new DataFrame. __Instructions__ - Reset the index of the ri DataFrame. - Examine the head of ri to verify that `stop_datetime` is now a DataFrame column, and the index is now the default integer index. - Create a new DataFrame named `weather_rating` that contains only the `DATE` and rating columns from the weather DataFrame. - Examine the head of `weather_rating` to verify that it contains the proper columns. ``` # Reset the index of 'ri' ri.reset_index(inplace=True) # Examine the head of 'ri' print(ri.head()) # Create a DataFrame from the 'DATE' and 'rating' columns weather_rating = weather[['DATE','rating']] # Examine the head of 'weather_rating' print(weather_rating.head()) ``` The **ri** and **weather_rating** DataFrames are now ready to be **merged**. __Merging the DataFrames__ In this exercise, we'll merge the `ri` and `weather_rating` DataFrames into a new DataFrame, ri_weather. The DataFrames will be joined using the `stop_date` column from `ri` and the `DATE` column from `weather_rating`. Thankfully the date formatting matches exactly, which is not always the case! Once the merge is complete, you'll set `stop_datetime` as the index, which is the column you saved in the previous exercise. 
``` # Examine the shape of 'ri' print(ri.shape) # Merge 'ri' and 'weather_rating' using a left join ri_weather = pd.merge(left=ri, right=weather_rating, left_on='stop_date', right_on='DATE', how='left') # Examine the shape of 'ri_weather' print(ri_weather.shape) # Set 'stop_datetime' as the index of 'ri_weather' ri_weather.set_index('stop_datetime', inplace=True) ``` ## __34 Does weather affect the arrest rate?__ ``` # Driver gender and vehicle searches print(ri.search_conducted.mean()) print(ri.groupby('driver_gender').search_conducted.mean()) search_rate = ri.groupby(['violation','driver_gender']).search_conducted.mean() search_rate print(type(search_rate)) print(type(search_rate.index)) search_rate.loc['Equipment'] search_rate.loc['Equipment', 'M'] search_rate.unstack() type(search_rate.unstack()) ``` __Converting a multi-indexed Series to a DataFrame__ ``` ri.pivot_table(index='violation', columns='driver_gender', values='search_conducted') ``` ### __Comparing arrest rates by weather rating__ Do police officers arrest drivers more often when the weather is bad? Find out below! - First, you'll calculate the overall arrest rate. - Then, you'll calculate the arrest rate for each of the weather ratings you previously assigned. - Finally, you'll add violation type as a second factor in the analysis, to see if that accounts for any differences in the arrest rate. Since you previously defined a logical order for the weather categories, `good < bad < worse`, they will be sorted that way in the results. __Calculate the overall arrest rate by taking the mean of the `is_arrested` Series__ ``` # Calculate the overall arrest rate print(ri_weather.is_arrested.mean()) ``` __Calculate the arrest rate for each weather `rating` using a `.groupby()`.__ ``` # Calculate the arrest rate for each 'rating' print(ri_weather.groupby('rating').is_arrested.mean()) ``` __Calculate the arrest rate for each combination of `violation` and `rating`. 
How do the arrest rates differ by group?__ ``` # Calculate the arrest rate for each 'violation' and 'rating' print(ri_weather.groupby(['violation', 'rating']).is_arrested.mean()) ``` $\color{red}{\textbf{Interpretation:}}$ >The arrest rate **increases as the weather gets worse**, and that trend persists across many of the violation types. This doesn't prove a causal link, but it's quite an interesting result. ## __35 Selecting from a multi-indexed Series__ The output of a single `.groupby()` operation on multiple columns is a Series with a MultiIndex. Working with this type of object is similar to working with a DataFrame: - The outer index level is like the DataFrame rows. - The inner index level is like the DataFrame columns. In this exercise, you'll practice accessing data from a multi-indexed Series using the `.loc[]` accessor. ``` # Save the output of the groupby operation from the last exercise arrest_rate = ri_weather.groupby(['violation', 'rating']).is_arrested.mean() # Print the 'arrest_rate' Series print(arrest_rate) # Print the arrest rate for moving violations in bad weather print(arrest_rate.loc['Moving violation', 'bad']) # Print the arrest rates for speeding violations in all three weather conditions print(arrest_rate.loc['Speeding']) ``` ## __36 Reshaping the arrest rate data__ In this exercise, we'll unstack the `arrest_rate` Series into a DataFrame. This is a useful step when working with any **multi-indexed Series**, since it enables us to access the *full range of DataFrame methods*. Then, we'll create the exact same DataFrame using a pivot table. This is a great example of how pandas often gives you more than one way to reach the same result! 
``` # Unstack the 'arrest_rate' Series into a DataFrame print(arrest_rate.unstack()) # Create the same DataFrame using a pivot table print(ri_weather.pivot_table(index='violation', columns='rating', values='is_arrested')) ``` --- <p align='center'> <a href="https://twitter.com/F4izy"> <img src="https://th.bing.com/th/id/OIP.FCKMemzqNplY37Jwi0Yk3AHaGl?w=233&h=207&c=7&o=5&pid=1.7" width=50px height=50px> </a> <a href="https://www.linkedin.com/in/mohd-faizy/"> <img src='https://th.bing.com/th/id/OIP.idrBN-LfvMIZl370Vb65SgHaHa?pid=Api&rs=1' width=50px height=50px> </a> </p> ---
github_jupyter
# Workshop: Deep Learning 3 Outline 1. Regularization 2. Hand-Written Digits with Convolutional Neural Networks 3. Advanced Image Classification with Convolutional Neural Networks Source: Deep Learning With Python, Part 1 - Chapter 4 ## 1. Regularization To prevent a model from learning misleading or irrelevant patterns found in the training data, the best solution is to get more training data. However, this is in many times out of our control. Another approach is called - by now you should know that - regularization. ### 1.1. Reducing the network’s size ``` The simplest way to prevent overfitting is to reduce the size of the model: the number of learnable parameters in the model (which is determined by the number of layers and the number of units per layer). Or put it this way: A network with more parameters can better memorize stuff... # Unfortunately, there is no closed form solution which gives us the best network size... # So, we need to try out different models (or use grid search) # Original Model from keras import models from keras import layers model = models.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(28 * 28,))) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dense(10, activation='softmax')) # Simpler Model from keras import models from keras import layers model = models.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(10000,))) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) # Bigger Model model = models.Sequential() model.add(layers.Dense(512, activation='relu', input_shape=(10000,))) model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) #### You need to load data, compile the network and then train it (with validation/hold out set) #### Then you plot the validation loss for all these combinations ``` <img src="res/img1.png"></img> <img src="res/img2.png"></img> ``` # This shows us that the bigger 
model starts to overfit immediately.. ``` Instead of manually searching for the best model architecture (i.e., hyperparameters) you can use a method called grid-search. However, we will not cover this in this lecture - but you can find a tutorial here: https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/ Basically, the author combines keras with scikit-learn's grid search module. ### 1.2. Adding weight regularization ``` 1. L1 regularization 2. L2 regularization ``` #### 1.2.1 Adding L2 Regularization to the model ``` from keras import regularizers model = models.Sequential() # kernel_regularizer = regularizers.l2(0.001), add those weights to the loss with an alpha of 0.001 # you could use also: regularizers.l1(0.001) for L1 regularization # Documentation: https://keras.io/api/layers/regularizers/ model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),activation='relu', input_shape=(10000,))) model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001), activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) ``` <img src="res/img3.png"></img> ### 1.2.2 Adding Dropout Idea: Randomly drop out a number of (activation) nodes during training. **Assume**: [0.2, 0.5, 1.3, 0.8, 1.1] is the output of a layer (after activation function). Dropout randomly sets some of these activations to 0. For example: [0, 0.5, 1.3, 0, 1.1]. The *dropout rate* is the fraction of features that are zeroed out (usually between 0.2 and 0.5). ``` # Example Code model = models.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(10000,))) # Pass dropout rate!!! model.add(layers.Dropout(0.5)) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(1, activation='sigmoid')) # Compile.. # Fit.. # Evaluate... 
# Doc: https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout
```

<img src="res/img4.png"></img>

### To recap, these are the most common ways to prevent overfitting in neural networks:

1. Get more training data.
2. Reduce the capacity of the network.
3. Add weight regularization.
4. Add dropout.
5. Data Augmentation (for image classification tasks)

## 2 Gradient Descent

```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NOTE: the public loader is 'fetch_california_housing'; importing the
# 'california_housing' submodule directly is deprecated in scikit-learn.
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_squared_error

housing_data = fetch_california_housing()
features = pd.DataFrame(housing_data.data, columns=housing_data.feature_names)
target = pd.DataFrame(housing_data.target, columns=['Target'])
df = features.join(target)

X = df.MedInc
Y = df.Target

def gradient_descent(X, y, lr=0.05, iterations=10):
    '''Batch gradient descent for simple linear regression (one feature).

    Parameters
    ----------
    X : pd.Series
        Feature values.
    y : pd.Series
        Target values.
    lr : float
        Learning rate (step size).
    iterations : int
        Number of full-batch update steps.

    Returns
    -------
    (m, b, log, mse) : fitted slope and intercept, the (m, b) trajectory,
        and the MSE recorded after each step.
    '''
    m, b = 0.2, 0.2     # initial parameters
    log, mse = [], []   # lists to store learning process
    N = len(X)          # number of samples

    # MSE     = 1/N * SUM((y_i - (m*x_i + b))^2)
    # dMSE/dm = 1/N * SUM(-2 * x_i * (y_i - (m*x_i + b)))
    # dMSE/db = 1/N * SUM(-2 * (y_i - (m*x_i + b)))
    for _ in range(iterations):
        f = y - (m*X + b)   # residuals
        # Updating m and b (one gradient step)
        m -= lr * (-2 * X.dot(f).sum() / N)
        b -= lr * (-2 * f.sum() / N)
        log.append((m, b))
        mse.append(mean_squared_error(y, (m*X + b)))
    return m, b, log, mse

m, b, log, mse = gradient_descent(X, Y, lr=0.01, iterations=10)
(m, b)

# Analytical solution (compared to the gradient-descent estimate above)
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(features["MedInc"].to_numpy().reshape(-1, 1), Y)
(reg.coef_, reg.intercept_)
```

##### Stochastic Gradient Descent

```
def stochastic_gradient_descent(X, y, lr=0.05, iterations=10, batch_size=10):
    '''Mini-batch stochastic gradient descent for a single feature.

    Each iteration estimates the gradient from a random sample of
    `batch_size` points instead of the full data set, trading gradient
    accuracy for much cheaper updates.
    '''
    m, b = 0.5, 0.5     # initial parameters
    log, mse = [], []   # lists to store learning process
    for _ in range(iterations):
        indexes = np.random.randint(0, len(X), batch_size)  # random sample
        Xs = np.take(X, indexes)
        ys = np.take(y, indexes)
        N = len(Xs)
        f = ys - (m*Xs + b)   # residuals on the mini-batch
        # Updating parameters m and b
        m -= lr * (-2 * Xs.dot(f).sum() / N)
        b -= lr * (-2 * f.sum() / N)
        log.append((m, b))
        # Track MSE on the FULL data set (not just the batch) for a fair
        # learning curve.
        mse.append(mean_squared_error(y, m*X+b))
    return m, b, log, mse

m, b, log, mse = stochastic_gradient_descent(X, Y, lr=0.01, iterations=1000)
(m,b)
```

## 2.
Using CNNs to Classify Hand-written Digits on MNIST Dataset <img src="res/img5.png"></img> ``` from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Conv2D, MaxPool2D from keras.utils import np_utils # Load Data (X_train, y_train), (X_test, y_test) = mnist.load_data() # Shape of data print("X_train shape", X_train.shape) print("y_train shape", y_train.shape) print("X_test shape", X_test.shape) print("y_test shape", y_test.shape) # Flattening the images from the 28x28 pixels to 1D 784 pixels X_train = X_train.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_train = X_train.astype('float32') X_test = X_test.astype('float32') # normalizing the data to help with the training X_train /= 255 X_test /= 255 # To Categorical (One-Hot Encoding) n_classes = 10 print("Shape before one-hot encoding: ", y_train.shape) Y_train = np_utils.to_categorical(y_train, n_classes) Y_test = np_utils.to_categorical(y_test, n_classes) print("Shape after one-hot encoding: ", Y_train.shape) # Let's build again a very boring neural network model = Sequential() # hidden layer model.add(Dense(100, input_shape=(784,), activation='relu')) # output layer model.add(Dense(10, activation='softmax')) # looking at the model summary model.summary() # Compile model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam') # Traing (####-> Caution, this is dedicated for validation data - I was just lazy...) 
model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_data=(X_test, Y_test)) # new imports needed from keras.layers import Conv2D, MaxPool2D, Flatten # And now with a convolutional neural network # Doc: https://keras.io/api/layers/convolution_layers/ # Load again data (X_train, y_train), (X_test, y_test) = mnist.load_data() # DONT Vectorize - keep grid structure X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') # normalize X_train /= 255 X_test /= 255 # Sequential Model model = Sequential() # Convolutional layer # 2D convolutional data # filters: number of kernels # kernel size: (3, 3) pixel filter # stride: (move one to the right, one to the bottom when you reach the end of the row) # padding: "valid" => no padding => feature map is reduced model.add(Conv2D(filters=25, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu', input_shape=(28,28,1))) model.add(MaxPool2D(pool_size=(1,1))) # flatten output such that the "densly" connected network can be attached model.add(Flatten()) # hidden layer model.add(Dense(100, activation='relu')) # output layer model.add(Dense(10, activation='softmax')) # compiling the sequential model model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam') # training the model for 10 epochs model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_data=(X_test, Y_test)) # More on Classification with CNNs ``` ## 3. 
Advanced Image Classification with Deep Convolutional Neural Networks <img src="res/img6.png"> ``` # Imports from keras.datasets import cifar10 from keras.models import Sequential from keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten from keras.utils import np_utils # Load Data (X_train, y_train), (X_test, y_test) = cifar10.load_data() # # Keep Grid Structure with 32x32 pixels (times 3; due to color channels) X_train = X_train.reshape(X_train.shape[0], 32, 32, 3) X_test = X_test.reshape(X_test.shape[0], 32, 32, 3) X_train = X_train.astype('float32') X_test = X_test.astype('float32') # Normalize X_train /= 255 X_test /= 255 # One-Hot Encoding n_classes = 10 print("Shape before one-hot encoding: ", y_train.shape) Y_train = np_utils.to_categorical(y_train, n_classes) Y_test = np_utils.to_categorical(y_test, n_classes) print("Shape after one-hot encoding: ", Y_train.shape) # Create Model Object model = Sequential() # Add Conv. Layer model.add(Conv2D(50, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', input_shape=(32, 32, 3))) ## What happens here? # Stack 2. Conv. Layer model.add(Conv2D(75, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) # Stack 3. Conv. Layer model.add(Conv2D(125, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) # Flatten Output of Conv. Part such that we can add a densly connected network model.add(Flatten()) # Add Hidden Layer and Dropout Reg. model.add(Dense(500, activation='relu')) model.add(Dropout(0.4)) model.add(Dense(250, activation='relu')) model.add(Dropout(0.3)) # Output Layer model.add(Dense(10, activation='softmax')) # Compile model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam') # Train model.fit(X_train, Y_train, batch_size=128, epochs=2, validation_data=(X_test, Y_test)) ```
github_jupyter
<center> <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # Density-Based Clustering Estimated time needed: **25** minutes ## Objectives After completing this lab you will be able to: - Use DBSCAN to do Density based clustering - Use Matplotlib to plot clusters Most of the traditional clustering techniques, such as k-means, hierarchical and fuzzy clustering, can be used to group data without supervision. However, when applied to tasks with arbitrary shape clusters, or clusters within cluster, the traditional techniques might be unable to achieve good results. That is, elements in the same cluster might not share enough similarity or the performance may be poor. Additionally, Density-based Clustering locates regions of high density that are separated from one another by regions of low density. Density, in this context, is defined as the number of points within a specified radius. In this section, the main focus will be manipulating the data and properties of DBSCAN and observing the resulting clustering. Import the following libraries: <ul> <li> <b>numpy as np</b> </li> <li> <b>DBSCAN</b> from <b>sklearn.cluster</b> </li> <li> <b>make_blobs</b> from <b>sklearn.datasets.samples_generator</b> </li> <li> <b>StandardScaler</b> from <b>sklearn.preprocessing</b> </li> <li> <b>matplotlib.pyplot as plt</b> </li> </ul> <br> Remember <b> %matplotlib inline </b> to display plots ``` # Notice: For visualization of map, you need basemap package. 
# if you don't have basemap installed on your machine, you can use the following line to install it !conda install -c conda-forge basemap matplotlib==3.1 -y # Notice: you might have to refresh your page and re-run the notebook after installation import numpy as np from sklearn.cluster import DBSCAN from sklearn.datasets import make_blobs from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt %matplotlib inline ### Data generation The function below will generate the data points and requires these inputs: <ul> <li> <b>centroidLocation</b>: Coordinates of the centroids that will generate the random data. </li> <ul> <li> Example: input: [[4,3], [2,-1], [-1,4]] </li> </ul> <li> <b>numSamples</b>: The number of data points we want generated, split over the number of centroids (# of centroids defined in centroidLocation) </li> <ul> <li> Example: 1500 </li> </ul> <li> <b>clusterDeviation</b>: The standard deviation between the clusters. The larger the number, the further the spacing. </li> <ul> <li> Example: 0.5 </li> </ul> </ul> ``` def createDataPoints(centroidLocation, numSamples, clusterDeviation): '''Generate and standardize a blob dataset; returns (X, y).''' # Create random data and store in feature matrix X and response vector y. X, y = make_blobs(n_samples=numSamples, centers=centroidLocation, cluster_std=clusterDeviation) # Standardize features by removing the mean and scaling to unit variance X = StandardScaler().fit_transform(X) return X, y ``` Use <b>createDataPoints</b> with the <b>3 inputs</b> and store the output into variables <b>X</b> and <b>y</b>. ``` X, y = createDataPoints([[4,3], [2,-1], [-1,4]] , 1500, 0.5) ``` ### Modeling DBSCAN stands for Density-Based Spatial Clustering of Applications with Noise. This technique is one of the most common clustering algorithms which works based on density of object. The whole idea is that if a particular point belongs to a cluster, it should be near to lots of other points in that cluster. 
It works based on two parameters: Epsilon and Minimum Points **Epsilon** determines a specified radius that, if it includes enough points within it, we call a dense area **minimumSamples** determines the minimum number of data points we want in a neighborhood to define a cluster. ``` epsilon = 0.3 minimumSamples = 7 db = DBSCAN(eps=epsilon, min_samples=minimumSamples).fit(X) labels = db.labels_ labels ``` ### Distinguish outliers Let's replace all elements with 'True' in core_samples_mask that are in the cluster, 'False' if the points are outliers. # First, create an array of booleans using the labels from db. core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True core_samples_mask # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_clusters_ ``` # Remove repetition in labels by turning it into a set. unique_labels = set(labels) unique_labels ``` ### Data visualization # Create colors for the clusters. colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels))) # Plot the points with colors for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. 
col = 'k' class_member_mask = (labels == k) # Plot the datapoints that are clustered xy = X[class_member_mask & core_samples_mask] plt.scatter(xy[:, 0], xy[:, 1],s=50, c=[col], marker=u'o', alpha=0.5) # Plot the outliers xy = X[class_member_mask & ~core_samples_mask] plt.scatter(xy[:, 0], xy[:, 1],s=50, c=[col], marker=u'o', alpha=0.5) ``` from sklearn.cluster import KMeans k = 3 k_means3 = KMeans(init = "k-means++", n_clusters = k, n_init = 12) k_means3.fit(X) fig = plt.figure(figsize=(6, 4)) ax = fig.add_subplot(1, 1, 1) for k, col in zip(range(k), colors): my_members = (k_means3.labels_ == k) plt.scatter(X[my_members, 0], X[my_members, 1], c=col, marker=u'o', alpha=0.5) plt.show() <h1 align=center> Weather Station Clustering using DBSCAN & scikit-learn </h1> <hr> DBSCAN is specially very good for tasks like class identification on a spatial context. The wonderful attribute of DBSCAN algorithm is that it can find out any arbitrary shape cluster without getting affected by noise. For example, this following example cluster the location of weather stations in Canada. <Click 1> DBSCAN can be used here, for instance, to find the group of stations which show the same weather condition. As you can see, it not only finds different arbitrary shaped clusters, can find the denser part of data-centered samples by ignoring less-dense areas or noises. let's start playing with the data. We will be working according to the following workflow: </font> 1. 
Loading data - Overview data - Data cleaning - Data selection - Clusteing ### About the dataset <h4 align = "center"> Environment Canada Monthly Values for July - 2015 </h4> <html> <head> <style> table { font-family: arial, sans-serif; border-collapse: collapse; width: 100%; } td, th { border: 1px solid #dddddd; text-align: left; padding: 8px; } tr:nth-child(even) { background-color: #dddddd; } </style> </head> <body> <table> <tr> <th>Name in the table</th> <th>Meaning</th> </tr> <tr> <td><font color = "green"><strong>Stn_Name</font></td> <td><font color = "green"><strong>Station Name</font</td> </tr> <tr> <td><font color = "green"><strong>Lat</font></td> <td><font color = "green"><strong>Latitude (North+, degrees)</font></td> </tr> <tr> <td><font color = "green"><strong>Long</font></td> <td><font color = "green"><strong>Longitude (West - , degrees)</font></td> </tr> <tr> <td>Prov</td> <td>Province</td> </tr> <tr> <td>Tm</td> <td>Mean Temperature (°C)</td> </tr> <tr> <td>DwTm</td> <td>Days without Valid Mean Temperature</td> </tr> <tr> <td>D</td> <td>Mean Temperature difference from Normal (1981-2010) (°C)</td> </tr> <tr> <td><font color = "black">Tx</font></td> <td><font color = "black">Highest Monthly Maximum Temperature (°C)</font></td> </tr> <tr> <td>DwTx</td> <td>Days without Valid Maximum Temperature</td> </tr> <tr> <td><font color = "black">Tn</font></td> <td><font color = "black">Lowest Monthly Minimum Temperature (°C)</font></td> </tr> <tr> <td>DwTn</td> <td>Days without Valid Minimum Temperature</td> </tr> <tr> <td>S</td> <td>Snowfall (cm)</td> </tr> <tr> <td>DwS</td> <td>Days without Valid Snowfall</td> </tr> <tr> <td>S%N</td> <td>Percent of Normal (1981-2010) Snowfall</td> </tr> <tr> <td><font color = "green"><strong>P</font></td> <td><font color = "green"><strong>Total Precipitation (mm)</font></td> </tr> <tr> <td>DwP</td> <td>Days without Valid Precipitation</td> </tr> <tr> <td>P%N</td> <td>Percent of Normal (1981-2010) Precipitation</td> </tr> <tr> 
<td>S_G</td> <td>Snow on the ground at the end of the month (cm)</td> </tr> <tr> <td>Pd</td> <td>Number of days with Precipitation 1.0 mm or more</td> </tr> <tr> <td>BS</td> <td>Bright Sunshine (hours)</td> </tr> <tr> <td>DwBS</td> <td>Days without Valid Bright Sunshine</td> </tr> <tr> <td>BS%</td> <td>Percent of Normal (1981-2010) Bright Sunshine</td> </tr> <tr> <td>HDD</td> <td>Degree Days below 18 °C</td> </tr> <tr> <td>CDD</td> <td>Degree Days above 18 °C</td> </tr> <tr> <td>Stn_No</td> <td>Climate station identifier (first 3 digits indicate drainage basin, last 4 characters are for sorting alphabetically).</td> </tr> <tr> <td>NA</td> <td>Not Available</td> </tr> </table> </body> </html> ``` ### 1-Download data To download the data, we will use **`!wget`**. To download the data, we will use `!wget` to download it from IBM Object Storage. **Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) ``` !wget -O weather-stations20140101-20141231.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%204/data/weather-stations20140101-20141231.csv ``` ### 2- Load the dataset We will import the .csv then we creates the columns for year, month and day. ``` import csv import pandas as pd import numpy as np filename='weather-stations20140101-20141231.csv' #Read csv pdf = pd.read_csv(filename) pdf.head(5) ``` ### 3-Cleaning Lets remove rows that dont have any value in the **Tm** field. ``` pdf = pdf[pd.notnull(pdf["Tm"])] pdf = pdf.reset_index(drop=True) pdf.head(5) ``` ### 4-Visualization Visualization of stations on map using basemap package. The matplotlib basemap toolkit is a library for plotting 2D data on maps in Python. 
Basemap does not do any plotting on it’s own, but provides the facilities to transform coordinates to a map projections. Please notice that the size of each data points represents the average of maximum temperature for each station in a year. from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt from pylab import rcParams %matplotlib inline rcParams['figure.figsize'] = (14,10) llon=-140 ulon=-50 llat=40 ulat=65 pdf = pdf[(pdf['Long'] > llon) & (pdf['Long'] < ulon) & (pdf['Lat'] > llat) &(pdf['Lat'] < ulat)] my_map = Basemap(projection='merc', resolution = 'l', area_thresh = 1000.0, llcrnrlon=llon, llcrnrlat=llat, #min longitude (llcrnrlon) and latitude (llcrnrlat) urcrnrlon=ulon, urcrnrlat=ulat) #max longitude (urcrnrlon) and latitude (urcrnrlat) my_map.drawcoastlines() my_map.drawcountries() # my_map.drawmapboundary() my_map.fillcontinents(color = 'white', alpha = 0.3) my_map.shadedrelief() # To collect data based on stations xs,ys = my_map(np.asarray(pdf.Long), np.asarray(pdf.Lat)) pdf['xm']= xs.tolist() pdf['ym'] =ys.tolist() #Visualization1 for index,row in pdf.iterrows(): # x,y = my_map(row.Long, row.Lat) my_map.plot(row.xm, row.ym,markerfacecolor =([1,0,0]), marker='o', markersize= 5, alpha = 0.75) #plt.text(x,y,stn) plt.show() ```
github_jupyter
``` from pyspark import SparkContext, SparkConf from pyspark.sql import SQLContext, SparkSession from pyspark.sql.types import StructType, StructField, DoubleType, IntegerType, StringType from ibm_botocore.client import Config import ibm_boto3 import os from pyspark.sql.functions import unix_timestamp, to_date import pyspark.sql.functions as F import numpy as np import pandas as pd import json import pyspark.sql from pyspark.sql.functions import col, skewness, kurtosis from pyspark.sql.functions import isnan, when, count, col from pyspark.sql.functions import when from pyspark.sql.functions import UserDefinedFunction from pyspark.sql.types import StringType from datetime import date, timedelta, datetime import time sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]")) from pyspark.sql import SparkSession spark = SparkSession \ .builder \ .getOrCreate() from zipfile import ZipFile credentials = { 'IAM_SERVICE_ID': 'iam-ServiceId-3c1861d6-5e36-4bd6-9a2c-193573560cbf', 'IBM_API_KEY_ID': 'p5ub7Zlafd2TotZeptmWjdN0MbfseCEWl54M7UMVfKtO', 'ENDPOINT': 'https://s3-api.us-geo.objectstorage.service.networklayer.com', 'IBM_AUTH_ENDPOINT': 'https://iam.cloud.ibm.com/oidc/token', 'BUCKET': 'movielensdataanalyticsinsparkandp-donotdelete-pr-7pdjrbcote4rbl', 'FILE': ['ratings.dat.zip', 'movies.dat','users.dat'] } cos = ibm_boto3.client(service_name='s3', ibm_api_key_id=credentials['IBM_API_KEY_ID'], ibm_service_instance_id=credentials['IAM_SERVICE_ID'], ibm_auth_endpoint=credentials['IBM_AUTH_ENDPOINT'], config=Config(signature_version='oauth'), endpoint_url=credentials['ENDPOINT']) cos.download_file(Bucket=credentials['BUCKET'],Key=credentials['FILE'][0],Filename=credentials['FILE'][0]) cos.download_file(Bucket=credentials['BUCKET'],Key=credentials['FILE'][1],Filename=credentials['FILE'][1]) cos.download_file(Bucket=credentials['BUCKET'],Key=credentials['FILE'][2],Filename=credentials['FILE'][2]) with ZipFile(credentials['FILE'][0],'r') as zipObj: zipObj.extractall() 
import ibmos2spark credentials2 = { 'endpoint': 'https://s3-api.us-geo.objectstorage.service.networklayer.com', 'service_id': 'iam-ServiceId-3c1861d6-5e36-4bd6-9a2c-193573560cbf', 'iam_service_endpoint': 'https://iam.cloud.ibm.com/oidc/token', 'api_key': 'p5ub7Zlafd2TotZeptmWjdN0MbfseCEWl54M7UMVfKtO' } configuration_name = 'os_54ba3cbe01c144af917f4e0e63fb0d9c_configs' cos2 = ibmos2spark.CloudObjectStorage(sc, credentials2, configuration_name, 'bluemix_cos') spark.read.text("movies.dat").createOrReplaceTempView("movies_staging") spark.read.text("ratings.dat").createOrReplaceTempView("ratings_staging") spark.read.text("users.dat").createOrReplaceTempView("users_staging") ## Create a database to store the tables spark.sql("drop database if exists sparkdatalake cascade") spark.sql("create database sparkdatalake"); ## Make appropriate schemas for them ## movies movies_df = spark.sql(""" Select split(value,'::')[0] as movieid, split(value,'::')[1] as title, substring(split(value,'::')[1],length(split(value,'::')[1])-4,4) as year, split(value,'::')[2] as genre from movies_staging """).write.parquet(cos2.url('moviesdataframe.parquet', 'movielensdataanalyticsinsparkandp-donotdelete-pr-7pdjrbcote4rbl')) ## users spark.sql(""" Select split(value,'::')[0] as userid, split(value,'::')[1] as gender, split(value,'::')[2] as age, split(value,'::')[3] as occupation, split(value,'::')[4] as zipcode from users_staging """).write.parquet(cos2.url('usersdataframe.parquet', 'movielensdataanalyticsinsparkandp-donotdelete-pr-7pdjrbcote4rbl')) ## ratings spark.sql(""" Select split(value,'::')[0] as userid, split(value,'::')[1] as movieid, split(value,'::')[2] as rating, split(value,'::')[3] as timestamp from ratings_staging """).write.parquet(cos2.url('ratingsdataframe.parquet', 'movielensdataanalyticsinsparkandp-donotdelete-pr-7pdjrbcote4rbl')) dfreadback = spark.read.parquet(cos.url('moviesdataframe.parquet', 'movielensdataanalyticsinsparkandp-donotdelete-pr-7pdjrbcote4rbl')) 
dfreadback.take(4) ```
github_jupyter
``` from math import sqrt import pandas as pd import numpy as np import matplotlib.pyplot as plt # for the ML pipeline from sklearn.model_selection import train_test_split from sklearn.impute import SimpleImputer from sklearn.preprocessing import OrdinalEncoder from sklearn.ensemble import GradientBoostingRegressor from sklearn.feature_selection import SelectFromModel from sklearn.metrics import mean_squared_error, r2_score # from sklearn.pipeline import Pipeline # from sklearn.compose import ColumnTransformer from feature_engine.categorical_encoders import RareLabelCategoricalEncoder # to visualise al the columns in the dataframe pd.pandas.set_option('display.max_columns', None) # load dataset # remember to download the data set from Kaggle and save it into # the same folder from where you run this notebook data = pd.read_csv('houseprice.csv') print(data.shape) data.head() ``` ### Separate dataset into train and test Before beginning to engineer our features, it is important to separate our data intro training and testing set. This is to avoid over-fitting. This step involves randomness, therefore, we need to set the seed. 
``` # Let's separate into train and test set X_train, X_test, y_train, y_test = train_test_split( data.drop('SalePrice', axis=1), # predictors data.SalePrice, # target test_size=0.1, random_state=0) # for reproducibility X_train.shape, X_test.shape, y_train.shape, y_test.shape ``` ### Missing values ``` # make lists capturing the different variables types in our dataset: # ----------------------------------------- # one list to capture date variables # one list to capture categorical variables # one list to capture numerical variables vars_dates = ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt'] vars_cat = [var for var in X_train.columns if X_train[var].dtypes == 'O'] vars_num = [var for var in X_train.columns if X_train[var].dtypes != 'O' and var not in ['Id']] # check for missing values in our date variables X_train[vars_dates].isnull().mean().sort_values(ascending=False) # check for missing values in our numerical variables X_train[vars_num].isnull().mean().sort_values(ascending=False) # check for missing values in our categorical variables X_train[vars_cat].isnull().mean().sort_values(ascending=False) # removing missing data # -------------------- # imputation numerical variables imputer = SimpleImputer(strategy='constant', fill_value=-1) X_train['LotFrontage'] = imputer.fit_transform(X_train['LotFrontage'].to_frame()) X_test['LotFrontage'] = imputer.transform(X_test['LotFrontage'].to_frame()) imputer = SimpleImputer(strategy='most_frequent') X_train[vars_num] = imputer.fit_transform(X_train[vars_num]) X_test[vars_num] = imputer.transform(X_test[vars_num]) # imputation categorical variables imputer = SimpleImputer(strategy='constant', fill_value='missing') X_train[vars_cat] = imputer.fit_transform(X_train[vars_cat]) X_test[vars_cat] = imputer.transform(X_test[vars_cat]) ``` ### Temporal variables ``` # let's create new temporal features from our date variables def elapsed_years(df, var): # capture difference between year variable and year the house was sold df[var] 
= df['YrSold'] - df[var] return df for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']: X_train = elapsed_years(X_train, var) X_test = elapsed_years(X_test, var) # check that test set does not contain null values in the engineered variables [vr for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt'] if X_test[var].isnull().sum()>0] ``` ### Categorical variable encoding ``` [var for var in X_train.columns if X_train[var].isnull().sum()>0] [var for var in X_train.columns if X_test[var].isnull().sum()>0] # remove rare caregories rare_enc = RareLabelCategoricalEncoder(tol=0.01, n_categories=5, variables = vars_cat) rare_enc.fit(X_train) X_train = rare_enc.transform(X_train) X_test = rare_enc.transform(X_test) # encode with labels ordinal_enc = OrdinalEncoder() X_train[vars_cat] = ordinal_enc.fit_transform(X_train[vars_cat]) X_test[vars_cat] = ordinal_enc.transform(X_test[vars_cat]) [var for var in X_train.columns if X_test[var].isnull().sum()>0] X_train.head() ``` ## Gradient boosting regressor ``` tree_reg = GradientBoostingRegressor(random_state=0, n_estimators=50) tree_reg.fit(X_train, y_train) # evaluate the model: # We will evaluate performance using the mean squared error and the # root of the mean squared error pred = tree_reg.predict(X_train) print('linear train mse: {}'.format(mean_squared_error(y_train, pred))) print('linear train rmse: {}'.format(sqrt(mean_squared_error(y_train, pred)))) print() pred = tree_reg.predict(X_test) print('linear test mse: {}'.format(mean_squared_error(y_test, pred))) print('linear test rmse: {}'.format(sqrt(mean_squared_error(y_test, pred)))) # These are the values produced by our current live model # we new them from the past, so I pase them here for information print(''' Prediction analysis from old Lasso Regression: --------------------------------------------- linear train mse: 1087435415.4414542 linear train rmse: 32976.28565259366 linear test mse: 1405259552.2596064 linear test rmse: 37486.79170400698 ''') # let's 
evaluate our predictions respect to the original price plt.scatter(y_test, tree_reg.predict(X_test)) plt.xlabel('True House Price') plt.ylabel('Predicted House Price') plt.title('Evaluation of Lasso Predictions') ``` ## Feature Selection ``` # here I will do the model fitting and feature selection # altogether in one line of code # To select features we use Scikit-learn's SelectFromModel # specifying the the Gradient Boosting Regressor model # and we train the SelecgFromModel with the train set. # remember to set the seed, the random state in this function sel_ = SelectFromModel(GradientBoostingRegressor( random_state=0, n_estimators=50)) sel_.fit(X_train, y_train) # let's print the number of total and selected features # this is how we can make a list of the selected features selected_feat = X_train.columns[(sel_.get_support())] # let's print some stats print('total features: {}'.format((X_train.shape[1]))) print('selected features: {}'.format(len(selected_feat))) selected_feat ``` ## Re-build model with selected features ``` tree_reg = GradientBoostingRegressor(random_state=0, n_estimators=50) tree_reg.fit(X_train[selected_feat], y_train) pred = tree_reg.predict(X_train[selected_feat]) print('linear train mse: {}'.format(mean_squared_error(y_train, pred))) print('linear train rmse: {}'.format(sqrt(mean_squared_error(y_train, pred)))) print() pred = tree_reg.predict(X_test[selected_feat]) print('linear test mse: {}'.format(mean_squared_error(y_test, pred))) print('linear test rmse: {}'.format(sqrt(mean_squared_error(y_test, pred)))) data[selected_feat].head() # make a list of the categorical variables that contain missing values vars_dates = ['YearRemodAdd'] vars_cat = ['BsmtQual'] vars_num = ['LotArea', 'OverallQual', 'YearRemodAdd', 'BsmtQual', 'BsmtFinSF1', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'GrLivArea', 'GarageCars'] ```
github_jupyter
``` prefix = 'ens' midx = '66' import pandas as pd import numpy as np from sklearn.metrics import f1_score, confusion_matrix new = pd.read_csv('leak2d.csv') new = new[new.Questionable != 1] new.reset_index(inplace=True) print(new.head()) print(new.shape) ens = pd.read_csv('sub/'+prefix+midx+'.csv').fillna('') ens = ens.set_index('Id') print(ens.head()) print(ens.shape) # # create onehot matrix for a submission file # def onehot_sub(csv0, num_classes=28): # c0 = pd.read_csv(csv0) # s0 = [s if isinstance(s,str) else '' for s in c0.Predicted] # p0 = [s.split() for s in s0] # y0 = np.zeros((c0.shape[0],num_classes)).astype(int) # # print(p0[:5]) # for i in range(c0.shape[0]): # for j in p0[i]: y0[i,int(j)] = 1 # # print(y0[:5]) # y = pd.DataFrame(y0) # y.columns = ['y_'+str(i) for i in range(num_classes)] # c0 = pd.concat((c0,y),axis=1) # fname = csv0[:-4] + '_onehot.csv' # c0.to_csv(fname) # print(fname) # new['Predicted'] = new['Target'].values # fname = 'sub/leak2dd.csv' # new.to_csv(fname) # onehot_sub(fname) changed = 0 unchanged = 0 for i, item in enumerate(new['Id']): # print(test_preds.loc[i]['Target']) if ens.loc[item]['Predicted'] != new.loc[i]['Target']: print(ens.loc[item]['Predicted'],' => ', new.loc[i]['Target']) changed += 1 ens.loc[item]['Predicted'] = new.loc[i]['Target'] else: unchanged += 1 print('changed',changed,' unchanged',unchanged) print() fname = 'sub/'+prefix+midx+'d.csv' ens.to_csv(fname) print(fname) name_label_dict = { 0: "Nucleoplasm", 1: "Nuclear membrane", 2: "Nucleoli", 3: "Nucleoli fibrillar center", 4: "Nuclear speckles", 5: "Nuclear bodies", 6: "Endoplasmic reticulum", 7: "Golgi apparatus", 8: "Peroxisomes", 9: "Endosomes", 10: "Lysosomes", 11: "Intermediate filaments", 12: "Actin filaments", 13: "Focal adhesion sites", 14: "Microtubules", 15: "Microtubule ends", 16: "Cytokinetic bridge", 17: "Mitotic spindle", 18: "Microtubule organizing center", 19: "Centrosome", 20: "Lipid droplets", 21: "Plasma membrane", 22: "Cell junctions", 
23: "Mitochondria", 24: "Aggresome", 25: "Cytosol", 26: "Cytoplasmic bodies", 27: "Rods & rings" } LABEL_MAP = name_label_dict np.set_printoptions(precision=3, suppress=True, linewidth=100) # compute f1 score between two submission files def f1_sub(csv0, csv1, num_classes=28): c0 = pd.read_csv(csv0) c1 = pd.read_csv(csv1) assert c0.shape == c1.shape s0 = [s if isinstance(s,str) else '' for s in c0.Predicted] s1 = [s if isinstance(s,str) else '' for s in c1.Predicted] p0 = [s.split() for s in s0] p1 = [s.split() for s in s1] y0 = np.zeros((c0.shape[0],num_classes)).astype(int) y1 = np.zeros((c0.shape[0],num_classes)).astype(int) # print(p0[:5]) for i in range(c0.shape[0]): for j in p0[i]: y0[i,int(j)] = 1 for j in p1[i]: y1[i,int(j)] = 1 # print(y0[:5]) return f1_score(y0, y1, average='macro') # computute confusion matrices between two submission files def f1_confusion(csv0, csv1, num_classes=28): c0 = pd.read_csv(csv0) c1 = pd.read_csv(csv1) assert c0.shape == c1.shape s0 = [s if isinstance(s,str) else '' for s in c0.Predicted] s1 = [s if isinstance(s,str) else '' for s in c1.Predicted] p0 = [s.split() for s in s0] p1 = [s.split() for s in s1] y0 = np.zeros((c0.shape[0],num_classes)).astype(int) y1 = np.zeros((c0.shape[0],num_classes)).astype(int) # print(p0[:5]) for i in range(c0.shape[0]): for j in p0[i]: y0[i,int(j)] = 1 for j in p1[i]: y1[i,int(j)] = 1 # print(y0[:5]) y0avg = np.average(y0,axis=0) y1avg = np.average(y1,axis=0) cm = [confusion_matrix(y0[:,i], y1[:,i]) for i in range(y0.shape[1])] fm = [f1_score(y0[:,i], y1[:,i]) for i in range(y0.shape[1])] for i in range(y0.shape[1]): print(LABEL_MAP[i]) print(cm[i],' %4.2f' % fm[i],' %6.4f' % y0avg[i],' %6.4f' % y1avg[i], ' %6.4f' % (y0avg[i] - y1avg[i])) print() # print('y0avg') # print(y0avg) # print('y1avg') # print(y1avg) # print('y0avg - y1avg') # print(y0avg-y1avg) print('f1 macro') print(np.mean(fm)) return f1_score(y0, y1, average='macro') f1_sub(fname,'sub/'+prefix+midx+'.csv') 
f1_sub(fname,'sub/ens43c.csv') f1_sub(fname,'sub/ens45c.csv') f1_sub(fname,'sub/ens53c.csv') f1_sub(fname,'sub/ens53d.csv') f1_sub(fname,'sub/ens55d.csv') f1_sub(fname,'sub/ens56d.csv') f1_sub(fname,'sub/ens58d.csv') f1_sub(fname,'sub/ens59d.csv') f1_sub(fname,'sub/ens61d.csv') f1_sub(fname,'sub/preresnet0d.csv') f1_sub(fname,'sub/resnet15c.csv') f1_sub(fname,'sub/se_resnext11d.csv') # f1_confusion(fname,'sub/'+prefix+midx+'.csv') ```
github_jupyter
# ORF MLP Trying to fix bugs. NEURONS=128 and K={1,2,3}. ``` import time def show_time(): t = time.time() print(time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))) show_time() PC_TRAINS=8000 NC_TRAINS=8000 PC_TESTS=8000 NC_TESTS=8000 RNA_LEN=1000 MAX_K = 3 INPUT_SHAPE=(None,84) # 4^3 + 4^2 + 4^1 NEURONS=128 DROP_RATE=0.01 EPOCHS=100 # 1000 # 200 SPLITS=5 FOLDS=5 # make this 5 for serious testing import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from keras.models import Sequential from keras.layers import Dense,Embedding,Dropout from keras.layers import Flatten,TimeDistributed from keras.losses import BinaryCrossentropy from keras.callbacks import ModelCheckpoint from keras.models import load_model import sys IN_COLAB = False try: from google.colab import drive IN_COLAB = True except: pass if IN_COLAB: print("On Google CoLab, mount cloud-local file, get our code from GitHub.") PATH='/content/drive/' #drive.mount(PATH,force_remount=True) # hardly ever need this drive.mount(PATH) # Google will require login credentials DATAPATH=PATH+'My Drive/data/' # must end in "/" import requests r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py') with open('RNA_describe.py', 'w') as f: f.write(r.text) from RNA_describe import ORF_counter r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py') with open('RNA_gen.py', 'w') as f: f.write(r.text) from RNA_gen import Collection_Generator, Transcript_Oracle r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/KmerTools.py') with open('KmerTools.py', 'w') as f: f.write(r.text) from KmerTools import KmerTools else: print("CoLab not working. 
On my PC, use relative paths.") DATAPATH='data/' # must end in "/" sys.path.append("..") # append parent dir in order to use sibling dirs from SimTools.RNA_describe import ORF_counter from SimTools.RNA_gen import Collection_Generator, Transcript_Oracle from SimTools.KmerTools import KmerTools BESTMODELPATH=DATAPATH+"BestModel" # saved on cloud instance and lost after logout LASTMODELPATH=DATAPATH+"LastModel" # saved on Google Drive but requires login ``` ## Data Load ``` show_time() def make_generators(seq_len): pcgen = Collection_Generator() pcgen.get_len_oracle().set_mean(seq_len) pcgen.set_seq_oracle(Transcript_Oracle()) ncgen = Collection_Generator() ncgen.get_len_oracle().set_mean(seq_len) return pcgen,ncgen pc_sim,nc_sim = make_generators(RNA_LEN) pc_all = pc_sim.get_sequences(PC_TRAINS+PC_TESTS) nc_all = nc_sim.get_sequences(NC_TRAINS+NC_TESTS) print("Generated",len(pc_all),"PC seqs") print("Generated",len(nc_all),"NC seqs") # Describe the sequences def describe_sequences(list_of_seq): oc = ORF_counter() num_seq = len(list_of_seq) rna_lens = np.zeros(num_seq) orf_lens = np.zeros(num_seq) for i in range(0,num_seq): rna_len = len(list_of_seq[i]) rna_lens[i] = rna_len oc.set_sequence(list_of_seq[i]) orf_len = oc.get_max_orf_len() orf_lens[i] = orf_len print ("Average RNA length:",rna_lens.mean()) print ("Average ORF length:",orf_lens.mean()) print("Simulated sequences prior to adjustment:") print("PC seqs") describe_sequences(pc_all) print("NC seqs") describe_sequences(nc_all) show_time() ``` ## Data Prep ``` # Any portion of a shuffled list is a random selection pc_train=pc_all[:PC_TRAINS] nc_train=nc_all[:NC_TRAINS] pc_test=pc_all[PC_TRAINS:PC_TRAINS+PC_TESTS] nc_test=nc_all[NC_TRAINS:NC_TRAINS+PC_TESTS] print("PC train, NC train:",len(pc_train),len(nc_train)) print("PC test, NC test:",len(pc_test),len(nc_test)) # Garbage collection pc_all=None nc_all=None print("First PC train",pc_train[0]) print("First PC test",pc_test[0]) def prepare_x_and_y(seqs1,seqs0): 
len1=len(seqs1) len0=len(seqs0) total=len1+len0 L1=np.ones(len1,dtype=np.int8) L0=np.zeros(len0,dtype=np.int8) S1 = np.asarray(seqs1) S0 = np.asarray(seqs0) all_labels = np.concatenate((L1,L0)) all_seqs = np.concatenate((S1,S0)) for i in range(0,len0): all_labels[i*2] = L0[i] all_seqs[i*2] = S0[i] all_labels[i*2+1] = L1[i] all_seqs[i*2+1] = S1[i] return all_seqs,all_labels # use this to test unshuffled # bug in next line? X,y = shuffle(all_seqs,all_labels) # sklearn.utils.shuffle #Doesn't fix it #X = shuffle(all_seqs,random_state=3) # sklearn.utils.shuffle #y = shuffle(all_labels,random_state=3) # sklearn.utils.shuffle return X,y Xseq,y=prepare_x_and_y(pc_train,nc_train) print(Xseq[:3]) print(y[:3]) # Tests: show_time() def seqs_to_kmer_freqs(seqs,max_K): tool = KmerTools() # from SimTools collection = [] for seq in seqs: counts = tool.make_dict_upto_K(max_K) # Last param should be True when using Harvester. counts = tool.update_count_one_K(counts,max_K,seq,True) # Given counts for K=3, Harvester fills in counts for K=1,2. 
counts = tool.harvest_counts_from_K(counts,max_K) fdict = tool.count_to_frequency(counts,max_K) freqs = list(fdict.values()) collection.append(freqs) return np.asarray(collection) Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K) print(Xfrq[:3]) show_time() ``` ## Neural network ``` def make_DNN(): dt=np.float32 print("make_DNN") print("input shape:",INPUT_SHAPE) dnn = Sequential() dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt)) # relu doesn't work as well dnn.add(Dropout(DROP_RATE)) dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt)) dnn.add(Dropout(DROP_RATE)) dnn.add(Dense(1,activation="sigmoid",dtype=dt)) dnn.compile(optimizer='adam', # adadelta doesn't work as well loss=BinaryCrossentropy(from_logits=False), metrics=['accuracy']) # add to default metrics=loss dnn.build(input_shape=INPUT_SHAPE) return dnn model = make_DNN() print(model.summary()) def do_cross_validation(X,y): cv_scores = [] fold=0 #mycallbacks = [ModelCheckpoint( # filepath=MODELPATH, save_best_only=True, # monitor='val_accuracy', mode='max')] # When shuffle=True, the valid indices are a random subset. splitter = KFold(n_splits=SPLITS,shuffle=True) model = None for train_index,valid_index in splitter.split(X): if fold < FOLDS: fold += 1 X_train=X[train_index] # inputs for training y_train=y[train_index] # labels for training X_valid=X[valid_index] # inputs for validation y_valid=y[valid_index] # labels for validation print("MODEL") # Call constructor on each CV. Else, continually improves the same model. 
model = model = make_DNN() print("FIT") # model.fit() implements learning start_time=time.time() history=model.fit(X_train, y_train, epochs=EPOCHS, verbose=1, # ascii art while learning # callbacks=mycallbacks, # called at end of each epoch validation_data=(X_valid,y_valid)) end_time=time.time() elapsed_time=(end_time-start_time) print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time)) # print(history.history.keys()) # all these keys will be shown in figure pd.DataFrame(history.history).plot(figsize=(8,5)) plt.grid(True) plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale plt.show() return model # parameters at end of training show_time() last_model = do_cross_validation(Xfrq,y) def show_test_AUC(model,X,y): ns_probs = [0 for _ in range(len(y))] bm_probs = model.predict(X) ns_auc = roc_auc_score(y, ns_probs) bm_auc = roc_auc_score(y, bm_probs) ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs) bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc) plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc) plt.title('ROC') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend() plt.show() print("%s: %.2f%%" %('AUC',bm_auc*100.0)) def show_test_accuracy(model,X,y): scores = model.evaluate(X, y, verbose=0) print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100)) print("Accuracy on training data.") print("Prepare...") show_time() Xseq,y=prepare_x_and_y(pc_train,nc_train) print("Extract K-mer features...") show_time() Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K) print("Plot...") show_time() show_test_AUC(last_model,Xfrq,y) show_test_accuracy(last_model,Xfrq,y) show_time() print("Accuracy on test data.") print("Prepare...") show_time() Xseq,y=prepare_x_and_y(pc_test,nc_test) print("Extract K-mer features...") show_time() Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K) print("Plot...") show_time() show_test_AUC(last_model,Xfrq,y) show_test_accuracy(last_model,Xfrq,y) show_time() ```
github_jupyter
``` import xarray as xr import pandas as pd import pickle as pk import re import requests import os import gc # Sensor E: url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@qc.cuny.edu/20181104T104012-RS03ECAL-MJ03E-06-BOTPTA302-streamed-botpt_nano_sample/catalog.html' # Sensor B url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@qc.cuny.edu/20181020T213838-RS03ASHS-MJ03B-09-BOTPTA304-streamed-botpt_nano_sample/catalog.html' # Sensor F url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@qc.cuny.edu/20181104T041943-RS03CCAL-MJ03F-05-BOTPTA301-streamed-botpt_nano_sample/catalog.html' # url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/jazlynnatalie12@gmail.com/20190712T124024454Z-RS03ECAL-MJ03E-06-BOTPTA302-streamed-botpt_nano_sample/catalog.html' url = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/RS03ECAL/MJ03E/06-BOTPTA302/streamed/botpt_nano_sample' tds_url = 'https://opendap.oceanobservatories.org/thredds/dodsC' datasets = requests.get(url).text urls = re.findall(r'href=[\'"]?([^\'" >]+)', datasets) x = re.findall(r'(ooi/.*?.nc)', datasets) for i in x: if i.endswith('.nc') == False: x.remove(i) for i in x: try: float(i[-4]) except: x.remove(i) datasets = [os.path.join(tds_url, i) for i in x] # print(datasets[1]) print(datasets) # ds = xr.open_dataset(datasets[1]) ds = xr.open_dataset(datasets()) # make the output directory new_dir = '/home/jovyan/data/botpt/2019FifteenSecond_mean_dataE/' if not os.path.isdir(new_dir): try: os.makedirs(new_dir) except OSError: if os.path.exists(new_dir): pass else: raise # read in the data directly off THREDDS and write out as subsampled pickled pandas dataframe # NOTE: It takes about one hour to subsample 69499.81 Mbytes of data and write it out to a dataframe. 
num = 0 for i in datasets: ds = xr.open_dataset(i) ds = ds.swap_dims({'obs': 'time'}) pressure_min = pd.DataFrame() pressure_min['bottom_pressure'] = ds['bottom_pressure'].to_pandas().resample('.25T').mean() del pressure_min.index.name pressure_min = pressure_min.dropna() out = '/home/jovyan/data/botpt/2019FifteenSecond_mean_dataE/' + i.split('/')[-1][:-3] + '_resampled' + '.pd' num = num +1 with open(out, 'wb') as fh: pk.dump(pressure_min,fh) gc.collect() # create a single file with all the pickled data. pressure_min = pd.DataFrame() for path, subdirs, files in os.walk('/home/jovyan/data/botpt/2019FifteenSecond_mean_dataE/'): for name in files: file_name = os.path.join(path, name) with open(file_name, 'rb') as f: pd_df = pk.load(f) pressure_min = pressure_min.append(pd_df) with open('/home/jovyan/data/botpt/2019bottom_pressure15s_E.pkl', 'wb') as f: pk.dump(pressure_min,f) ```
github_jupyter
``` import google.datalab.bigquery as bq import numpy as np from sklearn.metrics import mean_squared_error, mean_absolute_error import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf #training price data training = bq.Query(''' Select date_utc,price from Energy.MarketPT where date_utc between '2015-06-01 00:00:00' and '2015-06-21 23:01:00' order by date_utc ''').execute(bq.QueryOutput.dataframe()).result() #validation price data validation = bq.Query(''' Select date_utc,price from Energy.MarketPT where date_utc between '2015-06-22 00:00:00' and '2015-06-28 23:01:00' order by date_utc ''').execute(bq.QueryOutput.dataframe()).result() #Triple Exponential Smoothing Model #loockback seasonal pattern (same hour last week) c = 24*7 #target shape n_y =1 #number of training examples m = len(training) tf.reset_default_graph() tf.set_random_seed(1) param = { 'A' : tf.get_variable("A", [1,], initializer = tf.constant_initializer(0.5)) ,'B' : tf.get_variable("B", [1,], initializer = tf.constant_initializer(0.5)) ,'G' : tf.get_variable("G", [1,], initializer = tf.constant_initializer(0.5)) } #targets Y = tf.placeholder(tf.float32,[m,n_y], name="Y") #loockback seasonal pattern (same hour last week) C = tf.constant(name="C", dtype=tf.int32, value=c) #initial values for U anbd V (0.0) U = tf.constant(name="U", dtype=tf.float32, value=0.0,shape=[1,]) #initial values for S (average y for first c days) y_avg = np.mean(training['price'][:c]) S = tf.constant(name="S", dtype=tf.float32, value=[y_avg for i in range(c)],shape=[c,]) #auxiliary functions to compute initial U and S def s0f(y,s_c): return y/s_c[0] def u0f(y,s): return y/(s[-1]) #auxiliary functions to compute U,V,S def uf(y,s_c,u_1,v_1): return param['A']*y/(s_c[0])+(1-param['A'])*(u_1+v_1) def vf(u,u_1,v_1): return param['B']*(u-u_1)+(1-param['B'])*v_1 def sf(y,u,s_c): return param['G']*(y/u)+(1-param['G'])*(s_c[0]) #auxiliary function to predict def pf(u_1,v_1,s_c): return (u_1+v_1)*(s_c[0]) #auxiliary 
function for 1st period (1st week) initializaqtion def s1 (ini,ele): ini['s'] = tf.concat([ini['s'][1:],s0f(ele,ini['s'])],axis=0) ini['u'] = u0f(ele,ini['s']) return ini #auxiliary function for all periods after the first one def s2 (ini,ele): ini['p'] = pf(ini['u'],ini['v'],ini['s']) aux_u = uf(ele,ini['s'],ini['u'],ini['v']) ini['v'] = vf(aux_u,ini['u'],ini['v']) ini['s'] = tf.concat((ini['s'][1:],sf(ele,aux_u,ini['s'])),axis=0) ini['u'] = aux_u return ini #squared mean error def compute_cost(y_p, y): return tf.reduce_mean((y-y_p)**2) #define model def model(Y_train, learning_rate = 0.001, num_epochs = 700, print_cost = True): tf.set_random_seed(1) #keep track of costs costs = [] #run loop (tf.scan) and send initial state for first period (first week) ini_states = tf.scan(s1, elems=Y[0:C], initializer={'u':U ,'s':S}) #make sure parameters A,B,G stay between 0 and 1 }) for k in param.keys(): param[k] = tf.minimum(tf.maximum(param[k],0.0),1.0) #run loop (tf.scan) and send initial state for all periods after the first one states = tf.scan(s2, elems=Y[C:], initializer={'u':ini_states['u'][-1] ,'v':U ,'p':U ,'s':ini_states['s'][-1] }) #keep track of latest state for future predictions last_state = {x:states[x][-1] for x in states.keys()} #only compute cost on all periods after the first one (initialization for first period is too noisy) cost = compute_cost(states['p'], Y[C:]) #optimizer optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) init = tf.global_variables_initializer() #loop for number of epochs for training with tf.Session() as sess: sess.run(init) for epoch in range(num_epochs): epoch_cost = 0. 
_ , batch_cost, l_s = sess.run([optimizer, cost,last_state], feed_dict={Y: Y_train})#, C:c}) epoch_cost = batch_cost if print_cost == True and epoch % 100 == 0: print ("Cost after epoch %i: %f" % (epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) #plot learning plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per tens)') plt.title("Learning rate =" + str(learning_rate)) plt.show() #return trained parameters (A,B,G) and last state to be used for followup predictions return sess.run([param]),l_s #train model par, l_s = model(np.array([[x/100.0] for x in training['price']],dtype=np.float32)) par = par[0] #prediction function (using trained parameters and last state) param = { 'A' : tf.convert_to_tensor(par["A"]) ,'B' : tf.convert_to_tensor(par["B"]) ,'G' : tf.convert_to_tensor(par["G"]) } def predict(ls): X = np.reshape(np.array([ls['p']],dtype=np.float32),[1,1]) x = tf.placeholder("float", [X.shape[0], X.shape[1]],name='px') S = np.reshape(np.array([ls['s']],dtype=np.float32),[168,1]) s = tf.placeholder("float", [S.shape[0],S.shape[1]],name='ps') ls['s'] = s U = np.reshape(np.array([ls['u']],dtype=np.float32),[1,1]) u = tf.placeholder("float", [U.shape[0],U.shape[1]],name='pu') ls['u'] = u V = np.reshape(np.array([ls['v']],dtype=np.float32),[1,1]) v = tf.placeholder("float", [V.shape[0],V.shape[1]],name='pv') ls['v'] = v t1 = s2 (ls,x) sess = tf.Session() return sess.run(t1, feed_dict = {x: X, s: S, u: U, v: V}) #learned alfa, beta, and gamma par #predict using learned parameters and last state (starting with the one out of training) pred = [] ay = l_s.copy() ay['p'] = (ay['u']+ay['v'])*ay['s'][0] pred.append(ay['p'][0]*100.0) for i in range(24*7-1): ay= predict(ay) pred.append(ay['p'][0][0]*100.0) #assess metric/plot aux function def assessmodel(m): plt.plot(list(validation['price'])) plt.plot(m) print('RSME: '+str(np.math.sqrt(mean_squared_error(validation['price'], m)))) print('AE: 
'+str(mean_absolute_error(validation['price'], m))) #assess predictions assessmodel(pred) ```
github_jupyter
# Inter-annotator agreement between the first 10 annotators of WS-353 Measured in Kappa and Rho: - against the gold standard which is the mean of all annotators, as described in Hill et al 2014 (footnote 6) - against each other Using Kohen's kappa, which is binary, so I average across pairs of annotators. ``` %cd ~/NetBeansProjects/ExpLosion/ from notebooks.common_imports import * from skll.metrics import kappa from scipy.stats import spearmanr from itertools import combinations sns.timeseries.algo.bootstrap = my_bootstrap sns.categorical.bootstrap = my_bootstrap columns = 'Word 1,Word 2,Human (mean),1,2,3,4,5,6,7,8,9,10,11,12,13'.split(',') df1 = pd.read_csv('../thesisgenerator/similarity-data/wordsim353/set1.csv')[columns] df2 = pd.read_csv('../thesisgenerator/similarity-data/wordsim353/set2.csv')[columns] df = pd.concat([df1, df2], ignore_index=True) df_gold = pd.read_csv('../thesisgenerator/similarity-data/wordsim353/combined.csv', names='w1 w2 sim'.split()) # had to remove trailing space from their files to make it parse with pandas marco = pd.read_csv('../thesisgenerator/similarity-data/MEN/agreement/marcos-men-ratings.txt', sep='\t', index_col=[0,1], names=['w1', 'w2', 'sim']).sort_index().convert_objects(convert_numeric=True) elia = pd.read_csv('../thesisgenerator/similarity-data/MEN/agreement/elias-men-ratings.txt', sep='\t', index_col=[0,1], names=['w1', 'w2', 'sim']).sort_index().convert_objects(convert_numeric=True) df.head() # Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` def bin(arr, nbins=2, debug=False): bins = np.linspace(arr.min(), arr.max(), nbins+1) if debug: print('bins are', bins) return np.digitize(arr, bins[1:-1]) bin(df['1'], nbins=5, debug=True)[:10] bin(np.array([0, 2.1, 5.8, 7.9, 10]), debug=True) # 0 and 10 are needed to define the range of values bin(np.array([0, 2.1, 5.8, 7.9, 10]), nbins=3, debug=True) df.describe() elia.describe() ``` # WS353: Kappa against each other/ against mean ``` bin_counts = range(2, 6) 
# pair, bin count, kappa kappas_pair = [] for name1, name2 in combinations(range(1,14), 2): for b in bin_counts: kappas_pair.append(['%d-%d'%(name1, name2), b, kappa(bin(df[str(name1)], b), bin(df[str(name2)], b))]) kappas_mean = [] for name in range(1, 14): for b in bin_counts: kappas_mean.append(['%d-m'%name, b, kappa(bin(df[str(name)], b), bin(df_gold.sim, b))]) kappas_men = [] # MEN data set- marco vs elia for b in bin_counts: kappas_men.append(['marco-elia', b, kappa(bin(marco.sim.values, b), bin(elia.sim.values, b))]) kappas1 = pd.DataFrame(kappas_pair, columns=['pair', 'bins', 'kappa']) kappas1['kind'] = 'WS353-pairwise' kappas2 = pd.DataFrame(kappas_mean, columns=['pair', 'bins', 'kappa']) kappas2['kind'] = 'WS353-to mean' kappas3 = pd.DataFrame(kappas_men, columns=['pair', 'bins', 'kappa']) kappas3['kind'] = 'MEN' kappas = pd.concat([kappas1, kappas2, kappas3], ignore_index=True) kappas.head(3) with sns.color_palette("cubehelix", 3): ax = sns.tsplot(kappas, time='bins', unit='pair', condition='kind', value='kappa', marker='s', linewidth=4); ax.set_xticklabels(np.arange(kappas.bins.min(), kappas.bins.max() + 0.01, 0.5).astype(np.int)) sparsify_axis_labels(ax) plt.savefig('plot-intrinsic-ws353-kappas.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.1) # sns.tsplot(kappas, time='bins', unit='pair', condition='kind', value='kappa', sns.factorplot(data=kappas, x='bins', y='kappa', hue='kind', kind='box') kappas.groupby(['bins', 'kind']).mean() rhos_pair = [] for name1, name2 in combinations(range(1,14), 2): rhos_pair.append(spearmanr(bin(df[str(name1)], b), bin(df[str(name2)], b))[0]) rhos_mean = [] for name in range(1,14): rhos_mean.append(spearmanr(bin(df[str(name)], b), bin(df_gold.sim, b))[0]) sns.distplot(rhos_pair, label='pairwise'); # plt.axvline(np.mean(rhos_pair)); sns.distplot(rhos_mean, label='to mean'); # plt.axvline(np.mean(rhos_mean), color='g'); plt.legend(loc='upper left'); plt.savefig('plot-intrinsic-ws353-rhos.pdf', format='pdf', 
dpi=300, bbox_inches='tight', pad_inches=0.1) print(np.mean(rhos_pair), np.mean(rhos_mean)) # Fisher transform: http://stats.stackexchange.com/a/19825 and wiki article therein np.tanh(np.arctanh(rhos_pair).mean()), np.tanh(np.arctanh(rhos_mean).mean()) from nltk.metrics.agreement import AnnotationTask AnnotationTask(data=[ ('coder1', 'obj1', 'label1'), ('coder1', 'obj2', 'label2'), ('coder2', 'obj1', 'label1'), ('coder2', 'obj2', 'label2'), ('coder3', 'obj1', 'label1'), ('coder3', 'obj2', 'label1'), ]).multi_kappa() multikappas = [] for name in range(1, 14): for b in bin_counts: labels = bin(df[str(name)], b) # gold_labels = bin(df_gold.sim, b) for i, label in enumerate(labels): multikappas.append(('coder%d'%name, 'wordpair%d'%i, label)) AnnotationTask(multikappas).multi_kappa() # WTF nltk, you are great ``` # The same thing for the MEN dataset Annotations by Marco and Elia ``` spearmanr(marco.sim, elia.sim) # they report .6845 ```
github_jupyter
# **Applied Deep Learning Tutorial** ## **Transfer Learning for Object Classification** ### **Imports** Import the necessary libraries and load the Dogs vs Cats dataset from Kaggle. ``` from __future__ import absolute_import, division, print_function, unicode_literals import os try: # Use the %tensorflow_version magic if in colab. %tensorflow_version 1.x except Exception: pass import tensorflow as tf from tensorflow import keras #print("TensorFlow version is ", tf.__version__) import numpy as np import cv2 import matplotlib.pyplot as plt import matplotlib.image as mpimg ``` ### **Preparing the data** Create directories for training and validation for both classes, such as dog and cat. ``` # Load Cats vs Dogs dataset zip_file = tf.keras.utils.get_file(origin="https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip", fname="cats_and_dogs_filtered.zip", extract=True) base_dir, _ = os.path.splitext(zip_file) print(os.listdir(base_dir)) train_dir = os.path.join(base_dir, 'train') print(os.listdir(train_dir)) validation_dir = os.path.join(base_dir, 'validation') # Directory with our training cat pictures train_cats_dir = os.path.join(train_dir, 'cats') #'''print how many cat images there are in your training dataset'' print('Train cats:' + str(len(os.listdir(train_cats_dir)))) print(os.listdir(train_cats_dir)) # Directory with our training dog pictures train_dogs_dir = os.path.join(train_dir, 'dogs') print('Train dogs:' + str(len(os.listdir(train_dogs_dir)))) print(os.listdir(train_dogs_dir)) # Directory with our validation cat pictures validation_cats_dir = os.path.join(validation_dir, 'cats') print('Validation cats:' + str(len(os.listdir(validation_cats_dir)))) print(os.listdir(validation_cats_dir)) # Directory with our validation dog pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') print('Validation dogs:' + str(len(os.listdir(validation_dogs_dir)))) print(os.listdir(validation_dogs_dir)) import cv2 import pandas as pd,numpy as 
np,pylab as pl #n=np.random.randint(0,210,1)[0] #print('Label: ',flower_labels[n],names[flower_labels[n]]) def show_img(img_dir: str) -> None: n=np.random.randint(0,499,1)[0] srcname = os.path.abspath(img_dir) +'/'+ os.listdir(img_dir)[n] img=cv2.imread(srcname) rgb_img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB) pl.figure(figsize=(3,3)) pl.imshow(rgb_img); show_img(train_dogs_dir) show_img(train_cats_dir) ``` Set up a pipeline for data augmentation with Keras ``` image_size = 200 # All images will be resized to 200x200 batch_size = 20 # Rescale all images by 1./255 and apply image augmentation train_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255) validation_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255) # Flow training images in batches of 20 using train_datagen generator train_generator = train_datagen.flow_from_directory( train_dir, # Source directory for the training images target_size=(image_size, image_size), batch_size=batch_size, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') # Flow validation images in batches of 20 using test_datagen generator validation_generator = validation_datagen.flow_from_directory( validation_dir, # Source directory for the validation images target_size=(image_size, image_size), batch_size=batch_size, class_mode='binary') ``` ### **Preparing pretrained model** The VGG16 model pre-trained on the ImageNet dataset ``` IMG_SHAPE = (image_size, image_size, 3) # Create the base model from the pre-trained model MobileNet V2 feature_extractor = tf.keras.applications.VGG16(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') ``` ## **Feature Extraction without finetuning** We will freeze the layers of the VGG16 and utilize the feature extractor capabilities of this part of the network. 
``` feature_extractor.trainable = False #'''freeze the pretrained graph of the VGG16''' # Let's take a look at the base model architecture (notice the amount of non-trainable params, what do you expect? And why?) feature_extractor.summary() # There are 14,714,688 non-trainable parameters and 0 trainable. We "froze" the model to use the already trained parameters # tf.keras.utils.plot_model(feature_extractor) #'''add a global 2D average pooling layer here, why do we need this?''' '''Global Average Pooling calculates the average output of each feature map in the previous layer. GAP layer reduces each h×w feature map to a single number by simply taking the average of all hw values. GAP reduces the data significantly (from 6x6x512 to 1x1x512) and prepares the model for the final classification layer. ''' # activation='softmax': constant : val_loss: 7.6246 - val_acc: 0.5000 model = tf.keras.Sequential([ feature_extractor, keras.layers.GlobalAveragePooling2D(), keras.layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.01), loss='binary_crossentropy', metrics=['accuracy']) model.summary() # Saving the model # Directory where the checkpoints will be saved checkpoint_dir = './training_checkpoints' # Name of the checkpoint files checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_training_vgg16_I") checkpoint_callback=tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_prefix, save_weights_only=True) epochs = 25 # the pretrained model ckpt_training_vgg16 has been trained for: # 2 epochs reaching a validation accuracy of:0.9610 # 5 epochs reaching a validation accuracy of:0.9600 # 10 epochs reaching a validation accuracy of:0.9570 # 15 epochs reaching a validation accuracy of:0.9620 steps_per_epoch = train_generator.n // batch_size validation_steps = validation_generator.n // batch_size history = model.fit_generator(train_generator, steps_per_epoch = steps_per_epoch, epochs=epochs, validation_data=validation_generator, 
validation_steps=validation_steps, callbacks=[checkpoint_callback]) def plot_learning_curve( title: str, x: int, y: int, y_test: int, ylim: float = 0.6 ) -> None: plt.figure() plt.title(title) axes = plt.gca() axes.set_ylim([ylim, 1]) plt.xlabel("Epoch") plt.ylabel("Accuracy") train_sizes = x train_scores = y test_scores = y_test plt.grid() plt.plot( train_sizes, train_scores, "o-", color=(177 / 255, 6 / 255, 58 / 255), label="Training accuracy", ) plt.plot( train_sizes, test_scores, "o-", color=(246 / 255, 168 / 255, 0), label="Validation accuracy", ) plt.legend(loc="best") def plot_history(title: str, history: "History", ylim: float = 0.6) -> None: y = history.history["acc"] y_test = history.history["val_acc"] plot_learning_curve(title, np.arange(1, 1 + len(y)), y, y_test, ylim) plot_history('Pretrain model performance', history, 0) ``` ## **Feature Extraction with finetuning** We will freeze the layers of the VGG16 and utilize the feature extractor capabilities of this part of the network. 
``` feature_extractor.trainable = True feature_extractor.summary() #add a global 2D average pooling layer model = tf.keras.Sequential([ feature_extractor, keras.layers.GlobalAveragePooling2D(), keras.layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.01), loss='binary_crossentropy', metrics=['accuracy']) model.summary() # Saving the model # Directory where the checkpoints will be saved checkpoint_dir = './training_checkpoints' # Name of the checkpoint files checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_training_vgg16_II") checkpoint_callback=tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_prefix, save_weights_only=True) epochs = 25 # the pretrained model ckpt_training_vgg16 has been trained for: # 2 epochs reaching a validation accuracy of:0.9610 # 5 epochs reaching a validation accuracy of:0.9600 # 10 epochs reaching a validation accuracy of:0.9570 # 15 epochs reaching a validation accuracy of:0.9620 steps_per_epoch = train_generator.n // batch_size validation_steps = validation_generator.n // batch_size history_II = model.fit_generator(train_generator, steps_per_epoch = steps_per_epoch, epochs=epochs, validation_data=validation_generator, validation_steps=validation_steps, callbacks=[checkpoint_callback]) plot_history('Pretrain model performance', history_II, 0) ```
github_jupyter
# General Funds ``` import pandas as pd import seaborn as sns from matplotlib import pyplot as plt import mwdsbe import schuylkill as skool import time ``` ## Data ``` registry = mwdsbe.load_registry() # geopandas df gf = pd.read_excel(r'C:\Users\dabinlee\Documents\GitHub\mwdsbe_binny\MWDSBE\mwdsbe\data\cwedp_37_report.xlsx', sheet_name='general_funds') city_payments = pd.read_excel(r'C:\Users\dabinlee\Documents\GitHub\mwdsbe_binny\MWDSBE\analysis\data\payments\city_payments_detailed_2017.xlsx') gf.head() len(gf) gf = gf.loc[gf['MAJ_CLASS'] != 1] len(gf) gf = gf.loc[gf['VEND_NAME'].dropna().index] len(gf) gf.head() ``` ## General Funds by Departments ``` simple_cp = city_payments[['dept', 'department_title']] # simple_cp['department_title'] = simple_cp['department_title'].str.split(" ", 1, expand=True)[1] simple_cp = simple_cp.groupby(['dept', 'department_title']).size().to_frame('N').reset_index() simple_cp gf = gf.merge(simple_cp, how='left', left_on='DEPT', right_on='dept').drop('dept', axis=1) gf_dept = gf.groupby(['DEPT','department_title'])['AMT'].sum().to_frame('AMT').reset_index() top_10 = gf_dept.sort_values(by='AMT', ascending=False)[:10] sns.set(style="whitegrid") ax = sns.barplot(x="AMT", y="department_title", data=top_10) ax.set(xlabel='Fund Amount', ylabel='Department') plt.show() ``` ## Vendors under Director of Finance ``` finance = gf.loc[gf['department_title'] == '35 DIRECTOR OF FINANCE'] len(finance) len(finance['VEND_NAME'].unique()) finance['VEND_NAME'].unique() ``` ## Unique Vendors of all general funds ``` len(gf['VEND_NAME'].unique()) # out of 243375 print('Percentage of unique vendors:', 9543 / 243375 * 100, '%') ``` ## Matching: Fuzz95 + TF-IDF85 ``` # clean data ignore_words = ['inc', 'group', 'llc', 'corp', 'pc', 'incorporated', 'ltd', 'co', 'associates', 'services', 'company', 'enterprises', 'enterprise', 'service', 'corporation'] cleaned_registry = skool.clean_strings(registry, ['company_name', 'dba_name'], True, ignore_words) 
cleaned_gf = skool.clean_strings(gf, ['VEND_NAME'], True, ignore_words) cleaned_registry = cleaned_registry.dropna(subset=['company_name']) cleaned_gf = cleaned_gf.dropna(subset=['VEND_NAME']) cleaned_gf.to_excel(r'C:\Users\dabinlee\Documents\GitHub\mwdsbe_binny\MWDSBE\analysis\data\general_funds\cleaned_gf.xlsx', header=True) len(cleaned_gf) # Fuzz 95 + TF-IDF 85 with max_matches = 1 t1 = time.time() merged = ( skool.fuzzy_merge(cleaned_registry, cleaned_gf, left_on="company_name", right_on="VEND_NAME", score_cutoff=95) .pipe(skool.fuzzy_merge, cleaned_registry, cleaned_gf, left_on="dba_name", right_on="VEND_NAME", score_cutoff=95) .pipe(skool.tf_idf_merge, cleaned_registry, cleaned_gf, left_on="company_name", right_on="VEND_NAME", score_cutoff=85) .pipe(skool.tf_idf_merge, cleaned_registry, cleaned_gf, left_on="dba_name", right_on="VEND_NAME", score_cutoff=85) ) t = time.time() - t1 print('Execution time:', t/60, 'min') matched = merged.dropna(subset=['VEND_NAME']) print('Matching:', len(matched), 'out of', len(cleaned_registry)) unique_vendors = matched['VEND_NAME'].tolist() full_matched = cleaned_gf.loc[cleaned_gf['VEND_NAME'].apply(lambda x : x in unique_vendors)] ```
github_jupyter
# Introdução ``` import os import re import time import json import folium import random import requests import numpy as np import pandas as pd import seaborn as sns import geopandas as gpd from folium import plugins from osgeo import gdal, osr from bs4 import BeautifulSoup from tqdm.notebook import trange, tqdm ``` <br> # Join Data ``` # Lê o arquivo csv com o nome dos municípios df = pd.read_csv( 'https://raw.githubusercontent.com/michelmetran/sp_policiaambiental/main/data/tabs/tab_municipio_cpamb.csv', ) # Lê o arquivo csv com o nome dos municípios gdf = gpd.read_file( 'https://raw.githubusercontent.com/michelmetran/sp/main/data/shps/sp_250k_wgs84.geojson', ) gdf.drop(['municipio_nome'], axis=1, inplace=True) gdf['id_municipio'] = gdf['id_municipio'].astype(int) gdf['geometry'] = gdf.simplify(0.0015) # Merge gdf = gdf.merge( df, on='id_municipio', how='left' ) # Results gdf.head() # Batalhão gdf['id_batalhao_desc'] = gdf['id_batalhao'].apply(lambda x: '{}º Batalhão'.format(x)) # Companhia gdf['id_cia_desc'] = gdf['id_cia'].apply(lambda x: '{}ª Companhia do '.format(x)) gdf['id_cia_desc'] = gdf[['id_cia_desc', 'id_batalhao_desc']].apply(lambda x: ''.join(x), axis=1) # Pelotão gdf['id_pelotao_desc'] = gdf['id_pelotao'].apply(lambda x: '{}º Pelotão da '.format(x)) gdf['id_pelotao_desc'] = gdf[['id_pelotao_desc', 'id_cia_desc']].apply(lambda x: ''.join(x), axis=1) # Results gdf.head() set(gdf['id_cia_desc']) ``` <br> ## Create pop up's column ``` # Add Field def popup_html_batalhao(row): #tel = str(row['telefone']).replace('-', '').replace(')', '').replace('(', '+55').replace(' ', '') #fax = str(row['fax']).replace('-', '').replace(')', '').replace('(', '+55').replace(' ', '') html = """ <div> <p><b>{}</b> pertence ao: <h5><b>{}</b></h5></p> </div> """.format( '' if pd.isnull(row['municipio_nome']) else '{}'.format(row['municipio_nome']), '' if pd.isnull(row['id_batalhao_desc']) else '{}'.format(row['id_batalhao_desc']), ) html = html.replace('\n','') html = 
re.sub('\s\s+' , ' ', html) # Remove Espaços no meio html = html.strip() return html # Add Field def popup_html_cia(row): #tel = str(row['telefone']).replace('-', '').replace(')', '').replace('(', '+55').replace(' ', '') #fax = str(row['fax']).replace('-', '').replace(')', '').replace('(', '+55').replace(' ', '') html = """ <div> <p><b>{}</b> pertence ao: <h5><b>{}</b></h5></p> </div> """.format( '' if pd.isnull(row['municipio_nome']) else '{}'.format(row['municipio_nome']), '' if pd.isnull(row['id_cia_desc']) else '{}'.format(row['id_cia_desc']), ) html = html.replace('\n','') html = re.sub('\s\s+' , ' ', html) # Remove Espaços no meio html = html.strip() return html # Add Field def popup_html_pelotao(row): #tel = str(row['telefone']).replace('-', '').replace(')', '').replace('(', '+55').replace(' ', '') #fax = str(row['fax']).replace('-', '').replace(')', '').replace('(', '+55').replace(' ', '') html = """ <div> <p><b>{}</b> pertence ao: <h5><b>{}</b></h5></p> </div> """.format( '' if pd.isnull(row['municipio_nome']) else '{}'.format(row['municipio_nome']), '' if pd.isnull(row['id_pelotao_desc']) else '{}'.format(row['id_pelotao_desc']), ) html = html.replace('\n','') html = re.sub('\s\s+' , ' ', html) # Remove Espaços no meio html = html.strip() return html # <p><b>Sede:</b><br> # {}{}{}<br> # {} # {} # {}</p> # <p><b>Contatos:</b><br> # {} # {} # {}</p> # <p>{}</p> # '' if pd.isnull(row['agencia']) else '{}'.format(row['agencia']), # '' if pd.isnull(row['endereco']) else '{}'.format(row['endereco']), # '' if pd.isnull(row['numero']) else ', {}'.format(row['numero']), # '' if pd.isnull(row['complemento']) else ' - {}'.format(row['complemento']), # '' if pd.isnull(row['bairro']) else 'Bairro: {}<br>'.format(row['bairro']), # '' if pd.isnull(row['municipio_sede']) else 'Município: {}<br>'.format(row['municipio_sede']), # '' if pd.isnull(row['cep']) else 'CEP: {}<br>'.format(row['cep']), # '' if pd.isnull(row['telefone']) else 'Telefone: <a 
href="tel:{}">{}</a><br>'.format(tel, row['telefone']), # '' if pd.isnull(row['fax']) else 'Fax: <a href="tel:{}">{}</a><br>'.format(fax, row['fax']), # '' if pd.isnull(row['email']) else 'E-mail: <a href="mailto:{}">{}</a>'.format(row['email'], row['email']), # '' if pd.isnull(row['email']) else '<a href="{}" target="_blank">Conferir Dados</a>'.format(row['url']), # Calculate PopUps gdf['popup_batalhao'] = gdf.apply(popup_html_batalhao, axis=1) gdf['popup_cia'] = gdf.apply(popup_html_cia, axis=1) gdf['popup_pelotao'] = gdf.apply(popup_html_pelotao, axis=1) ``` <br> ## Adjust Table ``` # Delete Columns print(gdf.columns) # gdf.drop([ # 'id_municipio', 'endereco', 'numero', 'bairro', # 'municipio_sede', 'cep', 'telefone', 'fax', # 'complemento', 'url', 'email',], axis=1, inplace=True) # Save geojson gdf.to_file( os.path.join('data', 'shps', 'sp_cpamb.geojson'), driver='GeoJSON', encoding='utf-8' ) # Results gdf.head() ``` ## Get Centroid ``` def get_centroid(gdf): gdf['apenasparacentroid'] = 35 gdf_dissolve = gdf.dissolve(by='apenasparacentroid') gdf_centroid = gdf_dissolve.representative_point() gdf = gdf.drop('apenasparacentroid', axis=1) return [float(gdf_centroid.y), float(gdf_centroid.x)] list_centroid = get_centroid(gdf) list_centroid ``` ## Folium ``` def map_bomb(input_geojson, bbox): gdf = gpd.read_file(input_geojson) # Create Map m = folium.Map( location=[-22.545968889465207, -49.56185387118866], zoom_start=6, min_zoom=6, max_zoom=11, max_bounds=True, min_lon = bbox['min_lon']*(101/100), max_lon = bbox['max_lon']*(99/100), min_lat = bbox['min_lat']*(101/100), max_lat = bbox['max_lat']*(99/100), #zoom_delta=0.1, tiles=None, ) folium.TileLayer( 'cartodbpositron', name='BaseMap', control=False, ).add_to(m) # LAYER 1: Batalhões # Column with category col_categories1 = 'id_batalhao_desc' # Set palette palette_polygon = 'Paired' # Get list of unique values categories = set(gdf[col_categories1]) categories = list(categories) categories.sort() # See the palette 
chosed pal = sns.color_palette(palette_polygon, n_colors=len(categories)) # Set dictionary color_polygon = dict(zip(categories, pal.as_hex())) color_polygon['1º Batalhão'] = '#ff4d4d' #color_polygon['2º Batalhão'] = '#4da6ff' color_polygon['3º Batalhão'] = '#00b300' color_polygon['4º Batalhão'] = '#ffcc99' color_polygon1 = color_polygon # Geo folium.GeoJson( gdf, name='Batalhões', smooth_factor=1.0, zoom_on_click=False, embed=False, show=False, style_function=lambda x: { 'fillColor': color_polygon1[x['properties'][col_categories1]], 'color': color_polygon1[x['properties'][col_categories1]], 'weight': 1, 'fillOpacity': 0.3, }, highlight_function=lambda x: { 'weight': 3, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome', 'id_batalhao_desc'], aliases=['Munícipio', 'Batalhão'], sticky=True, opacity=0.9, direction='right', ), popup=folium.GeoJsonPopup( ['popup_batalhao'], parse_html=False, max_width='400', show=False, labels=False, sticky=True, ) ).add_to(m) # LAYER 2: Companhia # Column with category col_categories2 = 'id_cia_desc' # Set palette palette_polygon = 'Paired' # Get list of unique values categories = set(gdf[col_categories2]) categories = list(categories) categories.sort() # See the palette chosed pal = sns.color_palette(palette_polygon, n_colors=len(categories)) # Set dictionary color_polygon = dict(zip(categories, pal.as_hex())) color_polygon2 = color_polygon # Geo folium.GeoJson( gdf, name='Companhias', smooth_factor=1.0, zoom_on_click=False, embed=False, show=False, style_function=lambda x: { 'fillColor': color_polygon2[x['properties'][col_categories2]], 'color': color_polygon2[x['properties'][col_categories2]], 'weight': 1, 'fillOpacity': 0.3, }, highlight_function=lambda x: { 'weight': 3, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome', 'id_cia_desc'], aliases=['Munícipio', 'Companhia'], sticky=True, opacity=0.9, direction='right', ), popup=folium.GeoJsonPopup( 
['popup_cia'], parse_html=False, max_width='400', show=False, labels=False, sticky=True, ) ).add_to(m) # LAYER 3: Pelotão # Column with category col_categories3 = 'id_pelotao_desc' # Set palette palette_polygon = 'Paired' # Get list of unique values categories = set(gdf[col_categories3]) categories = list(categories) categories.sort() # See the palette chosed pal = sns.color_palette(palette_polygon, n_colors=len(categories)) # Set dictionary color_polygon = dict(zip(categories, pal.as_hex())) color_polygon3 = color_polygon # Geo folium.GeoJson( gdf, name='Pelotão', smooth_factor=1.0, zoom_on_click=False, embed=False, show=True, style_function=lambda x: { 'fillColor': color_polygon3[x['properties'][col_categories3]], 'color': color_polygon3[x['properties'][col_categories3]], 'weight': 1, 'fillOpacity': 0.3, }, highlight_function=lambda x: { 'weight': 3, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome', 'id_pelotao_desc'], aliases=['Munícipio', 'Pelotão'], sticky=True, opacity=0.9, direction='right', ), popup=folium.GeoJsonPopup( ['popup_pelotao'], parse_html=False, max_width='400', show=False, labels=False, sticky=True, ) ).add_to(m) # Plugins m.fit_bounds(m.get_bounds()) plugins.Fullscreen( position='topleft', title='Clique para Maximizar', title_cancel='Mininizar', ).add_to(m) folium.LayerControl('topright', collapsed=False).add_to(m) return m # Map without Bounds bbox = { 'max_lat': 0, 'max_lon': 0, 'min_lat': 0, 'min_lon': 0, } m = map_bomb(os.path.join('data', 'shps', 'sp_cpamb.geojson'), bbox) # Map with Bounds bbox = { 'max_lat': m.get_bounds()[1][0], 'min_lat': m.get_bounds()[0][0], 'max_lon': m.get_bounds()[1][1], 'min_lon': m.get_bounds()[0][1], } m = map_bomb(os.path.join('data', 'shps', 'sp_cpamb.geojson'), bbox) # Results print(bbox) m ``` <br> # Save Map ``` os.makedirs('maps', exist_ok=True) m.save(os.path.join('maps', 'cpamb_map.html')) m.save(os.path.join('..', '..', '..', 'case_django', 'divadmin', 
'templates', 'cpamb_map.html')) ```
github_jupyter