code
stringlengths
2.5k
150k
kind
stringclasses
1 value
# Exploratory Data Analysis ``` # Import libraries %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn import neighbors from matplotlib.colors import ListedColormap ``` ## Description of the Data ### Data Cleaning Process ### Data Structure ``` # Import the dataframe playlist_df = pd.read_csv("data/playlists.csv", index_col = 0) songs_df = pd.read_csv("data/songs_100000_feat_cleaned.csv", index_col = 0) combined_df = pd.read_csv("data/subset100playlists.csv", index_col = 0) combined_df.dtypes ``` ## Exploratory Data Analysis ### What is the composition of playlists in the cleaned dataset? First, we investigate the high level composition of the playlists in our cleaned dataset. How many playlists and songs do we have? What is the distribution of the number of songs per playlist? Are certain songs used significantly more than others? ``` n_tracks = playlist_df.track_uri.nunique() n_playlists = playlist_df.pid.nunique() summary_stats = pd.DataFrame([{'Statistic': 'Unique Tracks', 'Value': n_tracks}, {'Statistic': 'Unique Playlists', 'Value': n_playlists}]) track_uri_stats = playlist_df.groupby('track_uri')['pid'].count().sort_values(ascending = False).cumsum() weights = 1/track_uri_stats.max() weighted_track_uri_stats = track_uri_stats * weights cum_px = weighted_track_uri_stats[weighted_track_uri_stats.gt(0.9)].index[0] cum_px = round(weighted_track_uri_stats.index.get_loc(cum_px)/n_tracks, 2)*100 pid_stats = playlist_df.groupby('pid')['track_uri'].count() fig, ax = plt.subplots(1,3, figsize = (18,5)) ax[0].set_title('Magnitude of the Data', fontsize=15) ax[0].set_ylabel('Frequency', fontsize=15) summary_stats.plot.bar(x = 'Statistic', y='Value', color=['k','g'], rot=0, legend = False, ax = ax[0], fontsize=15) ax[1].set_title('Number of Tracks per Playlist', fontsize=15) ax[1].hist(pid_stats, color = 'k', alpha = 0.9) 
ax[1].axvline(pid_stats.mean(), ls = '--', label = 'Mean # of songs') ax[1].set_ylabel('Number of Tracks', fontsize=15) ax[1].set_xlabel('Playlists', fontsize=15) ax[1].legend() ax[2].set_title(f'{cum_px} % of the tracks are on 90% of the playlists', fontsize=15) ax[2].plot(np.arange(n_tracks), weighted_track_uri_stats, c = 'k', label='?') ax[2].set_ylabel('Proportion of Playlists', fontsize=15) ax[2].set_xlabel('Tracks', fontsize=15) ax[2].axhline(0.9, ls = '--', label = '90% of Playlists') ax[2].legend() plt.suptitle('High-Level Playlist Features\n\n', fontsize=20) plt.show() ``` The plots above show immediately the impact of our playlist selection criteria on the dataset that will be used for training, testing, and validating our models. * The number of songs per playlist is not normally distributed, and we only have playlists that include more than 100 songs. * 18% of songs are on 90% of playlists. So we can expect overlap of songs between playlists. ### What do songs sound like? ``` song_feature_cols = ['acousticness', 'danceability', 'duration_ms', 'energy', 'instrumentalness', 'key', 'liveness', 'loudness', 'speechiness', 'tempo', 'time_signature', 'valence'] song_subset_df = songs_df[song_feature_cols] columns = song_subset_df.columns fig = song_subset_df.hist(figsize=(21,14), column = columns) plt.suptitle('How does the distribution of song features look?', fontsize=40) [x.title.set_size(32) for x in fig.ravel()] plt.show() fig = song_subset_df.hist(figsize=(21,14), column=['acousticness','loudness','danceability','energy']) plt.suptitle('How does the distribution of song features look?', fontsize=40) [x.title.set_size(32) for x in fig.ravel()] plt.show() scaler = MinMaxScaler().fit(song_subset_df) scaled_songs_df = pd.DataFrame(scaler.transform(song_subset_df), columns = columns) plt.figure(figsize=(8,5)) scaled_songs_df.boxplot(figsize=(21,7)) plt.xticks(rotation=90) plt.title('Is there significant variation in song features?', fontsize=20) plt.show() 
corr_mat = song_subset_df.corr() mask = np.zeros_like(corr_mat) mask[np.triu_indices_from(mask)] = True plt.figure(figsize=(15,10)) plt.title('Track Feature Correlations', fontsize=35) sns.heatmap(corr_mat, cmap='Spectral', annot=True, mask=mask) plt.xticks(fontsize=20) plt.yticks(fontsize=20) ``` ### Do playlists tend to have very similar songs, or very different songs? We wanted to know whether playlists in our training set tend to be built from songs that are similar to each other or songs that are different from each other. If the same songs appear in many playlists, we could perhaps identify these and recommend them. Alternatively, if playlists tend to be composed of songs that all have similar features, we could use information about the distribution of feature scores in a stub playlist to recommend additional songs for the playlist. ``` len(np.unique(playlist_df.pid)) playlists_per_track = playlist_df.groupby('track_uri').count() playlists_per_track = playlists_per_track.sort_values('pid', ascending=False) l = [1,2,3,4,5,10,20,50,100,500] ll = [] for i in l: ll.append(len(playlists_per_track[playlists_per_track['pid'] >= i])) for i in range(1, len(l)): l[i] = 'Songs in ' + str(l[i]) + '+ playlists ' l[0] = 'Total Songs (one or more)' barp = sns.barplot(ll,l, orient='h') plt.title('Number of Playlists that Tracks Appear In', fontsize=15) subset_cols = ['pid', 'acousticness', 'danceability', 'duration_ms', 'count', 'energy', 'instrumentalness', 'key', 'liveness', 'loudness', 'speechiness', 'tempo', 'time_signature', 'valence'] subset_df = combined_df[subset_cols] def process_playlists(df, fun, along): index = df.eval(along).unique() columns = df.columns[df.columns != along] n_rows = len(index) n_columns = len(columns) # Construct output df output = pd.DataFrame(np.zeros((n_rows, n_columns)), index = index, columns = columns) # Loop through playslists and apply function for playlist in index: subset = df.loc[df['pid'] == playlist, columns] 
output.loc[playlist,:] = fun(subset) return output def gini(x): # Mean absolute difference mad = x.mad() mean = x.mean() # Relative mean absolute difference rmad = abs(mad/mean) # Gini coefficient g = 0.5 * rmad return g playlist_ginis = process_playlists(subset_df, gini, 'pid') systemwide_playlist_ginis = gini(subset_df.groupby(['pid']).mean()) systemwide_playlist_ginis ax = playlist_ginis.mean().plot.barh() ax.set_ylabel('Track Feature') ax.set_xlabel('Gini Coefficient (0 = Perfect Equality)') plt.title('Do playlists tend to have songs with similar features?') plt.show() fig, ax = plt.subplots(figsize =(5, 8)) sns.violinplot(ax = ax, data = playlist_ginis, orient = 'h' ) ax.set_ylabel('Track Feature', fontsize=15) ax.set_xlabel('Gini Coefficient (0 = Perfect Equality)', fontsize=15) plt.title('Do playlists tend to have songs with similar features?', fontsize=15) plt.show() song_feature_cols = ['acousticness', 'danceability', 'duration_ms', 'count', 'energy', 'instrumentalness', 'key', 'liveness', 'loudness', 'speechiness', 'tempo', 'time_signature', 'valence'] fig, ax = plt.subplots(figsize = (9,4.5)) ax = sns.scatterplot(playlist_ginis.mean(), systemwide_playlist_ginis, s = 75, hue = song_feature_cols) ax.set_ylabel('Similarity Across Playlists (0 = Perfect Equality)') ax.set_xlabel('Similarity Within Playlists (0 = Perfect Equality)') ax.legend(title = 'Track Feature') plt.title('Do track features differentiate playlists?', fontsize=14) plt.show() ``` ### Do natural clusters of songs emerge as playlists? It seems from our exploratory analysis so far, that using song feature based methods to recommend songs that are "similar" to those already in a playlist is exceedingly difficult. To motivate this visually, we show a scatterplot of two arbitrary feature values, for songs in a few playlists. 
``` combined_df = combined_df[combined_df.pos < 50] plt.figure(figsize=(8,6)) for playlist_id, mark in zip(np.unique(combined_df.pid)[30:34], ['4', '3', '2', '1']): plt.scatter(x=combined_df[combined_df.pid == playlist_id].valence, y=combined_df[combined_df.pid == playlist_id].danceability, marker=mark, label='playlist #'+mark) plt.legend() plt.xlabel('Song - Valence', fontsize=13) plt.ylabel('Song - Danceability', fontsize=13) plt.title('Playlists do not Separate in Song feature-space', fontsize=15) ``` Visualising with decision boundaries: ``` combined_df2 = combined_df[['danceability','valence','pid']] combined_df2 = combined_df2[(combined_df2.pid == np.unique(combined_df.pid)[30]) | (combined_df2.pid == np.unique(combined_df.pid)[31]) | (combined_df2.pid == np.unique(combined_df.pid)[32]) | (combined_df2.pid == np.unique(combined_df.pid)[33])] combined_df2.pid = combined_df2.pid.replace(np.unique(combined_df.pid)[30], 1) combined_df2.pid = combined_df2.pid.replace(np.unique(combined_df.pid)[31], 2) combined_df2.pid = combined_df2.pid.replace(np.unique(combined_df.pid)[32], 3) combined_df2.pid = combined_df2.pid.replace(np.unique(combined_df.pid)[33], 4) plt.figure(figsize=(10,8)) cmap_light = ListedColormap(['#5fb7f4', '#ffbf4e', '#6ce06c', '#f66768']) clf = neighbors.KNeighborsClassifier(1) clf.fit(combined_df2[['valence','danceability']], combined_df2['pid']) xx, yy = np.meshgrid(np.arange(-0.01, 1.01, 0.001), np.arange(0.19, 1.01, 0.001)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=cmap_light) for playlist_id, mark in zip(np.unique(combined_df.pid)[30:34], ['4', '3', '2', '1']): plt.scatter(x=combined_df[combined_df.pid == playlist_id].valence, y=combined_df[combined_df.pid == playlist_id].danceability, marker=mark, label='playlist #'+mark) plt.legend() plt.xlabel('Track valence', fontsize=20) plt.ylabel('Track danceability', fontsize=20) plt.title('Separating Playlists in valence-danceability Feature 
Space', fontsize=23) ``` Note that most of the points are in the center, where there is extremely high variance in colors. It seems like regios on the plot edges are acceptable, but this is actually due to dearth of data (large region, defined by single point), and is thus not likely to have good accuracy
github_jupyter
``` import numpy as np %matplotlib inline import matplotlib.pyplot as plt import pandas as pd ``` ## Exercise 1 - load the dataset: `../data/international-airline-passengers.csv` - inspect it using the `.info()` and `.head()` commands - use the function `pd.to_datetime()` to change the column type of 'Month' to a datatime type - set the index of df to be a datetime index using the column 'Month' and the `df.set_index()` method - choose the appropriate plot and display the data - choose appropriate scale - label the axes ``` # - load the dataset: ../data/international-airline-passengers.csv df = pd.read_csv('../data/international-airline-passengers.csv') # - inspect it using the .info() and .head() commands df.info() df.head() # - use the function to_datetime() to change the column type of 'Month' to a datatime type # - set the index of df to be a datetime index using the column 'Month' and tthe set_index() method df['Month'] = pd.to_datetime(df['Month']) df = df.set_index('Month') df.head() # - choose the appropriate plot and display the data # - choose appropriate scale # - label the axes df.plot() ``` ## Exercise 2 - load the dataset: `../data/weight-height.csv` - inspect it - plot it using a scatter plot with Weight as a function of Height - plot the male and female populations with 2 different colors on a new scatter plot - remember to label the axes ``` # - load the dataset: ../data/weight-height.csv # - inspect it df = pd.read_csv('../data/weight-height.csv') df.head() df.info() df.describe() df['Gender'].value_counts() # - plot it using a scatter plot with Weight as a function of Height _ = df.plot(kind='scatter', x='Height', y='Weight') # - plot the male and female populations with 2 different colors on a new scatter plot # - remember to label the axes # this can be done in several ways, showing 2 here: males = df[df['Gender'] == 'Male'] females = df.query('Gender == "Female"') fig, ax = plt.subplots() males.plot(kind='scatter', x='Height', y='Weight', 
ax=ax, color='blue', alpha=0.3, title='Male & Female Populations') females.plot(kind='scatter', x='Height', y='Weight', ax=ax, color='red', alpha=0.3) df['Gendercolor'] = df['Gender'].map({'Male': 'blue', 'Female': 'red'}) df.head() df.plot(kind='scatter', x='Height', y='Weight', c=df['Gendercolor'], alpha=0.3, title='Male & Female Populations') fig, ax = plt.subplots() ax.plot(males['Height'], males['Weight'], 'ob', females['Height'], females['Weight'], 'or', alpha=0.3) plt.xlabel('Height') plt.ylabel('Weight') plt.title('Male & Female Populations') ``` ## Exercise 3 - plot the histogram of the heights for males and for females on the same plot - use alpha to control transparency in the plot comand - plot a vertical line at the mean of each population using `plt.axvline()` ``` males['Height'].plot(kind='hist', bins=50, range=(50, 80), alpha=0.3, color='blue') females['Height'].plot(kind='hist', bins=50, range=(50, 80), alpha=0.3, color='red') plt.title('Height distribution') plt.legend(["Males", "Females"]) plt.xlabel("Heigth (in)") plt.axvline(males['Height'].mean(), color='blue', linewidth=2) plt.axvline(females['Height'].mean(), color='red', linewidth=2) males['Height'].plot(kind='hist', bins=200, range=(50, 80), alpha=0.3, color='blue', cumulative=True, normed=True) females['Height'].plot(kind='hist', bins=200, range=(50, 80), alpha=0.3, color='red', cumulative=True, normed=True) plt.title('Height distribution') plt.legend(["Males", "Females"]) plt.xlabel("Heigth (in)") plt.axhline(0.8) plt.axhline(0.5) plt.axhline(0.2) ``` ## Exercise 4 - plot the weights of the males and females using a box plot - which one is easier to read? 
- (remember to put in titles, axes and legends) ``` dfpvt = df.pivot(columns = 'Gender', values = 'Weight') dfpvt.head() dfpvt.info() dfpvt.plot(kind='box') plt.title('Weight Box Plot') plt.ylabel("Weight (lbs)") ``` ## Exercise 5 - load the dataset: `../data/titanic-train.csv` - learn about scattermatrix here: http://pandas.pydata.org/pandas-docs/stable/visualization.html - display the data using a scattermatrix ``` df = pd.read_csv('../data/titanic-train.csv') df.head() from pandas.tools.plotting import scatter_matrix _ = scatter_matrix(df.drop('PassengerId', axis=1), figsize=(10, 10)) ```
github_jupyter
``` from datetime import datetime import mysql.connector from sys import exit HOST = "localhost" USER = "root" PASSWORD = "root" DATABASE = "hotel" database = mysql.connector.connect( host="localhost", user="noel", password="root", auth_plugin='mysql_native_password' ) mycursor = database.cursor() !pip uninstall mysql-connector def get_database(): try: database = mysql.connector.connect( host="localhost", user="noel", password="root", database=DATABASE ) cursor = database.cursor(dictionary=True) return database, cursor except mysql.connector.Error: return None, None SCREEN_WIDTH = 50 def print_center(s): x_pos = SCREEN_WIDTH // 2 print((" " * x_pos), s) def print_bar(): print("=" * 100) def print_bar_ln(): print_bar() print() def input_center(s): x_pos = SCREEN_WIDTH // 2 print((" " * x_pos), s, end='') return input() ROOMS_TABLE_NAME = "rooms" class Room: def __init__(self): self.room_id = 0 self.room_no = 0 self.floor = "" self.beds = "" self.available = "" def create(self, room_id, room_no, floor, beds, available): self.room_id = room_id self.room_no = room_no self.floor = floor self.beds = beds self.available = available return self def create_from_record(self, record): self.room_id = record['id'] self.room_no = record['room_no'] self.floor = record['floor'] self.beds = record['beds'] self.available = record['available'] return self def print_all(self): print(str(self.room_id).ljust(3), str(self.room_no).ljust(15), self.floor.ljust(15), str(self.beds).ljust(15), str(self.available).ljust(15)) def print_full(self): print_bar() print("Record #", self.room_id) print("Room No: ", self.room_no) print("Floor: ", self.floor) print("Beds: ", self.beds) print("available: ", self.available) print_bar() def create_room(): room_id = None room_no = int(input("Enter the room no: ")) floor = input("Enter the floor (Ex. 
ground, first etc.): ") beds = int(input("Enter number of beds: ")) available = True return Room().create(room_id, room_no, floor, beds, available) def print_room_header(): print("="*100) print("id".ljust(3), "room no".ljust(15), "floor".ljust(15), "beds".ljust(15), "available".ljust(15) ) print("="*100) def create_rooms_table(database): cursor = database.cursor() cursor.execute("DROP table if exists {0}".format(ROOMS_TABLE_NAME)) cursor.execute("create table {0} (" "id int primary key auto_increment," "room_no int," "floor varchar(50)," "beds int," "available bool)".format(ROOMS_TABLE_NAME)) def add_room(database, cursor): room = create_room() query = "insert into {0}(room_no,floor,beds,available) values({1},'{2}',{3},{4})".\ format(ROOMS_TABLE_NAME, room.room_no, room.floor, room.beds, room.available) try: cursor.execute(query) database.commit() except mysql.connector.Error as err: create_rooms_table(database) cursor.execute(query) database.commit() print("Operation Successful") def show_room_record(cursor, query): try: cursor.execute(query) records = cursor.fetchall() if cursor.rowcount == 0: print("No Matching Records") return record = records[0] room = Room().create_from_record(record) room.print_full() return room except mysql.connector.Error as err: print(err) def show_room_records(cursor, query): try: cursor.execute(query) records = cursor.fetchall() if cursor.rowcount == 0: print("No Matching Records") return print_room_header() for record in records: room = Room().create_from_record(record) room.print_all() return records except mysql.connector.Error as err: print(err) def get_and_print_room_by_no(cursor): room_no = int(input("Enter the room no: ")) query = "select * from {0} where room_no={1}".format(ROOMS_TABLE_NAME, room_no) room = show_room_record(cursor, query) return room def edit_room_by_room_no(database, cursor): room = get_and_print_room_by_no(cursor) if room is not None: query = "update {0} set".format(ROOMS_TABLE_NAME) print("Input new values 
(leave blank to keep previous value)") room_no = input("Enter new room no: ") if len(room_no) > 0: query += " room_no={0},".format(room_no) floor = input("Enter new floor: ") if len(floor) > 0: query += " floor='{0}',".format(floor) beds = input("Enter number of beds: ") if len(beds) > 0: query += " beds={0},".format(beds) query = query[0:-1] + " where id={0}".format(room.room_id) confirm = input("Confirm Update (Y/N): ").lower() if confirm == 'y': cursor.execute(query) database.commit() print("Operation Successful") else: print("Operation Cancelled") def change_room_status(database, cursor, room_id, available): query = "update {0} set available={1} where id={2}".format(ROOMS_TABLE_NAME, available, room_id) cursor.execute(query) database.commit() def delete_room_by_room_no(database, cursor): room = get_and_print_room_by_no(cursor) if room is not None: confirm = input("Confirm Deletion (Y/N): ").lower() if confirm == 'y': query = "delete from {0} where id={1}".format(ROOMS_TABLE_NAME, room.room_id) cursor.execute(query) database.commit() print("Operation Successful") else: print("Operation Cancelled") def room_menu(database, cursor): while True: print() print("============================") print("==========Room Menu=========") print("============================") print() print("1. Add new room") print("2. Get room details by room no") print("3. Find available rooms by number of beds") print("4. Edit Room details") print("5. Delete room") print("6. View all rooms") print("0. 
Go Back") choice = int(input("Enter your choice: ")) if choice == 1: add_room(database, cursor) elif choice == 2: room_no = int(input("Enter the room no: ")) query = "select * from {0} where room_no={1}".format(ROOMS_TABLE_NAME, room_no) show_room_records(cursor, query) elif choice == 3: beds = int(input("Enter number of beds required: ")) query = "select * from {0} where beds={1}".format(ROOMS_TABLE_NAME, beds) show_room_records(cursor, query) elif choice == 4: edit_room_by_room_no(database, cursor) elif choice == 5: delete_room_by_room_no(database, cursor) elif choice == 6: query = "select * from {0}".format(ROOMS_TABLE_NAME) show_room_records(cursor, query) elif choice == 0: break else: print("Invalid choice (Press 0 to go back)") CUSTOMER_TABLE_NAME = "customers" class Customer: def __init__(self): self.customer_id = 0 self.name = "" self.address = "" self.phone = "" self.room_no = "0" self.entry_date = "" self.checkout_date = "" def create(self, customer_id, name, address, phone, room_no, entry_date, checkout_date): self.customer_id = customer_id self.name = name self.address = address self.phone = phone self.room_no = room_no self.entry_date = entry_date self.checkout_date = checkout_date return self def create_from_record(self, record): self.customer_id = record['id'] self.name = record['name'] self.address = record['address'] self.phone = record['phone'] self.room_no = record['room_no'] self.entry_date = record['entry'] self.checkout_date = record['checkout'] return self def print_all(self): print(str(self.customer_id).ljust(3), self.name[0:15].ljust(15), self.address[0:15].ljust(15), self.phone.ljust(15), str(self.room_no).ljust(10), self.entry_date.strftime("%d-%b-%y").ljust(15), (self.checkout_date.strftime("%d %b %y") if self.checkout_date is not None else "None").ljust(15)) def print_full(self): print_bar() print("Customer #", self.customer_id) print("Name: ", self.name) print("Address: ", self.address) print("Phone: ", self.phone) print("Checked in to 
room #", self.room_no, " on ", self.entry_date.strftime("%d %b %y")) print("Checkout: ", self.checkout_date.strftime("%d %b %y") if self.checkout_date is not None else None) print_bar() def create_customer(room_no): customer_id = None name = input("Enter the name: ") address = input("Enter the address: ") phone = input("Enter the phone: ") entry_date = datetime.now() return Customer().create(customer_id, name, address, phone, room_no, entry_date, None) def print_customer_header(): print("="*100) print("id".ljust(3), "name".ljust(15), "address".ljust(15), "phone".ljust(15), "room no".ljust(10), "entry".ljust(15), "check out".ljust(15)) print("="*100) def create_customer_table(database): cursor = database.cursor() cursor.execute("DROP table if exists {0}".format(CUSTOMER_TABLE_NAME)) cursor.execute("create table {0} (" "id int primary key auto_increment," "name varchar(20)," "address varchar(50)," "phone varchar(10)," "room_no int," "entry datetime," "checkout datetime)".format(CUSTOMER_TABLE_NAME)) NUMBER_OF_RECORDS_PER_PAGE = 10 def add_customer(database, cursor): room = get_and_print_room_by_no(cursor) if room is not None: customer = create_customer(room.room_no) confirm = input("Complete the operation? (Y/N) ").lower() if confirm == 'y': query = "insert into {0}(name, address, phone, room_no, entry) values('{1}','{2}','{3}',{4},'{5}')". 
\ format(CUSTOMER_TABLE_NAME, customer.name, customer.address, customer.phone, customer.room_no, customer.entry_date.strftime("%Y-%m-%d %H:%M:%S")) try: cursor.execute(query) database.commit() except mysql.connector.Error: create_customer_table(database) cursor.execute(query) database.commit() change_room_status(database, cursor, room.room_id, False) print("Operation Successful") else: print("Operation Canceled") def show_customer_records(cursor, query): try: cursor.execute(query) records = cursor.fetchall() if cursor.rowcount == 0: print("No Matching Records") return print_customer_header() for record in records: customer = Customer().create_from_record(record) customer.print_all() return records except mysql.connector.Error as err: print(err) def show_customer_record(cursor, query): try: cursor.execute(query) records = cursor.fetchall() if cursor.rowcount == 0: print("No Matching Records") return record = records[0] customer = Customer().create_from_record(record) customer.print_full() return customer except mysql.connector.Error as err: print(err) def get_and_print_customer_by_room_no(cursor): room = get_and_print_room_by_no(cursor) if room is not None: query = "select * from {0} where room_no={1} order by id desc limit 1".format(CUSTOMER_TABLE_NAME, room.room_no) customer = show_customer_record(cursor, query) return room, customer return None, None def check_out(database, cursor): room, customer = get_and_print_customer_by_room_no(cursor) if room is not None and customer is not None: confirm = input("Confirm checkout? 
(Y/N): ") if confirm == 'y': checkout = datetime.now() query = "update {0} set checkout='{1}' where id={2}".\ format(CUSTOMER_TABLE_NAME, checkout.strftime("%Y-%m-%d %H:%M:%S"), customer.customer_id) cursor.execute(query) database.commit() change_room_status(database, cursor,room.room_id, True) print("Operation Successful") else: print("Operation Cancelled") def edit_customer_by_room_no(database, cursor): room, customer = get_and_print_customer_by_room_no(cursor) if room is not None and customer is not None: query = "update {0} set".format(CUSTOMER_TABLE_NAME) print("Input new values (leave blank to keep previous value)") name = input("Enter new name: ") if len(name) > 0: query += " name='{0}',".format(name) address = input("Enter new address: ") if len(address) > 0: query += " address='{0}',".format(address) phone = input("Enter number of phone: ") if len(phone) > 0: query += " phone='{0}',".format(phone) query = query[0:-1] + " where id={0}".format(customer.customer_id) confirm = input("Confirm Update (Y/N): ").lower() if confirm == 'y': cursor.execute(query) database.commit() print("Operation Successful") else: print("Operation Cancelled") def delete_customer_by_room_no(database, cursor): room, customer = get_and_print_customer_by_room_no(cursor) if room is not None and customer is not None: confirm = input("Confirm Deletion (Y/N): ").lower() if confirm == 'y': query = "delete from {0} where id={1}".format(CUSTOMER_TABLE_NAME, customer.customer_id) cursor.execute(query) database.commit() print("Operation Successful") else: print("Operation Cancelled") def customer_menu(database, cursor): while True: print() print("==============================") print("==========Customer Menu=========") print("==============================") print() print("1. New Customer") print("2. Show Customer Details by name") print("3. Show customer details by customer_id") print("4. Show customer details by address") print("5. Show customer details by phone number") print("6. 
Show customer details by room no") print("7. Show customer details by check in date") print("8. Show current list of customers") print("9. Check out") print("10. Edit customer Details") print("11. Delete Customer record") print("12. View all customers") print("0. Go Back") choice = int(input("Enter your choice: ")) if choice == 1: add_customer(database, cursor) elif choice == 2: name = input("Enter the name: ").lower() query = "select * from {0} where name like '%{1}%'".format(CUSTOMER_TABLE_NAME, name) show_customer_records(cursor, query) elif choice == 3: customer_id = input("Enter the customer id: ") query = "select * from {0} where id = {1}".format(CUSTOMER_TABLE_NAME, customer_id) show_customer_record(cursor, query) elif choice == 4: address = input("Enter the address: ").lower() query = "select * from {0} where address like '%{1}%'".format(CUSTOMER_TABLE_NAME, address) show_customer_records(cursor, query) elif choice == 5: phone = input("Enter the phone number: ") query = "select * from {0} where phone like '%{1}%'".format(CUSTOMER_TABLE_NAME, phone) show_customer_records(cursor, query) elif choice == 6: room_no = input("Enter the room_no: ") query = "select * from {0} where room_no = {1}".format(CUSTOMER_TABLE_NAME, room_no) show_customer_record(cursor, query) elif choice == 7: print("Enter the check in date: ") day = int(input("day of month: ")) month = int(input("month: ")) year = int(input("year: ")) query = "select * from {0} where date(entry) = '{1}-{2}-{3}'".format(CUSTOMER_TABLE_NAME, year, month, day) show_customer_records(cursor, query) elif choice == 8: query = "select * from {0} where checkout is null".format(CUSTOMER_TABLE_NAME) show_customer_records(cursor, query) elif choice == 9: check_out(database, cursor) elif choice == 10: edit_customer_by_room_no(database, cursor) elif choice == 11: delete_customer_by_room_no(database, cursor) elif choice == 12: query = "select * from {0}".format(CUSTOMER_TABLE_NAME) show_customer_records(cursor, query) 
elif choice == 0: break else: print("Invalid choice (Press 0 to go back)") if __name__ == '__main__': database, cursor = get_database() if database is None: print("The Database does not exist or not accessible.") exit(1) while True: print() print_center("==============================") print_center("=====xyz Hotels=====") print_center("==============================") print_center("1. Manage Rooms") print_center("2. Manage Customers") print_center("0. Exit") print() choice = int(input_center("Enter your choice: ")) if choice == 1: room_menu(database, cursor) elif choice == 2: customer_menu(database, cursor) elif choice == 0: break else: print("Invalid choice (Press 0 to exit)") print_center("GoodBye") ```
github_jupyter
``` import sympy as sym from sympy.polys.multivariate_resultants import MacaulayResultant sym.init_printing() ``` Macaulay Resultant ------------------ The Macauly resultant is a multivariate resultant. It is used for calculating the resultant of $n$ polynomials in $n$ variables. The Macaulay resultant is calculated as the determinant of two matrices, $$R = \frac{\text{det}(A)}{\text{det}(M)}.$$ Matrix $A$ ----------- There are a number of steps needed to construct matrix $A$. Let us consider an example from https://dl.acm.org/citation.cfm?id=550525 to show the construction. ``` x, y, z = sym.symbols('x, y, z') a_1_1, a_1_2, a_1_3, a_2_2, a_2_3, a_3_3 = sym.symbols('a_1_1, a_1_2, a_1_3, a_2_2, a_2_3, a_3_3') b_1_1, b_1_2, b_1_3, b_2_2, b_2_3, b_3_3 = sym.symbols('b_1_1, b_1_2, b_1_3, b_2_2, b_2_3, b_3_3') c_1, c_2, c_3 = sym.symbols('c_1, c_2, c_3') variables = [x, y, z] f_1 = a_1_1 * x ** 2 + a_1_2 * x * y + a_1_3 * x * z + a_2_2 * y ** 2 + a_2_3 * y * z + a_3_3 * z ** 2 f_2 = b_1_1 * x ** 2 + b_1_2 * x * y + b_1_3 * x * z + b_2_2 * y ** 2 + b_2_3 * y * z + b_3_3 * z ** 2 f_3 = c_1 * x + c_2 * y + c_3 * z polynomials = [f_1, f_2, f_3] mac = MacaulayResultant(polynomials, variables) ``` **Step 1** Calculated $d_i$ for $i \in n$. ``` mac.degrees ``` **Step 2.** Get $d_M$. ``` mac.degree_m ``` **Step 3.** All monomials of degree $d_M$ and size of set. ``` mac.get_monomials_set() mac.monomial_set mac.monomials_size ``` These are the columns of matrix $A$. **Step 4** Get rows and fill matrix. ``` mac.get_row_coefficients() ``` Each list is being multiplied by polynomials $f_1$, $f_2$ and $f_3$ equivalently. Then we fill the matrix based on the coefficient of the monomials in the columns. ``` matrix = mac.get_matrix() matrix ``` Matrix $M$ ----------- Columns that are non reduced are kept. The rows which contain one if the $a_i$s is dropoed. $a_i$s are the coefficients of $x_i ^ {d_i}$. 
``` mac.get_submatrix(matrix) ``` Second example ----------------- This is from: http://isc.tamu.edu/resources/preprints/1996/1996-02.pdf ``` x, y, z = sym.symbols('x, y, z') a_0, a_1, a_2 = sym.symbols('a_0, a_1, a_2') b_0, b_1, b_2 = sym.symbols('b_0, b_1, b_2') c_0, c_1, c_2,c_3, c_4 = sym.symbols('c_0, c_1, c_2, c_3, c_4') f = a_0 * y - a_1 * x + a_2 * z g = b_1 * x ** 2 + b_0 * y ** 2 - b_2 * z ** 2 h = c_0 * y - c_1 * x ** 3 + c_2 * x ** 2 * z - c_3 * x * z ** 2 + c_4 * z ** 3 polynomials = [f, g, h] mac = MacaulayResultant(polynomials, variables=[x, y, z]) mac.degrees mac.degree_m mac.get_monomials_set() mac.get_size() mac.monomial_set mac.get_row_coefficients() matrix = mac.get_matrix() matrix matrix.shape mac.get_submatrix(mac.get_matrix()) ```
github_jupyter
# Visualizing spatial information - California Housing

This demo shows a simple workflow when working with geospatial data:

* Obtaining a dataset which includes geospatial references.
* Obtaining the desired geometries (boundaries, etc.)
* Visualisation

In this example we will make a simple **proportional symbols map** using the `California Housing` dataset in the `sklearn` package.

```
import numpy as np
import pandas as pd
import geopandas as gpd

from lets_plot import *
LetsPlot.setup_html()
```

## Prepare the dataset

```
from sklearn.datasets import fetch_california_housing

california_housing_bunch = fetch_california_housing()
data = pd.DataFrame(california_housing_bunch.data, columns=california_housing_bunch.feature_names)

# Add $-value field to the dataframe.
# dataset.target: numpy array of shape (20640,)
# Each value corresponds to the average house value in units of 100,000.
data['Value($)'] = california_housing_bunch.target * 100000
data.head()

# Draw a random sample from the data set.
data = data.sample(n=1000)
```

## Static map

Let's create a static map using regular `ggplot2` geometries.

Various shape files related to the state of California are available at the https://data.ca.gov web site. For the purpose of this demo the California State Boundary zip was downloaded from https://data.ca.gov/dataset/ca-geographic-boundaries and unpacked to the `ca-state-boundary` subdirectory.

### Use `geopandas` to read a shape file to GeoDataFrame

```
#CA = gpd.read_file("./ca-state-boundary/CA_State_TIGER2016.shp")
from lets_plot.geo_data import *
CA = geocode_states('CA').scope('US').inc_res(2).get_boundaries()
CA.head()
```

Keeping in mind that our target is the housing value, fill the choropleth over the state contours using the `geom_map()` function.

### Make a plot out of polygon and points

The color of the points will reflect the house age and the size of the points will reflect the value of the house.
``` # The plot base p = ggplot() + scale_color_gradient(name='House Age', low='red', high='green') # The points layer points = geom_point(aes(x='Longitude', y='Latitude', size='Value($)', color='HouseAge'), data=data, alpha=0.8) # The map p + geom_polygon(data=CA, fill='#F8F4F0', color='#B71234')\ + points\ + theme_classic() + theme(axis='blank')\ + ggsize(600, 500) ``` ## Interactive map The `geom_livemap()` function creates an interactive base-map super-layer to which other geometry layers are added. ### Configuring map tiles By default *Lets-PLot* offers high quality vector map tiles but also can fetch raster tiles from a 3d-party Z-X-Y [tile servers](https://wiki.openstreetmap.org/wiki/Tile_servers). For the sake of the demo lets use *CARTO Antique* tiles by [CARTO](https://carto.com/attribution/) as our basemap. ``` LetsPlot.set( maptiles_zxy( url='https://cartocdn_c.global.ssl.fastly.net/base-antique/{z}/{x}/{y}@2x.png', attribution='<a href="https://www.openstreetmap.org/copyright">© OpenStreetMap contributors</a> <a href="https://carto.com/attributions#basemaps">© CARTO</a>, <a href="https://carto.com/attributions">© CARTO</a>' ) ) ``` ### Make a plot similar to the one above but interactive ``` p + geom_livemap()\ + geom_polygon(data=CA, fill='white', color='#B71234', alpha=0.5)\ + points ``` ### Adjust the initial viewport Use parameters `location` and `zoom` to define the initial viewport. ``` # Pass `[lon,lat]` value to the `location` (near Los Angeles) p + geom_livemap(location=[-118.15, 33.96], zoom=7)\ + geom_polygon(data=CA, fill='white', color='#B71234', alpha=0.5, size=1)\ + points ```
github_jupyter
# Build sentence/paragraph level QA application from python with Vespa > Retrieve paragraph and sentence level information with sparse and dense ranking features We will walk through the steps necessary to create a question answering (QA) application that can retrieve sentence or paragraph level answers based on a combination of semantic and/or term-based search. We start by discussing the dataset used and the question and sentence embeddings generated for semantic search. We then include the steps necessary to create and deploy a Vespa application to serve the answers. We make all the required data available to feed the application and show how to query for sentence and paragraph level answers based on a combination of semantic and term-based search. This tutorial is based on [earlier work](https://docs.vespa.ai/en/semantic-qa-retrieval.html) by the Vespa team to reproduce the results of the paper [ReQA: An Evaluation for End-to-End Answer Retrieval Models](https://arxiv.org/abs/1907.04780) by Ahmad Et al. using the Stanford Question Answering Dataset (SQuAD) v1.1 dataset. ## About the data We are going to use the Stanford Question Answering Dataset (SQuAD) v1.1 dataset. The data contains paragraphs (denoted here as context), and each paragraph has questions that have answers in the associated paragraph. We have parsed the dataset and organized the data that we will use in this tutorial to make it easier to follow along. ### Paragraph ``` import requests, json context_data = json.loads( requests.get("https://data.vespa.oath.cloud/blog/qa/sample_context_data.json").text ) ``` Each `context` data point contains a `context_id` that uniquely identifies a paragraph, a `text` field holding the paragraph string, and a `questions` field holding a list of question ids that can be answered from the paragraph text. We also include a `dataset` field to identify the data source if we want to index more than one dataset in our application. 
``` context_data[0] ``` ### Questions According to the data point above, `context_id = 0` can be used to answer the questions with `id = [0, 1, 2, 3, 4]`. We can load the file containing the questions and display those first five questions. ``` from pandas import read_csv questions = read_csv( filepath_or_buffer="https://data.vespa.oath.cloud/blog/qa/sample_questions.csv", sep="\t", ) questions[["question_id", "question"]].head() ``` ### Paragraph sentences To build a more accurate application, we can break the paragraphs down into sentences. For example, the first sentence below comes from the paragraph with `context_id = 0` and can answer the question with `question_id = 4`. ``` sentence_data = json.loads( requests.get("https://data.vespa.oath.cloud/blog/qa/sample_sentence_data.json").text ) {k:sentence_data[0][k] for k in ["text", "dataset", "questions", "context_id"]} ``` ### Embeddings We want to combine semantic (dense) and term-based (sparse) signals to answer the questions sent to our application. We have generated embeddings for both the questions and the sentences to implement the semantic search, each having size equal to 512. ``` questions[["question_id", "embedding"]].head(1) sentence_data[0]["sentence_embedding"]["values"][0:5] # display the first five elements ``` Here is [the script](https://github.com/vespa-engine/sample-apps/blob/master/semantic-qa-retrieval/bin/convert-to-vespa-squad.py) containing the code that we used to generate the sentence and questions embeddings. We used [Google's Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder) at the time but feel free to replace it with embeddings generated by your preferred model. ## Create and deploy the application We can now build a sentence-level Question answering application based on the data described above. ### Schema to hold context information The `context` schema will have a document containing the four relevant fields described in the data section. 
We create an index for the `text` field and use `enable-bm25` to pre-compute data required to speed up the use of BM25 for ranking. The `summary` indexing indicates that all the fields will be included in the requested context documents. The `attribute` indexing store the fields in memory as an attribute for sorting, querying, and grouping. ``` from vespa.package import Document, Field context_document = Document( fields=[ Field(name="questions", type="array<int>", indexing=["summary", "attribute"]), Field(name="dataset", type="string", indexing=["summary", "attribute"]), Field(name="context_id", type="int", indexing=["summary", "attribute"]), Field(name="text", type="string", indexing=["summary", "index"], index="enable-bm25"), ] ) ``` The default fieldset means query tokens will be matched against the `text` field by default. We defined two rank-profiles (`bm25` and `nativeRank`) to illustrate that we can define and experiment with as many rank-profiles as we want. You can create different ones using [the ranking expressions and features](https://docs.vespa.ai/en/ranking-expressions-features.html) available. ``` from vespa.package import Schema, FieldSet, RankProfile context_schema = Schema( name="context", document=context_document, fieldsets=[FieldSet(name="default", fields=["text"])], rank_profiles=[ RankProfile(name="bm25", inherits="default", first_phase="bm25(text)"), RankProfile(name="nativeRank", inherits="default", first_phase="nativeRank(text)")] ) ``` ### Schema to hold sentence information The document of the `sentence` schema will inherit the fields defined in the `context` document to avoid unnecessary duplication of the same field types. Besides, we add the `sentence_embedding` field defined to hold a one-dimensional tensor of floats of size 512. We will store the field as an attribute in memory and build an ANN `index` using the `HNSW` (hierarchical navigable small world) algorithm. 
Read [this blog post](https://blog.vespa.ai/approximate-nearest-neighbor-search-in-vespa-part-1/) to know more about Vespa’s journey to implement ANN search and the [documentation](https://docs.vespa.ai/documentation/approximate-nn-hnsw.html) for more information about the HNSW parameters. ``` from vespa.package import HNSW sentence_document = Document( inherits="context", fields=[ Field( name="sentence_embedding", type="tensor<float>(x[512])", indexing=["attribute", "index"], ann=HNSW( distance_metric="euclidean", max_links_per_node=16, neighbors_to_explore_at_insert=500 ) ) ] ) ``` For the `sentence` schema, we define three rank profiles. The `semantic-similarity` uses the Vespa `closeness` ranking feature, which is defined as `1/(1 + distance)` so that sentences with embeddings closer to the question embedding will be ranked higher than sentences that are far apart. The `bm25` is an example of a term-based rank profile, and `bm25-semantic-similarity` combines both term-based and semantic-based signals as an example of a hybrid approach. ``` sentence_schema = Schema( name="sentence", document=sentence_document, fieldsets=[FieldSet(name="default", fields=["text"])], rank_profiles=[ RankProfile( name="semantic-similarity", inherits="default", first_phase="closeness(sentence_embedding)" ), RankProfile( name="bm25", inherits="default", first_phase="bm25(text)" ), RankProfile( name="bm25-semantic-similarity", inherits="default", first_phase="bm25(text) + closeness(sentence_embedding)" ) ] ) ``` ### Build the application package We can now define our `qa` application by creating an application package with both the `context_schema` and the `sentence_schema` that we defined above. In addition, we need to inform Vespa that we plan to send a query ranking feature named `query_embedding` with the same type that we used to define the `sentence_embedding` field. 
``` from vespa.package import ApplicationPackage, QueryProfile, QueryProfileType, QueryTypeField app_package = ApplicationPackage( name="qa", schema=[context_schema, sentence_schema], query_profile=QueryProfile(), query_profile_type=QueryProfileType( fields=[ QueryTypeField( name="ranking.features.query(query_embedding)", type="tensor<float>(x[512])" ) ] ) ) ``` ### Deploy the application We can deploy the `app_package` in a Docker container (or to [Vespa Cloud](https://cloud.vespa.ai/)): ``` import os from vespa.deployment import VespaDocker disk_folder = os.path.join(os.getenv("WORK_DIR"), "sample_application") vespa_docker = VespaDocker( port=8081, disk_folder=disk_folder # requires absolute path ) app = vespa_docker.deploy(application_package=app_package) ``` ## Feed the data Once deployed, we can use the `Vespa` instance `app` to interact with the application. We can start by feeding context and sentence data. ``` for idx, sentence in enumerate(sentence_data): app.feed_data_point(schema="sentence", data_id=idx, fields=sentence) for context in context_data: app.feed_data_point(schema="context", data_id=context["context_id"], fields=context) ``` ## Sentence level retrieval The query below sends the first question embedding (`questions.loc[0, "embedding"]`) through the `ranking.features.query(query_embedding)` parameter and use the `nearestNeighbor` search operator to retrieve the closest 100 sentences in embedding space using Euclidean distance as configured in the `HNSW` settings. The sentences returned will be ranked by the `semantic-similarity` rank profile defined in the `sentence` schema. 
``` result = app.query(body={ 'yql': 'select * from sources sentence where ([{"targetNumHits":100}]nearestNeighbor(sentence_embedding,query_embedding));', 'hits': 100, 'ranking.features.query(query_embedding)': questions.loc[0, "embedding"], 'ranking.profile': 'semantic-similarity' }) result.hits[0] ``` ## Sentence level hybrid retrieval In addition to sending the query embedding, we can send the question string (`questions.loc[0, "question"]`) via the `query` parameter and use the `or` operator to retrieve documents that satisfy either the semantic operator `nearestNeighbor` or the term-based operator `userQuery`. Choosing `type` equal `any` means that the term-based operator will retrieve all the documents that match at least one query token. The retrieved documents will be ranked by the hybrid rank-profile `bm25-semantic-similarity`. ``` result = app.query(body={ 'yql': 'select * from sources sentence where ([{"targetNumHits":100}]nearestNeighbor(sentence_embedding,query_embedding)) or userQuery();', 'query': questions.loc[0, "question"], 'type': 'any', 'hits': 100, 'ranking.features.query(query_embedding)': questions.loc[0, "embedding"], 'ranking.profile': 'bm25-semantic-similarity' }) result.hits[0] ``` ## Paragraph level retrieval For paragraph-level retrieval, we use Vespa's [grouping](https://docs.vespa.ai/en/grouping.html) feature to retrieve paragraphs instead of sentences. In the sample query below, we group by `context_id` and use the paragraph’s max sentence score to represent the paragraph level score. We limit the number of paragraphs returned by 3, and each paragraph contains at most two sentences. We return all the summary features for each sentence. All those configurations can be changed to fit different use cases. 
``` result = app.query(body={ 'yql': ('select * from sources sentence where ([{"targetNumHits":10000}]nearestNeighbor(sentence_embedding,query_embedding)) |' 'all(group(context_id) max(3) order(-max(relevance())) each( max(2) each(output(summary())) as(sentences)) as(paragraphs));'), 'hits': 0, 'ranking.features.query(query_embedding)': questions.loc[0, "embedding"], 'ranking.profile': 'bm25-semantic-similarity' }) paragraphs = result.json["root"]["children"][0]["children"][0] paragraphs["children"][0] # top-ranked paragraph paragraphs["children"][1] # second-ranked paragraph ``` ### Clean up environment ``` from shutil import rmtree rmtree(disk_folder, ignore_errors=True) vespa_docker.container.stop() vespa_docker.container.remove() ```
github_jupyter
# Basic Protein-Ligand Affinity Models

# Tutorial: Use machine learning to model protein-ligand affinity.

Written by Evan Feinberg and Bharath Ramsundar

Copyright 2016, Stanford University

This DeepChem tutorial demonstrates how to use machine learning for modeling protein-ligand binding affinity.

Overview: In this tutorial, you will trace an arc from loading a raw dataset to fitting a cutting-edge ML technique for predicting binding affinities. This will be accomplished by writing simple commands to access the deepchem Python API, encompassing the following broad steps:

1. Loading a chemical dataset, consisting of a series of protein-ligand complexes.
2. Featurizing each protein-ligand complex with various featurization schemes.
3. Fitting a series of models with these featurized protein-ligand complexes.
4. Visualizing the results.

First, let's point to a "dataset" file. This can come in the format of a CSV file or Pandas DataFrame. Regardless of file format, it must be columnar data, where each row is a molecular system, and each column represents a different piece of information about that system. For instance, in this example, every row reflects a protein-ligand complex, and the following columns are present: a unique complex identifier; the SMILES string of the ligand; the binding affinity (Ki) of the ligand to the protein in the complex; a Python `list` of all lines in a PDB file for the protein alone; and a Python `list` of all lines in a ligand file for the ligand alone.

This should become clearer with the example.
(Make sure to set `DISPLAY = True`) ``` %load_ext autoreload %autoreload 2 %pdb off # set DISPLAY = True when running tutorial DISPLAY = False # set PARALLELIZE to true if you want to use ipyparallel PARALLELIZE = False import warnings warnings.filterwarnings('ignore') import deepchem as dc dataset_file= "../../datasets/pdbbind_core_df.pkl.gz" raw_dataset = dc.utils.save.load_from_disk(dataset_file) ``` Let's see what `dataset` looks like: ``` print("Type of dataset is: %s" % str(type(raw_dataset))) print(raw_dataset[:5]) print("Shape of dataset is: %s" % str(raw_dataset.shape)) ``` One of the missions of ```deepchem``` is to form a synapse between the chemical and the algorithmic worlds: to be able to leverage the powerful and diverse array of tools available in Python to analyze molecules. This ethos applies to visual as much as quantitative examination: ``` import nglview import tempfile import os import mdtraj as md import numpy as np import deepchem.utils.visualization #from deepchem.utils.visualization import combine_mdtraj, visualize_complex, convert_lines_to_mdtraj def combine_mdtraj(protein, ligand): chain = protein.topology.add_chain() residue = protein.topology.add_residue("LIG", chain, resSeq=1) for atom in ligand.topology.atoms: protein.topology.add_atom(atom.name, atom.element, residue) protein.xyz = np.hstack([protein.xyz, ligand.xyz]) protein.topology.create_standard_bonds() return protein def visualize_complex(complex_mdtraj): ligand_atoms = [a.index for a in complex_mdtraj.topology.atoms if "LIG" in str(a.residue)] binding_pocket_atoms = md.compute_neighbors(complex_mdtraj, 0.5, ligand_atoms)[0] binding_pocket_residues = list(set([complex_mdtraj.topology.atom(a).residue.resSeq for a in binding_pocket_atoms])) binding_pocket_residues = [str(r) for r in binding_pocket_residues] binding_pocket_residues = " or ".join(binding_pocket_residues) traj = nglview.MDTrajTrajectory( complex_mdtraj ) # load file from RCSB PDB ngltraj = nglview.NGLWidget( traj ) 
ngltraj.representations = [ { "type": "cartoon", "params": { "sele": "protein", "color": "residueindex" } }, { "type": "licorice", "params": { "sele": "(not hydrogen) and (%s)" % binding_pocket_residues } }, { "type": "ball+stick", "params": { "sele": "LIG" } } ] return ngltraj def visualize_ligand(ligand_mdtraj): traj = nglview.MDTrajTrajectory( ligand_mdtraj ) # load file from RCSB PDB ngltraj = nglview.NGLWidget( traj ) ngltraj.representations = [ { "type": "ball+stick", "params": {"sele": "all" } } ] return ngltraj def convert_lines_to_mdtraj(molecule_lines): tempdir = tempfile.mkdtemp() molecule_file = os.path.join(tempdir, "molecule.pdb") with open(molecule_file, "wb") as f: f.writelines(molecule_lines) molecule_mdtraj = md.load(molecule_file) return molecule_mdtraj first_protein, first_ligand = raw_dataset.iloc[0]["protein_pdb"], raw_dataset.iloc[0]["ligand_pdb"] protein_mdtraj = convert_lines_to_mdtraj(first_protein) ligand_mdtraj = convert_lines_to_mdtraj(first_ligand) complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj) ngltraj = visualize_complex(complex_mdtraj) ngltraj ``` Now that we're oriented, let's use ML to do some chemistry. So, step (2) will entail featurizing the dataset. The available featurizations that come standard with deepchem are ECFP4 fingerprints, RDKit descriptors, NNScore-style bdescriptors, and hybrid binding pocket descriptors. Details can be found on ```deepchem.io```. ``` grid_featurizer = dc.feat.RdkitGridFeaturizer( voxel_width=16.0, feature_types="voxel_combined", voxel_feature_types=["ecfp", "splif", "hbond", "pi_stack", "cation_pi", "salt_bridge"], ecfp_power=5, splif_power=5, parallel=True, flatten=True) compound_featurizer = dc.feat.CircularFingerprint(size=128) ``` Note how we separate our featurizers into those that featurize individual chemical compounds, compound_featurizers, and those that featurize molecular complexes, complex_featurizers. Now, let's perform the actual featurization. 
Calling ```loader.featurize()``` will return an instance of class ```Dataset```. Internally, ```loader.featurize()``` (a) computes the specified features on the data, (b) transforms the inputs into ```X``` and ```y``` NumPy arrays suitable for ML algorithms, and (c) constructs a ```Dataset()``` instance that has useful methods, such as an iterator, over the featurized data. This is a little complicated, so we will use MoleculeNet to featurize the PDBBind core set for us.

```
PDBBIND_tasks, (train_dataset, valid_dataset, test_dataset), transformers = dc.molnet.load_pdbbind_grid()
```

Now, we conduct a train-test split. If you'd like, you can choose `splittype="scaffold"` instead to perform a train-test split based on Bemis-Murcko scaffolds.

We generate separate instances of the Dataset() object to hermetically seal the train dataset from the test dataset. This style lends itself easily to validation-set type hyperparameter searches, which we will illustrate in a separate section of this tutorial.

The performance of many ML algorithms hinges greatly on careful data preprocessing. Deepchem comes standard with a few options for such preprocessing.

Now, we're ready to do some learning!

To fit a deepchem model, first we instantiate one of the provided (or user-written) model classes. In this case, we have created a convenience class to wrap around any ML model available in Sci-Kit Learn that can in turn be used to interoperate with deepchem. To instantiate an ```SklearnModel```, you will need (a) task_types, (b) model_params, another ```dict``` as illustrated below, and (c) a ```model_instance``` defining the type of model you would like to fit, in this case a ```RandomForestRegressor```.
``` from sklearn.ensemble import RandomForestRegressor sklearn_model = RandomForestRegressor(n_estimators=100) model = dc.models.SklearnModel(sklearn_model) model.fit(train_dataset) from deepchem.utils.evaluate import Evaluator import pandas as pd metric = dc.metrics.Metric(dc.metrics.r2_score) evaluator = Evaluator(model, train_dataset, transformers) train_r2score = evaluator.compute_model_performance([metric]) print("RF Train set R^2 %f" % (train_r2score["r2_score"])) evaluator = Evaluator(model, valid_dataset, transformers) valid_r2score = evaluator.compute_model_performance([metric]) print("RF Valid set R^2 %f" % (valid_r2score["r2_score"])) ``` In this simple example, in few yet intuitive lines of code, we traced the machine learning arc from featurizing a raw dataset to fitting and evaluating a model. Here, we featurized only the ligand. The signal we observed in R^2 reflects the ability of circular fingerprints and random forests to learn general features that make ligands "drug-like." ``` predictions = model.predict(test_dataset) print(predictions) # TODO(rbharath): This cell visualizes the ligand with highest predicted activity. Commenting it out for now. Fix this later #from deepchem.utils.visualization import visualize_ligand #top_ligand = predictions.iloc[0]['ids'] #ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==top_ligand]['ligand_pdb'].values[0]) #if DISPLAY: # ngltraj = visualize_ligand(ligand1) # ngltraj # TODO(rbharath): This cell visualizes the ligand with lowest predicted activity. Commenting it out for now. Fix this later #worst_ligand = predictions.iloc[predictions.shape[0]-2]['ids'] #ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==worst_ligand]['ligand_pdb'].values[0]) #if DISPLAY: # ngltraj = visualize_ligand(ligand1) # ngltraj ``` # The protein-ligand complex view. 
The preceding simple example, in few yet intuitive lines of code, traces the machine learning arc from featurizing a raw dataset to fitting and evaluating a model.

In this next section, we illustrate ```deepchem```'s modularity, and thereby the ease with which one can explore different featurization schemes, different models, and combinations thereof, to achieve the best performance on a given dataset. We will demonstrate this by examining protein-ligand interactions.

In the previous section, we featurized only the ligand. The signal we observed in R^2 reflects the ability of circular fingerprints and random forests to learn general features that make ligands "drug-like."

In this section, we demonstrate how to use hyperparameter searching to find a higher scoring model.

```
def rf_model_builder(model_params, model_dir):
    sklearn_model = RandomForestRegressor(**model_params)
    return dc.models.SklearnModel(sklearn_model, model_dir)

params_dict = {
    "n_estimators": [10, 50, 100],
    "max_features": ["auto", "sqrt", "log2", None],
}

metric = dc.metrics.Metric(dc.metrics.r2_score)
optimizer = dc.hyper.HyperparamOpt(rf_model_builder)
best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(
    params_dict, train_dataset, valid_dataset, transformers,
    metric=metric)

%matplotlib inline

import matplotlib
import numpy as np
import matplotlib.pyplot as plt

rf_predicted_test = best_rf.predict(test_dataset)
rf_true_test = test_dataset.y
plt.scatter(rf_predicted_test, rf_true_test)
plt.xlabel('Predicted pIC50s')
plt.ylabel('True IC50')
plt.title(r'RF predicted IC50 vs. True pIC50')
plt.xlim([2, 11])
plt.ylim([2, 11])
plt.plot([2, 11], [2, 11], color='k')
plt.show()
```
github_jupyter
# Quadtrees iterating on pairs of neighbouring items A quadtree is a tree data structure in which each node has exactly four children. It is a particularly efficient way to store elements when you need to quickly find them according to their x-y coordinates. A common problem with elements in quadtrees is to detect pairs of elements which are closer than a definite threshold. The proposed implementation efficiently addresses this problem. ``` from smartquadtree import Quadtree ``` ## Creation & insertion of elements As you instantiate your quadtree, you must specify the center of your space then the height and width. ``` q = Quadtree(0, 0, 10, 10) ``` The output of a quadtree on the console is pretty explicit. (You can refer to next section for the meaning of "No mask set") ``` q ``` You can easily insert elements from which you can naturally infer x-y coordinates (e.g. tuples or lists) ``` q.insert((1, 2)) q.insert((-3, 4)) q ``` No error is raised if the element you are trying to insert is outside the scope of the quadtree. But it won't be stored anyway! ``` q.insert((-20, 0)) q ``` If you want to insert other Python objects, be sure to provide `get_x()` and `get_y()` methods to your class! ``` class Point(object): def __init__(self, x, y, color): self.x = x self.y = y self.color = color def __repr__(self): return "(%.2f, %.2f) %s" % (self.x, self.y, self.color) def get_x(self): return self.x def get_y(self): return self.y ``` You cannot insert elements of a different type from the first element inserted. ``` q.insert(Point(2, -7, "red")) ``` But feel free to create a new one and play with it: ``` point_quadtree = Quadtree(5, 5, 5, 5) point_quadtree.insert(Point(2, 7, "red")) point_quadtree ``` ## Simple iteration ``` from random import random q = Quadtree(0, 0, 10, 10, 16) for a in range(50): q.insert([random()*20-10, random()*20-10]) ``` The `print` function does not display all elements and uses the `__repr__()` method of each element. 
``` print(q) ``` We can write our own iterator and print each element we encounter the way we like. ``` from __future__ import print_function for p in q.elements(): print ("[%.2f, %.2f]" % (p[0], p[1]), end=" ") ``` It is easy to filter the iteration process and apply the function only on elements inside a given polygon. Use the `set_mask()` method and pass a list of x-y coordinates. The polygon will be automatically closed. ``` q.set_mask([(-3, -7), (-3, 7), (3, 7), (3, -7)]) print(q) ``` The same approach can be used to count the number of elements inside the quadtree. ``` print (sum (1 for x in q.elements())) print (sum (1 for x in q.elements(ignore_mask=True))) ``` As a mask is set on the quadtree, we only counted the elements inside the mask. You can use the `size()` method to count elements and ignore the mask by default. Disabling the mask with `set_mask(None)` is also a possibility. ``` print ("%d elements (size method)" % q.size()) print ("%d elements (don't ignore the mask)" % q.size(False)) q.set_mask(None) print ("%d elements (disable the mask)" % q.size()) ``` ## Playing with plots ``` %matplotlib inline from matplotlib import pyplot as plt q = Quadtree(5, 5, 5, 5, 10) for a in range(200): q.insert([random()*10, random()*10]) fig = plt.figure() plt.axis([0, 10, 0, 10]) q.set_mask(None) for p in q.elements(): plt.plot([p[0]], [p[1]], 'o', color='lightgrey') q.set_mask([(3, 3), (3, 7), (7, 7), (7, 3)]) for p in q.elements(): plt.plot([p[0]], [p[1]], 'ro') _ = plt.plot([3, 3, 7, 7, 3], [3, 7, 7, 3, 3], 'r') ``` ## Iteration on pairs of neighbouring elements Iterating on pairs of neighbouring elements is possible through the `neighbour_elements()` function. It works as a generator and yields pair of elements, the first one being inside the mask (if specified), the second one being in the same cell or in any neighbouring cell, also in the mask. Note that if `(a, b)` is yielded by `neighbour_elements()`, `(b, a)` will be omitted from future yields. 
``` q = Quadtree(5, 5, 5, 5, 10) q.set_limitation(2) # do not create a new subdivision if one side of the cell is below 2 for a in range(200): q.insert([random()*10, random()*10]) fig = plt.figure() plt.axis([0, 10, 0, 10]) for p in q.elements(): plt.plot([p[0]], [p[1]], 'o', color='lightgrey') q.set_mask([(1, 1), (4, 1), (5, 4), (2, 5), (1, 1)]) for p in q.elements(): plt.plot([p[0]], [p[1]], 'o', color='green') for p1, p2 in q.neighbour_elements(): if ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 < 1): plt.plot([p1[0]], [p1[1]], 'o', color='red') plt.plot([p2[0]], [p2[1]], 'o', color='red') plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'red') _ = plt.plot([1, 4, 5, 2, 1], [1, 1, 4, 5, 1], 'r') ```
github_jupyter
``` import sys import os # path_to_script = os.path.dirname(os.path.abspath(__file__)) path_to_imcut = os.path.abspath("..") sys.path.insert(0, path_to_imcut) path_to_imcut import imcut imcut.__file__ import numpy as np import scipy import scipy.ndimage # import sed3 import matplotlib.pyplot as plt ``` ## Input data ``` sz = [10, 300, 300] dist = 30 noise_intensity = 25 noise_std = 20 signal_intensity = 50 segmentation = np.zeros(sz) segmentation[5, 100, 100] = 1 segmentation[5, 150, 120] = 1 segmentation = scipy.ndimage.morphology.distance_transform_edt(1 - segmentation) segmentation = (segmentation < dist).astype(np.int8) seeds = np.zeros_like(segmentation) seeds[5, 90:100, 90:100] = 1 seeds[5, 190:200, 190:200] = 2 # np.random.random(sz) * 100 plt.figure(figsize=(15,5)) plt.subplot(121) plt.imshow(segmentation[5, :, :]) plt.colorbar() data3d = np.random.normal(size=sz, loc=noise_intensity, scale=noise_std) data3d += segmentation * signal_intensity data3d = data3d.astype(np.int16) plt.subplot(122) plt.imshow(data3d[5, :, :], cmap="gray") plt.colorbar() ``` ## Graph-Cut segmentation ``` from imcut import pycut segparams = { 'method':'graphcut', # 'method': 'multiscale_graphcut', 'use_boundary_penalties': False, 'boundary_dilatation_distance': 2, 'boundary_penalties_weight': 1, 'block_size': 8, 'tile_zoom_constant': 1 } gc = pycut.ImageGraphCut(data3d, segparams=segparams) gc.set_seeds(seeds) gc.run() output_segmentation = gc.segmentation plt.figure(figsize=(15,5)) plt.subplot(121) plt.imshow(output_segmentation[5, :, :]) plt.colorbar() ``` ## Model debug ``` segparams = { 'method':'graphcut', # 'method': 'multiscale_graphcut', 'use_boundary_penalties': False, 'boundary_dilatation_distance': 2, 'boundary_penalties_weight': 1, 'block_size': 8, 'tile_zoom_constant': 1 } gc = pycut.ImageGraphCut(data3d, segparams=segparams) gc.set_seeds(seeds) gc.run() output_segmentation = gc.segmentation a=gc.debug_show_model(start=-100, stop=200) 
gc.debug_show_reconstructed_similarity() ``` ## Other parameters ``` segparams_ssgc = { "method": "graphcut", # "use_boundary_penalties": False, # 'boundary_penalties_weight': 30, # 'boundary_penalties_sigma': 200, # 'boundary_dilatation_distance': 2, # 'use_apriori_if_available': True, # 'use_extra_features_for_training': False, # 'apriori_gamma': 0.1, "modelparams": { "type": "gmmsame", "params": {"n_components": 2}, "return_only_object_with_seeds": True, "fv_type": "intensity", # "fv_type": "intensity_and_blur", # "fv_type": "fv_extern", # "fv_extern": fv_fcn() } } ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import IPython import scipy.io.wavfile as wav import scipy.signal as ss def plotSound(signal, frameRate): plt.plot(signal) plt.show() soundStrings = ['aeiou.wav', 'an_in_on.wav'] selectedSoundString = soundStrings[1] frameRate, frames = wav.read(selectedSoundString) plotSound(frames, frameRate) IPython.display.Audio(frames, rate=frameRate) from math import floor, ceil def nextPower(n, p): m = 1 while m <= n: m *= p return m def periode(signal, frameRate, pmin=1/300, pmax=1/80, seuil=0.7): signal = signal - np.mean(signal) N = len(signal) Nmin = 1 + ceil(pmin * frameRate) Nmax = min(1 + floor(pmax * frameRate), N) Nfft = nextPower(2 * N - 1, 2) fourierSignal = np.fft.fft(signal, n=Nfft) S = fourierSignal * np.conjugate(fourierSignal) / N r = np.real(np.fft.ifft(S)) i = np.argmax(r[Nmin:Nmax]) rmax = r[Nmin:Nmax][i] P = i + Nmin - 2 corr = rmax / r[0] * N / (N-P) voise = corr > seuil if not voise: P = round(10e-3 * frameRate) return P, voise def analysisPitchMarks(signal, frameRate): tArray = [1] vArray = [0] PArray = [10e-3 * frameRate] while True: t = tArray[-1] P = PArray[-1] duration = floor(2.5 * P) if t + duration > len(signal): break x = signal[t:t + duration] newP, voise = periode(x, frameRate) vArray.append(voise) PArray.append(newP) tArray.append(t + newP) A = np.zeros((3, len(tArray))) A[0,:] = tArray A[1,:] = vArray A[2,:] = PArray return A A = analysisPitchMarks(frames, frameRate) B = np.zeros((2, A.shape[1])) B[0,:] = A[0,:] B[1,:] = np.arange(A.shape[1]) ``` ``` A[0] = time marks (in frames) A[1] = 0/1 voise A[2] = pitch duration (in frames) ``` ``` B[0, i] = time mark in frames for the ith synthese mark B[1, i] = synthese mark id ``` ``` plt.figure(figsize=(15, 4)) plt.plot(frames, zorder=0) plt.scatter(A[0, np.nonzero(A[1])[0]], [0] * len(np.nonzero(A[1])[0]), c="r", s=50) plt.show() def sythesis(signal, frameRate, A, B): n = int(B[0,-1]) + floor(A[2, int(B[1,-1])]) + 1 y = np.zeros(n) for k in 
range(1, B.shape[1]): ta = int(A[0,int(B[1,k])]) P = int(A[2,int(B[1,k])]) ts = int(B[0,k]) x = signal[ta-P:ta+P+1] x = x * np.hanning(2*P+1) y[ts-P:ts+P+1] += x return y y = sythesis(frames, frameRate, A, B) plotSound(y, frameRate) IPython.display.Audio(y, rate=frameRate) def changeTimeScale(alpha, A, frameRate): t = [1] n = [0,1] while int(n[-1]) < A.shape[1]: t.append(t[-1] + A[2,int(n[-1])]) n.append(n[-1] + 1 / alpha) B = np.zeros((2, len(t))) B[0,:] = np.array(t, dtype=int) B[1,:] = np.array(n[:-1], dtype=int) return B # Test A = analysisPitchMarks(frames, frameRate) B = changeTimeScale(1.5, A, frameRate) y = sythesis(frames, frameRate, A, B) plotSound(y, frameRate) IPython.display.Audio(y, rate=frameRate) def changePitchScale(beta, A, frameRate): t = [1] scale = [1 / beta if A[1, 0] else 1] n = [0, scale[0]] while int(n[-1]) < A.shape[1]: scale.append(1 / beta if A[1, int(n[-1])] else 1) t.append(t[-1] + scale[-1] * A[2, int(n[-1])]) n.append(n[-1] + scale[-1]) B = np.zeros((2, len(t))) B[0,:] = np.array(t, dtype=int) B[1,:] = np.array(n[:-1], dtype=int) return B # Test A = analysisPitchMarks(frames, frameRate) B = changePitchScale(0.7, A, frameRate) y = sythesis(frames, frameRate, A, B) plotSound(y, frameRate) IPython.display.Audio(y, rate=frameRate) def changeBothScales(alpha, beta, A, frameRate): t = [1] scale = [1 / beta if A[1, 0] else 1] n = [0, scale[0]] while int(n[-1]) < A.shape[1]: scale.append(1 / beta if A[1, int(n[-1])] else 1) t.append(t[-1] + scale[-1] * A[2, int(n[-1])]) n.append(n[-1] + scale[-1] + 1 / alpha) B = np.zeros((2, len(t))) B[0,:] = np.array(t, dtype=int) B[1,:] = np.array(n[:-1], dtype=int) return B # Test A = analysisPitchMarks(frames, frameRate) B = changeBothScales(0.7, 0.7, A, frameRate) y = sythesis(frames, frameRate, A, B) plotSound(y, frameRate) IPython.display.Audio(y, rate=frameRate) ```
github_jupyter
```
from edahelper import *
import sklearn.naive_bayes as NB
import sklearn.linear_model
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score

# Resources:
#https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html

wsb = pd.read_csv('../Data/wsb_cleaned.csv')

#set up appropriate subset, removing comment outliers
#also chose to look at only self posts
# Keep self posts with at least 10 upvotes, drop extreme comment-count
# outliers, and exclude recurring megathread-style posts by title keywords.
dfog=wsb.loc[(wsb.is_self==True) & (wsb.ups>=10) & (wsb.num_comments<=10000) & ~(wsb["title"].str.contains("Thread|thread|Sunday Live Chat|consolidation zone|Containment Zone|Daily Discussion|Daily discussion|Saturday Chat|What Are Your Moves Tomorrow|What Are Your Moves Today|MEGATHREAD",na=False))]
```

## Preprocessing

Removing characters that are not alphanumeric or spaces:

```
def RegexCols(df,cols):
    """Return a copy of *df* where each column in *cols* keeps only
    ASCII letters and spaces.

    Note: the pattern [^a-zA-Z ] strips digits and punctuation alike,
    and NaN values are stringified by str(x) first, so they become 'nan'.
    """
    newdf=df
    regex = re.compile('[^a-zA-Z ]')
    for col in cols:
        # assign() returns a new DataFrame each time, so the input df
        # itself is never mutated.
        newdf=newdf.assign(**{col: df.loc[:,col].apply(lambda x : regex.sub('', str(x) ))})
    return newdf

df=RegexCols(dfog,['title', 'author', 'selftext'])

# Earlier in-place version, kept for reference:
#df=pd.DataFrame()
#regex = re.compile('[^a-zA-Z ]')
#for col in ['title', 'author', 'selftext']:
#    df.loc[:,col] = dfog.loc[:,col].apply(lambda x : regex.sub('', str(x) ))
```

Filtering the data frame, count vectorizing titles.

# Can we predict the number of upvotes using the self text?
``` #create the train test split #try to predict ups using the self text X_train, X_test, y_train, y_test = train_test_split(df['selftext'], df['ups'], test_size=0.2, random_state=46) #make a pipeline to do bag of words and linear regression text_clf = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', LinearRegression(copy_X=True)), ]) text_clf.fit(X_train,y_train) #text_clf.predict(X_train) print(r2_score(y_train,text_clf.predict(X_train))) print(r2_score(y_test,text_clf.predict(X_test))) #wow, that is terrible. we do worse than if we just guessed the mean all the time. ``` # Can we predict the number of upvotes using the words in the title? ## NLP on words in the title ``` #this time we don't need only self posts df2og=wsb.loc[(wsb.ups>=10) & (wsb.num_comments<=10000) & ~(wsb["title"].str.contains("Thread|thread|Sunday Live Chat|consolidation zone|Containment Zone|Daily Discussion|Daily discussion|Saturday Chat|What Are Your Moves Tomorrow|What Are Your Moves Today|MEGATHREAD",na=False))] df2=RegexCols(df2og,['title', 'author', 'selftext']) X_train, X_test, y_train, y_test = train_test_split(df2['title'], df2['ups'], test_size=0.2, random_state=46) text_clf = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', LinearRegression(copy_X=True)), ]) text_clf.fit(X_train,y_train) print(r2_score(y_train,text_clf.predict(X_train))) print(r2_score(y_test,text_clf.predict(X_test))) results = pd.DataFrame() results["predicted"] = text_clf.predict(X_test) results["true"] = list(y_test) sns.scatterplot(data = results, x = "predicted", y = "true") ``` Doesn't look particularly useful... neither does using lasso... 
``` X_train, X_test, y_train, y_test = train_test_split(df2['title'], df2['ups'], test_size=0.2, random_state=46) text_clf = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', sklearn.linear_model.Lasso()), ]) text_clf.fit(X_train,y_train) print(r2_score(y_train,text_clf.predict(X_train))) print(r2_score(y_test,text_clf.predict(X_test))) results = pd.DataFrame() results["predicted"] = text_clf.predict(X_test) results["true"] = list(y_test) sns.scatterplot(data = results, x = "predicted", y = "true") ``` # Can we predict if a post will be ignored? ``` def PopClassify(ups): if ups <100: return 0 elif ups<100000: return 1 else: return 2 #df2['popularity']=PopClassify(df2['ups']) df2['popularity'] = df2['ups'].map(lambda score: PopClassify(score)) #df['ignored'] = df['ups'] <= 100 # What is a good cutoff for being ignored? #df = wsb[ wsb['ups'] >= 20] df2.head() X_train, X_test, y_train, y_test = train_test_split(df2['title'], df2['popularity'], test_size=0.2, random_state=46) from sklearn.naive_bayes import MultinomialNB text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) text_clf.fit(X_train,y_train) p=text_clf.predict(X_train) print(np.where(p==1)) print(np.where(p==2)) np.mean(p==y_train) p2=text_clf.predict(X_test) np.mean(p2==y_test) #what if we just predict 0 all the time? print(np.mean(y_train==0)) print(np.mean(y_test==0)) def PopClassifyn(ups,n): if ups <n: return 0 else: return 1 #the above shows that the 0 category is too big. maybe cut it down to 50? 
Also throw out the top category df2['popularity'] = df2['ups'].map(lambda score: PopClassifyn(score,50)) X_train, X_test, y_train, y_test = train_test_split(df2['title'], df2['popularity'], test_size=0.2, random_state=46) text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) text_clf.fit(X_train,y_train) print("accuracy on training data:") p=text_clf.predict(X_train) print(np.mean(p==y_train)) print(np.mean(y_train==0)) print("accuracy on testing data:") print(np.mean(text_clf.predict(X_test)==y_test)) print(np.mean(y_test==0)) #slight improvement on the testing data, but lost on the training data... #what about something more extreme? Let's keep all the posts with a score of 1. Let's try to predict ups>1 df3og=wsb.loc[(wsb.num_comments<=10000) & ~(wsb["title"].str.contains("Thread|thread|Sunday Live Chat|consolidation zone|Containment Zone|Daily Discussion|Daily discussion|Saturday Chat|What Are Your Moves Tomorrow|What Are Your Moves Today|MEGATHREAD",na=False))] df3=RegexCols(df3og,['title', 'author', 'selftext']) df3['popularity'] = df3['ups'].map(lambda score: PopClassifyn(score,2)) X_train, X_test, y_train, y_test = train_test_split(df3['title'], df3['popularity'], test_size=0.2, random_state=46,stratify=df3['popularity']) text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) text_clf.fit(X_train,y_train) print("accuracy on training data:") p=text_clf.predict(X_train) print(np.mean(p==y_train)) print(np.mean(y_train==0)) print("accuracy on testing data:") print(np.mean(text_clf.predict(X_test)==y_test)) print(np.mean(y_test==0)) #nothing!! what if we try using the selftext? 
#back to df df4og=wsb.loc[(wsb.is_self==True) & (wsb.num_comments<=10000) & ~(wsb["title"].str.contains("Thread|thread|Sunday Live Chat|consolidation zone|Containment Zone|Daily Discussion|Daily discussion|Saturday Chat|What Are Your Moves Tomorrow|What Are Your Moves Today|MEGATHREAD",na=False))] df4=RegexCols(df4og,['title', 'author', 'selftext']) df4['popularity'] = df4['ups'].map(lambda score: PopClassifyn(score,2)) X_train, X_test, y_train, y_test = train_test_split(df4['selftext'], df4['popularity'], test_size=0.2, random_state=46,stratify=df4['popularity']) text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) text_clf.fit(X_train,y_train) print("accuracy on training data:") p=text_clf.predict(X_train) print(np.mean(p==y_train)) print(np.mean(y_train==0)) print("accuracy on testing data:") print(np.mean(text_clf.predict(X_test)==y_test)) print(np.mean(y_test==0)) #okay, this is not too bad! #other ways to measure how well this is doing? #let's try the ROC AUC score from sklearn.metrics import roc_curve #text_clf.predict_proba(X_train)[:,1] probs=text_clf.predict_proba(X_train)[:,1] roc_curve(y_train,probs) fpr,tpr,cutoffs = roc_curve(y_train,probs) plt.figure(figsize=(12,8)) plt.plot(fpr,tpr) plt.xlabel("False Positive Rate",fontsize=16) plt.ylabel("True Positive Rate",fontsize=16) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.show() from sklearn.metrics import roc_auc_score roc_auc_score(y_train,probs) #now let's try logistic regression rather than naive Bayes? 
from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), #('standardscaler', StandardScaler()), ('clf', LogisticRegression(max_iter=1000)), ]) text_clf.fit(X_train,y_train) print("accuracy on training data:") p=text_clf.predict(X_train) #print(np.mean(p==y_train)) print(accuracy_score(y_train,p)) print(np.mean(y_train==0)) print("accuracy on testing data:") print(np.mean(text_clf.predict(X_test)==y_test)) print(np.mean(y_test==0)) #added later, for ROC curve and AUC score probs=text_clf.predict_proba(X_train)[:,1] fpr,tpr,cutoffs = roc_curve(y_train,probs) plt.figure(figsize=(12,8)) plt.plot(fpr,tpr) plt.xlabel("False Positive Rate",fontsize=16) plt.ylabel("True Positive Rate",fontsize=16) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.show() print(roc_auc_score(y_train,probs)) from sklearn.model_selection import cross_validate as cv from sklearn.metrics import SCORERS as sc from sklearn.metrics import make_scorer as ms from sklearn.metrics import balanced_accuracy_score as bas scorer_dict={ 'accuracy_scorer' : ms(accuracy_score), 'auc_scorer' : ms(roc_auc_score), 'bas_scorer' : ms(bas) } #scores = cross_validate(lasso, X, y, cv=3, #... scoring=('r2', 'neg_mean_squared_error'), #... return_train_score=True) #X_train, X_test, y_train, y_test = train_test_split(df4['selftext'], df4['popularity'], test_size=0.2, random_state=46,stratify=df4['popularity']) scores=cv(text_clf,df4['selftext'],df4['popularity'],cv=5,scoring=scorer_dict, return_train_score=True) print(scores) print(np.mean(scores['test_accuracy_scorer'])) print(np.mean(scores['test_bas_scorer'])) print(np.mean(scores['test_auc_scorer'])) #this is very slightly better than the other one. 
Might be even better if we can scale the data text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('standardscaler', StandardScaler(with_mean=False)), ('clf', LogisticRegression(max_iter=10000)), ]) text_clf.fit(X_train,y_train) print("accuracy on training data:") p=text_clf.predict(X_train) print(np.mean(p==y_train)) print(np.mean(y_train==0)) print("accuracy on testing data:") print(np.mean(text_clf.predict(X_test)==y_test)) print(np.mean(y_test==0)) #scaling somehow made it worse on the testing data?? ``` # Can we cluster similar posts? ``` df3.sort_values(by="ups") ```
github_jupyter
## Sleep analysis, using Passive Infrared (PIR) data, in 10sec bins from a single central PIR, at 200-220mm above the cage floor. Previously EEG-telemetered animals allow direct comparison of sleep scored by direct and non-invasive methods. ### 1st setup analysis environment: ``` import numpy as np # calculations import pandas as pd # dataframes and IO import matplotlib.pyplot as plt # plotting # show graphs/figures in notebooks %matplotlib inline import seaborn as sns # statistical plots and analysis sns.set(style="ticks") # styling sns.set_context("poster") ``` ### Then import .CSV text file from activity monitoring (with ISO-8601 encoding for the timepoints) ``` PIR = pd.read_csv('../PIRdata/1sensorPIRvsEEGdata.csv',parse_dates=True,index_col=0) PIR.head() PIR.pop('PIR4') # remove channels with no Telemetered mice / no sensor PIR.pop('PIR6') PIR.columns=('Act_A', 'Act_B','Act_C', 'Act_D', 'Light') # and rename the remaining columns with activity data #PIR.plot(subplots=True, figsize=(16,12)) ``` ### next identify time of lights ON (to match start of scored EEG data) ``` PIR['Light']['2014-03-18 08:59:30': '2014-03-18 09:00:40'].plot(figsize =(16,4)) ``` ### Define period to match EEG data ``` PIR_24 = PIR.truncate(before='2014-03-18 09:00:00', after='2014-03-19 09:00:00') PIR_24shift = PIR_24.tshift(-9, freq='H') # move data on timescale so 0 represents 'lights on' PIR_24shift.plot(subplots=True,figsize=(20,10)) ``` ### Define sleepscan function and run with selected data ``` # run through trace looking for bouts of sleep (defined as 4 or more sequential '0' values) variable 'a' is dataframe of PIR data def sleepscan(a,bins): ss = a.rolling(bins).sum() y = ss==0 return y.astype(int) # if numerical output is required # for each column of activity data define PIR-derived sleep as a new column ss =PIR_24shift.assign(PIR_A =sleepscan(PIR_24shift['Act_A'],4), PIR_B =sleepscan(PIR_24shift['Act_B'],4), PIR_C =sleepscan(PIR_24shift['Act_C'],4), PIR_D 
=sleepscan(PIR_24shift['Act_D'],4)).resample('10S').mean() ss.head() # show top of new dataframe ``` ### Importing EEG data scored by Sibah Hasan (follow correction for channels A and B on EEG recordings) #### Scored as 10 second bins starting at 9am (lights on) , for clarity we will only import the columns for total sleep, although REM and NREM sleep were scored) ``` eeg10S = pd.read_csv('../PIRdata/EEG_4mice10sec.csv',index_col=False, usecols=['MouseA Total sleep ','MouseB Total sleep ','MouseC Total sleep ','MouseD Total sleep ']) eeg10S.columns=('EEG_A', 'EEG_B', 'EEG_C','EEG_D') # rename columns eeg10S.head() ss.reset_index(inplace=True) # use sequential numbered index to allow concatination (joining) of data ss_all = pd.concat([ss,eeg10S], axis=1) # join data ss_all.set_index('Time',inplace=True) # Time as index ss_all.head() #ss_all.pop('index') # and drop old index ss_all.head() ``` ### Then resample as an average of 30min to get proportion sleep (scored from immobility) ``` EEG30 = ss_all.resample('30T').mean() EEG30.tail() EEG30.loc[:,['PIR_A','EEG_A']].plot(figsize=(18,4)) # show data for one mouse # red #A10000 and blue #011C4E colour pallette for figure2 EEGred = ["#A10000", "#011C4E"] sns.palplot(sns.color_palette(EEGred)) # show colours sns.set_palette(EEGred) sns.set_context('poster') fig, (ax1,ax2, ax3, ax4) = plt.subplots(nrows=4, ncols=1) fig.text(1, 0.87,'A',fontsize=24, horizontalalignment='center',verticalalignment='center') fig.text(1, 0.635,'B',fontsize=24, horizontalalignment='center',verticalalignment='center') fig.text(1, 0.4,'C',fontsize=24, horizontalalignment='center',verticalalignment='center') fig.text(1, 0.162,'D',fontsize=24, horizontalalignment='center',verticalalignment='center') fig.text(0,0.7, 'Proportion of time asleep', fontsize=18, rotation='vertical') fig.text(0.5,0,'Time', fontsize=18) fig.text(0.08,0.14,'PIR', fontsize=21, color="#011C4E", fontweight='semibold') fig.text(0.08,0.11,'EEG', fontsize=21, color="#A10000", 
fontweight='semibold') plt.subplot(411) plt.plot(EEG30.index, EEG30['EEG_A'], label= "EEG total sleep",lw=2) plt.fill_between(EEG30.index, 0, 1, where=EEG30.index>='2014-03-18 12:00:00',lw=0, alpha=0.6, facecolor='#aaaaaa') plt.plot(EEG30.index, EEG30['PIR_A'],label= "PIR sleep", lw=2) plt.xticks(horizontalalignment='left',fontsize=12) plt.yticks([0,0.5,1],fontsize=12) plt.subplot(412) plt.plot(EEG30.index, EEG30['EEG_B'], lw=2) plt.plot(EEG30.index, EEG30['PIR_B'], lw=2) plt.fill_between(EEG30.index, 0, 1, where=EEG30.index>='2014-03-18 12:00:00',lw=0, alpha=0.6, facecolor='#aaaaaa') plt.xticks(horizontalalignment='left',fontsize=12) plt.yticks([0,0.5,1],fontsize=12) plt.subplot(413) plt.plot(EEG30.index, EEG30['EEG_C'], lw=2) plt.plot(EEG30.index, EEG30['PIR_C'], lw=2) plt.fill_between(EEG30.index, 0, 1, where=EEG30.index>='2014-03-18 12:00:00',lw=0, alpha=0.6, facecolor='#aaaaaa') plt.xticks(horizontalalignment='left',fontsize=12) plt.yticks([0,0.5,1],fontsize=12) plt.subplot(414) plt.plot(EEG30.index, EEG30['EEG_D'], lw=2) plt.plot(EEG30.index, EEG30['PIR_D'], lw=2) plt.fill_between(EEG30.index, 0, 1, where=EEG30.index>='2014-03-18 12:00:00',lw=0, alpha=0.6, facecolor='#aaaaaa') plt.xticks(horizontalalignment='left',fontsize=12) plt.yticks([0,0.5,1],fontsize=12) plt.tight_layout(h_pad=0.2,pad=2) # options for saving figures #plt.savefig('correlations_BlueRed.eps',format='eps', dpi=1200, bbox_inches='tight', pad_inches=0.5) #plt.savefig('correlations_BlueRed.jpg',format='jpg', dpi=600,frameon=2, bbox_inches='tight', pad_inches=0.5) plt.show() sns.set_style("white") sns.set_context("talk", font_scale=0.6) corr30 = EEG30 corr30.pop('Light') sns.corrplot(corr30, sig_stars=False) # show correlation plot for all values #plt.savefig('../../Figures/CorrFig3left.eps',format='eps', dpi=600,pad_inches=0.2, frameon=2) ``` # Bland-Altman as an alternative to correlation plots? 
### Combined data from all 4 mice (paired estimates of sleep by PIR and EEG aligned in Excel) ``` df = pd.read_csv('../PIRdata/blandAltLandD.csv') def bland_altman_plot(data1, data2, *args, **kwargs): data1 = np.asarray(data1) data2 = np.asarray(data2) mean = np.mean([data1, data2], axis=0) diff = data1 - data2 # Difference between data1 and data2 md = np.mean(diff) # Mean of the difference sd = np.std(diff, axis=0) # Standard deviation of the difference plt.scatter(mean, diff, *args, **kwargs) plt.axis([0, 30, -30, 30]) plt.axhline(md, linestyle='-', *args, **kwargs) plt.axhline(md + 1.96*sd, linestyle='--', *args, **kwargs) plt.axhline(md - 1.96*sd, linestyle='--', *args, **kwargs) def bland_altman_output(data1, data2, *args, **kwargs): data1 = np.asarray(data1) data2 = np.asarray(data2) mean = np.mean([data1, data2], axis=0) diff = data1 - data2 # Difference between data1 and data2 md = np.mean(diff) # Mean of the difference sd = np.std(diff, axis=0) # Standard deviation of the difference return md , md-(1.96*sd), md+(1.96*sd) sns.set_context('talk') c1, c2, c3 = sns.blend_palette(["#002147","gold","grey"], 3) plt.subplot(111, axisbg=c3) bland_altman_plot(df.PIR_Light, df.EEG_Light,color=c2, linewidth=3) bland_altman_plot(df.PIR_dark, df.EEG_dark,color=c1, linewidth=3) plt.xlabel('Average score from both methods (min)', fontsize=14) plt.ylabel('PIR score - EEG score (min)', fontsize=14) plt.title('Bland-Altman comparison of PIR-derived sleep and EEG-scored sleep', fontsize=16) #plt.savefig('../../Figures/blandAltman4mice.eps',format='eps', dpi=1200,pad_inches=1, # frameon=0) plt.show() bland_altman_output(df.PIR_Light, df.EEG_Light) bland_altman_output(df.PIR_dark, df.EEG_dark) # Combine (concatenate) these data to get overall comparison of measurements df.PIR = pd.concat([df.PIR_dark, df.PIR_Light],axis=0) df.EEG = pd.concat([df.EEG_dark, df.EEG_Light],axis=0) dfall =pd.concat([df.PIR, df.EEG], axis=1, keys=['PIR', 'EEG']) dfall.head() 
bland_altman_output(dfall.PIR, dfall.EEG) # mean and 95% CIs for overall comparison ```
github_jupyter
# Notebook to Look at SMELT results ``` import netCDF4 as nc import matplotlib.pyplot as plt import matplotlib.colors from matplotlib.colors import LogNorm import datetime import os import numpy as np from salishsea_tools import visualisations as vis from salishsea_tools import (teos_tools, tidetools, viz_tools) %matplotlib inline def results_dataset(results_dir, date, ndays, period, grid_type): datestr = date.strftime('%Y%m%d') dateend = date + datetime.timedelta(days=ndays-1) dateendstr = dateend.strftime('%Y%m%d') fname = os.path.join(results_dir, 'SalishSea_{}_{}_{}_{}.nc'.format(period, datestr, dateendstr, grid_type)) print (fname) grid = nc.Dataset(fname) return grid mesh_mask = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/NEMO-forcing/grid/mesh_mask_downbyone2.nc') tmask = mesh_mask.variables['tmask'][:] grid_B = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/NEMO-forcing/grid/bathy_downonegrid2.nc') bathy, lons, lats = tidetools.get_bathy_data(grid_B) final = '/results/SalishSea/nowcast-green/14aug16/' date = datetime.datetime(2016, 8, 14) ptrc_T = results_dataset(final, date, 1, '1h', 'ptrc_T') nitrateF = ptrc_T.variables['NO3'][0,:,:,:] print (nitrateF.shape) initial = '/results/SalishSea/nowcast-green/14jan16/' ni = results_dataset(initial, datetime.datetime(2016, 1, 14), 1, '1h', 'ptrc_T') #print (ni.variables.keys()) #nitrate0 = ni.variables['TRNNO3'][0,:] nitrate0 = ni.variables['NO3'][0,:,:,:] fig,ax = plt.subplots(1,1,figsize=(15,5)) clevels = np.arange(0., 31., 1) cbar = vis.contour_thalweg(ax, nitrate0, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([450,0]) cbar.set_label('Nitrate [uM]') ax.set_title('Initial Nitrate') fig,ax = plt.subplots(1,1,figsize=(15,5)) cbar = vis.contour_thalweg(ax, nitrateF, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([450,0]) cbar.set_label('Nitrate [uM]') ax.set_title('Nitrate') phyto0 = ni.variables['PHY'][0,:] + ni.variables['PHY2'][0,:] clevels = 
np.arange(0, 12, 1.) fig,ax = plt.subplots(1,1,figsize=(15,5)) cbar = vis.contour_thalweg(ax, phyto0, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([50,0]) cbar.set_label('Log Initial Phyto [uM]') ax.set_title('Initial Diatoms + Flag'); diatoms = ptrc_T.variables['PHY2'][0,:,:,:] fig,ax = plt.subplots(1,1,figsize=(15,5)) cbar = vis.contour_thalweg(ax, diatoms, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([50,0]) cbar.set_label('Diatoms [uM]') ax.set_title('Diatoms'); diatoms_masked = np.ma.array(diatoms[0:20], mask=np.logical_not(tmask[0, 0:20])) sum_diatoms = np.sum(diatoms_masked, axis=0) fig, ax = plt.subplots(1, 3, figsize=(15,10)) cmap = plt.get_cmap('plasma') cmap.set_bad('black') mesh = ax[0].pcolormesh(sum_diatoms, cmap=cmap, vmax = 100) fig.colorbar(mesh, ax=ax[0]) ax[0].set_title('Top 20 m Diatoms (uM/m$^2$)') mesh = ax[1].pcolormesh(diatoms_masked[0], cmap=cmap, vmax = 10) fig.colorbar(mesh, ax=ax[1]) ax[1].set_title('Surface Diatoms (uM/m$^3$)') mesh = ax[2].pcolormesh(diatoms_masked[11], cmap=cmap, vmax= 10) ax[2].set_title('10.5 m Depth Diatoms (uM/m$^3$)') fig.colorbar(mesh, ax=ax[2]) flag = ptrc_T.variables['PHY'][0,:,:,:] fig,ax = plt.subplots(1,1,figsize=(15,5)) cbar = vis.contour_thalweg(ax, flag, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([450,0]) cbar.set_label('Flagellates [uM]') ax.set_title('Flagellates'); fig,ax = plt.subplots(1,2,figsize=(15,5)) clevels = [-0.55, -0.3, -0.05, 0.05, 0.3, 0.55] cbar = vis.contour_thalweg(ax[0], nitrateF-nitrate0, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='bwr') ax[0].set_ylim([450,0]) ax[0].set_xlim([0,400]) cbar.set_label('Nitrate [uM]') ax[0].set_title('Difference in Nitrate') cbar = vis.contour_thalweg(ax[1], nitrateF-nitrate0, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='bwr') ax[1].set_ylim([450,0]) ax[1].set_xlim([400, 724]) cbar.set_label('Nitrate [uM]') 
ax[1].set_title('Difference in Nitrate') print (ptrc_T.variables.keys()) #nitrateF = ptrc_T.variables['NO3'][:,:,:,:] for v in ptrc_T.variables.keys(): print (v, np.max(ptrc_T.variables[v][:])) oxy = ptrc_T.variables['O2'][0, :] clevels = np.arange(150, 300, 10) fig,ax = plt.subplots(1,1,figsize=(15,5)) cbar = vis.contour_thalweg(ax, oxy, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([450,0]); oxy_in = ni.variables['O2'][0, :] clevels = np.arange(150, 300, 10) fig,ax = plt.subplots(1,1,figsize=(15,5)) cbar = vis.contour_thalweg(ax, oxy_in, bathy, lons, lats, mesh_mask, 'gdept_0', clevels, cmap='viridis') ax.set_ylim([450,0]); oxy_masked = np.ma.array(oxy, mask=np.logical_not(tmask[0])) sum_oxy = np.sum(oxy_masked, axis=0) fig, ax = plt.subplots(1, 2, figsize=(10,10)) cmap = plt.get_cmap('plasma') cmap.set_bad('black') mesh = ax[0].pcolormesh(sum_oxy, cmap=cmap) fig.colorbar(mesh, ax=ax[0]) ax[0].set_title('Integrated Oxygen') mesh = ax[1].pcolormesh(oxy_masked[0], cmap=cmap) fig.colorbar(mesh, ax=ax[1]) ax[1].set_title('Surface Oxygen') imax=750; imin = 470; jmin=100; jmax=250 diatoms_masked = np.ma.array(diatoms[0:20], mask=np.logical_not(tmask[0, 0:20])) nitrate_masked = np.ma.array(nitrateF[0:20], mask=np.logical_not(tmask[0, 0:20])) sum_diatoms = np.sum(diatoms_masked, axis=0) fig, ax = plt.subplots(1, 3, figsize=(15, 7)) cmap = plt.get_cmap('plasma') cmap.set_bad('burlywood') mesh = ax[2].pcolormesh(sum_diatoms[imin:imax, jmin:jmax], cmap=cmap, norm=matplotlib.colors.LogNorm(), vmin=3) fig.colorbar(mesh, ax=ax[2]) ax[2].set_title('Top 20 m Diatoms (uM N/m$^2$)') mesh = ax[1].pcolormesh(diatoms_masked[0, imin:imax, jmin:jmax], cmap=cmap, norm=matplotlib.colors.LogNorm() ,vmin=0.03) fig.colorbar(mesh, ax=ax[1]) ax[1].set_title('Surface Diatoms (uM N/m$^3$)') mesh = ax[0].pcolormesh(nitrate_masked[0, imin:imax, jmin:jmax], cmap=cmap, norm=matplotlib.colors.LogNorm(), vmin=0.003) ax[0].set_title('Surface Nitrate (uM N/m$^3$)') 
fig.colorbar(mesh, ax=ax[0]) for axi in ax: viz_tools.set_aspect(axi) axi.set_xlim((0,150)) axi.set_ylim((0,280)) axi.text(10, 20, 'Vancouver Island') axi.text(100, 245, 'Jervis Inlet') axi.text(8, 190, 'Comox') ax[0].text(0, -20, 'Model Results for Aug 14, 2016: all are logscale') ax[0].text(0, -30, 'Note surface nitrate source from north, and surface diatom bloom in response') ax[0].text(0, -40, 'However, depth-integrated diatoms are much more widespread due to sub-surface growth') ```
github_jupyter
### What is Matplotlib? Matplotlib is a plotting library for the Python, Pyplot is a matplotlib module which provides a MATLAB-like interface. Matplotlib is designed to be as usable as MATLAB, with the ability to use Python, and the advantage of being free and open-source. #### What does Matplotlib Pyplot do? Matplotlib is a collection of command style functions that make matplotlib work like MATLAB. Each pyplot function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc. ``` # import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd ``` ### Line chart It is a chart in which series of data are plotted by straight lines, in which we can use line chart (straight lines) to compare related features i.e (x and y). We can explicitly define the grid, the x and y axis scale and labels, title and display options. ``` a= range(1,16) b = np.array(a)**2 #Now by just appliying plot command and the below chart will appear plt.plot(a,b) # we can change the line color by following code plt.plot(a,b,color='red') #we can change the type of line and its width by ls and lw variable plt.plot(a,b,color='red', ls='--',lw=2) # OR WE CAN DEFINE THE MARKER plt.plot(a,b,color='green', marker='4',mew=10) # we can enable grid view plt.grid() plt.plot(a,b,color='orange', ls='--',lw=2) ``` Plotting the line chart from panda DataFrame ``` delhi_sale = [45,34,76,65,73,40] bangalore_sale = [51,14,36,95,33,45] pune_sale = [39,85,34,12,55,8] sales = pd.DataFrame({'Delhi':delhi_sale,'Bangalore':bangalore_sale,'Pune':pune_sale}) sales ## Lets plot line chart and xtricks and ytricks are used to specify significant range of axis sales.plot(xticks=range(1,6),yticks=range(0,100,20)) # we can define color for different lines color = ['Red','Yellow','Black'] sales.plot(xticks=range(1,6),yticks=range(0,100,20),color = color) ``` ### Bar Chart Bar Chart is used to 
analyse grouped data. A bar chart or bar graph presents categorical data with rectangular bars whose heights or lengths are proportional to the values that they represent. The bars can be plotted vertically or horizontally.

```
plt.bar(a,b)
```

Plotting the bar chart from a pandas DataFrame:

```
#we can generate bar chart from pandas DataFrame
sales.plot(kind='bar')
```

### Pie Chart

A pie chart represents the whole data as a circle. Different categories make slices along the circle based on their proportion.

```
a = [3,4,5,8,15]
plt.pie(a,labels=['A','B','C','D','E'])

# we can define color for each categories
color_list = ['Red','Blue','Green','black','orange']
plt.pie(a,labels=['A','B','C','D','E'],colors=color_list)
```

### Histograms

A histogram allows us to determine the shape of continuous data. It is one of the plots commonly used in statistics. Using it we can detect the distribution of the data, outliers in the data, and other useful properties. To construct a histogram from continuous data, we need to create bins and put the data in the appropriate bin. The bins parameter tells you the number of bins that your data will be divided into.

```
# For example, here we ask for 20 bins:
x = np.random.randn(100)
plt.hist(x, bins=20)

# And here we ask for bin edges at the locations [-4, -3, -2... 3, 4].
plt.hist(x, bins=range(-4, 5))
```

### Scatter Plot

A scatter plot is used to show the relationship between two sets of data points — for example, a person's weight and height.

```
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = (30 * np.random.rand(N))**2  # 0 to 15 point radii
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.show()
```

### Box Plot

A box plot is used to understand the spread of a variable.
In a box plot, the top boundary of the rectangle represents the third quartile, the bottom boundary represents the first quartile, and the line in the box indicates the median; the vertical line at the top indicates the max value and the vertical line at the bottom indicates the min value ``` box_data = np.random.normal(56,10,50).astype(int) plt.boxplot(box_data) ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 用 tf.data 加载图片 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://tensorflow.google.cn/tutorials/load_data/images"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" />在 tensorflow.google.cn 上查看</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/load_data/images.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" />在 Google Colab 运行</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/load_data/images.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" />在 Github 上查看源代码</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/load_data/images.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载此 notebook</a> </td> </table> Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的 [官方英文文档](https://tensorflow.google.cn/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到 [tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入 [docs-zh-cn@tensorflow.org Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。 本教程提供一个如何使用 `tf.data` 加载图片的简单例子。 本例中使用的数据集分布在图片文件夹中,一个文件夹含有一类图片。 ## 配置 ``` import tensorflow as tf AUTOTUNE = 
tf.data.experimental.AUTOTUNE ``` ## 下载并检查数据集 ### 检索图片 在你开始任何训练之前,你将需要一组图片来教会网络你想要训练的新类别。你已经创建了一个文件夹,存储了最初使用的拥有创作共用许可的花卉照片。 ``` import pathlib data_root_orig = tf.keras.utils.get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', fname='flower_photos', untar=True) data_root = pathlib.Path(data_root_orig) print(data_root) ``` 下载了 218 MB 之后,你现在应该有花卉照片副本: ``` for item in data_root.iterdir(): print(item) import random all_image_paths = list(data_root.glob('*/*')) all_image_paths = [str(path) for path in all_image_paths] random.shuffle(all_image_paths) image_count = len(all_image_paths) image_count all_image_paths[:10] ``` ### 检查图片 现在让我们快速浏览几张图片,这样你知道你在处理什么: ``` import os attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:] attributions = [line.split(' CC-BY') for line in attributions] attributions = dict(attributions) import IPython.display as display def caption_image(image_path): image_rel = pathlib.Path(image_path).relative_to(data_root) return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1]) for n in range(3): image_path = random.choice(all_image_paths) display.display(display.Image(image_path)) print(caption_image(image_path)) print() ``` ### 确定每张图片的标签 列出可用的标签: ``` label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir()) label_names ``` 为每个标签分配索引: ``` label_to_index = dict((name, index) for index, name in enumerate(label_names)) label_to_index ``` 创建一个列表,包含每个文件的标签索引: ``` all_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths] print("First 10 labels indices: ", all_image_labels[:10]) ``` ### 加载和格式化图片 TensorFlow 包含加载和处理图片时你需要的所有工具: ``` img_path = all_image_paths[0] img_path ``` 以下是原始数据: ``` img_raw = tf.io.read_file(img_path) print(repr(img_raw)[:100]+"...") ``` 将它解码为图像 tensor(张量): ``` img_tensor = tf.image.decode_image(img_raw) print(img_tensor.shape) print(img_tensor.dtype) ``` 根据你的模型调整其大小: 
``` img_final = tf.image.resize(img_tensor, [192, 192]) img_final = img_final/255.0 print(img_final.shape) print(img_final.numpy().min()) print(img_final.numpy().max()) ``` 将这些包装在一个简单的函数里,以备后用。 ``` def preprocess_image(image): image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, [192, 192]) image /= 255.0 # normalize to [0,1] range return image def load_and_preprocess_image(path): image = tf.io.read_file(path) return preprocess_image(image) import matplotlib.pyplot as plt image_path = all_image_paths[0] label = all_image_labels[0] plt.imshow(load_and_preprocess_image(img_path)) plt.grid(False) plt.xlabel(caption_image(img_path)) plt.title(label_names[label].title()) print() ``` ## 构建一个 `tf.data.Dataset` ### 一个图片数据集 构建 `tf.data.Dataset` 最简单的方法就是使用 `from_tensor_slices` 方法。 将字符串数组切片,得到一个字符串数据集: ``` path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths) ``` `shapes(维数)` 和 `types(类型)` 描述数据集里每个数据项的内容。在这里是一组标量二进制字符串。 ``` print(path_ds) ``` 现在创建一个新的数据集,通过在路径数据集上映射 `preprocess_image` 来动态加载和格式化图片。 ``` image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE) import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) for n, image in enumerate(image_ds.take(4)): plt.subplot(2,2,n+1) plt.imshow(image) plt.grid(False) plt.xticks([]) plt.yticks([]) plt.xlabel(caption_image(all_image_paths[n])) plt.show() ``` ### 一个`(图片, 标签)`对数据集 使用同样的 `from_tensor_slices` 方法你可以创建一个标签数据集: ``` label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64)) for label in label_ds.take(10): print(label_names[label.numpy()]) ``` 由于这些数据集顺序相同,你可以将他们打包在一起得到一个`(图片, 标签)`对数据集: ``` image_label_ds = tf.data.Dataset.zip((image_ds, label_ds)) ``` 这个新数据集的 `shapes(维数)` 和 `types(类型)` 也是维数和类型的元组,用来描述每个字段: ``` print(image_label_ds) ``` 注意:当你拥有形似 `all_image_labels` 和 `all_image_paths` 的数组,`tf.data.dataset.Dataset.zip` 的替代方法是将这对数组切片。 ``` ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels)) # 元组被解压缩到映射函数的位置参数中 def 
load_and_preprocess_from_path_label(path, label): return load_and_preprocess_image(path), label image_label_ds = ds.map(load_and_preprocess_from_path_label) image_label_ds ``` ### 训练的基本方法 要使用此数据集训练模型,你将会想要数据: * 被充分打乱。 * 被分割为 batch。 * 永远重复。 * 尽快提供 batch。 使用 `tf.data` api 可以轻松添加这些功能。 ``` BATCH_SIZE = 32 # 设置一个和数据集大小一致的 shuffle buffer size(随机缓冲区大小)以保证数据 # 被充分打乱。 ds = image_label_ds.shuffle(buffer_size=image_count) ds = ds.repeat() ds = ds.batch(BATCH_SIZE) # 当模型在训练的时候,`prefetch` 使数据集在后台取得 batch。 ds = ds.prefetch(buffer_size=AUTOTUNE) ds ``` 这里有一些注意事项: 1. 顺序很重要。 * 在 `.repeat` 之后 `.shuffle`,会在 epoch 之间打乱数据(当有些数据出现两次的时候,其他数据还没有出现过)。 * 在 `.batch` 之后 `.shuffle`,会打乱 batch 的顺序,但是不会在 batch 之间打乱数据。 1. 你在完全打乱中使用和数据集大小一样的 `buffer_size(缓冲区大小)`。较大的缓冲区大小提供更好的随机化,但使用更多的内存,直到超过数据集大小。 1. 在从随机缓冲区中拉取任何元素前,要先填满它。所以当你的 `Dataset(数据集)`启动的时候一个大的 `buffer_size(缓冲区大小)`可能会引起延迟。 1. 在随机缓冲区完全为空之前,被打乱的数据集不会报告数据集的结尾。`Dataset(数据集)`由 `.repeat` 重新启动,导致需要再次等待随机缓冲区被填满。 最后一点可以通过使用 `tf.data.Dataset.apply` 方法和融合过的 `tf.data.experimental.shuffle_and_repeat` 函数来解决: ``` ds = image_label_ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE) ds = ds.prefetch(buffer_size=AUTOTUNE) ds ``` ### 传递数据集至模型 从 `tf.keras.applications` 取得 MobileNet v2 副本。 该模型副本会被用于一个简单的迁移学习例子。 设置 MobileNet 的权重为不可训练: ``` mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False) mobile_net.trainable=False ``` 该模型期望它的输出被标准化至 `[-1,1]` 范围内: ``` help(keras_applications.mobilenet_v2.preprocess_input) ``` <pre> …… 该函数使用“Inception”预处理,将 RGB 值从 [0, 255] 转化为 [-1, 1] …… </pre> 在你将输出传递给 MobilNet 模型之前,你需要将其范围从 `[0,1]` 转化为 `[-1,1]`: ``` def change_range(image,label): return 2*image-1, label keras_ds = ds.map(change_range) ``` MobileNet 为每张图片的特征返回一个 `6x6` 的空间网格。 传递一个 batch 的图片给它,查看结果: ``` # 数据集可能需要几秒来启动,因为要填满其随机缓冲区。 image_batch, label_batch = next(iter(keras_ds)) feature_map_batch = mobile_net(image_batch) print(feature_map_batch.shape) ``` 构建一个包装了 MobileNet 的模型并在 
`tf.keras.layers.Dense` 输出层之前使用 `tf.keras.layers.GlobalAveragePooling2D` 来平均那些空间向量: ``` model = tf.keras.Sequential([ mobile_net, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(len(label_names), activation = 'softmax')]) ``` 现在它产出符合预期 shape(维数)的输出: ``` logit_batch = model(image_batch).numpy() print("min logit:", logit_batch.min()) print("max logit:", logit_batch.max()) print() print("Shape:", logit_batch.shape) ``` 编译模型以描述训练过程: ``` model.compile(optimizer=tf.keras.optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=["accuracy"]) ``` 此处有两个可训练的变量 —— Dense 层中的 `weights(权重)` 和 `bias(偏差)`: ``` len(model.trainable_variables) model.summary() ``` 你已经准备好来训练模型了。 注意,出于演示目的每一个 epoch 中你将只运行 3 step,但一般来说在传递给 `model.fit()` 之前你会指定 step 的真实数量,如下所示: ``` steps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy() steps_per_epoch model.fit(ds, epochs=1, steps_per_epoch=3) ``` ## 性能 注意:这部分只是展示一些可能帮助提升性能的简单技巧。深入指南,请看:[输入 pipeline(管道)的性能](https://tensorflow.google.cn/guide/performance/datasets)。 上面使用的简单 pipeline(管道)在每个 epoch 中单独读取每个文件。在本地使用 CPU 训练时这个方法是可行的,但是可能不足以进行 GPU 训练并且完全不适合任何形式的分布式训练。 要研究这点,首先构建一个简单的函数来检查数据集的性能: ``` import time default_timeit_steps = 2*steps_per_epoch+1 def timeit(ds, steps=default_timeit_steps): overall_start = time.time() # 在开始计时之前 # 取得单个 batch 来填充 pipeline(管道)(填充随机缓冲区) it = iter(ds.take(steps+1)) next(it) start = time.time() for i,(images,labels) in enumerate(it): if i%10 == 0: print('.',end='') print() end = time.time() duration = end-start print("{} batches: {} s".format(steps, duration)) print("{:0.5f} Images/s".format(BATCH_SIZE*steps/duration)) print("Total time: {}s".format(end-overall_start)) ``` 当前数据集的性能是: ``` ds = image_label_ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE) ds timeit(ds) ``` ### 缓存 使用 `tf.data.Dataset.cache` 在 epoch 之间轻松缓存计算结果。这是非常高效的,特别是当内存能容纳全部数据时。 在被预处理之后(解码和调整大小),图片在此被缓存了: ``` ds = image_label_ds.cache() ds = 
ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE) ds timeit(ds) ``` 使用内存缓存的一个缺点是必须在每次运行时重建缓存,这使得每次启动数据集时有相同的启动延迟: ``` timeit(ds) ``` 如果内存不够容纳数据,使用一个缓存文件: ``` ds = image_label_ds.cache(filename='./cache.tf-data') ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE).prefetch(1) ds timeit(ds) ``` 这个缓存文件也有可快速重启数据集而无需重建缓存的优点。注意第二次快了多少: ``` timeit(ds) ``` ### TFRecord 文件 #### 原始图片数据 TFRecord 文件是一种用来存储一串二进制 blob 的简单格式。通过将多个示例打包进同一个文件内,TensorFlow 能够一次性读取多个示例,当使用一个远程存储服务,如 GCS 时,这对性能来说尤其重要。 首先,从原始图片数据中构建出一个 TFRecord 文件: ``` image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.io.read_file) tfrec = tf.data.experimental.TFRecordWriter('images.tfrec') tfrec.write(image_ds) ``` 接着,构建一个从 TFRecord 文件读取的数据集,并使用你之前定义的 `preprocess_image` 函数对图像进行解码/重新格式化: ``` image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image) ``` 压缩该数据集和你之前定义的标签数据集以得到期望的 `(图片,标签)` 对: ``` ds = tf.data.Dataset.zip((image_ds, label_ds)) ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE) ds timeit(ds) ``` 这比 `缓存` 版本慢,因为你还没有缓存预处理。 #### 序列化的 Tensor(张量) 要为 TFRecord 文件省去一些预处理过程,首先像之前一样制作一个处理过的图片数据集: ``` paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths) image_ds = paths_ds.map(load_and_preprocess_image) image_ds ``` 现在你有一个 tensor(张量)数据集,而不是一个 `.jpeg` 字符串数据集。 要将此序列化至一个 TFRecord 文件你首先将该 tensor(张量)数据集转化为一个字符串数据集: ``` ds = image_ds.map(tf.io.serialize_tensor) ds tfrec = tf.data.experimental.TFRecordWriter('images.tfrec') tfrec.write(ds) ``` 有了被缓存的预处理,就能从 TFrecord 文件高效地加载数据——只需记得在使用它之前反序列化: ``` ds = tf.data.TFRecordDataset('images.tfrec') def parse(x): result = tf.io.parse_tensor(x, out_type=tf.float32) result = tf.reshape(result, [192, 192, 3]) return result ds = ds.map(parse, num_parallel_calls=AUTOTUNE) ds ``` 现在,像之前一样添加标签和进行相同的标准操作: ``` ds = tf.data.Dataset.zip((ds, 
label_ds)) ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE) ds timeit(ds) ```
github_jupyter
``` !wget https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip !unzip -q kagglecatsanddogs_3367a.zip import os import numpy as np import shutil import glob import warnings warnings.filterwarnings('ignore') cat_files = os.listdir('PetImages/Cat') dog_files = os.listdir('PetImages/Dog') for cat in cat_files: src = os.path.join('PetImages/Cat',cat) dst = os.path.join('PetImages/Cat','cat_'+cat) os.rename( src,dst ) for dog in dog_files: src = os.path.join('PetImages/Dog',dog) dst = os.path.join('PetImages/Dog','dog_'+dog) os.rename( src , dst ) cat_files = glob.glob('PetImages/Cat/*') dog_files = glob.glob('PetImages/Dog/*') print(len(cat_files),len(dog_files)) cat_train = np.random.choice(cat_files, size=3000, replace=False) dog_train = np.random.choice(dog_files, size=3000, replace=False) cat_files = list(set(cat_files) - set(cat_train)) dog_files = list(set(dog_files) - set(dog_train)) cat_val = np.random.choice(cat_files, size=1000, replace=False) dog_val = np.random.choice(dog_files, size=1000, replace=False) cat_files = list(set(cat_files) - set(cat_val)) dog_files = list(set(dog_files) - set(dog_val)) cat_test = np.random.choice(cat_files, size=1000, replace=False) dog_test = np.random.choice(dog_files, size=1000, replace=False) print('Cat datasets:', cat_train.shape, cat_val.shape, cat_test.shape) print('Dog datasets:', dog_train.shape, dog_val.shape, dog_test.shape) #rm -r PetImages/ kagglecatsanddogs_3367a.zip readme\[1\].txt MSR-LA\ -\ 3467.docx ``` ### Splitting Train, Validation, Test Data ``` train_dir = 'training_data' val_dir = 'validation_data' test_dir = 'test_data' train_files = np.concatenate([cat_train, dog_train]) validate_files = np.concatenate([cat_val, dog_val]) test_files = np.concatenate([cat_test, dog_test]) os.mkdir(train_dir) if not os.path.isdir(train_dir) else None os.mkdir(val_dir) if not os.path.isdir(val_dir) else None os.mkdir(test_dir) if not os.path.isdir(test_dir) else 
None for fn in train_files: shutil.copy(fn, train_dir) for fn in validate_files: shutil.copy(fn, val_dir) for fn in test_files: shutil.copy(fn, test_dir) #!rm -r test_data/ training_data/ validation_data/ from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img IMG_DIM = (150,150) train_files = glob.glob('training_data/*') train_imgs = [];train_labels = [] for file in train_files: try: train_imgs.append( img_to_array(load_img( file,target_size=IMG_DIM )) ) train_labels.append(file.split('/')[1].split('_')[0]) except: pass train_imgs = np.array(train_imgs) validation_files = glob.glob('validation_data/*') validation_imgs = [];validation_labels = [] for file in validation_files: try: validation_imgs.append( img_to_array(load_img( file,target_size=IMG_DIM )) ) validation_labels.append(file.split('/')[1].split('_')[0]) except: pass train_imgs = np.array(train_imgs) validation_imgs = np.array(validation_imgs) print('Train dataset shape:', train_imgs.shape, '\tValidation dataset shape:', validation_imgs.shape) # encode text category labels from sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(train_labels) train_labels_enc = le.transform(train_labels) validation_labels_enc = le.transform(validation_labels) ``` ### Image Augmentation ``` train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, fill_mode='nearest') val_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30) val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20) ``` ### Keras Model ``` from keras.layers import Flatten, Dense, Dropout from keras.applications import VGG16 from keras.models import Model from keras import optimizers input_shape = (150, 150, 3) vgg = VGG16(include_top=False, weights='imagenet',input_shape=input_shape) 
vgg.trainable = False for layer in vgg.layers[:-8]: layer.trainable = False vgg_output = vgg.layers[-1].output fc1 = Flatten()(vgg_output) fc1 = Dense(512, activation='relu')(fc1) fc1_dropout = Dropout(0.3)(fc1) fc2 = Dense(512, activation='relu')(fc1_dropout) fc2_dropout = Dropout(0.3)(fc2) output = Dense(1, activation='sigmoid')(fc2_dropout) model = Model(vgg.input, output) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['accuracy']) model.summary() import pandas as pd layers = [(layer, layer.name, layer.trainable) for layer in model.layers] pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable']) from keras.callbacks import EarlyStopping, ModelCheckpoint filepath="saved_models/vgg_transfer_learn_dogvscat.h5" save_model_cb = ModelCheckpoint(filepath, monitor='val_acc', verbose=2, save_best_only=True, mode='max') # callback to stop the training if no improvement early_stopping_cb = EarlyStopping(monitor='val_loss', patience=7, mode='min') callbacks_list = [save_model_cb,early_stopping_cb] history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100, validation_data=val_generator, validation_steps=50, verbose=2,callbacks=callbacks_list) ``` ### Model Performance ``` %matplotlib inline import matplotlib.pyplot as plt f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Basic CNN Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = history.epoch ax1.plot(epoch_list, history.history['acc'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, epoch_list[-1], 3)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, epoch_list[-1], 3)) 
ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") if not os.path.exists('saved_models'): os.mkdir('saved_models') model.save('saved_models/vgg transfer learning.h5') ```
github_jupyter
``` import pandas as pd medicare = pd.read_csv("/netapp2/home/se197/data/CMS/Data/medicare.csv") train_set = medicare[medicare.Hospital != 'BWH'] # MGH; n = 204014 validation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither; n = 115726 import numpy as np fifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'],50) train_set_high = train_set[train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont] train_set_low= train_set[train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont] validation_set_high = validation_set[validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont] validation_set_low = validation_set[validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont] predictor_variable_claims = [ 'Co_CAD_RC0', 'Co_Embolism_RC0', 'Co_DVT_RC0', 'Co_PE_RC0', 'Co_AFib_RC0', 'Co_Hypertension_RC0', 'Co_Hyperlipidemia_RC0', 'Co_Atherosclerosis_RC0', 'Co_HF_RC0', 'Co_HemoStroke_RC0', 'Co_IscheStroke_RC0', 'Co_OthStroke_RC0', 'Co_TIA_RC0', 'Co_COPD_RC0', 'Co_Asthma_RC0', 'Co_Pneumonia_RC0', 'Co_Alcoholabuse_RC0', 'Co_Drugabuse_RC0', 'Co_Epilepsy_RC0', 'Co_Cancer_RC0', 'Co_MorbidObesity_RC0', 'Co_Dementia_RC0', 'Co_Depression_RC0', 'Co_Bipolar_RC0', 'Co_Psychosis_RC0', 'Co_Personalitydisorder_RC0', 'Co_Adjustmentdisorder_RC0', 'Co_Anxiety_RC0', 'Co_Generalizedanxiety_RC0', 'Co_OldMI_RC0', 'Co_AcuteMI_RC0', 'Co_PUD_RC0', 'Co_UpperGIbleed_RC0', 'Co_LowerGIbleed_RC0', 'Co_Urogenitalbleed_RC0', 'Co_Othbleed_RC0', 'Co_PVD_RC0', 'Co_LiverDisease_RC0', 'Co_MRI_RC0', 'Co_ESRD_RC0', 'Co_Obesity_RC0', 'Co_Sepsis_RC0', 'Co_Osteoarthritis_RC0', 'Co_RA_RC0', 'Co_NeuroPain_RC0', 'Co_NeckPain_RC0', 'Co_OthArthritis_RC0', 'Co_Osteoporosis_RC0', 'Co_Fibromyalgia_RC0', 'Co_Migraine_RC0', 'Co_Headache_RC0', 'Co_OthPain_RC0', 'Co_GeneralizedPain_RC0', 'Co_PainDisorder_RC0', 'Co_Falls_RC0', 'Co_CoagulationDisorder_RC0', 'Co_WhiteBloodCell_RC0', 'Co_Parkinson_RC0', 'Co_Anemia_RC0', 'Co_UrinaryIncontinence_RC0', 'Co_DecubitusUlcer_RC0', 'Co_Oxygen_RC0', 'Co_Mammography_RC0', 'Co_PapTest_RC0', 'Co_PSATest_RC0', 
'Co_Colonoscopy_RC0', 'Co_FecalOccultTest_RC0', 'Co_FluShot_RC0', 'Co_PneumococcalVaccine_RC0' , 'Co_RenalDysfunction_RC0', 'Co_Valvular_RC0', 'Co_Hosp_Prior30Days_RC0', 'Co_RX_Antibiotic_RC0', 'Co_RX_Corticosteroid_RC0', 'Co_RX_Aspirin_RC0', 'Co_RX_Dipyridamole_RC0', 'Co_RX_Clopidogrel_RC0', 'Co_RX_Prasugrel_RC0', 'Co_RX_Cilostazol_RC0', 'Co_RX_Ticlopidine_RC0', 'Co_RX_Ticagrelor_RC0', 'Co_RX_OthAntiplatelet_RC0', 'Co_RX_NSAIDs_RC0', 'Co_RX_Opioid_RC0', 'Co_RX_Antidepressant_RC0', 'Co_RX_AAntipsychotic_RC0', 'Co_RX_TAntipsychotic_RC0', 'Co_RX_Anticonvulsant_RC0', 'Co_RX_PPI_RC0', 'Co_RX_H2Receptor_RC0', 'Co_RX_OthGastro_RC0', 'Co_RX_ACE_RC0', 'Co_RX_ARB_RC0', 'Co_RX_BBlocker_RC0', 'Co_RX_CCB_RC0', 'Co_RX_Thiazide_RC0', 'Co_RX_Loop_RC0', 'Co_RX_Potassium_RC0', 'Co_RX_Nitrates_RC0', 'Co_RX_Aliskiren_RC0', 'Co_RX_OthAntihypertensive_RC0', 'Co_RX_Antiarrhythmic_RC0', 'Co_RX_OthAnticoagulant_RC0', 'Co_RX_Insulin_RC0', 'Co_RX_Noninsulin_RC0', 'Co_RX_Digoxin_RC0', 'Co_RX_Statin_RC0', 'Co_RX_Lipid_RC0', 'Co_RX_Lithium_RC0', 'Co_RX_Benzo_RC0', 'Co_RX_ZDrugs_RC0', 'Co_RX_OthAnxiolytic_RC0', 'Co_RX_Barbiturate_RC0', 'Co_RX_Dementia_RC0', 'Co_RX_Hormone_RC0', 'Co_RX_Osteoporosis_RC0', 'Co_N_Drugs_RC0', 'Co_N_Hosp_RC0', 'Co_Total_HospLOS_RC0', 'Co_N_MDVisit_RC0', 'Co_RX_AnyAspirin_RC0', 'Co_RX_AspirinMono_RC0', 'Co_RX_ClopidogrelMono_RC0', 'Co_RX_AspirinClopidogrel_RC0', 'Co_RX_DM_RC0', 'Co_RX_Antipsychotic_RC0' ] co_train_gpop = train_set[predictor_variable_claims] co_train_high = train_set_high[predictor_variable_claims] co_train_low = train_set_low[predictor_variable_claims] co_validation_gpop = validation_set[predictor_variable_claims] co_validation_high = validation_set_high[predictor_variable_claims] co_validation_low = validation_set_low[predictor_variable_claims] out_train_death_gpop = train_set['ehr_claims_death'] out_train_death_high = train_set_high['ehr_claims_death'] out_train_death_low = train_set_low['ehr_claims_death'] out_validation_death_gpop = 
validation_set['ehr_claims_death'] out_validation_death_high = validation_set_high['ehr_claims_death'] out_validation_death_low = validation_set_low['ehr_claims_death'] ``` # Template LR ``` def lasso(X,y): from sklearn.linear_model import Lasso from sklearn.model_selection import GridSearchCV model = Lasso() param_grid = [ {'alpha' : np.logspace(-4, 4, 20)} ] clf = GridSearchCV(model, param_grid, cv = 5, verbose = True, n_jobs = 1) best_clf = clf.fit(X, y) return best_clf def scores(X_train,y_train, best_clf): from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score from sklearn.metrics import fbeta_score from sklearn.metrics import roc_auc_score from sklearn.metrics import log_loss import numpy as np pred = np.round(best_clf.predict(X_train)) actual = y_train prob = best_clf.predict(X_train) print(accuracy_score(actual,pred)) print(f1_score(actual,pred, average = 'macro')) print(fbeta_score(actual,pred, average = 'macro', beta = 2)) print(roc_auc_score(actual, prob)) print(log_loss(actual,prob)) def cross_val(X,y): from sklearn.model_selection import KFold from sklearn.model_selection import cross_validate from sklearn.metrics import log_loss from sklearn.metrics import roc_auc_score from sklearn.metrics import fbeta_score import sklearn import numpy as np cv = KFold(n_splits=5, random_state=1, shuffle=True) log_loss = [] auc = [] accuracy = [] f1 = [] f2 = [] for train_index, test_index in cv.split(X): X_train, X_test, y_train, y_test = X.iloc[train_index], X.iloc[test_index], y.iloc[train_index], y.iloc[test_index] model = lasso(X_train, y_train) prob = model.predict(X_test) # prob is a vector of probabilities pred = np.round(model.predict(X_test)) # pred is the rounded predictions log_loss.append(sklearn.metrics.log_loss(y_test, prob)) auc.append(sklearn.metrics.roc_auc_score(y_test, prob)) accuracy.append(sklearn.metrics.accuracy_score(y_test, pred)) f1.append(sklearn.metrics.f1_score(y_test, pred, average = 'macro')) 
f2.append(fbeta_score(y_test,pred, average = 'macro', beta = 2)) print(np.mean(accuracy)) print(np.mean(f1)) print(np.mean(f2)) print(np.mean(auc)) print(np.mean(log_loss)) ``` # General Population ``` from imblearn.over_sampling import SMOTE sm = SMOTE(random_state = 42) co_train_gpop_sm,out_train_death_gpop_sm = sm.fit_resample(co_train_gpop,out_train_death_gpop) best_clf = lasso(co_train_gpop_sm, out_train_death_gpop_sm) cross_val(co_train_gpop_sm, out_train_death_gpop_sm) print() scores(co_train_gpop, out_train_death_gpop, best_clf) print() scores(co_validation_gpop, out_validation_death_gpop, best_clf) #comb = [] #for i in range(len(predictor_variable_claims)): #comb.append(predictor_variable_claims[i] + str(best_clf.best_estimator_.coef_[:,i:i+1])) #comb ``` # High Continuity ``` from imblearn.over_sampling import SMOTE sm = SMOTE(random_state = 42) co_train_high_sm,out_train_death_high_sm = sm.fit_resample(co_train_high, out_train_death_high) best_clf = lasso(co_train_high_sm, out_train_death_high_sm) cross_val(co_train_high_sm, out_train_death_high_sm) print() scores(co_train_high, out_train_death_high, best_clf) print() scores(co_validation_high, out_validation_death_high, best_clf) #comb = [] #for i in range(len(predictor_variable_claims)): #comb.append(predictor_variable_claims[i] + str(best_clf.best_estimator_.coef_[:,i:i+1])) #comb ``` # Low Continuity ``` from imblearn.over_sampling import SMOTE sm = SMOTE(random_state = 42) co_train_low_sm,out_train_death_low_sm = sm.fit_resample(co_train_low,out_train_death_low) best_clf = lasso(co_train_low_sm, out_train_death_low_sm) cross_val(co_train_low_sm, out_train_death_low_sm) print() scores(co_train_low, out_train_death_low, best_clf) print() scores(co_validation_low, out_validation_death_low, best_clf) #comb = [] #for i in range(len(predictor_variable_claims)): #comb.append(predictor_variable_claims[i] + str(best_clf.best_estimator_.coef_[:,i:i+1])) #comb ```
github_jupyter
Import des données ``` from __future__ import division, print_function, unicode_literals # imports import numpy as np import os import pandas as pd # stabilité du notebook d'une exécution à l'autre np.random.seed(42) # ignorer les warnings inutiles (voir SciPy issue #5998) import warnings warnings.filterwarnings(action="ignore", message="^internal gelsd") url = 'https://raw.githubusercontent.com/HugoLeBoennec/A4_Intelligence_Artificielle/main/Projet/' general_data = pd.read_csv(url + "dataset/general_data.csv", error_bad_lines=False) employee_survey_data = pd.read_csv(url + "dataset/employee_survey_data.csv", error_bad_lines=False) manager_survey_data = pd.read_csv(url + "dataset/manager_survey_data.csv", error_bad_lines=False) out_time = pd.read_csv(url + "dataset/out_time.csv", error_bad_lines=False) in_time = pd.read_csv(url + "dataset/in_time.csv", error_bad_lines=False) employee_survey_data ``` Fusion des tableaux ``` alldata = pd.merge(general_data, employee_survey_data) alldata ``` Affichage des données ``` import seaborn as sns import matplotlib.pyplot as plt sns.countplot(x=alldata.Attrition, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.BusinessTravel, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.Department, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.DistanceFromHome, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.Education, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.EducationField, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.EmployeeCount, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.EducationField, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.Gender, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.JobLevel, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.JobRole, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.MaritalStatus, data= alldata, 
palette='hls') plt.show() sns.countplot(x=alldata.MonthlyIncome, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.NumCompaniesWorked, data= alldata, palette='hls') plt.show() sns.countplot(x=alldata.Over18, data= alldata, palette='hls') plt.show() pd.crosstab(alldata.WorkLifeBalance,alldata.MaritalStatus).plot(kind='bar') plt.title('MaritalStatus par WorkLifeBalance') plt.xlabel('MaritalStatus') plt.ylabel('WorkLifeBalance') plt.show() table1 = pd.crosstab(alldata.WorkLifeBalance, alldata.MaritalStatus) table1.div(table1.sum(1).astype(float), axis=0).plot(kind='bar', stacked = True) plt.title('MaritalStatus par WorkLifeBalance') plt.xlabel('MaritalStatus') plt.ylabel('WorkLifeBalance') plt.show() table2 = pd.crosstab(alldata.JobSatisfaction, alldata.Attrition) table2.div(table2.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True) plt.title("Attrition en fonction de la satisfation au travail") plt.xlabel("Satisfaction au travail") plt.ylabel("Proportion d'employés") plt.show() ``` suppression des nan ``` copy_alldata = alldata copy_alldata.dropna(inplace = True) copy_alldata.isnull().sum().sort_values(ascending=False).head() ``` remplacement des variables qualitatives en quantitative ``` #Change Attrition column values to numeric values attrition={'Yes':1, 'No':0} Gender={'Female':1, 'Male':0} MaritalStatus={'Divorced':2, 'Married':1, 'Single':0 } Department={ 'Research & Development':2, 'Human Resources':1, 'Sales':0 } BusinessTravel={ 'Travel_Rarely':0, 'Travel_Frequently':1, 'Non-Travel':2} EducationField={ 'Life Sciences':0, 'Other':1, 'Medical':2, 'Marketing':3, 'Technical Degree':4, 'Human Resources':5} JobRole={ 'Healthcare Representative':0, 'Research Scientist':1, 'Sales Executive':2, 'Human Resources':3, 'Research Director':4, 'Laboratory Technician':5, 'Manufacturing Director':6, 'Sales Representative': 7, 'Manager':8} #raw_data.Attrition=[attrition[item] for item in raw_data.Attrition] 
copy_alldata.Attrition=copy_alldata.Attrition.map(lambda x:attrition[x]) copy_alldata.EducationField=copy_alldata.EducationField.map(lambda x:EducationField[x]) copy_alldata.Gender=copy_alldata.Gender.map(lambda x:Gender[x]) copy_alldata.MaritalStatus=copy_alldata.MaritalStatus.map(lambda x:MaritalStatus[x]) copy_alldata.Department=copy_alldata.Department.map(lambda x:Department[x]) copy_alldata.BusinessTravel=copy_alldata.BusinessTravel.map(lambda x:BusinessTravel[x]) copy_alldata.JobRole=copy_alldata.JobRole.map(lambda x:JobRole[x]) print(copy_alldata) ``` calcule des corélations ``` corr_features=copy_alldata.corr() corr_features.iloc[:,1] def Heat_map(data, features): plt.figure(figsize=(20, 10)) sns.heatmap(data[features].corr(), cmap='RdBu', annot=True) plt.xticks(rotation=45) plt.title('Heatmap of Correlation Matrix') Heat_map(copy_alldata,list(copy_alldata.columns)) ``` application d'un model ``` # X = copy_alldata.loc[:, copy_alldata.columns != 'quit'] X, y = copy_alldata.loc[:, copy_alldata.columns !="Attrition"], copy_alldata.loc[:, "Attrition"] X = pd.get_dummies(X, drop_first= True) X.head() y = pd.get_dummies(y, drop_first= True) y.head() from sklearn.model_selection import train_test_split y = np.ravel(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y) !pip install graphviz; yellowbrick from sklearn import tree from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.tree import export_graphviz # display the tree within a Jupyter notebook from IPython.display import SVG from graphviz import Source from IPython.display import display from ipywidgets import interactive, IntSlider, FloatSlider, interact import ipywidgets from IPython.display import Image from subprocess import call import matplotlib.image as mpimg @interact def plot_tree(crit=["gini", "entropy"], split=["best", "random"], 
depth=IntSlider(min=1,max=30,value=2, continuous_update=False), min_split=IntSlider(min=2,max=5,value=2, continuous_update=False), min_leaf=IntSlider(min=1,max=5,value=1, continuous_update=False)): estimator = DecisionTreeClassifier(random_state=0, criterion=crit, splitter = split, max_depth = depth, min_samples_split=min_split, min_samples_leaf=min_leaf) estimator.fit(X_train, y_train) print('Decision Tree Training Accuracy: {:.3f}'.format(accuracy_score(y_train, estimator.predict(X_train)))) print('Decision Tree Test Accuracy: {:.3f}'.format(accuracy_score(y_test, estimator.predict(X_test)))) # graph = Source(tree.export_graphviz(estimator, # out_file=None, # feature_names=X_train.columns, # class_names=['0', '1'], # filled = True)) # display(Image(data=graph.pipe(format='png'))) return estimator @interact def plot_tree_rf(crit=["gini", "entropy"], bootstrap=["True", "False"], depth=IntSlider(min=1,max=30,value=3, continuous_update=False), forests=IntSlider(min=1,max=200,value=100,continuous_update=False), min_split=IntSlider(min=2,max=5,value=2, continuous_update=False), min_leaf=IntSlider(min=1,max=5,value=1, continuous_update=False)): estimator = RandomForestClassifier(random_state=1, criterion=crit, bootstrap=bootstrap, n_estimators=forests, max_depth=depth, min_samples_split=min_split, min_samples_leaf=min_leaf, n_jobs=-1, verbose=False).fit(X_train, y_train) print('Random Forest Training Accuracy: {:.3f}'.format(accuracy_score(y_train, estimator.predict(X_train)))) print('Random Forest Test Accuracy: {:.3f}'.format(accuracy_score(y_test, estimator.predict(X_test)))) num_tree = estimator.estimators_[0] print('\nVisualizing Decision Tree:', 0) # graph = Source(tree.export_graphviz(num_tree, # out_file=None, # feature_names=X_train.columns, # class_names=['0', '1'], # filled = True)) # display(Image(data=graph.pipe(format='png'))) return estimator from yellowbrick.model_selection import FeatureImportances plt.rcParams['figure.figsize'] = (12,8) 
plt.style.use("ggplot") rf = RandomForestClassifier(bootstrap='True', class_weight=None, criterion='gini', max_depth=3, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=-1, oob_score=False, random_state=1, verbose=False, warm_start=False) viz = FeatureImportances(rf) viz.fit(X_train, y_train) viz.show(); from yellowbrick.classifier import ROCAUC visualizer = ROCAUC(rf, classes=["stayed", "quit"]) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.poof(); dt = DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=2, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort=False, random_state=0, splitter='best') visualizer = ROCAUC(dt, classes=["stayed", "quit"]) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.poof(); from sklearn.linear_model import LogisticRegressionCV logit = LogisticRegressionCV(random_state=1, n_jobs=-1,max_iter=500, cv=10) lr = logit.fit(X_train, y_train) print('Logistic Regression Accuracy: {:.3f}'.format(accuracy_score(y_test, lr.predict(X_test)))) visualizer = ROCAUC(lr, classes=["stayed", "quit"]) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.poof(); ```
github_jupyter
# write a list of dictionaries to a CSV file
def write_dicts_to_csv(table, filename, fieldnames):
    """Write `table` (a list of dicts) to `filename` as a UTF-8 CSV file.

    `fieldnames` fixes the column order; a header row is written first.
    """
    with open(filename, 'w', newline='', encoding='utf-8') as sink:
        writer = csv.DictWriter(sink, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(table)
The number of photos is in `['photos']['total']`, so we can extract that from the response data. ``` user_id = '123262983@N05' # vutheatre's ID endpoint_url = 'https://www.flickr.com/services/rest' method = 'flickr.people.getPhotos' filename = 'theatre-metadata.csv' param_dict = { 'method' : method, # 'tags' : 'kangaroo', # 'extras' : 'url_o', 'per_page' : '1', # default is 100, maximum is 500. Use paging to retrieve more than 500. 'page' : '1', 'user_id' : user_id, 'oauth_consumer_key' : api_key, 'nojsoncallback' : '1', # this parameter causes the API to return actual JSON instead of its weird default string 'format' : 'json' # overrides the default XML serialization for the search results } metadata_response = requests.get(endpoint_url, params = param_dict) # print(metadata_response.url) # uncomment this if testing is needed, again don't reveal key in notebook data = metadata_response.json() print(json.dumps(data, indent=4)) print() number_photos = int(data['photos']['total']) # need to convert string to number print('Number of photos: ', number_photos) ``` ## Test to see what kinds of useful metadata we can get The instructions for the [method](https://www.flickr.com/services/api/flickr.people.getPhotos.html) says what kinds of "extras" you can request metadata about. Let's ask for everything that we care about and don't already know: `description,license,original_format,date_taken,original_format,geo,tags,machine_tags,media,url_t,url_o` `url_t` is the URL for a thumbnail of the image and `url_o` is the URL to retrieve the original photo. The dimensions of these images will be given automatically when we request the URLs, so we don't need `o_dims`. There isn't any place to request the title, since it's automatically returned. ``` param_dict = { 'method' : method, 'extras' : 'description,license,original_format,date_taken,original_format,geo,tags,machine_tags,media,url_t,url_o', 'per_page' : '1', # default is 100, maximum is 500. 
def extract_data(photo_number, data):
    """Flatten the Flickr API metadata for one photo into a plain dict.

    `data` is the parsed JSON response from flickr.people.getPhotos (with the
    description/geo/url extras requested); `photo_number` indexes into the
    page's photo list. Returns a dict keyed by our CSV column names.
    """
    photo = data['photos']['photo'][photo_number]
    record = {
        'id': photo['id'],
        'title': photo['title'],
        'license': photo['license'],
        'description': photo['description']['_content'],
        # Flickr returns "YYYY-MM-DD HH:MM:SS"; swapping the space for a 'T'
        # yields ISO 8601 dateTime (time zone unknown).
        'date_taken': photo['datetaken'].replace(' ', 'T'),
        'tags': photo['tags'],
        'machine_tags': photo['machine_tags'],
        'original_format': photo['originalformat'],
        'latitude': photo['latitude'],
        'longitude': photo['longitude'],
        'thumbnail_url': photo['url_t'],
        'original_url': photo['url_o'],
        'original_height': photo['height_o'],
        'original_width': photo['width_o'],
    }
    return record
```
# Page through the account's photos `per_page` at a time, flattening each
# photo's metadata with extract_data() and re-writing the CSV after every page.
per_page = 5 # use 500 for full download, use smaller number like 5 for testing
pages = number_photos // per_page # the // operator returns the integer part of the division ("floor")

table = []

#for page_number in range(0, pages + 1): # need to add one to get the final partial page
for page_number in range(0, 1): # use this to do only one page for testing
    print('retrieving page ', page_number + 1)
    page_string = str(page_number + 1)
    param_dict = {
        'method' : method,
        'extras' : 'description,license,original_format,date_taken,original_format,geo,tags,machine_tags,media,url_t,url_o',
        'per_page' : str(per_page), # default is 100, maximum is 500.
        'page' : page_string,
        'user_id' : user_id,
        'oauth_consumer_key' : api_key,
        'nojsoncallback' : '1', # this parameter causes the API to return actual JSON instead of its weird default string
        'format' : 'json' # overrides the default XML serialization for the search results
    }
    metadata_response = requests.get(endpoint_url, params = param_dict)
    data = metadata_response.json()
#    print(json.dumps(data, indent=4)) # uncomment this line for testing

    # data['photos']['photo'] is the number of photos for which data was returned
    for image_number in range(0, len(data['photos']['photo'])):
        photo_dictionary = extract_data(image_number, data)
        table.append(photo_dictionary)

    # write the data to a file
    # We could just do this for all the data at the end.
    # But if the search fails in the middle, we will at least get partial results
    fieldnames = photo_dictionary.keys() # use the keys from the last dictionary for column headers; assume all are the same
    write_dicts_to_csv(table, filename, fieldnames)
    sleep(1) # wait a second to avoid getting blocked for hitting the API too rapidly

print('Done')
```
github_jupyter
``` import matplotlib.pyplot as plt import torch import gpytorch import time import numpy as np %matplotlib inline import pickle import finite_ntk %pdb class ExactGPModel(gpytorch.models.ExactGP): # exact RBF Gaussian process class def __init__(self, train_x, train_y, likelihood, model, use_linearstrategy=False): super(ExactGPModel, self).__init__(train_x, train_y, likelihood) self.mean_module = gpytorch.means.ConstantMean() self.covar_module = finite_ntk.lazy.NTK( model=model, use_linearstrategy=use_linearstrategy ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return gpytorch.distributions.MultivariateNormal(mean_x, covar_x) model = torch.nn.Sequential( torch.nn.Linear(5, 200), torch.nn.ELU(), torch.nn.Linear(200, 2000), torch.nn.ELU(), torch.nn.Linear(2000, 200), torch.nn.ELU(), torch.nn.Linear(200, 1), ).cuda() likelihood = gpytorch.likelihoods.GaussianLikelihood().cuda() gpmodel = ExactGPModel(torch.randn(10, 5).cuda(), torch.randn(10).cuda(), likelihood, model).cuda() parspace_gpmodel = ExactGPModel(torch.randn(10, 5).cuda(), torch.randn(10).cuda(), likelihood, model, use_linearstrategy=True).cuda() def run_model_list(mm, n_list): num_data_list = [] for n in num_data_points: mm.train() #parspace_gpmodel.train() print('N: ', n) data = torch.randn(n, 5).cuda() y = torch.randn(n).cuda() mm.set_train_data(data, y, strict=False) #parspace_gpmodel.set_train_data(data, y, strict=False) start = time.time() logprob = likelihood(mm(data)).log_prob(y) log_end = time.time() - start #start = time.time() #logprob = likelihood(parspace_gpmodel(data)).log_prob(y) #plog_end = time.time() - start mm.eval() #parspace_gpmodel.eval() with gpytorch.settings.fast_pred_var(), gpytorch.settings.max_eager_kernel_size(200): test_data = torch.randn(50, 5).cuda() start = time.time() pred_vars = mm(test_data).mean var_end = time.time() - start # start = time.time() # pred_vars = parspace_gpmodel(data).variance # pvar_end = time.time() - start #timings = 
[log_end, plog_end, var_end, pvar_end] #timings = [log_end, plog_end] #print(timings) num_data_list.append([log_end, var_end]) mm.prediction_strategy = None return num_data_list num_data_points = [300, 500, 1000, 5000, 10000, 25000, 50000, 100000] fun_space_list = run_model_list(gpmodel, num_data_points) del gpmodel par_space_list = run_model_list(parspace_gpmodel, num_data_points) del parspace_gpmodel plt.plot(num_data_points, np.stack(fun_space_list)[:,1], marker = 'x', label = 'Function Space') plt.plot(num_data_points, np.stack(par_space_list)[:,1], marker = 'x', label = 'Parameter Space') plt.xscale('log') plt.yscale('log') plt.grid() plt.legend() numpars = 0 for p in model.parameters(): numpars += p.numel() print(numpars) with open('../data/ntk_mlp_varying_data_speed_gp.pkl', 'wb') as handle: plot_dict = { 'N': num_data_points, 'ntk': fun_space_list, 'fisher': par_space_list, 'numpars': numpars } pickle.dump(plot_dict, handle, pickle.HIGHEST_PROTOCOL) ```
github_jupyter
### Lab 3: Expectation Maximization and Variational Autoencoder ### Machine Learning 2 (2017/2018) * The lab exercises should be made in groups of two or three people. * The deadline is Friday, 01.06. * Assignment should be submitted through BlackBoard! Make sure to include your and your teammates' names with the submission. * Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file should be "studentid1\_studentid2\_lab#", for example, the attached file should be "12345\_12346\_lab1.ipynb". Only use underscores ("\_") to connect ids, otherwise the files cannot be parsed. Notes on implementation: * You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please ask. * Use __one cell__ for code and markdown answers only! * Put all code in the cell with the ```# YOUR CODE HERE``` comment and overwrite the ```raise NotImplementedError()``` line. * For theoretical questions, put your solution using LaTeX style formatting in the YOUR ANSWER HERE cell. * Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline. * Large parts of you notebook will be graded automatically. Therefore it is important that your notebook can be run completely without errors and within a reasonable time limit. To test your notebook before submission, select Kernel -> Restart \& Run All. $\newcommand{\bx}{\mathbf{x}} \newcommand{\bpi}{\mathbf{\pi}} \newcommand{\bmu}{\mathbf{\mu}} \newcommand{\bX}{\mathbf{X}} \newcommand{\bZ}{\mathbf{Z}} \newcommand{\bz}{\mathbf{z}}$ ### Installing PyTorch In this lab we will use PyTorch. PyTorch is an open source deep learning framework primarily developed by Facebook's artificial-intelligence research group. In order to install PyTorch in your conda environment go to https://pytorch.org and select your operating system, conda, Python 3.6, no cuda. 
Copy the text from the "Run this command:" box. Now open a terminal and activate your 'ml2labs' conda environment. Paste the text and run. After the installation is done you should restart Jupyter. ### MNIST data In this Lab we will use several methods for unsupervised learning on the MNIST dataset of written digits. The dataset contains digital images of handwritten numbers $0$ through $9$. Each image has 28x28 pixels that each take 256 values in a range from white ($= 0$) to black ($=1$). The labels belonging to the images are also included. Fortunately, PyTorch comes with a MNIST data loader. The first time you run the box below it will download the MNIST data set. That can take a couple of minutes. The main data types in PyTorch are tensors. For Part 1, we will convert those tensors to numpy arrays. In Part 2, we will use the torch module to directly work with PyTorch tensors. ``` %pylab inline import torch from torchvision import datasets, transforms train_dataset = datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])) train_labels = train_dataset.train_labels.numpy() train_data = train_dataset.train_data.numpy() # For EM we will use flattened data train_data = train_data.reshape(train_data.shape[0], -1) ``` ## Part 1: Expectation Maximization We will use the Expectation Maximization (EM) algorithm for the recognition of handwritten digits in the MNIST dataset. The images are modelled as a Bernoulli mixture model (see Bishop $\S9.3.3$): $$ p(\bx|\bmu, \bpi) = \sum_{k=1}^K \pi_k \prod_{i=1}^D \mu_{ki}^{x_i}(1-\mu_{ki})^{(1-x_i)} $$ where $x_i$ is the value of pixel $i$ in an image, $\mu_{ki}$ represents the probability that pixel $i$ in class $k$ is black, and $\{\pi_1, \ldots, \pi_K\}$ are the mixing coefficients of classes in the data. We want to use this data set to classify new images of handwritten numbers. 
def binarize(X):
    """Binarize grey-scale MNIST pixels (0..255) to floats in {0.0, 1.0}.

    Rescales to [0, 1] and rounds, so values at or above half intensity
    become 1 and the rest become 0.
    """
    scaled = X.astype(np.double) / 255
    return np.rint(scaled)


def plot_MNIST_digits(data):
    """Show each row of `data` as a 28x28 grey-scale image, side by side."""
    n_images = data.shape[0]
    for idx in range(n_images):
        plt.subplot(1, n_images, idx + 1)
        plt.imshow(data[idx].reshape(28, 28), interpolation='nearest', cmap='Greys')
        plt.axis('off')
    plt.show()


def plot_bin_MNIST_digits(digits, bin_train_data, train_labels, train_data):
    """For each requested digit, plot five original and five binarized samples."""
    for digit in digits:
        print('\n\n' + '=' * 50 + '\n\n')
        print('plotting label {}:\n'.format(digit))
        mask = np.asarray(train_labels) == digit
        bin_examples = np.asarray(bin_train_data)[mask]
        float_examples = np.asarray(train_data)[mask]
        print('... float')
        plot_MNIST_digits(np.vstack(float_examples[0:5]))
        print('... binary')
        plot_MNIST_digits(np.vstack(bin_examples[0:5]))
        print('\n\n' + '=' * 50 + '\n\n')
def E_step(X, mu, pi):
    """E-step of EM for a Bernoulli mixture model.

    X  : (N, D) binary data
    mu : (K, D) per-component Bernoulli means
    pi : (K,)   mixing coefficients
    Returns gamma : (N, K) responsibilities; each row sums to 1 (or is all
    zero when the likelihood underflows to zero for a sample).
    """
    # Expand X from (N, D) to (N, 1, D) so it broadcasts against mu (K, D).
    X_ = np.expand_dims(X, axis=1)
    # Unnormalized responsibilities: pi_k * prod_i mu_ki^x_i (1-mu_ki)^(1-x_i)
    g = pi * np.prod(mu ** X_ * (1 - mu) ** (1 - X_), axis=2)
    # Per-sample normalizer, kept as (N, 1) for broadcasting.
    normal = np.expand_dims(np.sum(g, axis=1), axis=1)
    # Normalize; guard against division by zero.
    gamma = np.divide(g, normal, out=np.zeros_like(g), where=normal != 0)
    return gamma


def M_step(X, gamma):
    """M-step of EM for a Bernoulli mixture model.

    X     : (N, D) binary data
    gamma : (N, K) responsibilities from the E-step
    Returns (mu, pi): updated (K, D) means and (K,) mixing coefficients.
    """
    N_k = np.sum(gamma, axis=0)                      # effective counts per component
    pi = N_k / np.sum(N_k)
    # Broadcast X to (N, K, D) so each component sees its weighted data.
    X_ = np.expand_dims(X, axis=1)
    X__ = np.repeat(X_, gamma.shape[1], axis=1)
    gamma_ = np.expand_dims(gamma, axis=2)           # (N, K, 1)
    mu_ = np.sum(X__ * gamma_, axis=0)               # (K, D) weighted sums
    mu = mu_ / np.expand_dims(N_k, axis=1)
    return mu, pi


def EM(X, K, max_iter, mu=None, pi=None, plotting=False, threshold=1e-1):
    """Run EM on the Bernoulli mixture until convergence or `max_iter` epochs.

    X        : (N, D) binary data
    K        : size of the latent space (number of mixture components)
    max_iter : maximum number of E/M iterations
    mu, pi   : optional initial parameters (random/uniform when None)
    plotting : if True, plot the means after every iteration
    threshold: stop when neither mu nor pi moved more than this (L2 norm)
    Returns the final (mu, pi).
    """
    start_total = time.time()
    print('=' * 50 + '\ninitialize EM\n')
    # BUG FIX: initialize mu from the shape of the *input* X; the original used
    # the notebook-global `X_test`, which breaks when that variable is absent.
    if mu is None:
        mu = np.random.uniform(low=.25, high=.75, size=(K, X.shape[1]))
    if pi is None:
        pi = np.ones(K) / K
    mu_ = mu
    pi_ = pi
    if plotting:
        print('initialize mu:')
        plot_MNIST_digits(mu)
        print('=' * 50 + '\n')
    # Loop over epochs.
    for i in range(max_iter):
        start = time.time()
        print('iteration {}'.format(i + 1))
        gamma = E_step(X, mu, pi)
        mu, pi = M_step(X, gamma)
        # Plot the current latent space.
        if plotting:
            plot_MNIST_digits(mu)
        # Break when the parameter changes are small -> converged.
        if all([i > 0, np.linalg.norm(mu_ - mu) < threshold,
                np.linalg.norm(pi_ - pi) < threshold]):
            print('stop because of convergence after {} iterations'.format(i + 1))
            break
        mu_ = mu
        pi_ = pi
    print('\n\n' + '=' * 50 + '\n\n{} iterations in {} min\n\n'.format(
        i + 1, (time.time() - start_total) / 60) + '=' * 50 + '\n\n')
    return mu, pi
What are the identified mixing coefficients for digits $2$, $3$ and $4$, and how do these compare to the true ones? YOUR ANSWER HERE Yes, we can identify which element in the latent space corresponds to which digit, as there is one element for each of the digits $2,3$ and $4$. The mixing coefficients for the digits are $0.35, 0.30, 0.35$. This is slightly different to the true values, where each number should appear $\frac{1}{3}$ of the time. ``` print(pi) ``` ### 1.4 Experiments (20 points) Perform the follow-up experiments listed below using your implementation of the EM algorithm. For each of these, describe/comment on the obtained results and give an explanation. You may still use your dataset with only digits 2, 3 and 4 as otherwise computations can take very long. #### 1.4.1 Size of the latent space (5 points) Run EM with $K$ larger or smaller than the true number of classes. Describe your results. ``` # YOUR CODE HERE ######################## ######################## ######################## # smaller K = 2 _, _ = EM(X,K,epochs,plotting=True) ######################## ######################## ######################## ``` ###YOUR ANSWER HERE When we use $K=2$, the latent space can only represent two different classes, while there are still three classes present. As a result, the latent representation starts mixing the representations for the different numbers. We can see this especially for the digits $2$ and $4$, which are merged into a joined representation. Additionally, we can see that in contrast to the setting with three classes in the latent space, the algorithm fails to converge within ten epochs. 
``` # YOUR CODE HERE ######################## ######################## ######################## # larger K = 5 _, _ = EM(X,K,epochs,plotting=True) ######################## ######################## ######################## ``` ###YOUR ANSWER HERE When we use $K=5$, the latent space can represent five different classes, while there are still three classes present. As a result, the latent representation starts splitting the representations of the given numbers. We can see this especially for the digits $2$ and $3$. For the $2$, we can see that we learn two depictions: one which encompasses a distinctive loop, while the other does not. For the $3$, we get one representation that is squeezed more than the other and one of them seems to be mixed with a representation of the digit two. #### 1.4.2 Identify misclassifications (10 points) How can you use the data labels to assign a label to each of the clusters/latent variables? Use this to identify images that are 'misclassified' and try to understand why they are. Report your findings. 
def get_true_mu(bin_train_data, train_labels):
    """Return the (3, D) per-digit mean images for labels 2, 3 and 4.

    Row k holds the average binarized image of digit k+2.
    """
    true_mu = np.zeros((3, bin_train_data.shape[1]))
    for i in range(2, 5):
        # find indices of images belonging to this label
        indx = np.argwhere(train_labels == i)
        # average over these images
        true_mu[i - 2] = np.average(bin_train_data[indx], axis=0)
    return true_mu


def classify_image(image, mu):
    """Map `image` to the index of the closest row of `mu` (L2 distance)."""
    # CONSISTENCY FIX: use the `np` alias like the rest of the file; the bare
    # `numpy` name only resolved because of the notebook's `%pylab inline`.
    dist = np.zeros((mu.shape[0],))
    for c in range(mu.shape[0]):
        dist[c] = np.linalg.norm(image - mu[c])
    # minimal distance is the class
    return np.argmin(dist)


def find_misclassification(mu, bin_train_data, train_labels, number_mc=5):
    """Plot the first `number_mc` digit-2/3/4 images that the EM clusters
    misclassify, titled "true label - predicted label".

    mu : (K, D) latent means learned by EM; each is labelled with the closest
    true per-digit mean, then every image is assigned the label of its
    nearest latent component.
    """
    # Find the true digit label of each latent component.
    true_mu = get_true_mu(bin_train_data, train_labels)
    mu_label = np.zeros((mu.shape[0],))
    for i in range(len(mu)):
        mu_label[i] = classify_image(mu[i], true_mu) + 2

    # Buffers for the misclassified examples.
    mc_pic = np.zeros((number_mc, mu.shape[1]))
    mc_pred = np.zeros((number_mc,))
    mc_label = np.zeros((number_mc,))

    i = 0
    mc_counter = 0
    while mc_counter < number_mc:
        # only consider images with true labels 2, 3 or 4
        if train_labels[i] >= 2 and train_labels[i] <= 4:
            c_pred = mu_label[classify_image(bin_train_data[i], mu)]
            if c_pred != train_labels[i]:
                mc_pic[mc_counter] = bin_train_data[i]
                mc_pred[mc_counter] = c_pred
                mc_label[mc_counter] = train_labels[i]
                mc_counter = mc_counter + 1
        i = i + 1

    print('True label - predicted label:')
    for j in range(number_mc):
        plt.subplot(1, number_mc, j + 1)
        plt.imshow(mc_pic[j].reshape(28, 28), interpolation='nearest', cmap='Greys')
        plt.title("{0:d} - {1:d}".format(int(mc_label[j]), int(mc_pred[j])))
        plt.axis('off')
    plt.show()
plot_MNIST_digits(mu) ######################## ######################## ######################## ``` YOUR ANSWER HERE We can use the data labels in order to calculate the true values of the mus by averaging over all images that are assigned with this label. Then, we compare these true values to the mus we found using the EM-algorithm and label them with the according label if they show the smallest difference between one another. Using the same technique, we can label the images of the dataset by comparing the to the mus we found using the EM-algorithm and assigning them with the correponding label of the most similar one. Comparing our result to the true labels, gives us a number of misclassifications as shown above. We can see that a lot of twos get misclassified, especially if they do not encompass the loop as it is present in the latent representation. We can conclude that in general samples that are too different from the latent representation will be misclassified. #### 1.4.3 Initialize with true values (5 points) Initialize the three classes with the true values of the parameters and see what happens. Report your results. ``` # YOUR CODE HERE ######################## ######################## ######################## K = 3 true_mu = get_true_mu(bin_train_data,train_labels) _, _ = EM(X,K,epochs,mu = true_mu, plotting=True) ######################## ######################## ######################## ``` YOUR ANSWER HERE When we initialize with the true values for mu, we can see that the algorithm changes this representation only marginally. For example, the loop of the two slightly increases. The algorithm manages to converge within 3 epochs. ## Part 2: Variational Auto-Encoder A Variational Auto-Encoder (VAE) is a probabilistic model $p(\bx, \bz)$ over observed variables $\bx$ and latent variables and/or parameters $\bz$. Here we distinguish the decoder part, $p(\bx | \bz) p(\bz)$ and an encoder part $p(\bz | \bx)$ that are both specified with a neural network. 
A lower bound on the log marginal likelihood $\log p(\bx)$ can be obtained by approximately inferring the latent variables z from the observed data x using an encoder distribution $q(\bz| \bx)$ that is also specified as a neural network. This lower bound is then optimized to fit the model to the data. The model was introduced by Diederik Kingma (during his PhD at the UVA) and Max Welling in 2013, https://arxiv.org/abs/1312.6114. Since it is such an important model there are plenty of well written tutorials that should help you with the assignment. E.g: https://jaan.io/what-is-variational-autoencoder-vae-tutorial/. In the following, we will make heavily use of the torch module, https://pytorch.org/docs/stable/index.html. Most of the time replacing `np.` with `torch.` will do the trick, e.g. `np.sum` becomes `torch.sum` and `np.log` becomes `torch.log`. In addition, we will use `torch.FloatTensor()` as an equivalent to `np.array()`. In order to train our VAE efficiently we will make use of batching. The number of data points in a batch will become the first dimension of our data tensor, e.g. A batch of 128 MNIST images has the dimensions [128, 1, 28, 28]. To check check the dimensions of a tensor you can call `.size()`. ### 2.1 Loss function The objective function (variational lower bound), that we will use to train the VAE, consists of two terms: a log Bernoulli loss (reconstruction loss) and a Kullback–Leibler divergence. We implement the two terms separately and combine them in the end. As seen in Part 1: Expectation Maximization, we can use a multivariate Bernoulli distribution to model the likelihood $p(\bx | \bz)$ of black and white images. Formally, the variational lower bound is maximized but in PyTorch we are always minimizing therefore we need to calculate the negative log Bernoulli loss and Kullback–Leibler divergence. 
### 2.1.1 Negative Log Bernoulli loss (5 points) The negative log Bernoulli loss is defined as, \begin{align} loss = - (\sum_i^D \bx_i \log \hat{\bx_i} + (1 − \bx_i) \log(1 − \hat{\bx_i})). \end{align} Write a function `log_bernoulli_loss` that takes a D dimensional vector `x`, its reconstruction `x_hat` and returns the negative log Bernoulli loss. Make sure that your function works for batches of arbitrary size. ``` def log_bernoulli_loss(x_hat, x): # YOUR CODE HERE ######################## ######################## ######################## n = torch.mul(x, torch.log(x_hat)) + torch.mul(1 - x, torch.log(1 - x_hat)) loss = -n.sum() ######################## ######################## ######################## return loss ### Test test test x_test = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 0.9, 0.9, 0.9]]) x_hat_test = torch.FloatTensor([[0.11, 0.22, 0.33, 0.44], [0.55, 0.66, 0.77, 0.88], [0.99, 0.99, 0.99, 0.99]]) assert log_bernoulli_loss(x_hat_test, x_test) > 0.0 assert log_bernoulli_loss(x_hat_test, x_test) < 10.0 ``` ### 2.1.2 Negative Kullback–Leibler divergence (10 Points) The variational lower bound (the objective to be maximized) contains a KL term $D_{KL}(q(\bz)||p(\bz))$ that can often be calculated analytically. In the VAE we assume $q = N(\bz, \mu, \sigma^2I)$ and $p = N(\bz, 0, I)$. Solve analytically! 
YOUR ANSWER HERE

\begin{align*}
\newcommand{\xscalar}{x}
\newcommand{\xvec}{{\bf z}}
\newcommand{\Lmat}{{\sigma^2 \bf I}}
\newcommand{\Sigmamat}{\bf I}
\newcommand{\SigmamatInv}{\Sigmamat^{-1}}
\newcommand{\mvec}{\boldsymbol{\mu}}
\mathcal{KL}(q||p) &= \int q(\xvec) \log \left(\frac{q(\xvec)}{p(\xvec)}\right) d\xvec \\
&= \int q(\xvec) \left[ \log q(\xvec) - \log p(\xvec) \right] d\xvec \\
&= \int q(\xvec) \Big[ - \frac{D}{2} \log(2\pi) - \frac{1}{2} \log(|\Lmat|) - \frac{1}{2} (\xvec-\mvec)^T \Lmat^{-1}(\xvec-\mvec) \\
&\qquad + \frac{D}{2} \log(2\pi) + \frac{1}{2} \log(|\Sigmamat|) + \frac{1}{2} \xvec^T \SigmamatInv \xvec \Big] d\xvec \\
&= \frac{1}{2} \log \left(\frac{|\Sigmamat|}{|\Lmat|}\right) \int q(\xvec) d\xvec - \frac{1}{2} \int q(\xvec) (\xvec-\mvec)^T \Lmat^{-1}(\xvec-\mvec) d\xvec \\
&\qquad + \frac{1}{2} \int q(\xvec) \xvec^T \SigmamatInv \xvec \, d\xvec
\end{align*}

We use $\int q(\xvec) d\xvec = 1$ and the law of the unconscious statistician to get

\begin{align*}
&= \frac{1}{2} \log \left(\frac{|\Sigmamat|}{|\Lmat|}\right) - \frac{1}{2} \mathbb{E}_q\left[(\xvec-\mvec)^T \Lmat^{-1}(\xvec-\mvec)\right] + \frac{1}{2} \mathbb{E}_q\left[\xvec^T \SigmamatInv \xvec\right].
\end{align*}

Both expectations are taken under $q$, i.e. $\xvec \sim \mathcal{N}(\xvec|\mvec, \Lmat)$. The first expectation is $\mathbb{E}_q[(\xvec-\mvec)^T \Lmat^{-1}(\xvec-\mvec)] = Tr(\Lmat^{-1}\Lmat) = D$, and for the second we use equation 380 from the matrix cookbook, $\mathbb{E}_q[\xvec^T \SigmamatInv \xvec] = \mvec^T \SigmamatInv \mvec + Tr(\SigmamatInv \Lmat)$. This gives

\begin{align*}
&= \frac{1}{2} \left[\log \left(\frac{|\Sigmamat|}{|\Lmat|}\right) - D + \mvec^T \SigmamatInv \mvec + Tr(\SigmamatInv \Lmat)\right].
\end{align*}

Since $\Sigmamat = {\bf I}$, we have $|\Sigmamat| = 1$ and $\SigmamatInv = {\bf I}$, so the expression simplifies to

\begin{align*}
&= \frac{1}{2} \left[\mvec^T \mvec + Tr(\Lmat) - D - \log(|\Lmat|)\right] = \frac{1}{2} \sum_{j=1}^{D} \left(\mu_j^2 + \sigma_j^2 - 1 - \log \sigma_j^2\right),
\end{align*}

which is exactly the term implemented (as a negative lower-bound contribution) in `KL_loss` below.

Write a function `KL_loss` that takes two J dimensional vectors `mu` and `logvar` and returns the negative Kullback–Leibler divergence.
Where `logvar` is $\log(\sigma^2)$. Make sure that your function works for batches of arbitrary size. ``` def KL_loss(mu, logvar): # YOUR CODE HERE ######################## ######################## ######################## loss = -0.5 * torch.sum(1 + logvar - torch.pow(mu, 2) - torch.exp(logvar)) ######################## ######################## ######################## return loss ### Test test test mu_test = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]) logvar_test = torch.FloatTensor([[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]]) assert KL_loss(mu_test, logvar_test) > 0.0 assert KL_loss(mu_test, logvar_test) < 10.0 ``` ### 2.1.3 Putting the losses together (5 points) Write a function `loss_function` that takes a D dimensional vector `x`, its reconstruction `x_hat`, two J dimensional vectors `mu` and `logvar` and returns the final loss. Make sure that your function works for batches of arbitrary size. ``` def loss_function(x_hat, x, mu, logvar): # YOUR CODE HERE ######################## ######################## ######################## entropy_loss = log_bernoulli_loss(x_hat, x) kl_loss = KL_loss(mu,logvar) loss = entropy_loss+kl_loss ######################## ######################## ######################## return loss x_test = torch.FloatTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]) x_hat_test = torch.FloatTensor([[0.11, 0.22, 0.33], [0.44, 0.55, 0.66], [0.77, 0.88, 0.99]]) mu_test = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]) logvar_test = torch.FloatTensor([[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]]) assert loss_function(x_hat_test, x_test, mu_test, logvar_test) > 0.0 assert loss_function(x_hat_test, x_test, mu_test, logvar_test) < 10.0 ``` ### 2.2 The model Below you see a data structure for the VAE. The modell itself consists of two main parts the encoder (images $\bx$ to latent variables $\bz$) and the decoder (latent variables $\bz$ to images $\bx$). 
The encoder is using 3 fully-connected layers, whereas the decoder is using two fully-connected layers. Right now the data structure is quite empty; step by step we will update its functionality. For test purposes we will initialize a VAE for you. After the data structure is completed you will do the hyperparameter search.

```
from torch import nn
from torch.nn import functional as F

class VAE(nn.Module):
    def __init__(self, fc1_dims, fc21_dims, fc22_dims, fc3_dims, fc4_dims):
        super(VAE, self).__init__()
        self.fc1 = nn.Linear(*fc1_dims)
        self.fc21 = nn.Linear(*fc21_dims)
        self.fc22 = nn.Linear(*fc22_dims)
        self.fc3 = nn.Linear(*fc3_dims)
        self.fc4 = nn.Linear(*fc4_dims)

    def encode(self, x):
        # To be implemented
        raise Exception('Method not implemented')

    def reparameterize(self, mu, logvar):
        # To be implemented
        raise Exception('Method not implemented')

    def decode(self, z):
        # To be implemented
        raise Exception('Method not implemented')

    def forward(self, x):
        # To be implemented
        raise Exception('Method not implemented')

VAE_test = VAE(fc1_dims=(784, 4), fc21_dims=(4, 2), fc22_dims=(4, 2), fc3_dims=(2, 4), fc4_dims=(4, 784))
```

### 2.3 Encoding (10 points)

Write a function `encode` that gets a vector `x` with 784 elements (flattened MNIST image) and returns `mu` and `logvar`. Your function should use three fully-connected layers (`self.fc1()`, `self.fc21()`, `self.fc22()`). First, you should use `self.fc1()` to embed `x`. Second, you should use `self.fc21()` and `self.fc22()` on the embedding of `x` to compute `mu` and `logvar` respectively.

PyTorch comes with a variety of activation functions; the most common calls are `F.relu()`, `F.sigmoid()`, `F.tanh()`. Make sure that your function works for batches of arbitrary size.
``` def encode(self, x): # YOUR CODE HERE ######################## ######################## ######################## z = F.relu(self.fc1(x)) mu = self.fc21(z) logvar = self.fc22(z) ######################## ######################## ######################## return mu, logvar ### Test, test, test VAE.encode = encode x_test = torch.ones((5,784)) mu_test, logvar_test = VAE_test.encode(x_test) assert np.allclose(mu_test.size(), [5, 2]) assert np.allclose(logvar_test.size(), [5, 2]) ``` ### 2.4 Reparameterization (10 points) One of the major question that the VAE is answering, is 'how to take derivatives with respect to the parameters of a stochastic variable?', i.e. if we are given $\bz$ that is drawn from a distribution $q(\bz|\bx)$, and we want to take derivatives. This step is necessary to be able to use gradient-based optimization algorithms like SGD. For some distributions, it is possible to reparameterize samples in a clever way, such that the stochasticity is independent of the parameters. We want our samples to deterministically depend on the parameters of the distribution. For example, in a normally-distributed variable with mean $\mu$ and standard deviation $\sigma$, we can sample from it like this: \begin{align} \bz = \mu + \sigma \odot \epsilon, \end{align} where $\odot$ is the element-wise multiplication and $\epsilon$ is sampled from $N(0, I)$. Write a function `reparameterize` that takes two J dimensional vectors `mu` and `logvar`. It should return $\bz = \mu + \sigma \odot \epsilon$. 
``` def reparameterize(self, mu, logvar): # YOUR CODE HERE ######################## ######################## ######################## sigma = torch.sqrt(torch.exp(logvar)) z = mu + sigma * torch.randn(mu.size()) ######################## ######################## ######################## return z ### Test, test, test VAE.reparameterize = reparameterize VAE_test.train() mu_test = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]) logvar_test = torch.FloatTensor([[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]]) z_test = VAE_test.reparameterize(mu_test, logvar_test) assert np.allclose(z_test.size(), [3, 2]) assert z_test[0][0] < 5.0 assert z_test[0][0] > -5.0 ``` ### 2.5 Decoding (10 points) Write a function `decode` that gets a vector `z` with J elements and returns a vector `x_hat` with 784 elements (flattened MNIST image). Your function should use two fully-connected layers (`self.fc3()`, `self.fc4()`). PyTorch comes with a variety of activation functions, the most common calls are `F.relu()`, `F.sigmoid()`, `F.tanh()`. Make sure that your function works for batches of arbitrary size. ``` def decode(self, z): # YOUR CODE HERE ######################## ######################## ######################## out3 = F.relu(self.fc3(z)) x_hat = F.sigmoid(self.fc4(out3)) ######################## ######################## ######################## return x_hat # test test test VAE.decode = decode z_test = torch.ones((5,2)) x_hat_test = VAE_test.decode(z_test) assert np.allclose(x_hat_test.size(), [5, 784]) assert (x_hat_test <= 1).all() assert (x_hat_test >= 0).all() ``` ### 2.6 Forward pass (10) To complete the data structure you have to define a forward pass through the VAE. A single forward pass consists of the encoding of an MNIST image $\bx$ into latent space $\bz$, the reparameterization of $\bz$ and the decoding of $\bz$ into an image $\bx$. 
Write a function `forward` that gets a a vector `x` with 784 elements (flattened MNIST image) and returns a vector `x_hat` with 784 elements (flattened MNIST image), `mu` and `logvar`. ``` def forward(self, x): x = x.view(-1, 784) # YOUR CODE HERE ######################## ######################## ######################## mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) x_hat = self.decode(z) ######################## ######################## ######################## return x_hat, mu, logvar # test test test VAE.forward = forward x_test = torch.ones((5,784)) x_hat_test, mu_test, logvar_test = VAE_test.forward(x_test) assert np.allclose(x_hat_test.size(), [5, 784]) assert np.allclose(mu_test.size(), [5, 2]) assert np.allclose(logvar_test.size(), [5, 2]) ``` ### 2.7 Training (15) We will now train the VAE using an optimizer called Adam, https://arxiv.org/abs/1412.6980. The code to train a model in PyTorch is given below. ``` from torch.autograd import Variable def train(epoch, train_loader, model, optimizer): model.train() train_loss = 0 for batch_idx, (data, _) in enumerate(train_loader): data = Variable(data) optimizer.zero_grad() recon_batch, mu, logvar = model(data) loss = loss_function(recon_batch, data.view(-1, 784), mu, logvar) loss.backward() train_loss += loss.data optimizer.step() if batch_idx % 100 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.data / len(data))) print('====> Epoch: {} Average loss: {:.4f}'.format( epoch, train_loss / len(train_loader.dataset))) ``` Let's train. You have to choose the hyperparameters. Make sure your loss is going down in a reasonable amount of epochs (around 10). ``` # Hyperparameters # fc1_dims = (?,?) 
# fc21_dims = # fc22_dims = # fc3_dims = # fc4_dims = # lr = # batch_size = # epochs = # YOUR CODE HERE ######################## ######################## ######################## # using the parameters as given in: https://github.com/pytorch/examples/tree/master/vae fc1_dims = (784,400) fc21_dims = (400,20) fc22_dims = (400,20) fc3_dims = (20,400) fc4_dims = (400,784) lr = 1e-3 batch_size = 128 epochs = 10 ######################## ######################## ######################## # This cell contains a hidden test, please don't delete it, thx ``` Run the box below to train the model using the hyperparameters you entered above. ``` from torchvision import datasets, transforms from torch import nn, optim # Load data train_data = datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor()) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, **{}) # Init model VAE_MNIST = VAE(fc1_dims=fc1_dims, fc21_dims=fc21_dims, fc22_dims=fc22_dims, fc3_dims=fc3_dims, fc4_dims=fc4_dims) # Init optimizer optimizer = optim.Adam(VAE_MNIST.parameters(), lr=lr) # Train for epoch in range(1, epochs + 1): train(epoch, train_loader, VAE_MNIST, optimizer) ``` Run the box below to check if the model you trained above is able to correctly reconstruct images. 
``` ### Let's check if the reconstructions make sense # Set model to test mode VAE_MNIST.eval() # Reconstructed train_data_plot = datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor()) train_loader_plot = torch.utils.data.DataLoader(train_data_plot, batch_size=1, shuffle=False, **{}) for batch_idx, (data, _) in enumerate(train_loader_plot): x_hat, mu, logvar = VAE_MNIST(data) plt.imshow(x_hat.view(1,28,28).squeeze().data.numpy(), cmap='gray') plt.title('%i' % train_data.train_labels[batch_idx]) plt.show() if batch_idx == 3: break ``` ### 2.8 Visualize latent space (20 points) Now, implement the auto-encoder now with a 2-dimensional latent space, and train again over the MNIST data. Make a visualization of the learned manifold by using a linearly spaced coordinate grid as input for the latent space, as seen in https://arxiv.org/abs/1312.6114 Figure 4. ``` # YOUR CODE HERE ######################## ######################## ######################## fc1_dims = (784,400) fc21_dims = (400,2) fc22_dims = (400,2) fc3_dims = (2,400) fc4_dims = (400,784) lr = 1e-3 batch_size = 128 epochs = 10 # Init model VAE_MNIST = VAE(fc1_dims=fc1_dims, fc21_dims=fc21_dims, fc22_dims=fc22_dims, fc3_dims=fc3_dims, fc4_dims=fc4_dims) # Init optimizer optimizer = optim.Adam(VAE_MNIST.parameters(), lr=lr) # Train for epoch in range(1, epochs + 1): train(epoch, train_loader, VAE_MNIST, optimizer) ######################## ######################## ######################## ######################## ######################## ######################## VAE_MNIST.eval() size = 5 f, axarr = plt.subplots(size*2,size*2) for i in range(-size,size): for j in range(-size,size): pos = torch.zeros((1,2)) pos[0,0] = i pos[0,1] = j x_hat = VAE_MNIST.decode(torch.autograd.Variable(pos)) axarr[i+size,j+size].imshow(x_hat.view(1,28,28).squeeze().data.numpy(), interpolation = 'nearest', cmap ='Greys') axarr[i+size,j+size].axis('off') plt.show() ######################## 
########################
########################
```

### 2.9 Amortized inference (10 points)

What is amortized inference? Where in the code of Part 2 is it used? What is the benefit of using it?

YOUR ANSWER HERE

In amortized inference, we share the variational parameters $\lambda$ across datapoints. In the Variational Autoencoder in Part 2, this is the case, since we use the same parameters, i.e. the same weights and biases in the encoding network, for all datapoints, i.e. samples from the dataset. If we see a new datapoint, we can use the learned network in order to approximate its posterior $q(z)$, which can be an advantage over the alternative mean-field approach, where we would have to run variational inference for each new datapoint again.
github_jupyter
## Set up ### package install ``` !sudo apt-get install build-essential swig !curl https://raw.githubusercontent.com/automl/auto-sklearn/master/requirements.txt | xargs -n 1 -L 1 pip install !pip install auto-sklearn !pip install pipelineprofiler # visualize the pipelines created by auto-sklearn !pip install shap !pip install --upgrade plotly !pip3 install -U scikit-learn ``` ### Packages imports ``` import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn import set_config from sklearn.pipeline import Pipeline from sklearn.metrics import mean_squared_error import autosklearn.regression import plotly.express as px import plotly.graph_objects as go from joblib import dump import shap import datetime import logging import matplotlib.pyplot as plt ``` ### Google Drive connection ``` from google.colab import drive drive.mount('/content/drive', force_remount=True) ``` ### options and settings ``` data_path = "/content/drive/MyDrive/Introduction2DataScience/tutorials/w2d2/data/raw/" model_path = "/content/drive/MyDrive/Introduction2DataScience/tutorials/w2d2/models/" timesstr = str(datetime.datetime.now()).replace(' ', '_') logging.basicConfig(filename=f"{model_path}explog_{timesstr}.log", level=logging.INFO) ``` Please Download the data from [this source](https://drive.google.com/file/d/1MUZrfW214Pv9p5cNjNNEEosiruIlLUXz/view?usp=sharing), and upload it on your Introduction2DataScience/data google drive folder. 
## Loading Data and Train-Test Split ``` df = pd.read_csv(f'{data_path}winequality-red.csv') test_size = 0.2 random_state = 0 train, test = train_test_split(df, test_size=test_size, random_state=random_state) logging.info(f'train test split with test_size={test_size} and random state={random_state}') train.to_csv(f'{data_path}winequality-red.csv', index=False) train= train.copy() test.to_csv(f'{data_path}winequality-red.csv', index=False) test = test.copy() ``` ## Modelling ``` X_train, y_train = train.iloc[:,:-1], train.iloc[:,-1] total_time = 600 per_run_time_limit = 30 automl = autosklearn.regression.AutoSklearnRegressor( time_left_for_this_task=total_time, per_run_time_limit=per_run_time_limit, ) automl.fit(X_train, y_train) logging.info(f'Ran autosklearn regressor for a total time of {total_time} seconds, with a maximum of {per_run_time_limit} seconds per model run') dump(automl, f'{model_path}model{timesstr}.pkl') logging.info(f'Saved regressor model at {model_path}model{timesstr}.pkl ') logging.info(f'autosklearn model statistics:') logging.info(automl.sprint_statistics()) #profiler_data= PipelineProfiler.import_autosklearn(automl) #PipelineProfiler.plot_pipeline_matrix(profiler_data) ``` ## Model Evluation and Explainability Let's separate our test dataframe into a feature variable (X_test), and a target variable (y_test): ``` X_test, y_test = test.iloc[:,:-1], test.iloc[:,-1] ``` #### Model Evaluation Now, we can attempt to predict the median house value from our test set. 
To do that, we just use the .predict method on the object "automl" that we created and trained in the last sections: ``` y_pred = automl.predict(X_test) ``` Let's now evaluate it using the mean_squared_error function from scikit learn: ``` logging.info(f"Mean Squared Error is {mean_squared_error(y_test, y_pred)}, \n R2 score is {automl.score(X_test, y_test)}") ``` we can also plot the y_test vs y_pred scatter: ``` df = pd.DataFrame(np.concatenate((X_test, y_test.to_numpy().reshape(-1,1), y_pred.reshape(-1,1)), axis=1)) df.columns = ['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol', 'Actual Target', 'Predicted Target'] fig = px.scatter(df, x='Predicted Target', y='Actual Target') fig.write_html(f"{model_path}residualfig_{timesstr}.html") logging.info(f"Figure of residuals saved as {model_path}residualfig_{timesstr}.html") ``` #### Model Explainability ``` explainer = shap.KernelExplainer(model = automl.predict, data = X_test.iloc[:50, :], link = "identity") # Set the index of the specific example to explain X_idx = 0 shap_value_single = explainer.shap_values(X = X_test.iloc[X_idx:X_idx+1,:], nsamples = 100) X_test.iloc[X_idx:X_idx+1,:] # print the JS visualization code to the notebook #shap.initjs() shap.force_plot(base_value = explainer.expected_value, shap_values = shap_value_single, features = X_test.iloc[X_idx:X_idx+1,:], show=False, matplotlib=True ) plt.savefig(f"{model_path}shap_example_{timesstr}.png") logging.info(f"Shapley example saved as {model_path}shap_example_{timesstr}.png") shap_values = explainer.shap_values(X = X_test.iloc[0:50,:], nsamples = 100) # print the JS visualization code to the notebook #shap.initjs() fig = shap.summary_plot(shap_values = shap_values, features = X_test.iloc[0:50,:], show=False) plt.savefig(f"{model_path}shap_summary_{timesstr}.png") logging.info(f"Shapley summary saved as 
{model_path}shap_summary_{timesstr}.png") ```
github_jupyter
#[1] Mount Drive ``` from google.colab import drive drive.mount('/content/drive') ``` # [2] Install Requirements and Load Libs ## Install RequirementsRequirements ``` !pip install datasets &> /dev/null !pip install rouge_score &> /dev/null !pip install -q transformers==4.8.2 &> /dev/null !pip install sentencepiece !pip install nltk # Import Python Lib import os import shutil import pandas as pd import numpy as np from ast import literal_eval import re import torch import nltk from rouge_score import rouge_scorer from IPython.display import display, HTML # Import local lib %load_ext autoreload %autoreload 2 path_utils = '/content/drive/MyDrive/Github/Synopsis/utils' os.chdir(path_utils) #importlib.reload(utils_lsstr) #importlib.reload(utils_model) from utils_model import Summarization_Model, Tokenizer, \ str_summarize, segment_to_split_size, str_seg_and_summarize, str_led_summarize from utils_lsstr import str_word_count, ls_word_count from utils_lsstr import split_str_to_batch_ls, \ str_remove_duplicated_consective_token from Screenplay import SC_Elements # instantiate SC_Element sc = SC_Elements() ``` ## Set Common Paths ``` path_datasets ='/content/drive/MyDrive/Github/Synopsis/Datasets' path_results = '/content/drive/MyDrive/Github/Synopsis/results' ``` # [3] Compare Segmentation Methods ## Load Various Tokenizer google/pegasus-large, 500/1000 google/pegasus-cnn_dailymail, 500 (no large version) facebook/bart-large, 500/1000 facebook/bart-large-cnn, 500/1000 google/bigbird-pegasus-large-arxiv, 512/1000/4000 allenai/led-large-16384, 512/1000/4000/16000 allenai/led-large-16384-arxiv, 512/1000/4000/16000 ``` # initalize tokenizer by model model_name = 'allenai/led-base-16384' tokenizer = Tokenizer(model_name) # Instantiate word tokenizer and detokenizer from nltk.tokenize import RegexpTokenizer from nltk.tokenize import line_tokenize, sent_tokenize, word_tokenize from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize.treebank import 
TreebankWordDetokenizer nltk.download('punkt') ``` ## Preprocessing Test Data Overview ``` # Load SSGD path_datasets = '/content/drive/MyDrive/Github/Synopsis/Datasets' path_dfssgd = '/content/drive/MyDrive/Github/Synopsis/Datasets/SSGD-2021-07-23-719SC-TVTbl.json' df_wscript = pd.read_json(path_dfssgd) df_wscript['dfsc'] = df_wscript['dfsc'].apply(lambda x: pd.DataFrame(x)) # Load Turning Points from TRIPOD (for testing splits methods using turning poitns path_TRIPOD = '/content/drive/MyDrive/Github/Synopsis/Datasets/TRIPOD-master' path_tps= path_TRIPOD + '/Synopses_and_annotations/TRIPOD_screenplays_test.csv' df_tps = pd.read_csv(path_tps, header=0) # for each film title with turniing points, find the corresponding SSGD record # Save to df_cases, use df_cases for Long Document Processing Experiments df_cases = df_wscript[df_wscript['title'].isin(df_tps['movie_name'])] df_tmp = df_tps.melt(id_vars=['movie_name']) df_tmp['value'] = df_tmp['value'].apply(lambda x: literal_eval(x)).apply(lambda x: [x]) df_tmp = df_tmp.groupby('movie_name')['value'].sum().reset_index() df_tmp.columns = ['title', 'ls_tps'] df_cases = df_cases.merge(df_tmp, on='title', how='left') df_cases = df_cases.drop_duplicates('title') # assign tps to scenes in dfsc for i, row in df_cases.iterrows(): df_cases.loc[i,'dfsc'] ['tps'] = 0 for j, ls in enumerate(df_cases.loc[i, 'ls_tps']): df_cases.loc[i,'dfsc'].loc[df_cases.loc[i,'dfsc']['Scene'].isin(ls),'tps'] = j+1 # fillna for Scene numbers and insure type as i for i, case in df_cases.iterrows(): df_cases.loc[i, 'dfsc']['Scene'] =\ df_cases.loc[i, 'dfsc']['Scene'].fillna('-1').astype('int') df_cases['gold'] = df_cases['ls_sums_sorted'].apply(lambda x: x[0]) df_cases['nScenes'] = df_cases['dfsc'].apply(lambda x: x['Scene'].nunique()) df_cases['gold_wc'] = df_cases['gold'].apply(lambda x: len(word_tokenize(x))) df_cases['nScenes_tps'] = df_cases['ls_tps'].apply(lambda x: sum([len(ls) for ls in x])) df_cases['dict_AF']= 
df_cases['dfsc'].apply(sc.extract_str_by_method, method='AF', return_type='df').apply(lambda x: x.dropna().to_dict(), axis=1) def calc_tc(x): for k, v in x.items(): x[k] = len(tokenizer(v)['input_ids']) return list(x.values()) df_cases['Scene_tc'] = df_cases['dict_AF'].apply(calc_tc) Scene_tc = pd.DataFrame(df_cases['Scene_tc'].sum()).describe().astype('int') Scene_tc.columns =['分词数量'] df_cases ``` ### 长文本预测试集概览 ``` overview = df_cases[['title', 'word_count', 'nScenes', '%Dialog', 'gold_wc', 'nScenes_tps']].copy() overview.columns = ['片名', '剧本单词量', '场次数量', '对白占比', '参考总结单词量', '重点场次数量'] overview['压缩倍数'] = overview['剧本单词量'] / overview['参考总结单词量'] overview.loc['均数'] = overview.mean() overview = overview.fillna('均数') overview.round() df_cases.columns # Specify selection methods or define custom # methods are in utils/Screenplay.py # SELECTION METHOD ##################### selection_method = 'PSentAO_F1' ##################### path_utils = '/content/drive/MyDrive/Github/Synopsis/utils' os.chdir(path_utils) #%load_ext autoreload %autoreload 2 from Screenplay import SC_Elements sc = SC_Elements() # Initialize df df = df_cases[['title', 'ls_sums_sorted', 'dfsc']].copy() # Get selection by selection_method df['selection'] = df['dfsc'].apply(sc.extract_str_by_method, method=selection_method, return_type='df').apply( lambda x: x.dropna().to_dict(), axis=1 ) df['selection'] # Input Huggingface model name or local model path ##################################### model_name = 'facebook/bart-large' ##################################### # assign cuda to device if it exists if torch.cuda.device_count() > 0: device = 'cuda:' + str(torch.cuda.current_device()) else: device = 'cpu' # Instantiate tokenizer and model tokenizer = Tokenizer(model_name=None) #model = Summarization_Model(model_name=model_name, device=device) tmp ``` ## by token count vs. 
by scene ``` path_compare_segmentation_methods = '/content/drive/MyDrive/Github/Synopsis/results/by_SegMethod' for root, dirs, files in os.walk(path_compare_segmentation_methods): if root == path_compare_segmentation_methods: fns = files break rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(path_compare_segmentation_methods + '/' + fn) record.extend(dftmp[['R1', 'R2', 'RL', 's0_wc', 's0_tc', 'sum_wc_max', 'sum_tc_max', 's1_wc', 's1_tc']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL', 's0_wc', 's0_tc', 'sum_wc_max', 'sum_tc_max', 's1_wc', 's1_tc'] dfscores['s0_wc'] = dfscores['s0_wc'].astype('int') dfscores['s0_tc'] = dfscores['s0_tc'].astype('int') dfscores['s1_wc'] = dfscores['s1_wc'].astype('int') dfscores['s1_tc'] = dfscores['s1_tc'].astype('int') dfscores['sum_tc_max'] = dfscores['sum_tc_max'].astype('int') methods = dfscores['method'].str.split('_', expand=True) methods.columns = ['筛选方法', '分段方法', '模型', '分段tc距离'] dfscores = dfscores.merge(methods, left_index=True, right_index=True).drop('method', axis=1) # create view view = dfscores[[ '分段方法', '模型', '分段tc距离', 'R1', 'R2', 'RL', 's0_wc', 'sum_wc_max', 's1_wc', 's0_tc', 'sum_tc_max', 's1_tc']].sort_values(['R2'], ascending=[False]) view.columns = ['分段方法', '模型', '分段tc', 'R1', 'R2', 'RL', '输入wc', '参考梗概wc', '生成梗概wc', '输入tc', '参考梗概tc', '生成梗概tc'] view['参考梗概wc'] = view['参考梗概wc'].astype('int') view.round(2) ``` # [4] Compare Selection Method ``` path_selections = '/content/drive/MyDrive/Github/Synopsis/results/by_selections' # Get result file names for root, dirs, files in os.walk(path_selections): if root == path_selections: fns = files break from rouge_score import rouge_scorer scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True) rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(path_selections + '/' + fn) dftmp['s0_wc'] = dftmp['s0'].apply(lambda x: 
len(word_tokenize(x))) record.extend(dftmp[['R1', 'R2', 'RL', 's0_wc']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL', 's0_wc'] methods = dfscores['method'].str.split('_', expand=True) methods.columns = ['selection_method', 'model', 'split-size'] dfscores = dfscores.merge(methods, left_index=True, right_index=True).drop('method', axis=1) # Create View view = dfscores[['selection_method', 's0_wc', 'R1', 'R2', 'RL']].copy() view.columns = ['筛选方式', '输入wc','R1', 'R2', 'RL'] view['输入wc'] = view['输入wc'].astype('int') view.sort_values('R2', ascending=False).round(2) view.sort_values('R2', ascending=False).round(2) view.sort_values('输入wc').round(2) ``` # [5] Compare Finetuning Method ## by window size ``` path_wsize = '/content/drive/MyDrive/Github/Synopsis/results/by_FTmodel/by_window_size' # Get result file names for root, dirs, files in os.walk(path_wsize): if root == path_wsize: fns = files break from rouge_score import rouge_scorer scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True) rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(path_wsize + '/' + fn) record.extend(dftmp[['R1', 'R2', 'RL']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL'] methods = dfscores['method'].str.split('_', expand=True) methods.columns = ['selection_method', 'model', 'pred GA', 'split-size'] dfscores = dfscores.merge(methods, left_index=True, right_index=True).drop('method', axis=1) # Create View view = dfscores.sort_values( ['selection_method', 'R2'], ascending=[True, False]) view = view[['model', 'R1', 'R2', 'RL']] view.columns = ['模型', 'R1', 'R2', 'RL'] view['窗口'] = view['模型'].str.extract('([0-9]*)W') view = view[['窗口', 'R1', 'R2', 'RL', '模型']] view.round(2) ``` ## by global attention application ``` path_global_attention = 
'/content/drive/MyDrive/Github/Synopsis/results/by_FTmodel/by_global_attention' # Get result file names for root, dirs, files in os.walk(path_global_attention): if root == path_global_attention: fns = files break from rouge_score import rouge_scorer scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True) rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(path_global_attention + '/' + fn) record.extend(dftmp[['R1', 'R2', 'RL']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL'] methods =dfscores['method'].str.split('-', expand=True) dfscores['训练GA'] = methods[5].apply(lambda x: x[2:]) dfscores['预测GA'] = methods[7].str.extract('_(.*)_') view = dfscores[['训练GA', '预测GA', 'R1', 'R2', 'RL', 'method']].sort_values('R2', ascending=False) view['模型'] = view['method'].apply(lambda x: x[10:]) view.drop('method', axis=1).round(2) pd.read_json(path_global_attention + '/' + fn) ``` ## by expanding gold tc range ``` fp_goldtcrange= '/content/drive/MyDrive/Github/Synopsis/results/by_FTmodel/by_expand_goldtcrange' # Get result file names for root, dirs, files in os.walk(fp_goldtcrange): if root == fp_goldtcrange: fns = files break from rouge_score import rouge_scorer scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True) rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(fp_goldtcrange + '/' + fn) record.extend(dftmp[['R1', 'R2', 'RL', 's1_wc']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL', 's1_wc'] dfscores['s1_wc'] = dfscores['s1_wc'].astype('int') methods = dfscores['method'].str.split('_', expand=True) methods.columns = ['预测集筛选方法', 'model', '预测GA', 'split-size'] dfscores = dfscores.merge(methods, left_index=True, right_index=True).drop('method', axis=1) selection_methods = ['Grp_n06', 'Grp_n06', 'Grp_n06'] 
gold_tc_range = [[256,1024], [0,1024], [512, 1024]] training_time = ['1小时06分钟', '5小时07分钟', '3分钟15秒'] methods = dfscores['model'].str.split('-', expand=True) dfscores['训练步数'] = methods[7].apply(lambda x: re.sub('steps', '', x)) dfscores['训练样本量'] = methods[6].apply(lambda x: re.split('T', x)[0]) dfscores['训练输入筛选法'] = selection_methods dfscores['参考总结tc范围'] = gold_tc_range dfscores['训练耗时'] = training_time dfscores['基础模型'] = dfscores['model'].apply( lambda x: re.split('-6L', x)[0]) view = dfscores.sort_values('R2', ascending=False) view = view[['训练输入筛选法','参考总结tc范围', '训练样本量', '训练步数', 'R1', 'R2', 'RL', '训练耗时', 's1_wc']] view.columns = ['训练输入筛选方式','参考总结tc范围', '训练样本', '训练步数', 'R1', 'R2', 'RL', '训练耗时', '生成梗概wc均值'] view.round(2) ``` ## by augmentaiton with selection methods ``` fp_aug_sm = '/content/drive/MyDrive/Github/Synopsis/results/by_FTmodel/by_augmentation_w_selections' # Get result file names for root, dirs, files in os.walk(fp_aug_sm): if root == fp_aug_sm: fns = files break from rouge_score import rouge_scorer scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True) rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(fp_aug_sm + '/' + fn) record.extend(dftmp[['R1', 'R2', 'RL', 's0_wc', 's1_wc']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL','s0_wc', 's1_wc'] dfscores['s0_wc'] = dfscores['s0_wc'].astype('int') dfscores['s1_wc'] = dfscores['s1_wc'].astype('int') methods = dfscores['method'].str.split('_', expand=True) methods.columns = ['预测集筛选方法', 'model', '预测GA', 'split-size'] selection_methods = ['Grp_n12', 'Grp_n06', 'Grp_n19', 'Grp_n24'] gold_tc_range = [[512,1024], [512,1024], [512, 1024], [512, 1024]] training_time = ['11分钟24秒', '3分钟15秒', '15分钟47秒', '48分钟02秒'] dfscores = dfscores.merge(methods, left_index=True, right_index=True).drop('method', axis=1) methods = dfscores['model'].str.split('-', expand=True) dfscores['训练步数'] = 
methods[7].apply(lambda x: re.sub('steps', '', x)) dfscores['训练样本量'] = methods[6].apply(lambda x: re.split('T', x)[0]) dfscores['训练输入筛选法'] = selection_methods dfscores['参考总结tc范围'] = gold_tc_range dfscores['训练耗时'] = training_time # create view view = dfscores[['训练输入筛选法','参考总结tc范围', '训练样本量', '训练步数', 'R1', 'R2', 'RL', '训练耗时', 's1_wc']].sort_values('R2', ascending=False) view.columns = ['训练输入筛选方式','参考总结tc范围', '训练样本量', '训练步数', 'R1', 'R2', 'RL', '训练耗时', '输出wc均值'] view.round(2) ``` ## by prediction with selection methods ``` fp_pred_sm = '/content/drive/MyDrive/Github/Synopsis/results/by_FTmodel/by_pred_selection_methods' # Get result file names for root, dirs, files in os.walk(fp_pred_sm): if root == fp_pred_sm: fns = files break from rouge_score import rouge_scorer scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True) rouge_scores = [] for fn in fns: record = [fn[3:-5]] dftmp = pd.read_json(fp_pred_sm + '/' + fn) record.extend(dftmp[['R1', 'R2', 'RL', 's0_wc', 's1_wc']].mean().tolist()) rouge_scores.append(record) dfscores = pd.DataFrame(rouge_scores) dfscores.columns = ['method', 'R1', 'R2', 'RL','s0_wc', 's1_wc'] dfscores['s0_wc'] = dfscores['s0_wc'].astype('int') dfscores['s1_wc'] = dfscores['s1_wc'].astype('int') methods = dfscores['method'].str.split('_', expand=True) methods.columns = ['预测输入筛选方法', 'model', '预测GA', 'split-size'] dfscores = dfscores.merge(methods, left_index=True, right_index=True).drop('method', axis=1) view = dfscores.sort_values('R2', ascending=False).round(2) view = view[['预测输入筛选方法', '预测GA', 'R1', 'R2', 'RL', 's0_wc', 's1_wc']] left = view[view['预测GA'] == 'None'].sort_values('预测输入筛选方法') left = left[['s0_wc', 's1_wc', 'R1', 'R2', 'RL', '预测GA', '预测输入筛选方法']] left.loc['均值'] = left.mean() left.fillna('') right = view[view.预测GA == 'boScene'].sort_values('预测输入筛选方法') right.loc['均值'] = right.mean() right.fillna('') view.loc[view['预测GA'] == 'boScene'].sort_values('预测输入筛选方法') view.sort_values(['预测输入筛选方法', 'R2'], 
ascending=[True, False]) view.sort_values( ['预测输入筛选方法', 'R2'], ascending=[True, False] )[['预测输入筛选方法', 'R1', 'R2', 'RL', '预测GA', 's0_wc', 's1_wc']] ``` ## Pred ``` fptest = '/content/drive/MyDrive/Github/Synopsis/results/df_results_pt_then_ft.json' #dftest.to_json(fptest) dftest = pd.read_json(fptest) dftest['dfsc'] = dftest['dfsc'].apply(lambda x: pd.DataFrame(x)) dftest['s0_EVERY4at0'] = dftest['dfsc'].apply(sc.extract_str_by_method, method='EVERY4at0') dftest ``` # [8] View Generated Summaries ``` path_global_training = '/content/drive/MyDrive/Github/Synopsis/results/by_FTmodel' # Get result file names for root, dirs, files in os.walk(path_global_training): if root == path_global_training: fns = files break dfs = pd.read_json(path_global_training + '/' + fns[0])[['title', 'ls_sums_sorted']] dfs['gold'] = dfs['ls_sums_sorted'].apply(lambda x: x[0]) for fn in fns: # append method df = pd.read_json(path_global_training + '/' + fn) # append title dftmp = df[['title']].copy() # append predicted summary dftmp['pred_sum_{}'.format(fn[3:-5])] = df['s1'] # append rouge2_f1 dftmp['rouge2_f1_{}'.format(fn[3:-5])] = df['rouge2_f1'] # merge summary to dfc dfs = dfs.merge(dftmp, on='title', how='left') dfs.T HTML(dfs.T[[10]].to_html()) dfs.columns ```
github_jupyter
Precipitation Metrics (consecutive dry days, rolling 5-day precip accumulation, return period) ``` ! pip install xclim %matplotlib inline import xarray as xr import numpy as np import matplotlib.pyplot as plt import os import pandas as pd from datetime import datetime, timedelta, date import dask import dask.array as dda import dask.distributed as dd # rhodium-specific kubernetes cluster configuration import rhg_compute_tools.kubernetes as rhgk client, cluster = rhgk.get_big_cluster() cluster.scale(30) client cluster.close() def pull_ERA5_variable(filevar, variable): filenames = [] for num_yrs in range(len(yrs)): filename = '/gcs/impactlab-data/climate/source_data/ERA-5/{}/daily/netcdf/v1.3/{}_daily_{}-{}.nc'.format(filevar, filevar, yrs[num_yrs], yrs[num_yrs]) filenames.append(filename) era5_var = xr.open_mfdataset(filenames, concat_dim='time', combine='by_coords') var_all = era5_var[variable] return var_all yrs = np.arange(1995,2015) da = pull_ERA5_variable('pr', 'tp') import xclim as xc from xclim.core.calendar import convert_calendar # remove leap days and convert calendar to no-leap da = convert_calendar(da, 'noleap') da_mm = da*1000 da_mm.attrs["units"] = "mm/day" da_mm = da_mm.persist() ``` Calculate the max number of consecutive dry days per year. 
Use the threshold value for the wet day frequency correction ``` dry_days = xc.indicators.atmos.maximum_consecutive_dry_days(da_mm, thresh=0.0005, freq='YS') dry_days = dry_days.compute() #dry_days.sel(latitude=50.0, longitude=0.0).plot() avg_dry_days = dry_days.mean(dim='time').compute() avg_dry_days.plot(robust=True) from matplotlib import cm from cartopy import config import cartopy.crs as ccrs import cartopy.feature as cfeature def plot_average_dry_days(da, years, fname): fig = plt.figure(figsize=(10, 5)) ax = plt.axes(projection=ccrs.Robinson()) cmap = cm.pink_r da.plot( ax=ax, cmap=cmap, transform=ccrs.PlateCarree(), cbar_kwargs={'shrink': 0.8, 'pad': 0.02, "label": "# of days"}, vmin=0, vmax=180, ) ax.coastlines() ax.add_feature(cfeature.BORDERS, linestyle=":") ax.set_title("Mean number of consecutive dry days annually ({})".format(years)) plt.savefig(fname, dpi=600, bbox_inches='tight') plot_average_dry_days(avg_dry_days, '1995-2014', 'avg_dry_days_era5') ``` Calculate the highest precipitation amount cumulated over a 5-day moving window ``` max_5day_dailyprecip = xc.indicators.icclim.RX5day(da_mm, freq='YS') # there is a different function for a n-day moving window max_5day_dailyprecip = max_5day_dailyprecip.compute() avg_5day_dailyprecip = max_5day_dailyprecip.mean(dim='time').compute() avg_5day_dailyprecip.plot() def plot_average_5day_max_precip(da, years, fname): fig = plt.figure(figsize=(10, 5)) ax = plt.axes(projection=ccrs.Robinson()) cmap = cm.GnBu da.plot( ax=ax, cmap=cmap, transform=ccrs.PlateCarree(), cbar_kwargs={'shrink': 0.8, 'pad': 0.02, "label": "5-day accumulated precip (mm)"}, vmin=0, vmax=250, ) ax.coastlines() ax.add_feature(cfeature.BORDERS, linestyle=":") ax.set_title("Maximum annual 5-day rolling precipitation accumulation ({})".format(years)) plt.savefig(fname, dpi=600, bbox_inches='tight') plot_average_5day_max_precip(avg_5day_dailyprecip, '1995-2014', 'avg_max_5day_precip_era5') ``` Comparing difference of mean with nans and mean 
without taking into account nans ``` avg_5day_dailyprecip = max_5day_dailyprecip.mean(dim='time', skipna=True).compute() avg_5day_dailyprecip plot_average_5day_max_precip(avg_5day_dailyprecip, '1995-2014') max_5day_dailyprecip.sel(latitude=-89.0, longitude=0.0).plot() ``` Basics for calculating the return period of daily precipitation. More testing needed as it blows up currently. ``` def calculate_return(da, return_interval): ''' calculate return period of daily precip data per grid point ''' # Sort data smallest to largest sorted_data = da.sortby(da, ascending=True).compute() # Count total obervations n = sorted_data.shape[0] # Compute rank position rank = np.arange(1, 1 + n) # Calculate probability probability = (n - rank + 1) / (n + 1) # Calculate return - data are daily to then divide by 365? return_year = (1 / probability) # Round return period return_yr_rnd = np.around(return_year, decimals=1) # identify daily precip for specified return interval indices = np.where(return_yr_rnd == return_interval) # Compute over daily accumulation for the X return period mean_return_period_value = sorted_data[indices].mean().compute() return(mean_return_period_value) da_grid_cell = da.sel(latitude=lat, longitude=lon) da_grid_cell # applyufunc --> this applies a function to a single grid cell return_values = [] for ilat in range(0, len(da.latitude)): for ilon in range(0, len(da.longitude): # create array to store lon values per lat values_per_lat = [] # select da per grid cell da_grid_cell = da.sel(latitude=latitude[ilat], longitude=longitude[ilon]) # compute return period value & append mean_return_value = calculate_return(da_grid_cell, 5.0) values_per_lat.append(mean_return_value) # for each latitude save all longitude values return_values.append(values_per_lat) return_values for lat in da.latitude: for lon in da.longitude: da_grid_cell = da.sel(latitude=lat, longitude=lon) mean_return_value = calculate_return(da_grid_cell, 5.0) ``` Breakdown of per step testing of return 
period ``` da_test = da.sel(latitude=75.0, longitude=18.0).persist() da_test mean = calculate_return(da_test, 5.0) mean sorted_data = da_test.sortby(da_test, ascending=True).compute() sorted_data n = sorted_data.shape[0] n rank = np.arange(1, 1 + n) # sorted_data.insert(0, 'rank', range(1, 1 + n)) rank probability = (n - rank + 1) / (n + 1) probability return_year = (1 / probability) return_year return_yr_rnd = np.around(return_year, decimals=1) return_yr_rnd[5679] indices = np.where(return_yr_rnd == 5.0) indices sorted_data[indices].mean().compute() sorted_test = np.sort(da_test, axis=0) sorted_test = xr.DataArray(sorted_test) sorted_test ```
github_jupyter
# PageRank Performance Benchmarking This notebook benchmarks performance of running PageRank within cuGraph against NetworkX. NetworkX contains several implementations of PageRank. This benchmark will compare cuGraph versus the default Nx implementation as well as the SciPy version. Notebook Credits Original Authors: Bradley Rees Last Edit: 06/10/2019 RAPIDS Versions: 0.15 Test Hardware GV100 32G, CUDA 10.0 Intel(R) Core(TM) CPU i7-7800X @ 3.50GHz 32GB system memory ### Test Data | File Name | Num of Vertices | Num of Edges | |:---------------------- | --------------: | -----------: | | preferentialAttachment | 100,000 | 999,970 | | caidaRouterLevel | 192,244 | 1,218,132 | | coAuthorsDBLP | 299,067 | 1,955,352 | | dblp-2010 | 326,186 | 1,615,400 | | citationCiteseer | 268,495 | 2,313,294 | | coPapersDBLP | 540,486 | 30,491,458 | | coPapersCiteseer | 434,102 | 32,073,440 | | as-Skitter | 1,696,415 | 22,190,596 | ### Timing What is not timed: Reading the data What is timed: (1) creating a Graph, (2) running PageRank The data file is read in once for all flavors of PageRank. Each timed block will create a Graph and then execute the algorithm. The results of the algorithm are not compared. If you are interested in seeing the comparison of results, then please see PageRank in the __notebooks__ repo. 
## NOTICE _You must have run the __dataPrep__ script prior to running this notebook so that the data is downloaded_ See the README file in this folder for a discription of how to get the data ## If you have more than one GPU, set the GPU to use This is not needed on a Single GPU system or if the default GPU is to be used ``` !nvidia-smi # since this is a shared machine - let's pick a GPU that no one else is using import os os.environ["CUDA_VISIBLE_DEVICES"]="0" ``` ## Now load the required libraries ``` # Import needed libraries import gc import time import rmm import cugraph import cudf # NetworkX libraries import networkx as nx from scipy.io import mmread try: import matplotlib except ModuleNotFoundError: os.system('pip install matplotlib') import matplotlib.pyplot as plt; plt.rcdefaults() import numpy as np # Print out GPU Name cudf._cuda.gpu.deviceGetName(0) ``` ### Define the test data ``` # Test File data = { 'preferentialAttachment' : './data/preferentialAttachment.mtx', 'caidaRouterLevel' : './data/caidaRouterLevel.mtx', 'coAuthorsDBLP' : './data/coAuthorsDBLP.mtx', 'dblp' : './data/dblp-2010.mtx', 'citationCiteseer' : './data/citationCiteseer.mtx', 'coPapersDBLP' : './data/coPapersDBLP.mtx', 'coPapersCiteseer' : './data/coPapersCiteseer.mtx', 'as-Skitter' : './data/as-Skitter.mtx' } ``` ### Define the testing functions ``` # Data reader - the file format is MTX, so we will use the reader from SciPy def read_mtx_file(mm_file): print('Reading ' + str(mm_file) + '...') M = mmread(mm_file).asfptype() return M # CuGraph PageRank def cugraph_call(M, max_iter, tol, alpha): gdf = cudf.DataFrame() gdf['src'] = M.row gdf['dst'] = M.col print('\tcuGraph Solving... 
') t1 = time.time() # cugraph Pagerank Call G = cugraph.DiGraph() G.from_cudf_edgelist(gdf, source='src', destination='dst', renumber=False) df = cugraph.pagerank(G, alpha=alpha, max_iter=max_iter, tol=tol) t2 = time.time() - t1 return t2 # Basic NetworkX PageRank def networkx_call(M, max_iter, tol, alpha): nnz_per_row = {r: 0 for r in range(M.get_shape()[0])} for nnz in range(M.getnnz()): nnz_per_row[M.row[nnz]] = 1 + nnz_per_row[M.row[nnz]] for nnz in range(M.getnnz()): M.data[nnz] = 1.0/float(nnz_per_row[M.row[nnz]]) M = M.tocsr() if M is None: raise TypeError('Could not read the input graph') if M.shape[0] != M.shape[1]: raise TypeError('Shape is not square') # should be autosorted, but check just to make sure if not M.has_sorted_indices: print('sort_indices ... ') M.sort_indices() z = {k: 1.0/M.shape[0] for k in range(M.shape[0])} print('\tNetworkX Solving... ') # start timer t1 = time.time() Gnx = nx.DiGraph(M) pr = nx.pagerank(Gnx, alpha, z, max_iter, tol) t2 = time.time() - t1 return t2 # SciPy PageRank def networkx_scipy_call(M, max_iter, tol, alpha): nnz_per_row = {r: 0 for r in range(M.get_shape()[0])} for nnz in range(M.getnnz()): nnz_per_row[M.row[nnz]] = 1 + nnz_per_row[M.row[nnz]] for nnz in range(M.getnnz()): M.data[nnz] = 1.0/float(nnz_per_row[M.row[nnz]]) M = M.tocsr() if M is None: raise TypeError('Could not read the input graph') if M.shape[0] != M.shape[1]: raise TypeError('Shape is not square') # should be autosorted, but check just to make sure if not M.has_sorted_indices: print('sort_indices ... ') M.sort_indices() z = {k: 1.0/M.shape[0] for k in range(M.shape[0])} # SciPy Pagerank Call print('\tSciPy Solving... 
') t1 = time.time() Gnx = nx.DiGraph(M) pr = nx.pagerank_scipy(Gnx, alpha, z, max_iter, tol) t2 = time.time() - t1 return t2 ``` ### Run the benchmarks ``` # arrays to capture performance gains time_cu = [] time_nx = [] time_sp = [] perf_nx = [] perf_sp = [] names = [] # init libraries by doing a simple task v = './data/preferentialAttachment.mtx' M = read_mtx_file(v) trapids = cugraph_call(M, 100, 0.00001, 0.85) del M for k,v in data.items(): gc.collect() # Saved the file Name names.append(k) # read the data M = read_mtx_file(v) # call cuGraph - this will be the baseline trapids = cugraph_call(M, 100, 0.00001, 0.85) time_cu.append(trapids) # Now call NetworkX tn = networkx_call(M, 100, 0.00001, 0.85) speedUp = (tn / trapids) perf_nx.append(speedUp) time_nx.append(tn) # Now call SciPy tsp = networkx_scipy_call(M, 100, 0.00001, 0.85) speedUp = (tsp / trapids) perf_sp.append(speedUp) time_sp.append(tsp) print("cuGraph (" + str(trapids) + ") Nx (" + str(tn) + ") SciPy (" + str(tsp) + ")" ) del M ``` ### plot the output ``` %matplotlib inline plt.figure(figsize=(10,8)) bar_width = 0.35 index = np.arange(len(names)) _ = plt.bar(index, perf_nx, bar_width, color='g', label='vs Nx') _ = plt.bar(index + bar_width, perf_sp, bar_width, color='b', label='vs SciPy') plt.xlabel('Datasets') plt.ylabel('Speedup') plt.title('PageRank Performance Speedup') plt.xticks(index + (bar_width / 2), names) plt.xticks(rotation=90) # Text on the top of each barplot for i in range(len(perf_nx)): plt.text(x = (i - 0.55) + bar_width, y = perf_nx[i] + 25, s = round(perf_nx[i], 1), size = 12) for i in range(len(perf_sp)): plt.text(x = (i - 0.1) + bar_width, y = perf_sp[i] + 25, s = round(perf_sp[i], 1), size = 12) plt.legend() plt.show() ``` # Dump the raw stats ``` perf_nx perf_sp time_cu time_nx time_sp ``` ___ Copyright (c) 2020, NVIDIA CORPORATION. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ___
github_jupyter
# lab 4 ## import libs and connect db ``` !pip install psycopg2 import pandas import configparser import psycopg2 config = configparser.ConfigParser() config.read('config.ini') host = config['my aws']['host'] db = config['my aws']['db'] user = config['my aws']['user'] password = config['my aws']['password'] conn = psycopg2.connect( host=host, user=user, password=password, database=db ) print(user) cur=conn.cursor() ``` # Q1 ``` sql_q1 = """ select * from gp13.student """ df=pandas.read_sql_query(sql_q1,conn) df[:] ``` ## Q2 ``` sql_q2= """ select gp13.professor.p_name, gp13.course.course_name from gp13.professor inner join gp13.course on gp13.professor.p_email = gp13.course.p_email """ df=pandas.read_sql_query(sql_q2, conn) df[:] ``` # Q3 ``` sql_q3= """ select course_number, count(course_number) as enrolled from gp13.enroll_list group by course_number order by enrolled desc """ df=pandas.read_sql_query(sql_q3, conn) df.plot.bar(y='enrolled',x='course_number') ``` # Q4 ``` sql_q4= """ select gp13.professor.p_name, count(gp13.course.course_name) as teaching_number from gp13.professor inner join gp13.course on gp13.professor.p_email = gp13.course.p_email group by professor.p_name order by teaching_number desc """ df=pandas.read_sql_query(sql_q4, conn) df.plot.bar(y='teaching_number',x='p_name') ``` # Q5 ``` sql_q5_professor= """ insert into gp13.professor(p_email,p_name, office) values('{}','{}','{}') """ .format('new@jmu.edu','new','new_office') conn.commit() df=pandas.read_sql_query('select * from gp13.professor',conn) df[:] sql_q5_course= """ insert into gp13.course(course_number,course_name,room,p_email) values('{}','{}','{}','{}') """ .format('ia_new','new_c_name','new_room','new@jmu.edu') cur.execute(sql_q5_course) df=pandas.read_sql_query('select * from gp13.course',conn) df[:] ``` # Q6 ``` sql_q6_course = """ update gp13.course set p_email = '{}' where p_email ='{}' """.format('new@jmu.edu','waltontr@jmu.edu') cur.execute(sql_q6_course) conn.commit() 
df=pandas.read_sql_query('select * from gp13.course',conn) df[:] sql_q6_professor= """ delete from gp13.professor where p_email ='{}' """.format('waltontr@jmu.edu') cur.execute(sql_q6_professor) df=pandas.read_sql_query('select * from gp13.course',conn) df[:] ``` # close connection ``` cur.close() conn.close() ```
github_jupyter
# IElixir - Elixir kernel for Jupyter Project <img src="logo.png" title="Hosted by imgur.com" style="margin: 0 0;"/> --- ## Google Summer of Code 2015 > Developed by [Piotr Przetacznik](https://twitter.com/pprzetacznik) > Mentored by [José Valim](https://twitter.com/josevalim) --- ## References * [Elixir language](http://elixir-lang.org/) * [Jupyter Project](https://jupyter.org/) * [IElixir sources](https://github.com/pprzetacznik/IElixir) ## Getting Started ### Basic Types <pre> 1 # integer 0x1F # integer 1.0 # float true # boolean :atom # atom / symbol "elixir" # string [1, 2, 3] # list {1, 2, 3} # tuple </pre> ### Basic arithmetic ``` 1 + 2 5 * 5 10 / 2 div(10, 2) div 10, 2 rem 10, 3 0b1010 0o777 0x1F 1.0 1.0e-10 round 3.58 trunc 3.58 ``` ### Booleans ``` true true == false is_boolean(true) is_boolean(1) is_integer(5) is_float(5) is_number("5.0") ``` ### Atoms ``` :hello :hello == :world true == :true is_atom(false) is_boolean(:false) ``` ### Strings ``` "hellö" "hellö #{:world}" IO.puts "hello\nworld" is_binary("hellö") byte_size("hellö") String.length("hellö") String.upcase("hellö") ``` ### Anonymous functions ``` add = fn a, b -> a + b end is_function(add) is_function(add, 2) is_function(add, 1) add.(1, 2) add_two = fn a -> add.(a, 2) end add_two.(2) x = 42 (fn -> x = 0 end).() x ``` ### (Linked) Lists ``` a = [1, 2, true, 3] length [1, 2, 3] [1, 2, 3] ++ [4, 5, 6] [1, true, 2, false, 3, true] -- [true, false] hd(a) tl(a) hd [] [11, 12, 13] [104, 101, 108, 108, 111] 'hello' == "hello" ``` ### Tuples ``` {:ok, "hello"} tuple_size {:ok, "hello"} tuple = {:ok, "hello"} elem(tuple, 1) tuple_size(tuple) put_elem(tuple, 1, "world") tuple ``` ### Lists or tuples? 
``` list = [1|[2|[3|[]]]] [0] ++ list list ++ [4] File.read("LICENSE") File.read("path/to/unknown/file") ``` ### Other examples ``` 0x1F a = 25 b = 150 IO.puts(a+b) defmodule Math do def sum(a, b) do a + b end end Math.sum(1, 2) import ExUnit.CaptureIO capture_io(fn -> IO.write "john" end) == "john" ?a <<98>> == <<?b>> <<?g, ?o, ?\n>> == "go " {hlen, blen} = {4, 4} <<header :: binary-size(hlen), body :: binary-size(blen)>> = "headbody" {header, body} h() defmodule KV.Registry do use GenServer ## Client API @doc """ Starts the registry. """ def start_link(opts \\ []) do GenServer.start_link(__MODULE__, :ok, opts) end @doc """ Looks up the bucket pid for `name` stored in `server`. Returns `{:ok, pid}` if the bucket exists, `:error` otherwise. """ def lookup(server, name) do GenServer.call(server, {:lookup, name}) end @doc """ Ensures there is a bucket associated to the given `name` in `server`. """ def create(server, name) do GenServer.cast(server, {:create, name}) end ## Server Callbacks def init(:ok) do {:ok, HashDict.new} end def handle_call({:lookup, name}, _from, names) do {:reply, HashDict.fetch(names, name), names} end def handle_cast({:create, name}, names) do if HashDict.has_key?(names, name) do {:noreply, names} else {:ok, bucket} = KV.Bucket.start_link() {:noreply, HashDict.put(names, name, bucket)} end end end ExUnit.start() defmodule KV.RegistryTest do use ExUnit.Case, async: true setup do {:ok, registry} = KV.Registry.start_link {:ok, registry: registry} end test "spawns buckets", %{registry: registry} do assert KV.Registry.lookup(registry, "shopping") == :error KV.Registry.create(registry, "shopping") assert {:ok, bucket} = KV.Registry.lookup(registry, "shopping") KV.Bucket.put(bucket, "milk", 1) assert KV.Bucket.get(bucket, "milk") == 1 end end ``` ## IElixir magic commands Get output of previous cell. ``` ans ``` You can also access output of any cell using it's number. ``` out[142] ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt A = np.random.randn(4,3) B = np.sum(A, axis = 1, keepdims = True) B.shape ``` # Data Loading ``` data = pd.read_csv("ner_dataset.csv", encoding="latin1") data = data.drop(['POS'], axis =1) data.head() plt.style.use("ggplot") data = pd.read_csv("ner_dataset.csv", encoding="latin1") data = data.drop(['POS'], axis =1) data = data.fillna(method="ffill") words = set(list(data['Word'].values)) #Vocabulary words.add('PADword') n_words = len(words) tags = list(set(data["Tag"].values)) n_tags = len(tags) print(n_words,n_tags) ``` # Data Preprocessing ``` class SentenceGetter(object): def __init__(self, data): self.n_sent = 1 self.data = data self.empty = False agg_func = lambda s: [(w, t) for w, t in zip(s["Word"]. values.tolist(),s["Tag"].values.tolist())] self.grouped = self.data.groupby("Sentence #").apply(agg_func) self.sentences = [s for s in self.grouped] def get_next(self): try: s = self.grouped["Sentence: {}".format(self.n_sent)] self.n_sent += 1 return s except: return None getter = SentenceGetter(data) sent = getter.get_next() print(sent) sentences = getter.sentences print(len(sentences)) largest_sen = max(len(sen) for sen in sentences) print('biggest sentence has {} words'.format(largest_sen)) %matplotlib inline plt.hist([len(sen) for sen in sentences],bins=50) plt.xlabel('Sentence Length') plt.ylabel('Frequency') plt.show() max_len = 50 X = [[w[0]for w in s] for s in sentences] new_X = [] for seq in X: new_seq = [] for i in range(max_len): try: new_seq.append(seq[i]) except: new_seq.append("PADword") new_X.append(new_seq) print(new_X[0]) sentences[0] list(enumerate(tags)) tags2index from keras.preprocessing.sequence import pad_sequences tags2index = {t:i for i,t in enumerate(tags)} y = [[tags2index[w[1]] for w in s] for s in sentences] y = pad_sequences(maxlen=max_len, sequences=y, padding="post", value=tags2index["O"]) y ``` # Model Building and Training ``` from sklearn.model_selection 
import train_test_split import tensorflow as tf import tensorflow_hub as hub from keras import backend as K X_tr, X_te, y_tr, y_te = train_test_split(new_X, y, test_size=0.1, random_state=2018) sess = tf.Session() K.set_session(sess) elmo_model = hub.Module("https://tfhub.dev/google/elmo/2", trainable=True) sess.run(tf.global_variables_initializer()) sess.run(tf.tables_initializer()) batch_size = 32 def ElmoEmbedding(x): return elmo_model(inputs={"tokens": tf.squeeze(tf.cast(x, tf.string)),"sequence_len": tf.constant(batch_size*[max_len]) },signature="tokens",as_dict=True)["elmo"] from keras.models import Model, Input from keras.layers.merge import add from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda input_text = Input(shape=(max_len,), dtype=tf.string) embedding = Lambda(ElmoEmbedding, output_shape=(max_len, 1024))(input_text) x = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(embedding) x_rnn = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x) x = add([x, x_rnn]) # residual connection to the first biLSTM out = TimeDistributed(Dense(n_tags, activation="softmax"))(x) model = Model(input_text, out) model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]) model.summary() X_tr, X_val = X_tr[:1213*batch_size], X_tr[-135*batch_size:] y_tr, y_val = y_tr[:1213*batch_size], y_tr[-135*batch_size:] y_tr = y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1) y_val = y_val.reshape(y_val.shape[0], y_val.shape[1], 1) history = model.fit(np.array(X_tr), y_tr, validation_data=(np.array(X_val), y_val),batch_size=batch_size, epochs=3, verbose=1) model.save_weights('bilstm_model.hdf5') !pip install seqeval ``` # Model Evaluation ``` from seqeval.metrics import precision_score, recall_score, f1_score, classification_report X_te = X_te[:149*batch_size] test_pred = model.predict(np.array(X_te), verbose=1) idx2tag = {i: w for 
w, i in tags2index.items()} def pred2label(pred): out = [] for pred_i in pred: out_i = [] for p in pred_i: p_i = np.argmax(p) out_i.append(idx2tag[p_i].replace("PADword", "O")) out.append(out_i) return out def test2label(pred): out = [] for pred_i in pred: out_i = [] for p in pred_i: out_i.append(idx2tag[p].replace("PADword", "O")) out.append(out_i) return out pred_labels = pred2label(test_pred) test_labels = test2label(y_te[:149*32]) print(classification_report(test_labels, pred_labels)) i = 390 p = model.predict(np.array(X_te[i:i+batch_size]))[0] p = np.argmax(p, axis=-1) print("{:15} {:5}: ({})".format("Word", "Pred", "True")) print("="*30) for w, true, pred in zip(X_te[i], y_te[i], p): if w != "__PAD__": print("{:15}:{:5} ({})".format(w, tags[pred], tags[true])) history.history ?(figsize=(12,12)) ?(history.history["acc"],c = 'b') ?(history.history["val_acc"], c = 'g') plt.show() test_sentence = [["Hawking", "is", "a", "Fellow", "of", "the", "Royal", "Society", ",", "a", "lifetime", "member", "of", "the", "Pontifical", "Academy", "of", "Sciences", ",", "and", "a", "recipient", "of", "the", "Presidential", "Medal", "of", "Freedom", ",", "the", "highest", "civilian", "award", "in", "the", "United", "States", "."]] max_len = 50 X_test = [[w for w in s] for s in test_sentence] new_X_test = [] for seq in X_test: new_seq = [] for i in range(max_len): try: new_seq.append(seq[i]) except: new_seq.append("PADword") new_X_test.append(new_seq) new_X_test np.array(new_X_test,dtype='<U26') np.array(X_te)[1] ``` # Inference ``` #model.load_weights('bilstm_model.hdf5') #model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]) p = ?(np.array(new_X_test*32,dtype='<U26'))[0] p = ?(p, axis=-1) print("{:15} {:5}".format("Word", "Pred")) print("="*30) for w, pred in zip(new_X_test[0], p): if w != "__PAD__": print("{:15}:{:5}".format(w, tags[pred])) ```
github_jupyter
##### Copyright 2019 The TF-Agents Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` ### Checkpointer and PolicySaver <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/agents/tutorials/10_checkpointer_policysaver_tutorial"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Introduction `tf_agents.utils.common.Checkpointer` is a utility to save/load the training state, policy state, and replay_buffer state to/from a local storage. `tf_agents.policies.policy_saver.PolicySaver` is a tool to save/load only the policy, and is lighter than `Checkpointer`. 
You can use `PolicySaver` to deploy the model as well without any knowledge of the code that created the policy. In this tutorial, we will use DQN to train a model, then use `Checkpointer` and `PolicySaver` to show how we can store and load the states and model in an interactive way. Note that we will use TF2.0's new saved_model tooling and format for `PolicySaver`. ## Setup If you haven't installed the following dependencies, run: ``` #@test {"skip": true} !sudo apt-get install -y xvfb ffmpeg !pip install 'gym==0.10.11' !pip install 'imageio==2.4.0' !pip install 'pyglet==1.3.2' !pip install 'xvfbwrapper==0.2.9' !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import io import matplotlib import matplotlib.pyplot as plt import os import shutil import tempfile import tensorflow as tf import zipfile import IPython try: from google.colab import files except ImportError: files = None from tf_agents.agents.dqn import dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import q_network from tf_agents.policies import policy_saver from tf_agents.policies import py_tf_eager_policy from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() tempdir = os.getenv("TEST_TMPDIR", tempfile.gettempdir()) #@test {"skip": true} # Set up a virtual display for rendering OpenAI gym environments. import xvfbwrapper xvfbwrapper.Xvfb(1400, 900, 24).start() ``` ## DQN agent We are going to set up DQN agent, just like in the previous colab. 
The details are hidden by default as they are not core part of this colab, but you can click on 'SHOW CODE' to see the details. ### Hyperparameters ``` env_name = "CartPole-v1" collect_steps_per_iteration = 100 replay_buffer_capacity = 100000 fc_layer_params = (100,) batch_size = 64 learning_rate = 1e-3 log_interval = 5 num_eval_episodes = 10 eval_interval = 1000 ``` ### Environment ``` train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ``` ### Agent ``` #@title q_net = q_network.QNetwork( train_env.observation_spec(), train_env.action_spec(), fc_layer_params=fc_layer_params) optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) global_step = tf.compat.v1.train.get_or_create_global_step() agent = dqn_agent.DqnAgent( train_env.time_step_spec(), train_env.action_spec(), q_network=q_net, optimizer=optimizer, td_errors_loss_fn=common.element_wise_squared_loss, train_step_counter=global_step) agent.initialize() ``` ### Data Collection ``` #@title replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) collect_driver = dynamic_step_driver.DynamicStepDriver( train_env, agent.collect_policy, observers=[replay_buffer.add_batch], num_steps=collect_steps_per_iteration) # Initial data collection collect_driver.run() # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2).prefetch(3) iterator = iter(dataset) ``` ### Train the agent ``` #@title # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) def train_one_iteration(): # Collect a few steps using collect_policy and save to the replay buffer. 
collect_driver.run() # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) iteration = agent.train_step_counter.numpy() print ('iteration: {0} loss: {1}'.format(iteration, train_loss.loss)) ``` ### Video Generation ``` #@title def embed_gif(gif_buffer): """Embeds a gif file in the notebook.""" tag = '<img src="data:image/gif;base64,{0}"/>'.format(base64.b64encode(gif_buffer).decode()) return IPython.display.HTML(tag) def run_episodes_and_create_video(policy, eval_tf_env, eval_py_env): num_episodes = 3 frames = [] for _ in range(num_episodes): time_step = eval_tf_env.reset() frames.append(eval_py_env.render()) while not time_step.is_last(): action_step = policy.action(time_step) time_step = eval_tf_env.step(action_step.action) frames.append(eval_py_env.render()) gif_file = io.BytesIO() imageio.mimsave(gif_file, frames, format='gif', fps=60) IPython.display.display(embed_gif(gif_file.getvalue())) ``` ### Generate a video Check the performance of the policy by generating a video. ``` print ('global_step:') print (global_step) run_episodes_and_create_video(agent.policy, eval_env, eval_py_env) ``` ## Setup Checkpointer and PolicySaver Now we are ready to use Checkpointer and PolicySaver. ### Checkpointer ``` checkpoint_dir = os.path.join(tempdir, 'checkpoint') train_checkpointer = common.Checkpointer( ckpt_dir=checkpoint_dir, max_to_keep=1, agent=agent, policy=agent.policy, replay_buffer=replay_buffer, global_step=global_step ) ``` ### Policy Saver ``` policy_dir = os.path.join(tempdir, 'policy') tf_policy_saver = policy_saver.PolicySaver(agent.policy) ``` ### Train one iteration ``` #@test {"skip": true} print('Training one iteration....') train_one_iteration() ``` ### Save to checkpoint ``` train_checkpointer.save(global_step) ``` ### Restore checkpoint For this to work, the whole set of objects should be recreated the same way as when the checkpoint was created. 
``` train_checkpointer.initialize_or_restore() global_step = tf.compat.v1.train.get_global_step() ``` Also save policy and export to a location ``` tf_policy_saver.save(policy_dir) ``` The policy can be loaded without having any knowledge of what agent or network was used to create it. This makes deployment of the policy much easier. Load the saved policy and check how it performs ``` saved_policy = tf.compat.v2.saved_model.load(policy_dir) run_episodes_and_create_video(saved_policy, eval_env, eval_py_env) ``` ## Export and import The rest of the colab will help you export / import checkpointer and policy directories such that you can continue training at a later point and deploy the model without having to train again. Now you can go back to 'Train one iteration' and train a few more times such that you can understand the difference later on. Once you start to see slightly better results, continue below. ``` #@title Create zip file and upload zip file (double-click to see the code) def create_zip_file(dirname, base_filename): return shutil.make_archive(base_filename, 'zip', dirname) def upload_and_unzip_file_to(dirname): if files is None: return uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) shutil.rmtree(dirname) zip_files = zipfile.ZipFile(io.BytesIO(uploaded[fn]), 'r') zip_files.extractall(dirname) zip_files.close() ``` Create a zipped file from the checkpoint directory. ``` train_checkpointer.save(global_step) checkpoint_zip_filename = create_zip_file(checkpoint_dir, os.path.join(tempdir, 'exported_cp')) ``` Download the zip file. 
``` #@test {"skip": true} if files is not None: files.download(checkpoint_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469 ``` After training for some time (10-15 times), download the checkpoint zip file, and go to "Runtime > Restart and run all" to reset the training, and come back to this cell. Now you can upload the downloaded zip file, and continue the training. ``` #@test {"skip": true} upload_and_unzip_file_to(checkpoint_dir) train_checkpointer.initialize_or_restore() global_step = tf.compat.v1.train.get_global_step() ``` Once you have uploaded checkpoint directory, go back to 'Train one iteration' to continue training or go back to 'Generate a video' to check the performance of the loaded poliicy. Alternatively, you can save the policy (model) and restore it. Unlike checkpointer, you cannot continue with the training, but you can still deploy the model. Note that the downloaded file is much smaller than that of the checkpointer. ``` tf_policy_saver.save(policy_dir) policy_zip_filename = create_zip_file(policy_dir, os.path.join(tempdir, 'exported_policy')) #@test {"skip": true} if files is not None: files.download(policy_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469 ``` Upload the downloaded policy directory (exported_policy.zip) and check how the saved policy performs. ``` #@test {"skip": true} upload_and_unzip_file_to(policy_dir) saved_policy = tf.compat.v2.saved_model.load(policy_dir) run_episodes_and_create_video(saved_policy, eval_env, eval_py_env) ``` ## SavedModelPyTFEagerPolicy If you don't want to use TF policy, then you can also use the saved_model directly with the Python env through the use of `py_tf_eager_policy.SavedModelPyTFEagerPolicy`. Note that this only works when eager mode is enabled. 
``` eager_py_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy( policy_dir, eval_py_env.time_step_spec(), eval_py_env.action_spec()) # Note that we're passing eval_py_env not eval_env. run_episodes_and_create_video(eager_py_policy, eval_py_env, eval_py_env) ```
github_jupyter
# TUTORIAL FOR TRAVELING SALESMAN PROBLEM __Introduction__: The famous travelling salesman problem (also called the travelling salesperson problem or in short TSP) is a well-known NP-hard problem in combinatorial optimization, asking for the shortest possible route that visits each city exactly once, given a list of cities and the distances between each pair of cities [1]. Applications of the TSP can be found in planning, logistics, and the manufacture of microchips. In these applications, the general concept of a city represents, for example, customers, or points on a chip. __Methods__: Using brute-force search one could try all permutations (ordered combinations) and see which one is cheapest. The running time for this approach, however, lies within a polynomial factor of $O(n!)$, the factorial of the number of cities $n$. Thus, this solution becomes impractical already for only $\sim 20$ cities. Still, many heuristics are known, and some instances with tens of thousands of cities can be solved completely [1]. In this hello-world tutorial we will solve small instances of the TSP with one particular approach, that is simulated annealing and quantum annealing, as made available with D-Wave on Amazon Braket. Specifically, we will leverage two different quantum devices made available through Amazon Braket, namely D-Wave's 2000Q QPU (with 2000 qubits and 6000 couplers) and D-Wave's Advantage QPU (with more than 5000 qubits and more than 35,000 couplers). __TSP as graph problem__: The solution to the TSP can be viewed as a specific ordering of the vertices in a weighted graph. Taking an undirected weighted graph, cities correspond to the graph's nodes, with paths corresponding to the graph's edges, and a path's distance is the edge's weight. Typically, the graph is complete where each pair of nodes is connected by an edge. If no connection exists between two cities, one can add an arbitrarily long edge to complete the graph without affecting the optimal tour [1]. 
The goal is then to find a Hamiltonian cycle with the least weight. We will provide figures for visualization below. __Binary encoding__: To solve the TSP with (quantum) annealing we need to formulate the TSP as a QUBO problem of the general form $$ \mathrm{min} \hspace{0.1cm} y=x^{\intercal}Qx + x^{\intercal}B + c, $$ where $x=(x_{1}, x_{2}, \dots)$ is a vector of binary decision variables $x_{i}=0,1$. To this end, here we introduce double-indexed binary variables $x_{i,j}$ with $x_{i,j}=1$ if city $i$is located at position $j$ in the cycle and $x_{i,j}=0$ otherwise. Consider for example three cities: New York indexed with $i=0$, Los Angeles ($i=1$), and Chicago ($i=2$). Then, $x_{0,0}=x_{1,2}=x_{2,1}=1$ means that we visit these cities in the order New York - Chicago - Los Angeles. With this encoding $x_{i,j}$ with $i=0,\dots, N-1$ and $j=0,\dots, N-1$ in total we deal with $N^2$ binary variables for a problem with $N$ cities (nodes in the graph), causing a quadratic overhead. Our goal is then to find the Hamiltonian cycle with the shortest length, as described by the following objective function $$ H_{\mathrm{dist}} = \sum_{i,j} D_{i,j} \sum_{k} x_{i,k}x_{j,k+1}, $$ with $D_{i,j}$ being the distance between city $i$ and city $j$. Note that the product $x_{i,k}x_{j,k+1}=1$, only if city $i$ is at position $k$ in the cycle and city $j$ is visited right after city $i$; in that case we add the distance $D_{i,j}$ to our objective function which we would like to minimize. Overall, we sum all costs of the distances between successive cities. Finally, we need to account for the following constraints: (i) First, each city should occur exactly once in the cycle. This can be written as: $$ \sum_{j=0}^{N-1} x_{i,j}=1 \hspace{0.5cm} \forall i=0,...,N-1. $$ (ii) Second, each position in the cycle should be assigned to exactly one city. Mathematically this means: $$ \sum_{i=0}^{N-1} x_{i,j}=1 \hspace{0.5cm} \forall j=0,...,N-1. 
$$ For illustration a valid solution for $N=4$ for the route $[1,3,2,4]$ could look as follows (note that every row and column sums up to one, as desired): <div> <img src="attachment:image.png" width="300"/> </div> To enforce solutions that satisfy these constraints we add the following penalty terms to our Hamiltonian $$ H_{\mathrm{constraint}} = P \sum_{i=0}^{N-1} \left(1-\sum_{j=0}^{N-1} x_{i,j}\right)^{2} + P \sum_{j=0}^{N-1} \left(1-\sum_{i=0}^{N-1} x_{i,j}\right)^{2} $$ With these terms we enforce solutions where every city is visited exactly once as part of the tour. Otherwise, a high penalty value $P$ would be added to the solution, making it unfavorable. For simplicity we use the same penalty parameter $P$ for the two types of constraints covered in $H_{\mathrm{constraint}}$. The total Hamiltonian for the TSP problem then reads $$ H = H_{\mathrm{dist}} + H_{\mathrm{constraint}}. $$ We will perform hyperparameter optimization on the penalty parameter $P$ in order to find a good solution that complies with the constraints outlined above. If $P$ is not chosen properly it is possible that the algorithm provides solutions that are not acceptable, for example routes that do not cover all cities or routes that visit some cities multiple times. In our code below we apply simple heuristic postprocessing steps to account for these issues. 
## IMPORTS AND SETUP ``` !pip install pandas -q import boto3 from braket.aws import AwsDevice from braket.ocean_plugin import BraketSampler, BraketDWaveSampler import numpy as np import networkx as nx import dimod import dwave_networkx as dnx from dimod.binary_quadratic_model import BinaryQuadraticModel from dwave.system.composites import EmbeddingComposite import matplotlib.pyplot as plt # magic word for producing visualizations in notebook %matplotlib inline from collections import defaultdict import itertools import pandas as pd # local imports from utils_tsp import get_distance, traveling_salesperson %load_ext autoreload %autoreload 2 # Please enter the S3 bucket you created during onboarding in the code below my_bucket = f"amazon-braket-Your-Bucket-Name" # the name of the bucket my_prefix = "Your-Folder-Name" # the name of the folder in the bucket s3_folder = (my_bucket, my_prefix) # fix random seed for reproducibility seed = 1 np.random.seed(seed) ``` ## IMPORT DATASET Sample datasets for TSP are available [here](https://people.sc.fsu.edu/~jburkardt/datasets/tsp/tsp.html). Here, we start with a very small dataset comprising five cities only. The minimal tour is known to have length 19. 
``` # load dataset data = pd.read_csv('tsp_data/five_d.txt', sep='\s+', header=None) # show data set for inter-city distances data # distance between two example cities idx_city1 = 0 idx_city2 = 1 distance = data[idx_city1][idx_city2] print('Distance between city {} and city {} is {}.'.format(idx_city1, idx_city2, distance)) # get number of cities number_cities = data.shape[0] print('Total number of cities:', number_cities) ``` ## SET UP GRAPH We can generate ```networkx``` graphs from ```pandas``` data frames, as explained [here](https://networkx.github.io/documentation/stable/reference/convert.html#pandas) and in particular [here](https://networkx.github.io/documentation/stable/reference/generated/networkx.convert_matrix.from_pandas_adjacency.html?highlight=from%20pandas#networkx.convert_matrix.from_pandas_adjacency). ``` # G = nx.from_pandas_dataframe(data) G = nx.from_pandas_adjacency(data) # pos = nx.random_layout(G) pos = nx.spring_layout(G, seed=seed) # get characteristics of graph nodes = G.nodes() edges = G.edges() weights = nx.get_edge_attributes(G,'weight'); # print weights of graph print('Weights of graph:', weights) # show graph with weigths plt.axis('off'); nx.draw_networkx(G, pos, with_labels=True); nx.draw_networkx_edge_labels(G, pos, edge_labels=weights); ``` The weights of this fully-connected graph correspond to the distances between the cities. ## QUBO FOR TSP We can get the QUBO matrix using the ```traveling_salesperson_qubo``` method as described [here](https://docs.ocean.dwavesys.com/projects/dwave-networkx/en/latest/reference/algorithms/generated/dwave_networkx.algorithms.tsp.traveling_salesperson_qubo.html#dwave_networkx.algorithms.tsp.traveling_salesperson_qubo). This method will return the QUBO with ground states corresponding to a minimum TSP route. Here, if $|G|$ is the number of nodes (cities) in the original graph, the resulting QUBO will have $|G|^2$ variables/nodes and $|G|^2(|G|^2-1)/2$ edges. 
There is a quadratic overhead because of the binary encoding $x_{i,j}=1$ if city $i$ is at position $j$ on the route and zero otherwise. The Lagrange penalty parameter enforces the constraints that every city should be visited exactly once in our route (i.e., we do not leave out any city and we do not visit cities multiple times). As this parameter can be tuned we will run hyperparameter optimization (HPO) to find a good value for this hyperparameter. ``` # get QUBO for TSP tsp_qubo = dnx.algorithms.tsp.traveling_salesperson_qubo(G) # find default Langrange parameter for enforcing constraints # set parameters lagrange = None weight='weight' # get corresponding QUBO step by step N = G.number_of_nodes() if lagrange is None: # If no lagrange parameter provided, set to 'average' tour length. # Usually a good estimate for a lagrange parameter is between 75-150% # of the objective function value, so we come up with an estimate for # tour length and use that. if G.number_of_edges()>0: lagrange = G.size(weight=weight)*G.number_of_nodes()/G.number_of_edges() else: lagrange = 2 print('Default Lagrange parameter:', lagrange) # create list around default value for HPO lagrange_list= list(np.arange(int(0.8*lagrange), int(1.1*lagrange))) print('Lagrange parameter for HPO:', lagrange_list) ``` ## SOLUTION WITH SIMULATED ANNEALING First let us solve the TSP problem with classical simulated annealing. To this end we can simply call the built-in ```traveling_salesperson(...)``` routine from the ```dwave_networkx``` package using the ```SimulatedAnnealingSampler``` sampler as provided in the standard Ocean tool suite. 
``` # use (classical) simulated annealing sampler = dimod.SimulatedAnnealingSampler() # route = dnx.traveling_salesperson(G, dimod.ExactSolver(), start=0) route = dnx.traveling_salesperson(G, sampler, start=0) print('Route found with simulated annealing:', route) # get the total distance total_dist = 0 for idx, node in enumerate(route[:-1]): dist = data[route[idx+1]][route[idx]] total_dist += dist print('Total distance (without return):', total_dist) # add distance between start and end point to complete cycle return_distance = data[route[0]][route[-1]] print('Distance between start and end:', return_distance) # get distance for full cyle distance = total_dist + return_distance print('Total distance (including return):', distance) ``` ## SOLUTION WITH QUANTUM ANNEALING ON D-WAVE WITH HPO FOR LAGRANGE PARAMETER Now let us run the TSP problem on D-Wave's 2000Q QPU, together with hyperparameter optimization for the Langrange parameter. To this end, we augment the ```traveling_salesperson(...)``` routine with post-processing heuristics that correct for invalid solutions if some cities are not present in the sample produced by D-Wave or if some cities are duplicates in the route. The original source code for ```traveling_salesperson(...)``` can be found in the Appendix. 
``` # run TSP with imported TSP routine sampler = BraketDWaveSampler(s3_folder,'arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6') sampler = EmbeddingComposite(sampler) # set parameters num_shots = 1000 start_city = 0 best_distance = sum(weights.values()) best_route = [None]*len(G) # run HPO to find route for lagrange in lagrange_list: print('Running quantum annealing for TSP with Lagrange parameter=', lagrange) route = traveling_salesperson(G, sampler, lagrange=lagrange, start=start_city, num_reads=num_shots, answer_mode="histogram") # print route print('Route found with D-Wave:', route) # print distance total_dist, distance_with_return = get_distance(route, data) # update best values if distance_with_return < best_distance: best_distance = distance_with_return best_route = route print('---FINAL SOLUTION---') print('Best solution found with D-Wave:', best_route) print('Total distance (including return):', best_distance) ``` Now let us visualize the solution found by the D-Wave QPU. First we plot again the original graph with nodes representing cities and weighted edges representing distances between the cities. It is a complete graph showing the distance for every pair of cities. Then, we plot below the graph showing the proposed route, with steps labelling the specific sequence connecting the individual stops on the route. ``` # show original graph with weigths plt.axis('off'); nx.draw_networkx(G, pos, with_labels=True, font_color='w'); nx.draw_networkx_edge_labels(G, pos, edge_labels=weights); # get mapping from original nodes to position in cycle node_labels = {list(nodes)[ii]: best_route[ii] for ii in range(number_cities)} # Construct route as list of (node_i, node_i+1) sol_graph_base = [(best_route[idx], best_route[idx+1]) for idx in range(len(best_route)-1)] # Establish weights between nodes along route, allowing for mirrored keys (i.e. 
weights[(0, 1)] = weights[(1, 0)]) best_weights = {k: weights[k] if k in weights.keys() else weights[(k[1],k[0])] for k in sol_graph_base} # Rebuild graph containing only route connections G_best = nx.Graph(sol_graph_base) route_labels = {x: f'step_{i}={best_weights[x]}' for i, x in enumerate(sol_graph_base)} # show solution plt.axis('off'); nx.draw_networkx(G_best, pos, font_color='w'); nx.draw_networkx_edge_labels(G_best, pos, edge_labels=route_labels, label_pos=0.25); ``` In conclusion, in this part of our tutorial we have solved a very small instance of the famous NP-hard TSP problem using both (classical) simulated annealing and quantum annealing using D-Wave's Ocean tool suite that is natively supported on Amazon Braket. While there are classical methods that can solve this problem very efficiently, at least to a very good approximation, this is an educational tutorial focused on the formulation of a specific QUBO problem and the approximate solution thereof using (quantum) annealing methods. Below we extend our analysis to larger problem sizes that cannot be embedded on the 2000Q D-Wave chip with Chimera connectivity, but can be run on the larger Advantage chip with $\sim 5000$ physical variables and Pegasus connectivity. ## LARGER TSP PROBLEM ON D-WAVE ADVANTAGE CHIP Next we run a larger problem instance of TSP on D-Wave's Advantage chip with over 5000 physical qubits and Pegasus connectivity graph. We take a dataset comprising ten cities; we have taken the original data set with 15 cities from [here](https://people.sc.fsu.edu/~jburkardt/datasets/tsp/tsp.html) and cut it down to a smaller dataset containing only the first ten cities. First let us load and inspect the data set for the inter-city distances. 
``` # load dataset data10 = pd.read_csv('tsp_data/data10cities.csv') # rename columns from object to int dic_map = {} for key in data10.columns: d = {key: int(key)} dic_map.update(d) data10 = data10.rename(columns=dic_map) # show data set for inter-city distances data10 ``` Next, using the ```networkx``` library again we display this problem as a complete graph with a single node per city and weighted edges specifying the intercity distances. ``` # G = nx.from_pandas_dataframe(data) G = nx.from_pandas_adjacency(data10) # pos = nx.random_layout(G) pos = nx.spring_layout(G, seed=seed) # get characteristics of graph nodes = G.nodes() edges = G.edges() weights = nx.get_edge_attributes(G,'weight'); # show graph with weigths plt.figure(figsize=(10,10)) plt.axis('off'); nx.draw_networkx(G, pos, with_labels=True); nx.draw_networkx_edge_labels(G, pos, edge_labels=weights, label_pos=0.25); ``` Now that the problem is set up as a graph problem, we first use classical simulated annealing to easily build a classical benchmark, using the ```SimulatedAnnealingSampler``` that comes out of the box with the ```dimod``` library. ``` # use (classical) simulated annealing sampler = dimod.SimulatedAnnealingSampler() # route = dnx.traveling_salesperson(G, dimod.ExactSolver(), start=0) route = dnx.traveling_salesperson(G, sampler, start=0) print('Route found with simulated annealing:', route) # print distance total_dist, distance_with_return = get_distance(route, data10) ``` Below again we will run HPO to try several numerical values for the hyperparameter $P$ enforcing the constraints within the cost function. To this end we first set up a list of parameters to loop over later in our simulation routine. ``` # find default Langrange parameter for enforcing constraints # set parameters lagrange = None weight='weight' # get corresponding QUBO step by step N = G.number_of_nodes() if lagrange is None: # If no lagrange parameter provided, set to 'average' tour length. 
# Usually a good estimate for a lagrange parameter is between 75-150% # of the objective function value, so we come up with an estimate for # tour length and use that. if G.number_of_edges()>0: lagrange = G.size(weight=weight)*G.number_of_nodes()/G.number_of_edges() else: lagrange = 2 print('Default Lagrange parameter:', lagrange) # create list around default value for HPO # lagrange_list= list(np.arange(int(0.99*lagrange), int(1.01*lagrange))) lagrange_list= [int(lagrange)-10, int(lagrange), int(lagrange)+10] print('Lagrange parameter for HPO:', lagrange_list) ``` Next, we try to run this problem on D-Wave's 2000Q backend with Chimera connectivity. Here, we will run into an ```ValueError: no embedding found```, because a problem with 10 cities results in a fully-connected problem with 100 logical variables. This problem size cannot be embedded onto the sparse Chimera graph with $\sim 2000$ physical qubits. Therefore we will try to run the same problem on the larger Advantage chip below. ``` # run TSP on 2000Q chip sampler = BraketDWaveSampler(s3_folder,'arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6') sampler = EmbeddingComposite(sampler) # set parameters num_shots = 1000 start_city = 0 try: print('Running quantum annealing for TSP with Lagrange parameter=', lagrange) route = traveling_salesperson(G, sampler, lagrange=lagrange_list[0], start=start_city, num_reads=num_shots, answer_mode="histogram") # print route print('Route found with D-Wave:', route) except ValueError: print("ValueError: no embedding found. This problem is too large to be embedded on the 2000Q chip.") ``` Next we simply switch to the D-Wave's Advantage chip with over 5000 physical qubits and Pegasus connectivity graph. This switch amounts to changing one line of code. 
``` # run TSP on Advantage chip sampler = BraketDWaveSampler(s3_folder,'arn:aws:braket:::device/qpu/d-wave/Advantage_system4') sampler = EmbeddingComposite(sampler) # set parameters num_shots = 1000 start_city = 0 best_distance = sum(weights.values()) best_route = [None]*len(G) # run HPO to find route for lagrange in lagrange_list: print('Running quantum annealing for TSP with Lagrange parameter=', lagrange) route = traveling_salesperson(G, sampler, lagrange=lagrange, start=start_city, num_reads=num_shots, answer_mode="histogram") # print route print('Route found with D-Wave:', route) # print distance total_dist, distance_with_return = get_distance(route, data10) # update best values if distance_with_return < best_distance: best_distance = distance_with_return best_route = route print('---FINAL SOLUTION---') print('Best solution found with D-Wave:', best_route) print('Total distance (including return):', best_distance) ``` In conclusion in this tutorial we have mapped the canonical TSP to a QUBO problem using the Ocean tool suite that is natively supported on Amazon Braket. We have used classical simulated annealing and quantum annealing to find solutions to this problem. Specifically, we have seen that we can solve larger problem instances with the Advantage chip (that has more than 5000 qubits and more than 35,000 couplers) than what we could solve for using the previous-generation 2000Q QPU with roughly 2000 qubits and 6000 couplers. --- ## APPENDIX ### APPENDIX FOR HEURISTIC POSTPROCESSING If there are cities unassigned to route, we just fill the route with these without optimization. First, we can take care of filling ```None``` values. 
``` # set example route route = [None, 4, 0, 3, 1] print('Original route:', route) # get lists with all cities list_cities = list(nodes) # get not assigned cities cities_unassigned = [city for city in list_cities if city not in route] # fill None values for idx, city in enumerate(route): if city == None: route[idx] = cities_unassigned[0] cities_unassigned.remove(route[idx]) print('Route after filling heuristic:', route) # randomly permute cities_unassigned = [0, 4, 7] np.random.permutation(cities_unassigned) ``` Second, we can still have proposed routes with cities appearing multiple times in exchange for some cities not visited at all. ``` # set example route route = [0, 2, 3, 4, 4] print('Original route:', route) unique_entries = set(route) number_unique_entries = len(unique_entries) if number_unique_entries != len(route): print('Solution not valid.') # get unassigned cities cities_unassigned = [city for city in list_cities if city not in route] print('Unassigned cities:', cities_unassigned) # replace duplicates route_new = [] for city in route: if city not in route_new: route_new.append(city) else: route_new.append(cities_unassigned[0]) cities_unassigned.remove(route_new[-1]) print('Route after heuristics:', route_new) # set example route route = [0, 0, 1, 1, 1] print('Original route:', route) unique_entries = set(route) number_unique_entries = len(unique_entries) if number_unique_entries != len(route): print('Solution not valid.') # get unassigned cities cities_unassigned = [city for city in list_cities if city not in route] print('Unassigned cities:', cities_unassigned) # replace duplicates route_new = [] for city in route: if city not in route_new: route_new.append(city) else: route_new.append(cities_unassigned[0]) cities_unassigned.remove(route_new[-1]) print('Route after heuristics:', route_new) ``` ### APPENDIX: ORIGINAL SOURCE CODE WITH LINKS Here we display the ocean source code used above for solving the TSP problem. 
We show the code for both ```traveling_salesperson(...)``` taken from [here](https://docs.ocean.dwavesys.com/projects/dwave-networkx/en/latest/_modules/dwave_networkx/algorithms/tsp.html#traveling_salesperson) and ```traveling_salesperson_qubo(...)``` taken from [here](https://docs.ocean.dwavesys.com/projects/dwave-networkx/en/latest/_modules/dwave_networkx/algorithms/tsp.html#traveling_salesperson_qubo). ```python @binary_quadratic_model_sampler(1) def traveling_salesperson(G, sampler=None, lagrange=None, weight='weight', start=None, **sampler_args): """Returns an approximate minimum traveling salesperson route. Defines a QUBO with ground states corresponding to the minimum routes and uses the sampler to sample from it. A route is a cycle in the graph that reaches each node exactly once. A minimum route is a route with the smallest total edge weight. Parameters ---------- G : NetworkX graph The graph on which to find a minimum traveling salesperson route. This should be a complete graph with non-zero weights on every edge. sampler : A binary quadratic model sampler. A sampler is a process that samples from low energy states in models defined by an Ising equation or a Quadratic Unconstrained Binary Optimization Problem (QUBO). A sampler is expected to have a 'sample_qubo' and 'sample_ising' method. A sampler is expected to return an iterable of samples, in order of increasing energy. If no sampler is provided, one must be provided using the `set_default_sampler` function. lagrange : number, optional (default None) Lagrange parameter to weight constraints (visit every city once) versus objective (shortest distance route). weight : optional (default 'weight') The name of the edge attribute containing the weight. start : node, optional If provided, the route will begin at `start`. sampler_args : Additional keyword parameters are passed to the sampler. Returns ------- route : list List of nodes in order to be visited on a route Examples -------- >>> import dimod ... 
>>> G = nx.Graph() >>> G.add_weighted_edges_from({(0, 1, .1), (0, 2, .5), (0, 3, .1), (1, 2, .1), ... (1, 3, .5), (2, 3, .1)}) >>> dnx.traveling_salesperson(G, dimod.ExactSolver(), start=0) # doctest: +SKIP [0, 1, 2, 3] Notes ----- Samplers by their nature may not return the optimal solution. This function does not attempt to confirm the quality of the returned sample. """ # Get a QUBO representation of the problem Q = traveling_salesperson_qubo(G, lagrange, weight) # use the sampler to find low energy states response = sampler.sample_qubo(Q, **sampler_args) sample = response.first.sample route = [None]*len(G) for (city, time), val in sample.items(): if val: route[time] = city if start is not None and route[0] != start: # rotate to put the start in front idx = route.index(start) route = route[idx:] + route[:idx] return route ``` ```python def traveling_salesperson_qubo(G, lagrange=None, weight='weight'): """Return the QUBO with ground states corresponding to a minimum TSP route. If :math:`|G|` is the number of nodes in the graph, the resulting qubo will have: * :math:`|G|^2` variables/nodes * :math:`2 |G|^2 (|G| - 1)` interactions/edges Parameters ---------- G : NetworkX graph A complete graph in which each edge has a attribute giving its weight. lagrange : number, optional (default None) Lagrange parameter to weight constraints (no edges within set) versus objective (largest set possible). weight : optional (default 'weight') The name of the edge attribute containing the weight. Returns ------- QUBO : dict The QUBO with ground states corresponding to a minimum travelling salesperson route. The QUBO variables are labelled `(c, t)` where `c` is a node in `G` and `t` is the time index. For instance, if `('a', 0)` is 1 in the ground state, that means the node 'a' is visted first. """ N = G.number_of_nodes() if lagrange is None: # If no lagrange parameter provided, set to 'average' tour length. 
# Usually a good estimate for a lagrange parameter is between 75-150% # of the objective function value, so we come up with an estimate for # tour length and use that. if G.number_of_edges()>0: lagrange = G.size(weight=weight)*G.number_of_nodes()/G.number_of_edges() else: lagrange = 2 # some input checking if N in (1, 2) or len(G.edges) != N*(N-1)//2: msg = "graph must be a complete graph with at least 3 nodes or empty" raise ValueError(msg) # Creating the QUBO Q = defaultdict(float) # Constraint that each row has exactly one 1 for node in G: for pos_1 in range(N): Q[((node, pos_1), (node, pos_1))] -= lagrange for pos_2 in range(pos_1+1, N): Q[((node, pos_1), (node, pos_2))] += 2.0*lagrange # Constraint that each col has exactly one 1 for pos in range(N): for node_1 in G: Q[((node_1, pos), (node_1, pos))] -= lagrange for node_2 in set(G)-{node_1}: # QUBO coefficient is 2*lagrange, but we are placing this value # above *and* below the diagonal, so we put half in each position. Q[((node_1, pos), (node_2, pos))] += lagrange # Objective that minimizes distance for u, v in itertools.combinations(G.nodes, 2): for pos in range(N): nextpos = (pos + 1) % N # going from u -> v Q[((u, pos), (v, nextpos))] += G[u][v][weight] # going from v -> u Q[((v, pos), (u, nextpos))] += G[u][v][weight] return Q ``` --- ## REFERENCES [1] Wikipedia: [Travelling salesman problem](https://en.wikipedia.org/wiki/Travelling_salesman_problem).
github_jupyter
# Logistic Regression Notebook version: 2.0 (Nov 21, 2017) 2.1 (Oct 19, 2018) Author: Jesús Cid Sueiro (jcid@tsc.uc3m.es) Jerónimo Arenas García (jarenas@tsc.uc3m.es) Changes: v.1.0 - First version v.1.1 - Typo correction. Prepared for slide presentation v.2.0 - Prepared for Python 3.0 (backcompmatible with 2.7) Assumptions for regression model modified v.2.1 - Minor changes regarding notation and assumptions ``` from __future__ import print_function # To visualize plots in the notebook %matplotlib inline # Imported libraries import csv import random import matplotlib import matplotlib.pyplot as plt import pylab import numpy as np from mpl_toolkits.mplot3d import Axes3D from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model ``` # Logistic Regression ## 1. Introduction ### 1.1. Binary classification and decision theory. The MAP criterion The goal of a classification problem is to assign a *class* or *category* to every *instance* or *observation* of a data collection. Here, we will assume that every instance ${\bf x}$ is an $N$-dimensional vector in $\mathbb{R}^N$, and that the class $y$ of sample ${\bf x}$ is an element of a binary set ${\mathcal Y} = \{0, 1\}$. The goal of a classifier is to predict the true value of $y$ after observing ${\bf x}$. We will denote as $\hat{y}$ the classifier output or *decision*. If $y=\hat{y}$, the decision is a *hit*, otherwise $y\neq \hat{y}$ and the decision is an *error*. Decision theory provides a solution to the classification problem in situations where the relation between instance ${\bf x}$ and its class $y$ is given by a known probabilistic model: assume that every tuple $({\bf x}, y)$ is an outcome of a random vector $({\bf X}, Y)$ with joint distribution $p_{{\bf X},Y}({\bf x}, y)$. A natural criteria for classification is to select predictor $\hat{Y}=f({\bf x})$ in such a way that the probability or error, $P\{\hat{Y} \neq Y\}$ is minimum. 
Noting that $$ P\{\hat{Y} \neq Y\} = \int P\{\hat{Y} \neq Y | {\bf x}\} p_{\bf X}({\bf x}) d{\bf x} $$ the optimal decision is got if, for every sample ${\bf x}$, we make decision minimizing the conditional error probability: \begin{align} \hat{y}^* &= \arg\min_{\hat{y}} P\{\hat{y} \neq Y |{\bf x}\} \\ &= \arg\max_{\hat{y}} P\{\hat{y} = Y |{\bf x}\} \\ \end{align} Thus, the optimal decision rule can be expressed as $$ P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad P_{Y|{\bf X}}(0|{\bf x}) $$ or, equivalently $$ P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2} $$ The classifier implementing this decision rule is usually named MAP (*Maximum A Posteriori*). As we have seen, the MAP classifier minimizes the error probability for binary classification, but the result can also be generalized to multiclass classification problems. ### 1.2. Parametric classification. Classical decision theory is grounded on the assumption that the probabilistic model relating the observed sample ${\bf X}$ and the true hypothesis $Y$ is known. Unfortunately, this is unrealistic in many applications, where the only available information to construct the classifier is a dataset $\mathcal D = \{{\bf x}^{(k)}, y^{(k)}\}_{k=0}^{K-1}$ of instances and their respective class labels. A more realistic formulation of the classification problem is the following: given a dataset $\mathcal D = \{({\bf x}^{(k)}, y^{(k)}) \in {\mathbb{R}}^N \times {\mathcal Y}, \, k=0,\ldots,{K-1}\}$ of independent and identically distributed (i.i.d.) samples from an ***unknown*** distribution $p_{{\bf X},Y}({\bf x}, y)$, predict the class $y$ of a new sample ${\bf x}$ with the minimum probability of error. Since the probabilistic model generating the data is unknown, the MAP decision rule cannot be applied. 
However, many classification algorithms use the dataset to obtain an estimate of the posterior class probabilities, and apply it to implement an approximation to the MAP decision maker. Parametric classifiers based on this idea assume, additionally, that the posterior class probabilty satisfies some parametric formula: $$ P_{Y|X}(1|{\bf x},{\bf w}) = f_{\bf w}({\bf x}) $$ where ${\bf w}$ is a vector of parameters. Given the expression of the MAP decision maker, classification consists in comparing the value of $f_{\bf w}({\bf x})$ with the threshold $\frac{1}{2}$, and each parameter vector would be associated to a different decision maker. In practice, the dataset ${\mathcal S}$ is used to select a particular parameter vector $\hat{\bf w}$ according to certain criterion. Accordingly, the decision rule becomes $$ f_{\hat{\bf w}}({\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2} $$ In this lesson, we explore one of the most popular model-based parametric classification methods: **logistic regression**. <img src="./figs/parametric_decision.png", width=400> ## 2. Logistic regression. ### 2.1. The logistic function The logistic regression model assumes that the binary class label $Y \in \{0,1\}$ of observation $X\in \mathbb{R}^N$ satisfies the expression. $$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g({\bf w}^\intercal{\bf x})$$ $$P_{Y|{\bf,X}}(0|{\bf x}, {\bf w}) = 1-g({\bf w}^\intercal{\bf x})$$ where ${\bf w}$ is a parameter vector and $g(·)$ is the *logistic* function, which is defined by $$g(t) = \frac{1}{1+\exp(-t)}$$ It is straightforward to see that the logistic function has the following properties: - **P1**: Probabilistic output: $\quad 0 \le g(t) \le 1$ - **P2**: Symmetry: $\quad g(-t) = 1-g(t)$ - **P3**: Monotonicity: $\quad g'(t) = g(t)·[1-g(t)] \ge 0$ In the following we define a logistic function in python, and use it to plot a graphical representation. **Exercise 1**: Verify properties P2 and P3. 
**Exercise 2**: Implement a function to compute the logistic function, and use it to plot that function in the interval $[-6,6]$.
Nonlinear classifiers. The logistic model can be extended to construct non-linear classifiers by using non-linear data transformations. A general form for a nonlinear logistic regression model is $$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g[{\bf w}^\intercal{\bf z}({\bf x})] $$ where ${\bf z}({\bf x})$ is an arbitrary nonlinear transformation of the original variables. The boundary decision in that case is given by equation $$ {\bf w}^\intercal{\bf z} = 0 $$ ** Exercise 2**: Modify the code above to generate a 3D surface plot of the polynomial logistic regression model given by $$ P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g(1 + 10 x_0 + 10 x_1 - 20 x_0^2 + 5 x_0 x_1 + x_1^2) $$ ``` # Weight vector: w = [1, 10, 10, -20, 5, 1] # Try different weights # Create a regtangular grid. x_min = -1 x_max = 1 dx = x_max - x_min h = float(dx) / 200 xgrid = np.arange(x_min, x_max, h) xx0, xx1 = np.meshgrid(xgrid, xgrid) # Compute the logistic map for the given weights # Z = <FILL IN> # Plot the logistic map fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper) plt.xlabel('$x_0$') plt.ylabel('$x_1$') ax.set_zlabel('P(1|x,w)') plt.show() CS = plt.contourf(xx0, xx1, Z) CS2 = plt.contour(CS, levels=[0.5], colors='m', linewidths=(3,)) plt.xlabel('$x_0$') plt.ylabel('$x_1$') plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() ``` ## 3. Inference Remember that the idea of parametric classification is to use the training data set $\mathcal D = \{({\bf x}^{(k)}, y^{(k)}) \in {\mathbb{R}}^N \times \{0,1\}, k=0,\ldots,{K-1}\}$ to set the parameter vector ${\bf w}$ according to certain criterion. Then, the estimate $\hat{\bf w}$ can be used to compute the label prediction for any new observation as $$\hat{y} = \arg\max_y P_{Y|{\bf X}}(y|{\bf x},\hat{\bf w}).$$ <img src="figs/parametric_decision.png", width=400> We need still to choose a criterion to optimize with the selection of the parameter vector. 
In the notebook, we will discuss two different approaches to the estimation of ${\bf w}$: * Maximum Likelihood (ML): $\hat{\bf w}_{\text{ML}} = \arg\max_{\bf w} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w})$ * Maximum *A Posteriori* (MAP): $\hat{\bf w}_{\text{MAP}} = \arg\max_{\bf w} p_{{\bf W}|{\mathcal D}}({\bf w}|{\mathcal D})$ For the mathematical derivation of the logistic regression algorithm, the following representation of the logistic model will be useful: noting that $$P_{Y|{\bf X}}(0|{\bf x}, {\bf w}) = 1-g[{\bf w}^\intercal{\bf z}({\bf x})] = g[-{\bf w}^\intercal{\bf z}({\bf x})]$$ we can write $$P_{Y|{\bf X}}(y|{\bf x}, {\bf w}) = g[\overline{y}{\bf w}^\intercal{\bf z}({\bf x})]$$ where $\overline{y} = 2y-1$ is a *symmetrized label* ($\overline{y}\in\{-1, 1\}$). ### 3.1. Model assumptions In the following, we will make the following assumptions: - **A1**. (Logistic Regression): We assume a logistic model for the *a posteriori* probability of ${Y=1}$ given ${\bf X}$, i.e., $$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g[{\bf w}^\intercal{\bf z}({\bf x})].$$ - **A2**. All samples in ${\mathcal D}$ have been generated by the same distribution, $p_{{\bf X}, Y}({\bf x}, y)$. - **A3**. Input variables $\bf x$ do not depend on $\bf w$. This implies that $$p({\bf x}|{\bf w}) = p({\bf x})$$ - **A4**. Targets $y^{(0)}, \cdots, y^{(K-1)}$ are statistically independent given $\bf w$ and the inputs ${\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}$, that is: $$p(y^{(0)}, \cdots, y^{(K-1)} | {\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) = \prod_{k=0}^{K-1} p(s^{(k)} | {\bf x}^{(k)}, {\bf w})$$ ### 3.2. ML estimation. 
The ML estimate is defined as $$\hat{\bf w}_{\text{ML}} = \arg\max_{\bf w} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w})$$ Ussing assumptions A2 and A3 above, we have that \begin{align} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w}) & = p(y^{(0)}, \cdots, y^{(K-1)},{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}| {\bf w}) \\ & = P(y^{(0)}, \cdots, y^{(K-1)}|{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) \; p({\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}| {\bf w}) \\ & = P(y^{(0)}, \cdots, y^{(K-1)}|{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) \; p({\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)})\end{align} Finally, using assumption A4, we can formulate the ML estimation of $\bf w$ as the resolution of the following optimization problem \begin{align} \hat {\bf w}_\text{ML} & = \arg \max_{\bf w} p(y^{(0)}, \cdots, y^{(K-1)}|{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) \\ & = \arg \max_{\bf w} \prod_{k=0}^{K-1} P(y^{(k)}|{\bf x}^{(k)}, {\bf w}) \\ & = \arg \max_{\bf w} \sum_{k=0}^{K-1} \log P(y^{(k)}|{\bf x}^{(k)}, {\bf w}) \\ & = \arg \min_{\bf w} \sum_{k=0}^{K-1} - \log P(y^{(k)}|{\bf x}^{(k)}, {\bf w}) \end{align} where the arguments of the maximization or minimization problems of the last three lines are usually referred to as the **likelihood**, **log-likelihood** $\left[L(\bf w)\right]$, and **negative log-likelihood** $\left[\text{NLL}(\bf w)\right]$, respectively. Now, using A1 (the logistic model) \begin{align} \text{NLL}({\bf w}) &= - \sum_{k=0}^{K-1}\log\left[g\left(\overline{y}^{(k)}{\bf w}^\intercal {\bf z}^{(k)}\right)\right] \\ &= \sum_{k=0}^{K-1}\log\left[1+\exp\left(-\overline{y}^{(k)}{\bf w}^\intercal {\bf z}^{(k)}\right)\right] \end{align} where ${\bf z}^{(k)}={\bf z}({\bf x}^{(k)})$. It can be shown that $\text{NLL}({\bf w})$ is a convex and differentiable function of ${\bf w}$. Therefore, its minimum is a point with zero gradient. 
\begin{align} \nabla_{\bf w} \text{NLL}(\hat{\bf w}_{\text{ML}}) &= - \sum_{k=0}^{K-1} \frac{\exp\left(-\overline{y}^{(k)}\hat{\bf w}_{\text{ML}}^\intercal {\bf z}^{(k)}\right) \overline{y}^{(k)} {\bf z}^{(k)}} {1+\exp\left(-\overline{y}^{(k)}\hat{\bf w}_{\text{ML}}^\intercal {\bf z}^{(k)} \right)} = \\ &= - \sum_{k=0}^{K-1} \left[y^{(k)}-g(\hat{\bf w}_{\text{ML}}^T {\bf z}^{(k)})\right] {\bf z}^{(k)} = 0 \end{align} Unfortunately, $\hat{\bf w}_{\text{ML}}$ cannot be taken out from the above equation, and some iterative optimization algorithm must be used to search for the minimum. ### 3.2. Gradient descent. A simple iterative optimization algorithm is <a href = https://en.wikipedia.org/wiki/Gradient_descent> gradient descent</a>. \begin{align} {\bf w}_{n+1} = {\bf w}_n - \rho_n \nabla_{\bf w} L({\bf w}_n) \end{align} where $\rho_n >0$ is the *learning step*. Applying the gradient descent rule to logistic regression, we get the following algorithm: \begin{align} {\bf w}_{n+1} &= {\bf w}_n + \rho_n \sum_{k=0}^{K-1} \left[y^{(k)}-g({\bf w}_n^\intercal {\bf z}^{(k)})\right] {\bf z}^{(k)} \end{align} Defining vectors \begin{align} {\bf y} &= [y^{(0)},\ldots,y^{(K-1)}]^\intercal \\ \hat{\bf p}_n &= [g({\bf w}_n^\intercal {\bf z}^{(0)}), \ldots, g({\bf w}_n^\intercal {\bf z}^{(K-1)})]^\intercal \end{align} and matrix \begin{align} {\bf Z} = \left[{\bf z}^{(0)},\ldots,{\bf z}^{(K-1)}\right]^\intercal \end{align} we can write \begin{align} {\bf w}_{n+1} &= {\bf w}_n + \rho_n {\bf Z}^\intercal \left({\bf y}-\hat{\bf p}_n\right) \end{align} In the following, we will explore the behavior of the gradient descend method using the Iris Dataset. #### 3.2.1 Example: Iris Dataset. As an illustration, consider the <a href = http://archive.ics.uci.edu/ml/datasets/Iris> Iris dataset </a>, taken from the <a href=http://archive.ics.uci.edu/ml/> UCI Machine Learning repository</a>. 
This data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant (*setosa*, *versicolor* or *virginica*). Each instance contains 4 measurements of given flowers: sepal length, sepal width, petal length and petal width, all in centimeters. We will try to fit the logistic regression model to discriminate between two classes using only two attributes. First, we load the dataset and split them in training and test subsets. ``` # Adapted from a notebook by Jason Brownlee def loadDataset(filename, split): xTrain = [] cTrain = [] xTest = [] cTest = [] with open(filename, 'r') as csvfile: lines = csv.reader(csvfile) dataset = list(lines) for i in range(len(dataset)-1): for y in range(4): dataset[i][y] = float(dataset[i][y]) item = dataset[i] if random.random() < split: xTrain.append(item[0:4]) cTrain.append(item[4]) else: xTest.append(item[0:4]) cTest.append(item[4]) return xTrain, cTrain, xTest, cTest xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66) nTrain_all = len(xTrain_all) nTest_all = len(xTest_all) print('Train:', nTrain_all) print('Test:', nTest_all) ``` Now, we select two classes and two attributes. ``` # Select attributes i = 0 # Try 0,1,2,3 j = 1 # Try 0,1,2,3 with j!=i # Select two classes c0 = 'Iris-versicolor' c1 = 'Iris-virginica' # Select two coordinates ind = [i, j] # Take training test X_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all) if cTrain_all[n]==c0 or cTrain_all[n]==c1]) C_tr = [cTrain_all[n] for n in range(nTrain_all) if cTrain_all[n]==c0 or cTrain_all[n]==c1] Y_tr = np.array([int(c==c1) for c in C_tr]) n_tr = len(X_tr) # Take test set X_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all) if cTest_all[n]==c0 or cTest_all[n]==c1]) C_tst = [cTest_all[n] for n in range(nTest_all) if cTest_all[n]==c0 or cTest_all[n]==c1] Y_tst = np.array([int(c==c1) for c in C_tst]) n_tst = len(X_tst) ``` #### 3.2.2. 
Data normalization Normalization of data is a common pre-processing step in many machine learning algorithms. Its goal is to get a dataset where all input coordinates have a similar scale. Learning algorithms usually show less instabilities and convergence problems when data are normalized. We will define a normalization function that returns a training data matrix with zero sample mean and unit sample variance. ``` def normalize(X, mx=None, sx=None): # Compute means and standard deviations if mx is None: mx = np.mean(X, axis=0) if sx is None: sx = np.std(X, axis=0) # Normalize X0 = (X-mx)/sx return X0, mx, sx ``` Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set. ``` # Normalize data Xn_tr, mx, sx = normalize(X_tr) Xn_tst, mx, sx = normalize(X_tst, mx, sx) ``` The following figure generates a plot of the normalized training data. ``` # Separate components of x into different arrays (just for the plots) x0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0] x1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0] x0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1] x1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1] # Scatterplot. labels = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'} plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.legend(loc='best') plt.axis('equal') plt.show() ``` In order to apply the gradient descent rule, we need to define two methods: - A `fit` method, that receives the training data and returns the model weights and the value of the negative log-likelihood during all iterations. 
- A `predict` method, that receives the model weight and a set of inputs, and returns the posterior class probabilities for that input, as well as their corresponding class predictions. ``` def logregFit(Z_tr, Y_tr, rho, n_it): # Data dimension n_dim = Z_tr.shape[1] # Initialize variables nll_tr = np.zeros(n_it) pe_tr = np.zeros(n_it) Y_tr2 = 2*Y_tr - 1 # Transform labels into binary symmetric. w = np.random.randn(n_dim,1) # Running the gradient descent algorithm for n in range(n_it): # Compute posterior probabilities for weight w p1_tr = logistic(np.dot(Z_tr, w)) # Compute negative log-likelihood # (note that this is not required for the weight update, only for nll tracking) nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w)))) # Update weights w += rho*np.dot(Z_tr.T, Y_tr - p1_tr) return w, nll_tr def logregPredict(Z, w): # Compute posterior probability of class 1 for weights w. p = logistic(np.dot(Z, w)).flatten() # Class D = [int(round(pn)) for pn in p] return p, D ``` We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\bf z}({\bf x}) = (1, {\bf x}^\intercal)^\intercal$. ``` # Parameters of the algorithms rho = float(1)/50 # Learning step n_it = 200 # Number of iterations # Compute Z's Z_tr = np.c_[np.ones(n_tr), Xn_tr] Z_tst = np.c_[np.ones(n_tst), Xn_tst] n_dim = Z_tr.shape[1] # Convert target arrays to column vectors Y_tr2 = Y_tr[np.newaxis].T Y_tst2 = Y_tst[np.newaxis].T # Running the gradient descent algorithm w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it) # Classify training and test data p_tr, D_tr = logregPredict(Z_tr, w) p_tst, D_tst = logregPredict(Z_tst, w) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst # NLL plot. 
plt.plot(range(n_it), nll_tr,'b.:', label='Train') plt.xlabel('Iteration') plt.ylabel('Negative Log-Likelihood') plt.legend() print('The optimal weights are:') print(w) print('The final error rates are:') print('- Training:', pe_tr) print('- Test:', pe_tst) print('The NLL after training is', nll_tr[len(nll_tr)-1]) ``` #### 3.2.3. Free parameters Under certain conditions, the gradient descent method can be shown to converge asymptotically (i.e. as the number of iterations goes to infinity) to the ML estimate of the logistic model. However, in practice, the final estimate of the weights ${\bf w}$ depend on several factors: - Number of iterations - Initialization - Learning step **Exercise**: Visualize the variability of gradient descent caused by initializations. To do so, fix the number of iterations to 200 and the learning step, and execute the gradient descent 100 times, storing the training error rate of each execution. Plot the histogram of the error rate values. Note that you can do this exercise with a loop over the 100 executions, including the code in the previous code slide inside the loop, with some proper modifications. To plot a histogram of the values in array `p` with `n`bins, you can use `plt.hist(p, n)` ##### 3.2.3.1. Learning step The learning step, $\rho$, is a free parameter of the algorithm. Its choice is critical for the convergence of the algorithm. Too large values of $\rho$ make the algorithm diverge. For too small values, the convergence gets very slow and more iterations are required for a good convergence. **Exercise 3**: Observe the evolution of the negative log-likelihood with the number of iterations for different values of $\rho$. It is easy to check that, for large enough $\rho$, the gradient descent method does not converge. Can you estimate (through manual observation) an approximate value of $\rho$ stating a boundary between convergence and divergence? 
**Exercise 4**: In this exercise we explore the influence of the learning step more systematically. Use the code in the previous exercises to compute, for every value of $\rho$, the average error rate over 100 executions.
pp, dd = logregPredict(Z_grid, w) # Paint output maps pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size # Put the result into a color plot plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.legend(loc='best') plt.axis('equal') pp = pp.reshape(xx.shape) CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper) plt.contour(xx, yy, pp, levels=[0.5], colors='b', linewidths=(3,)) plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() ``` #### 3.2.5. Polynomial Logistic Regression The error rates of the logistic regression model can be potentially reduced by using polynomial transformations. To compute the polynomial transformation up to a given degree, we can use the `PolynomialFeatures` method in `sklearn.preprocessing`. ``` # Parameters of the algorithms rho = float(1)/50 # Learning step n_it = 500 # Number of iterations g = 5 # Degree of polynomial # Compute Z_tr poly = PolynomialFeatures(degree=g) Z_tr = poly.fit_transform(Xn_tr) # Normalize columns (this is useful to make algorithms more stable).) Zn, mz, sz = normalize(Z_tr[:,1:]) Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1) # Compute Z_tst Z_tst = poly.fit_transform(Xn_tst) Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz) Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1) # Convert target arrays to column vectors Y_tr2 = Y_tr[np.newaxis].T Y_tst2 = Y_tst[np.newaxis].T # Running the gradient descent algorithm w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it) # Classify training and test data p_tr, D_tr = logregPredict(Z_tr, w) p_tst, D_tst = logregPredict(Z_tst, w) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst # NLL plot. 
plt.plot(range(n_it), nll_tr,'b.:', label='Train') plt.xlabel('Iteration') plt.ylabel('Negative Log-Likelihood') plt.legend() print('The optimal weights are:') print(w) print('The final error rates are:') print('- Training:', pe_tr) print('- Test:', pe_tst) print('The NLL after training is', nll_tr[len(nll_tr)-1]) ``` Visualizing the posterior map we can se that the polynomial transformation produces nonlinear decision boundaries. ``` # Compute Z_grid Z_grid = poly.fit_transform(X_grid) n_grid = Z_grid.shape[0] Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz) Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1) # Compute the classifier output for all samples in the grid. pp, dd = logregPredict(Z_grid, w) pp = pp.reshape(xx.shape) # Paint output maps pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.axis('equal') plt.legend(loc='best') CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper) plt.contour(xx, yy, pp, levels=[0.5], colors='b', linewidths=(3,)) plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() ``` ## 4. Regularization and MAP estimation. An alternative to the ML estimation of the weights in logistic regression is Maximum A Posteriori estimation. Modelling the logistic regression weights as a random variable with prior distribution $p_{\bf W}({\bf w})$, the MAP estimate is defined as $$ \hat{\bf w}_{\text{MAP}} = \arg\max_{\bf w} p({\bf w}|{\mathcal D}) $$ The posterior density $p({\bf w}|{\mathcal D})$ is related to the likelihood function and the prior density of the weights, $p_{\bf W}({\bf w})$ through the Bayes rule $$ p({\bf w}|{\mathcal D}) = \frac{P\left({\mathcal D}|{\bf w}\right) \; p_{\bf W}({\bf w})} {p\left({\mathcal D}\right)} $$ In general, the denominator in this expression cannot be computed analytically. 
However, it is not required for MAP estimation because it does not depend on ${\bf w}$. Therefore, the MAP solution is given by \begin{align} \hat{\bf w}_{\text{MAP}} & = \arg\max_{\bf w} P\left({\mathcal D}|{\bf w}\right) \; p_{\bf W}({\bf w}) \\ & = \arg\max_{\bf w} \left\{ L({\mathbf w}) + \log p_{\bf W}({\bf w})\right\} \\ & = \arg\min_{\bf w} \left\{ \text{NLL}({\mathbf w}) - \log p_{\bf W}({\bf w})\right\} \end{align} In the light of this expression, we can conclude that the MAP solution is affected by two terms: - The likelihood, which takes large values for parameter vectors $\bf w$ that fit well the training data - The prior distribution of weights $p_{\bf W}({\bf w})$, which expresses our *a priori* preference for some solutions. Usually, we recur to prior distributions that take large values when $\|{\bf w}\|$ is small (associated to smooth classification borders). We can check that the MAP criterion adds a penalty term to the ML objective, that penalizes parameter vectors for which the prior distribution of weights takes small values. ### 4.1 MAP estimation with Gaussian prior If we assume that ${\bf W}$ is a zero-mean Gaussian random variable with variance matrix $v{\bf I}$, $$ p_{\bf W}({\bf w}) = \frac{1}{(2\pi v)^{N/2}} \exp\left(-\frac{1}{2v}\|{\bf w}\|^2\right) $$ the MAP estimate becomes \begin{align} \hat{\bf w}_{\text{MAP}} &= \arg\min_{\bf w} \left\{L({\bf w}) + \frac{1}{C}\|{\bf w}\|^2 \right\} \end{align} where $C = 2v$. 
Noting that $$\nabla_{\bf w}\left\{L({\bf w}) + \frac{1}{C}\|{\bf w}\|^2\right\} = - {\bf Z} \left({\bf y}-\hat{\bf p}_n\right) + \frac{2}{C}{\bf w}, $$ we obtain the following gradient descent rule for MAP estimation \begin{align} {\bf w}_{n+1} &= \left(1-\frac{2\rho_n}{C}\right){\bf w}_n + \rho_n {\bf Z} \left({\bf y}-\hat{\bf p}_n\right) \end{align} ### 4.2 MAP estimation with Laplacian prior If we assume that ${\bf W}$ follows a multivariate zero-mean Laplacian distribution given by $$ p_{\bf W}({\bf w}) = \frac{1}{(2 C)^{N}} \exp\left(-\frac{1}{C}\|{\bf w}\|_1\right) $$ (where $\|{\bf w}\|=|w_1|+\ldots+|w_N|$ is the $L_1$ norm of ${\bf w}$), the MAP estimate is \begin{align} \hat{\bf w}_{\text{MAP}} &= \arg\min_{\bf w} \left\{L({\bf w}) + \frac{1}{C}\|{\bf w}\|_1 \right\} \end{align} The additional term introduced by the prior in the optimization algorithm is usually named the *regularization term*. It is usually very effective to avoid overfitting when the dimension of the weight vectors is high. Parameter $C$ is named the *inverse regularization strength*. **Exercise 5**: Derive the gradient descent rules for MAP estimation of the logistic regression weights with Laplacian prior. ## 5. Other optimization algorithms ### 5.1. Stochastic Gradient descent. Stochastic gradient descent (SGD) is based on the idea of using a single sample at each iteration of the learning algorithm. The SGD rule for ML logistic regression is \begin{align} {\bf w}_{n+1} &= {\bf w}_n + \rho_n {\bf z}^{(n)} \left(y^{(n)}-\hat{p}^{(n)}_n\right) \end{align} Once all samples in the training set have been applied, the algorith can continue by applying the training set several times. The computational cost of each iteration of SGD is much smaller than that of gradient descent, though it usually needs more iterations to converge. **Exercise 6**: Modify logregFit to implement an algorithm that applies the SGD rule. ### 5.2. 
Newton's method Assume that the function to be minimized, $C({\bf w})$, can be approximated by its second order Taylor series expansion around ${\bf w}_0$ $$ C({\bf w}) \approx C({\bf w}_0) + \nabla_{\bf w}^\intercal C({\bf w}_0)({\bf w}-{\bf w}_0) + \frac{1}{2}({\bf w}-{\bf w}_0)^\intercal{\bf H}({\bf w}_0)({\bf w}-{\bf w}_0) $$ where ${\bf H}({\bf w}_k)$ is the <a href=https://en.wikipedia.org/wiki/Hessian_matrix> *Hessian* matrix</a> of $C$ at ${\bf w}_k$. Taking the gradient of $C({\bf w})$, and setting the result to ${\bf 0}$, the minimum of C around ${\bf w}_0$ can be approximated as $$ {\bf w}^* = {\bf w}_0 - {\bf H}({\bf w}_0)^{-1} \nabla_{\bf w}^\intercal C({\bf w}_0) $$ Since the second order polynomial is only an approximation to $C$, ${\bf w}^*$ is only an approximation to the optimal weight vector, but we can expect ${\bf w}^*$ to be closer to the minimizer of $C$ than ${\bf w}_0$. Thus, we can repeat the process, computing a second order approximation around ${\bf w}^*$ and a new approximation to the minimizer. <a href=https://en.wikipedia.org/wiki/Newton%27s_method_in_optimization> Newton's method</a> is based on this idea. At each optization step, the function to be minimized is approximated by a second order approximation using a Taylor series expansion around the current estimate. 
As a result, the learning rule becomes
Zn, mz, sz = normalize(Z_tr[:,1:]) Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1) # Compute Z_tst Z_tst = poly.fit_transform(X_tst) Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz) Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1) # Convert target arrays to column vectors Y_tr2 = Y_tr[np.newaxis].T Y_tst2 = Y_tst[np.newaxis].T # Running the gradient descent algorithm w, nll_tr = logregFit2(Z_tr, Y_tr2, rho, n_it, C) # Classify training and test data p_tr, D_tr = logregPredict(Z_tr, w) p_tst, D_tst = logregPredict(Z_tst, w) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst # NLL plot. plt.plot(range(n_it), nll_tr,'b.:', label='Train') plt.xlabel('Iteration') plt.ylabel('Negative Log-Likelihood') plt.legend() print('The final error rates are:') print('- Training:', str(pe_tr)) print('- Test:', str(pe_tst)) print('The NLL after training is:', str(nll_tr[len(nll_tr)-1])) ``` ## 6. Logistic regression in Scikit Learn. The <a href="http://scikit-learn.org/stable/"> scikit-learn </a> package includes an efficient implementation of <a href="http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression"> logistic regression</a>. To use it, we must first create a classifier object, specifying the parameters of the logistic regression algorithm. ``` # Create a logistic regression object. LogReg = linear_model.LogisticRegression(C=1.0) # Compute Z_tr poly = PolynomialFeatures(degree=g) Z_tr = poly.fit_transform(Xn_tr) # Normalize columns (this is useful to make algorithms more stable).) Zn, mz, sz = normalize(Z_tr[:,1:]) Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1) # Compute Z_tst Z_tst = poly.fit_transform(Xn_tst) Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz) Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1) # Fit model to data. 
LogReg.fit(Z_tr, Y_tr) # Classify training and test data D_tr = LogReg.predict(Z_tr) D_tst = LogReg.predict(Z_tst) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst print('The final error rates are:') print('- Training:', str(pe_tr)) print('- Test:', str(pe_tst)) # Compute Z_grid Z_grid = poly.fit_transform(X_grid) n_grid = Z_grid.shape[0] Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz) Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1) # Compute the classifier output for all samples in the grid. dd = LogReg.predict(Z_grid) pp = LogReg.predict_proba(Z_grid)[:,1] pp = pp.reshape(xx.shape) # Paint output maps pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.axis('equal') plt.contourf(xx, yy, pp, cmap=plt.cm.copper) plt.legend(loc='best') plt.contour(xx, yy, pp, levels=[0.5], colors='b', linewidths=(3,)) plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() ```
github_jupyter
# Installation Make sure to have all the required software installed after proceeding. For installation help, please consult the [school guide](http://lxmls.it.pt/2018/LxMLS_guide_2018.pdf). # Python Basics ``` print('Hello World!') ``` We could also have this code in a separate file and still run it on a notebook like this: `run ./my_file.py` ### Basic Math Operations ``` print(3 + 5) print(3 - 5) print(3 * 5) print(3 ** 5) # Observation: this code gives different results for python2 and python3 # because of the behaviour for the division operator print(3 / 5.0) print(3 / 5) # for compatibility, make sure to use the follow statement from __future__ import division print(3 / 5.0) print(3 / 5) ``` In this case I'm using Python3 but in Python2, to force floating point division instead of the default integer division, we would have to force at least one of the operands to be a floating point number, like we can see above: `print(3 / 5.0)` ### Data Strutures ``` countries = ['Portugal','Spain','United Kingdom'] print(countries) ``` ## Exercise 0.1 Use L[i:j] to return the countries in the Iberian Peninsula. ``` countries[0:2] ``` Forgetting the `0` also does the trick. ``` countries[:2] ``` ### Loops and Indentation ``` i = 2 while i < 10: print(i) i += 2 for i in range(2,10,2): print(i) a=1 while a <= 3: print(a) a += 1 ``` ## Exercise 0.2 Can you then predict the output of the following code? 
Yes, the following code results in an infinite loop that prints 1 every time
``` ### Organizing your Code with your own modules See details in [guide](http://lxmls.it.pt/2018/LxMLS_guide_2018.pdf) ## Matplotlib – Plotting in Python ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline X = np.linspace(-4, 4, 1000) plt.plot(X, X**2*np.cos(X**2)) plt.savefig("simple.pdf") ``` ## Exercise 0.5 Try running the following on Jupyter, which will introduce you to some of the basic numeric and plotting operations. ``` # This will import the numpy library # and give it the np abbreviation import numpy as np # This will import the plotting library import matplotlib.pyplot as plt # Linspace will return 1000 points, # evenly spaced between -4 and +4 X = np.linspace(-4, 4, 1000) # Y[i] = X[i]**2 Y = X**2 # Plot using a red line ('r') plt.plot(X, Y, 'r') # arange returns integers ranging from -4 to +4 # (the upper argument is excluded!) Ints = np.arange(-4,5) # We plot these on top of the previous plot # using blue circles (o means a little circle) plt.plot(Ints, Ints**2, 'bo') # You may notice that the plot is tight around the line # Set the display limits to see better plt.xlim(-4.5,4.5) plt.ylim(-1,17) plt.show() import matplotlib.pyplot as plt import numpy as np X = np.linspace(0, 4 * np.pi, 1000) C = np.cos(X) S = np.sin(X) plt.plot(X, C) plt.plot(X, S) plt.show() ``` # Exercise 0.6 Run the following example and lookup the ptp function/method (use the ? functionality in Jupyter) ``` A = np.arange(100) # These two lines do exactly the same thing print(np.mean(A)) print(A.mean()) np.ptp? ``` # Exercise 0.7 Consider the following approximation to compute an integral \begin{equation*} \int_0^1 f(x) dx \approx \sum_{i=0}^{999} \frac{f(i/1000)}{1000} \end{equation*} Use numpy to implement this for $f(x) = x^2$. You should not need to use any loops. Note that integer division in Python 2.x returns the floor division (use floats – e.g. 5.0/2.0 – to obtain rationals). The exact value is 1/3. How close is the approximation? 
``` def f(x): return(x**2) sum([f(x*1./1000)/1000 for x in range(0,1000)]) ``` # Exercise 0.8 In the rest of the school we will represent both matrices and vectors as numpy arrays. You can create arrays in different ways, one possible way is to create an array of zeros. ``` import numpy as np m = 3 n = 2 a = np.zeros([m,n]) print(a) ``` You can check the shape and the data type of your array using the following commands: ``` print(a.shape) print(a.dtype.name) ``` This shows you that “a” is an 3*2 array of type float64. By default, arrays contain 64 bit6 floating point numbers. You can specify the particular array type by using the keyword dtype. ``` a = np.zeros([m,n], dtype=int) print(a.dtype) ``` You can also create arrays from lists of numbers: ``` a = np.array([[2,3],[3,4]]) print(a) ``` # Exercise 0.9 You can multiply two matrices by looping over both indexes and multiplying the individual entries. ``` a = np.array([[2,3],[3,4]]) b = np.array([[1,1],[1,1]]) a_dim1, a_dim2 = a.shape b_dim1, b_dim2 = b.shape c = np.zeros([a_dim1,b_dim2]) for i in range(a_dim1): for j in range(b_dim2): for k in range(a_dim2): c[i,j] += a[i,k]*b[k,j] print(c) ``` This is, however, cumbersome and inefficient. Numpy supports matrix multiplication with the dot function: ``` d = np.dot(a,b) print(d) a = np.array([1,2]) b = np.array([1,1]) np.dot(a,b) np.outer(a,b) I = np.eye(2) x = np.array([2.3, 3.4]) print(I) print(np.dot(I,x)) A = np.array([ [1, 2], [3, 4] ]) print(A) print(A.T) ```
github_jupyter
TSG086 - Run `top` in all containers ==================================== Steps ----- ### Instantiate Kubernetes client ``` # Instantiate the Python Kubernetes client into 'api' variable import os from IPython.display import Markdown try: from kubernetes import client, config from kubernetes.stream import stream if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ: config.load_incluster_config() else: try: config.load_kube_config() except: display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.')) raise api = client.CoreV1Api() print('Kubernetes client instantiated') except ImportError: display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.')) raise ``` ### Get the namespace for the big data cluster Get the namespace of the Big Data Cluster from the Kuberenetes API. **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. 
``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name except IndexError: from IPython.display import Markdown display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print('The kubernetes namespace for your big data cluster is: ' + namespace) ``` ### Run top in each container ``` cmd = "top -b -n 1" pod_list = api.list_namespaced_pod(namespace) pod_names = [pod.metadata.name for pod in pod_list.items] for pod in pod_list.items: container_names = [container.name for container in pod.spec.containers] for container in container_names: print (f"CONTAINER: {container} / POD: {pod.metadata.name}") try: print(stream(api.connect_get_namespaced_pod_exec, pod.metadata.name, namespace, command=['/bin/sh', '-c', cmd], container=container, stderr=True, stdout=True)) except Exception as err: print (f"Failed to get run 'top' for container: {container} in pod: {pod.metadata.name}. Error: {err}") print('Notebook execution complete.') ```
github_jupyter
``` import pandas as pd data_folder = '..\\\\..\\\\..\\\\..\\\\Google Drive\\\\datasets\\\\mf_data_kaggle\\\\' def flip_cols2rows(src_df, col_list, item_category, key_col): df1 = pd.DataFrame() for i in col_list: df2 = src_df[[key_col, i]].rename(columns = {key_col: key_col, i: 'value'}).assign(item_desc=i) df1 = pd.concat([df1, df2], ignore_index=True) df1['item_category'] = item_category df1 = df1[['fund_symbol', 'item_category', 'item_desc', 'value']] return df1 def write2disk(df, out_filename): df.to_parquet(data_folder+out_filename+'.parquet', engine='auto', compression='snappy',index=False) df.to_csv(data_folder+out_filename+'.csv', index=False) def print_df_details(df): print(df.shape[0]) print() print(df.dropna(axis=0).head()) mf_df = pd.read_parquet(data_folder+'mf.parquet') print_df_details(mf_df) key_column = "fund_symbol" week_52 = [ "week52_high_low_change", "week52_high_low_change_perc", "week52_high", "week52_high_change", "week52_high_change_perc", "week52_low", "week52_low_change", "week52_low_change_perc" ] exp_ratio = [ "fund_annual_report_net_expense_ratio", "category_annual_report_net_expense_ratio", "fund_prospectus_net_expense_ratio", "fund_prospectus_gross_expense_ratio", "fund_max_12b1_fee", "fund_max_front_end_sales_load", "category_max_front_end_sales_load", "fund_max_deferred_sales_load", "category_max_deferred_sales_load", "fund_year3_expense_projection", "fund_year5_expense_projection", "fund_year10_expense_projection" ] asset_allocation = [ "asset_cash", "asset_stocks", "asset_bonds", "asset_others", "asset_preferred", "asset_convertible" ] sector_allocation = [ "fund_sector_basic_materials", "fund_sector_communication_services", "fund_sector_consumer_cyclical", "fund_sector_consumer_defensive", "fund_sector_energy", "fund_sector_financial_services", "fund_sector_healthcare", "fund_sector_industrials", "fund_sector_real_estate", "fund_sector_technology", "fund_sector_utilities" ] ratio = [ "fund_price_book_ratio", 
"category_price_book_ratio", "fund_price_cashflow_ratio", "category_price_cashflow_ratio", "fund_price_earning_ratio", "category_price_earning_ratio", "fund_price_sales_ratio", "category_price_sales_ratio", "fund_median_market_cap", "category_median_market_cap", "fund_year3_earnings_growth", "category_year3_earnings_growth", "fund_bond_maturity", "category_bond_maturity", "fund_bond_duration", "category_bond_duration" ] bond_rating = [ "fund_bonds_us_government", "fund_bonds_aaa", "fund_bonds_aa", "fund_bonds_a", "fund_bonds_bbb", "fund_bonds_bb", "fund_bonds_b", "fund_bonds_below_b", "fund_bonds_others" ] returns = [ "fund_return_ytd", "category_return_ytd", "fund_return_1month", "category_return_1month", "fund_return_3months", "category_return_3months", "fund_return_1year", "category_return_1year", "fund_return_3years", "category_return_3years", "fund_return_5years", "category_return_5years", "fund_return_10years", "category_return_10years", "fund_return_last_bull_market", "category_return_last_bull_market", "fund_return_last_bear_market", "category_return_last_bear_market", "years_up", "years_down", "quarters_up", "quarters_down" ] annual_returns = [ "fund_return_2020", "category_return_2020", "fund_return_2019", "category_return_2019", "fund_return_2018", "category_return_2018", "fund_return_2017", "category_return_2017", "fund_return_2016", "category_return_2016", "fund_return_2015", "category_return_2015", "fund_return_2014", "category_return_2014", "fund_return_2013", "category_return_2013", "fund_return_2012", "category_return_2012", "fund_return_2011", "category_return_2011", "fund_return_2010", "category_return_2010", "fund_return_2009", "category_return_2009", "fund_return_2008", "category_return_2008", "fund_return_2007", "category_return_2007", "fund_return_2006", "category_return_2006", "fund_return_2005", "category_return_2005", "fund_return_2004", "category_return_2004", "fund_return_2003", "category_return_2003", "fund_return_2002", 
"category_return_2002", "fund_return_2001", "category_return_2001", "fund_return_2000", "category_return_2000" ] quarterly_returns = [ "fund_return_2021_q3", "fund_return_2021_q2", "fund_return_2021_q1", "fund_return_2020_q4", "fund_return_2020_q3", "fund_return_2020_q2", "fund_return_2020_q1", "fund_return_2019_q4", "fund_return_2019_q3", "fund_return_2019_q2", "fund_return_2019_q1", "fund_return_2018_q4", "fund_return_2018_q3", "fund_return_2018_q2", "fund_return_2018_q1", "fund_return_2017_q4", "fund_return_2017_q3", "fund_return_2017_q2", "fund_return_2017_q1", "fund_return_2016_q4", "fund_return_2016_q3", "fund_return_2016_q2", "fund_return_2016_q1", "fund_return_2015_q4", "fund_return_2015_q3", "fund_return_2015_q2", "fund_return_2015_q1", "fund_return_2014_q4", "fund_return_2014_q3", "fund_return_2014_q2", "fund_return_2014_q1", "fund_return_2013_q4", "fund_return_2013_q3", "fund_return_2013_q2", "fund_return_2013_q1", "fund_return_2012_q4", "fund_return_2012_q3", "fund_return_2012_q2", "fund_return_2012_q1", "fund_return_2011_q4", "fund_return_2011_q3", "fund_return_2011_q2", "fund_return_2011_q1", "fund_return_2010_q4", "fund_return_2010_q3", "fund_return_2010_q2", "fund_return_2010_q1", "fund_return_2009_q4", "fund_return_2009_q3", "fund_return_2009_q2", "fund_return_2009_q1", "fund_return_2008_q4", "fund_return_2008_q3", "fund_return_2008_q2", "fund_return_2008_q1", "fund_return_2007_q4", "fund_return_2007_q3", "fund_return_2007_q2", "fund_return_2007_q1", "fund_return_2006_q4", "fund_return_2006_q3", "fund_return_2006_q2", "fund_return_2006_q1", "fund_return_2005_q4", "fund_return_2005_q3", "fund_return_2005_q2", "fund_return_2005_q1", "fund_return_2004_q4", "fund_return_2004_q3", "fund_return_2004_q2", "fund_return_2004_q1", "fund_return_2003_q4", "fund_return_2003_q3", "fund_return_2003_q2", "fund_return_2003_q1", "fund_return_2002_q4", "fund_return_2002_q3", "fund_return_2002_q2", "fund_return_2002_q1", "fund_return_2001_q4", "fund_return_2001_q3", 
"fund_return_2001_q2", "fund_return_2001_q1", "fund_return_2000_q4", "fund_return_2000_q3", "fund_return_2000_q2", "fund_return_2000_q1" ] alpha_beta = [ "fund_alpha_3years", "fund_beta_3years", "fund_mean_annual_return_3years", "fund_r_squared_3years", "fund_stdev_3years", "fund_sharpe_ratio_3years", "fund_treynor_ratio_3years", "fund_alpha_5years", "fund_beta_5years", "fund_mean_annual_return_5years", "fund_r_squared_5years", "fund_stdev_5years", "fund_sharpe_ratio_5years", "fund_treynor_ratio_5years", "fund_alpha_10years", "fund_beta_10years", "fund_mean_annual_return_10years", "fund_r_squared_10years", "fund_stdev_10years", "fund_sharpe_ratio_10years", "fund_treynor_ratio_10years", "fund_return_category_rank_ytd", "fund_return_category_rank_1month", "fund_return_category_rank_3months", "fund_return_category_rank_1year", "fund_return_category_rank_3years", "fund_return_category_rank_5years", "load_adj_return_1year", "load_adj_return_3years", "load_adj_return_5years", "load_adj_return_10years" ] # Converting week_52 columns week_52_df = flip_cols2rows(src_df=mf_df, col_list=week_52, item_category='Week 52', key_col=key_column) print_df_details(week_52_df) write2disk(week_52_df, 'mfd_week_52') # Converting exp_ratio columns exp_ratio_df = flip_cols2rows(src_df=mf_df, col_list=exp_ratio, item_category='Expense Ratio', key_col=key_column) print_df_details(exp_ratio_df) write2disk(exp_ratio_df, 'mfd_exp_ratio') asset_allocation_df = flip_cols2rows(src_df=mf_df, col_list=asset_allocation, item_category='Asset Allocation', key_col=key_column) print_df_details(asset_allocation_df) write2disk(asset_allocation_df, 'mfd_asset_allocation') sector_allocation_df = flip_cols2rows(src_df=mf_df, col_list=sector_allocation, item_category='Sector Allocation', key_col=key_column) print_df_details(sector_allocation_df) write2disk(sector_allocation_df, 'mfd_sector_allocation') ratio_df = flip_cols2rows(src_df=mf_df, col_list=ratio, item_category='Ratio', key_col=key_column) 
print_df_details(ratio_df) write2disk(ratio_df, 'mfd_ratio') bond_rating_df = flip_cols2rows(src_df=mf_df, col_list=bond_rating, item_category='Bond Rating', key_col=key_column) print_df_details(bond_rating_df) write2disk(bond_rating_df, 'mfd_bond_rating') returns_df = flip_cols2rows(src_df=mf_df, col_list=returns, item_category='Return', key_col=key_column) print_df_details(returns_df) returns_df.info() write2disk(returns_df, 'mfd_returns') annual_returns_df = flip_cols2rows(src_df=mf_df, col_list=annual_returns, item_category='Annual Returns', key_col=key_column) print_df_details(annual_returns_df) write2disk(annual_returns_df, 'mfd_annual_returns') quarterly_returns_df = flip_cols2rows(src_df=mf_df, col_list=quarterly_returns, item_category='Quarterly Returns', key_col=key_column) print_df_details(quarterly_returns_df) write2disk(quarterly_returns_df, 'mfd_quarterly_returns') alpha_beta_df = flip_cols2rows(src_df=mf_df, col_list=alpha_beta, item_category='Alpha/Beta', key_col=key_column) print_df_details(alpha_beta_df) write2disk(alpha_beta_df, 'mfd_alpha_beta') drop_columns = [ "week52_high_low_change", "week52_high_low_change_perc", "week52_high", "week52_high_change", "week52_high_change_perc", "week52_low", "week52_low_change", "week52_low_change_perc", "fund_annual_report_net_expense_ratio", "category_annual_report_net_expense_ratio", "fund_prospectus_net_expense_ratio", "fund_prospectus_gross_expense_ratio", "fund_max_12b1_fee", "fund_max_front_end_sales_load", "category_max_front_end_sales_load", "fund_max_deferred_sales_load", "category_max_deferred_sales_load", "fund_year3_expense_projection", "fund_year5_expense_projection", "fund_year10_expense_projection", "asset_cash", "asset_stocks", "asset_bonds", "asset_others", "asset_preferred", "asset_convertible", "fund_sector_basic_materials", "fund_sector_communication_services", "fund_sector_consumer_cyclical", "fund_sector_consumer_defensive", "fund_sector_energy", "fund_sector_financial_services", 
"fund_sector_healthcare", "fund_sector_industrials", "fund_sector_real_estate", "fund_sector_technology", "fund_sector_utilities", "fund_price_book_ratio", "category_price_book_ratio", "fund_price_cashflow_ratio", "category_price_cashflow_ratio", "fund_price_earning_ratio", "category_price_earning_ratio", "fund_price_sales_ratio", "category_price_sales_ratio", "fund_median_market_cap", "category_median_market_cap", "fund_year3_earnings_growth", "category_year3_earnings_growth", "fund_bond_maturity", "category_bond_maturity", "fund_bond_duration", "category_bond_duration", "fund_bonds_us_government", "fund_bonds_aaa", "fund_bonds_aa", "fund_bonds_a", "fund_bonds_bbb", "fund_bonds_bb", "fund_bonds_b", "fund_bonds_below_b", "fund_bonds_others", "fund_return_ytd", "category_return_ytd", "fund_return_1month", "category_return_1month", "fund_return_3months", "category_return_3months", "fund_return_1year", "category_return_1year", "fund_return_3years", "category_return_3years", "fund_return_5years", "category_return_5years", "fund_return_10years", "category_return_10years", "fund_return_last_bull_market", "category_return_last_bull_market", "fund_return_last_bear_market", "category_return_last_bear_market", "years_up", "years_down", "quarters_up", "quarters_down", "fund_return_2020", "category_return_2020", "fund_return_2019", "category_return_2019", "fund_return_2018", "category_return_2018", "fund_return_2017", "category_return_2017", "fund_return_2016", "category_return_2016", "fund_return_2015", "category_return_2015", "fund_return_2014", "category_return_2014", "fund_return_2013", "category_return_2013", "fund_return_2012", "category_return_2012", "fund_return_2011", "category_return_2011", "fund_return_2010", "category_return_2010", "fund_return_2009", "category_return_2009", "fund_return_2008", "category_return_2008", "fund_return_2007", "category_return_2007", "fund_return_2006", "category_return_2006", "fund_return_2005", "category_return_2005", 
"fund_return_2004", "category_return_2004", "fund_return_2003", "category_return_2003", "fund_return_2002", "category_return_2002", "fund_return_2001", "category_return_2001", "fund_return_2000", "category_return_2000", "fund_return_2021_q3", "fund_return_2021_q2", "fund_return_2021_q1", "fund_return_2020_q4", "fund_return_2020_q3", "fund_return_2020_q2", "fund_return_2020_q1", "fund_return_2019_q4", "fund_return_2019_q3", "fund_return_2019_q2", "fund_return_2019_q1", "fund_return_2018_q4", "fund_return_2018_q3", "fund_return_2018_q2", "fund_return_2018_q1", "fund_return_2017_q4", "fund_return_2017_q3", "fund_return_2017_q2", "fund_return_2017_q1", "fund_return_2016_q4", "fund_return_2016_q3", "fund_return_2016_q2", "fund_return_2016_q1", "fund_return_2015_q4", "fund_return_2015_q3", "fund_return_2015_q2", "fund_return_2015_q1", "fund_return_2014_q4", "fund_return_2014_q3", "fund_return_2014_q2", "fund_return_2014_q1", "fund_return_2013_q4", "fund_return_2013_q3", "fund_return_2013_q2", "fund_return_2013_q1", "fund_return_2012_q4", "fund_return_2012_q3", "fund_return_2012_q2", "fund_return_2012_q1", "fund_return_2011_q4", "fund_return_2011_q3", "fund_return_2011_q2", "fund_return_2011_q1", "fund_return_2010_q4", "fund_return_2010_q3", "fund_return_2010_q2", "fund_return_2010_q1", "fund_return_2009_q4", "fund_return_2009_q3", "fund_return_2009_q2", "fund_return_2009_q1", "fund_return_2008_q4", "fund_return_2008_q3", "fund_return_2008_q2", "fund_return_2008_q1", "fund_return_2007_q4", "fund_return_2007_q3", "fund_return_2007_q2", "fund_return_2007_q1", "fund_return_2006_q4", "fund_return_2006_q3", "fund_return_2006_q2", "fund_return_2006_q1", "fund_return_2005_q4", "fund_return_2005_q3", "fund_return_2005_q2", "fund_return_2005_q1", "fund_return_2004_q4", "fund_return_2004_q3", "fund_return_2004_q2", "fund_return_2004_q1", "fund_return_2003_q4", "fund_return_2003_q3", "fund_return_2003_q2", "fund_return_2003_q1", "fund_return_2002_q4", "fund_return_2002_q3", 
"fund_return_2002_q2", "fund_return_2002_q1", "fund_return_2001_q4", "fund_return_2001_q3", "fund_return_2001_q2", "fund_return_2001_q1", "fund_return_2000_q4", "fund_return_2000_q3", "fund_return_2000_q2", "fund_return_2000_q1", "fund_alpha_3years", "fund_beta_3years", "fund_mean_annual_return_3years", "fund_r_squared_3years", "fund_stdev_3years", "fund_sharpe_ratio_3years", "fund_treynor_ratio_3years", "fund_alpha_5years", "fund_beta_5years", "fund_mean_annual_return_5years", "fund_r_squared_5years", "fund_stdev_5years", "fund_sharpe_ratio_5years", "fund_treynor_ratio_5years", "fund_alpha_10years", "fund_beta_10years", "fund_mean_annual_return_10years", "fund_r_squared_10years", "fund_stdev_10years", "fund_sharpe_ratio_10years", "fund_treynor_ratio_10years", "fund_return_category_rank_ytd", "fund_return_category_rank_1month", "fund_return_category_rank_3months", "fund_return_category_rank_1year", "fund_return_category_rank_3years", "fund_return_category_rank_5years", "load_adj_return_1year", "load_adj_return_3years", "load_adj_return_5years", "load_adj_return_10years" ] mf_df_basic = mf_df.drop(drop_columns, axis = 1) mf_df_basic.info() write2disk(mf_df_basic, 'mf_basic') # Run this after catting all the mfd files into one mf_detail_df = pd.read_csv(data_folder+'mf_detail.csv') print_df_details(mf_detail_df) mf_detail_df.dropna(axis=0, how='any', inplace=True) print_df_details(mf_detail_df) mf_detail_df.groupby(['item_category', 'item_desc']).count() write2disk(mf_detail_df, 'mf_detail_nona') ```
github_jupyter
**[Data Visualization: From Non-Coder to Coder Micro-Course Home Page](https://www.kaggle.com/learn/data-visualization-from-non-coder-to-coder)** --- In this tutorial you'll learn all about **histograms** and **density plots**. # Set up the notebook As always, we begin by setting up the coding environment. (_This code is hidden, but you can un-hide it by clicking on the "Code" button immediately below this text, on the right._) ``` import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns print("Setup Complete") ``` # Select a dataset We'll work with a dataset of 150 different flowers, or 50 each from three different species of iris (*Iris setosa*, *Iris versicolor*, and *Iris virginica*). ![tut4_iris](https://i.imgur.com/RcxYYBA.png) # Load and examine the data Each row in the dataset corresponds to a different flower. There are four measurements: the sepal length and width, along with the petal length and width. We also keep track of the corresponding species. ``` # Path of the file to read iris_filepath = "./input/iris.csv" # Read the file into a variable iris_data iris_data = pd.read_csv(iris_filepath, index_col="Id") # Print the first 5 rows of the data iris_data.head() ``` # Histograms Say we would like to create a **histogram** to see how petal length varies in iris flowers. We can do this with the `sns.distplot` command. ``` # Histogram sns.distplot(a=iris_data['Petal Length (cm)'], kde=False) ``` We customize the behavior of the command with two additional pieces of information: - `a=` chooses the column we'd like to plot (_in this case, we chose `'Petal Length (cm)'`_). - `kde=False` is something we'll always provide when creating a histogram, as leaving it out will create a slightly different plot. # Density plots The next type of plot is a **kernel density estimate (KDE)** plot. In case you're not familiar with KDE plots, you can think of it as a smoothed histogram. To make a KDE plot, we use the `sns.kdeplot` command. 
Setting `shade=True` colors the area below the curve (_and `data=` has identical functionality as when we made the histogram above_). ``` # KDE plot sns.kdeplot(data=iris_data['Petal Length (cm)'], shade=True) ``` # 2D KDE plots We're not restricted to a single column when creating a KDE plot. We can create a **two-dimensional (2D) KDE plot** with the `sns.jointplot` command. In the plot below, the color-coding shows us how likely we are to see different combinations of sepal width and petal length, where darker parts of the figure are more likely. ``` # 2D KDE plot sns.jointplot(x=iris_data['Petal Length (cm)'], y=iris_data['Sepal Width (cm)'], kind="kde") ``` Note that in addition to the 2D KDE plot in the center, - the curve at the top of the figure is a KDE plot for the data on the x-axis (in this case, `iris_data['Petal Length (cm)']`), and - the curve on the right of the figure is a KDE plot for the data on the y-axis (in this case, `iris_data['Sepal Width (cm)']`). # Color-coded plots For the next part of the tutorial, we'll create plots to understand differences between the species. To accomplish this, we begin by breaking the dataset into three separate files, with one for each species. ``` # Paths of the files to read iris_set_filepath = "./input/iris_setosa.csv" iris_ver_filepath = "./input/iris_versicolor.csv" iris_vir_filepath = "./input/iris_virginica.csv" # Read the files into variables iris_set_data = pd.read_csv(iris_set_filepath, index_col="Id") iris_ver_data = pd.read_csv(iris_ver_filepath, index_col="Id") iris_vir_data = pd.read_csv(iris_vir_filepath, index_col="Id") # Print the first 5 rows of the Iris versicolor data iris_ver_data.head() ``` In the code cell below, we create a different histogram for each species by using the `sns.distplot` command (_as above_) three times. We use `label=` to set how each histogram will appear in the legend. 
``` # Histograms for each species sns.distplot(a=iris_set_data['Petal Length (cm)'], label="Iris-setosa", kde=False) sns.distplot(a=iris_ver_data['Petal Length (cm)'], label="Iris-versicolor", kde=False) sns.distplot(a=iris_vir_data['Petal Length (cm)'], label="Iris-virginica", kde=False) # Add title plt.title("Histogram of Petal Lengths, by Species") # Force legend to appear plt.legend() ``` In this case, the legend does not automatically appear on the plot. To force it to show (for any plot type), we can always use `plt.legend()`. We can also create a KDE plot for each species by using `sns.kdeplot` (_as above_). Again, `label=` is used to set the values in the legend. ``` # KDE plots for each species sns.kdeplot(data=iris_set_data['Petal Length (cm)'], label="Iris-setosa", shade=True) sns.kdeplot(data=iris_ver_data['Petal Length (cm)'], label="Iris-versicolor", shade=True) sns.kdeplot(data=iris_vir_data['Petal Length (cm)'], label="Iris-virginica", shade=True) # Add title plt.title("Distribution of Petal Lengths, by Species") ``` One interesting pattern that can be seen in plots is that the plants seem to belong to one of two groups, where _Iris versicolor_ and _Iris virginica_ seem to have similar values for petal length, while _Iris setosa_ belongs in a category all by itself. In fact, according to this dataset, we might even be able to classify any iris plant as *Iris setosa* (as opposed to *Iris versicolor* or *Iris virginica*) just by looking at the petal length: if the petal length of an iris flower is less than 2 cm, it's most likely to be *Iris setosa*! # What's next? Put your new skills to work in a **[coding exercise](https://www.kaggle.com/kernels/fork/2951534)**! --- **[Data Visualization: From Non-Coder to Coder Micro-Course Home Page](https://www.kaggle.com/learn/data-visualization-from-non-coder-to-coder)**
github_jupyter
## Output data preparation for dataset Twitch #### Plots and figures in separate notebook ``` # IMPORTS import matplotlib.pyplot as plt import numpy as np import csv import networkx as nx from random import sample import time import math import random import scipy import pandas as pd # Define necessary functions def arccosh_og(x): ##note that x*x-1 might be less than zero :( And then log(t) could be negative (negative distance?!?!?!?!) t = x + math.sqrt(x * x - 1) return math.log(t) def arccosh(x): t = x + math.sqrt(max(x * x, 1) - 1) return max(math.log(t), 0.5) def query(coordinates, source, destination, curvature): if source == destination: return 0 sourceCoords = coordinates[source] destinationCoords = coordinates[destination] i = 0 ts = 1.0 td = 1.0 tt = 1.0 for i in range(len(sourceCoords)): ts += math.pow(sourceCoords[i], 2) td += math.pow(destinationCoords[i], 2) tt += (sourceCoords[i] * destinationCoords[i]) #print(ts, td, tt) t = math.sqrt(ts * td) - tt #print('t:', t) return arccosh(t) * math.fabs(curvature) def intersection_similarity(u,v): return len(set(u).intersection(set(v))) def weighted_intersection_similarity(u,v, alpha): similarity = 0 if len(u)==len(v): n = len(u) for i in range(n): if u[i] in v: j = v.index(u[i]) similarity += (n-abs(i-j))**alpha else: print('not equal vector lengths') similarity = -1 return similarity # READ REAL NETWORK - Giant Cconnected Component dataset = 'large_twitch_edges.csv' data = pd.read_csv(dataset, header = 0, sep = ',') data = data[[data.columns[0], data.columns[1]]] data.head() graph = nx.from_pandas_edgelist(data, data.columns[0], data.columns[1]) Gcc = sorted(nx.connected_components(graph), key=len, reverse=True) giant = graph.subgraph(Gcc[0]) # SPECIFY THESE INPUTS output_file_name = 'twitch/out' partitions = 1 curvature = -1 number_of_nodes = 168114 ####################### landFile = output_file_name + '.land' coordFiles = [output_file_name + str(i) + '.coord' for i in range(partitions)] coordinates = 
dict() with open(landFile) as infile: for line in infile: linesplit = line.split() id = int(linesplit[0]) coords = [float(c) for c in linesplit[1:]] coordinates[id] = coords for coordFile in coordFiles: with open(coordFile) as infile: for line in infile: linesplit = line.split() id = int(linesplit[0]) coords = [float(c) for c in linesplit[1:]] coordinates[id] = coords #while True: # query_input = input("Enter ID of 2 nodes: ") # if query_input == 'exit' or query_input == 'q' or query_input == 'quit': # break # querysplit = query_input.split() # source = int(querysplit[0]) # destination = int(querysplit[1]) # estimate = query(coordinates, source, destination, curvature) # print('Rigel estimates the distance between %d and %d to be %f.\n' % (source, destination, estimate)) # Relative errors - approximation: select 'select_count = 1000' nodes from where distances (to all nodes) are calculated ### This is necessary due to slow EXACT path calculation result_avg_path_length_estimated = [] result_avg_path_length_exact = [] result_radius_estimated = [] result_radius_exact = [] result_diameter_estimated = [] result_diameter_exact = [] top_cent_exact = [] top_cent_estimate = [] top_ecc_exact = [] top_ecc_estimate = [] for sed in range(5): print('START OF SEED', sed, '.') np.random.seed(sed) select_count = 1000 selected_nodes = random.sample(range(number_of_nodes), select_count) relative_errors = dict() exact_distances = dict() estimated_distances= dict() avg_path_length_exact = 0 avg_path_length_estimated = 0 radius_estimated = number_of_nodes diameter_estimated = 0 radius_exact = number_of_nodes diameter_exact = 0 eccentricites_estimated = [] eccentricites_exact =[] centralities_exact = [] centralities_estimated = [] node_names = list(giant.nodes()) iters = 0 for source in selected_nodes: iters += 1 if iters % int(select_count/10) == 0: print('Processed ', 10 * iters / int(select_count/10), '% of total calculations...') eccentricity_curr_est = 0 eccentricity_curr_ex = 0 
exact_distances[source] = [] estimated_distances[source] = [] relative_errors[source] = [] for target in selected_nodes: #print('points:', source, target) if source != target: estimate = query(coordinates, source, target, curvature) exact = nx.shortest_path_length(giant, node_names[source], node_names[target]) avg_path_length_estimated += estimate avg_path_length_exact += exact eccentricity_curr_est = max(eccentricity_curr_est, estimate) diameter_estimated = max(diameter_estimated, estimate) eccentricity_curr_ex = max(eccentricity_curr_ex,exact) diameter_exact = max(diameter_exact,exact) relative_errors[source].append(abs(estimate-exact)/exact) exact_distances[source].append(exact) estimated_distances[source].append(estimate) else: relative_errors[source].append(0) exact_distances[source].append(0) estimated_distances[source].append(0) radius_estimated = min(eccentricity_curr_est, radius_estimated) radius_exact = min(eccentricity_curr_ex, radius_exact) eccentricites_estimated.append(0-eccentricity_curr_est) eccentricites_exact.append(0-eccentricity_curr_ex) centralities_exact.append(0-np.mean(list(exact_distances.values()))) centralities_estimated.append(0-np.mean(list(estimated_distances.values()))) avg_path_length_estimated = avg_path_length_estimated / (select_count * (select_count - 1) ) avg_path_length_exact = avg_path_length_exact / (select_count * (select_count - 1) ) result_avg_path_length_estimated.append(avg_path_length_estimated) result_avg_path_length_exact.append(avg_path_length_exact) result_radius_estimated.append(radius_estimated) result_radius_exact.append(radius_exact) result_diameter_estimated.append(diameter_estimated) result_diameter_exact.append(diameter_exact) ind = np.argpartition(centralities_exact, -20)[-20:] top_cent_exact.append(ind[np.argsort(np.array(centralities_exact)[ind])]) ind = np.argpartition(centralities_estimated, -20)[-20:] top_cent_estimate.append(ind[np.argsort(np.array(centralities_estimated)[ind])]) ind = 
np.argpartition(eccentricites_exact, -20)[-20:] top_ecc_exact.append(ind[np.argsort(np.array(eccentricites_exact)[ind])]) ind = np.argpartition(eccentricites_estimated, -20)[-20:] top_ecc_estimate.append(ind[np.argsort(np.array(eccentricites_estimated)[ind])]) # estimated metrics print(result_avg_path_length_estimated) print(result_avg_path_length_exact) print(result_radius_estimated) print(result_radius_exact) print(result_diameter_estimated) print(result_diameter_exact) # Similarity of top central nodes for i in range(5): print('Weighted Centrality similarity of top 20: ', weighted_intersection_similarity(list(top_cent_estimate[i]),list(top_cent_exact[i]),1)) print('Weighted Eccentricity similarity of top 20: ', weighted_intersection_similarity(list(top_ecc_estimate[i]),list(top_ecc_exact[i]),1)) print('Centrality similarity of top 20: ', intersection_similarity(list(top_cent_estimate[i]),list(top_cent_exact[i]))) print('Eccentricity similarity of top 20: ', intersection_similarity(list(top_ecc_estimate[i]),list(top_ecc_exact[i]))) #save data for later reuse (plotting) with open('twitch_diam_ex.pickle', 'wb') as handle: pickle.dump(result_diameter_exact, handle) #Average relative error calculation ARE_per_source = [np.mean(relative_errors[node]) for node in relative_errors.keys()] ARE_total = np.mean(ARE_per_source) print('Relative error (approximated): ', ARE_total) # distribution of relative error in total relative_errors_total = [] for source in relative_errors.keys(): relative_errors_total += relative_errors[source] #print(source, ': ' ,min(relative_errors[source])) plt.hist(relative_errors_total, bins = 100) plt.title('RE distribution') plt.xlabel('RE') plt.ylabel('#occurance') plt.show() plt.hist([relative_errors_total[i] for i in range(len(relative_errors_total)) if (relative_errors_total[i] < 1.0 and relative_errors_total[i] > 0.0)], bins = 100) plt.title('RE distribution - in [0,1]') plt.xlabel('RE') plt.ylabel('#occurance') plt.show() # Save data for 
later reuse (plotting) with open('twitch_erdist.pickle', 'wb') as handle: pickle.dump([relative_errors_total[i] for i in range(len(relative_errors_total)) if (relative_errors_total[i] < 1.0 and relative_errors_total[i] > 0.0)], handle) with open('twitch_cdf.pickle', 'wb') as handle: pickle.dump({'bins': bins_count[1:], 'cdf':cdf }, handle) # Cumulative Distribution Function of the Distribution if Relative Errors base = [relative_errors_total[i] for i in range(len(relative_errors_total)) if (relative_errors_total[i] < 1.0 and relative_errors_total[i] > 0.0)] count, bins_count = np.histogram(base, bins=1000) pdf = count / sum(count) cdf = np.cumsum(pdf) plt.plot(bins_count[1:], cdf, label="CDF") plt.title('CDF of Relative Error') plt.xlabel('Relative Error') plt.ylabel('CDF') plt.show() ```
github_jupyter
### Irrigation model input file prep This code prepares the final input file to the irrigation (agrodem) model. It extracts all necessary attributes to crop locations. It also applies some name fixes as needed for the model to run smoothly.The output dataframe is exported as csv and ready to be used in the irrigation model. **Original code:** [Alexandros Korkovelos](https://github.com/akorkovelos) & [Konstantinos Pegios](https://github.com/kopegios)<br /> **Conceptualization & Methodological review :** [Alexandros Korkovelos](https://github.com/akorkovelos)<br /> **Updates, Modifications:** [Alexandros Korkovelos](https://github.com/akorkovelos)<br /> **Funding:** The World Bank (contract number: 7190531), [KTH](https://www.kth.se/en/itm/inst/energiteknik/forskning/desa/welcome-to-the-unit-of-energy-systems-analysis-kth-desa-1.197296) ``` #Import modules and libraries import os import geopandas as gpd from rasterstats import point_query import logging import pandas as pd from shapely.geometry import Point, Polygon import gdal import rasterio as rio import fiona import gdal import osr import ogr import rasterio.mask import time import numpy as np import itertools import re from osgeo import gdal,ogr import struct import csv import tkinter as tk from tkinter import filedialog, messagebox from pandas import DataFrame as df from rasterio.warp import calculate_default_transform, reproject from rasterio.enums import Resampling from rasterstats import point_query from pyproj import Proj from shapely.geometry import Point, Polygon # Import data root = tk.Tk() root.withdraw() root.attributes("-topmost", True) messagebox.showinfo('Agrodem Prepping', 'Open the extracted csv file obtained after running the QGIS plugin - AGRODEM') input_file = filedialog.askopenfilename() # Import csv as pandas dataframe crop_df = pd.read_csv(input_file) # Fill in Nan values with 0 crop_df.fillna(99999,inplace=True) crop_df.head(2) ##Dropping unecessary columns droping_cols = ["Pixel"] 
crop_df.drop(droping_cols, axis=1, inplace=True) # New for whole Moz crop_df.rename(columns={'elevation': 'sw_depth', 'MaizeArea': 'harv_area'}, inplace=True) # Adding columns missing crop_df["country"] = "moz" #maize_gdf["admin_1"] = "Zambezia" crop_df["curr_yield"] = "4500" crop_df["max_yield"] = "6000" crop_df['field_1'] = range(0, 0+len(crop_df)) ``` #### Converting dataframe to geo-dataframe ``` # Add geometry and convert to spatial dataframe in source CRS #crop_df['geometry'] = list(zip(crop_df['lon'], crop_df['lat'])) #crop_df['geometry'] = crop_df['geometry'].apply(Point) crop_df['geometry'] = crop_df.apply(lambda x: Point((float(x.lon), float(x.lat))), axis =1) crop_df = gpd.GeoDataFrame(crop_df, geometry ='geometry') # Reproject data in to Ordnance Survey GB coordinates crop_df.crs="+proj=utm +zone=37 +south +datum=WGS84 +units=m +no_defs" # convert to shapefile #write the name you would like to have in the string "test_final5, you can keep this also as the default name" crop_df.to_file('test_final5.shp',driver = 'ESRI Shapefile') #export to csv messagebox.showinfo('Agrodem Prepping','Browse to the folder where you want to save geodataframe as a csv file') path = filedialog.askdirectory() shpname = 'Output' crop_df.to_csv(os.path.join(path,"{}.csv".format(shpname))) messagebox.showinfo('Agrodem Prepping', 'Browse to the folder that contains required Raster files for temp, prec and radiance') #file location: r"N:\Agrodem\Irrigation_model\Input_data\Supporting_Layers" raster_path = filedialog.askdirectory() raster_files =[] print ("Reading independent variables...") for i in os.listdir(raster_path): if i.endswith('.tif'): raster_files.append(i) messagebox.showinfo('Agrodem Prepping','Open the saved shapefile extracted from the input csv file above ') shp_filename = filedialog.askopenfilename() print ("Extracting raster values to points...") for i in raster_files: print("Extracting " + i + " values...") src_filename = raster_path + "\\" + i li_values = 
list() src_ds=gdal.Open(src_filename) gt=src_ds.GetGeoTransform() rb=src_ds.GetRasterBand(1) ds=ogr.Open(shp_filename) lyr=ds.GetLayer() for feat in lyr: geom = feat.GetGeometryRef() feat_id = feat.GetField('field_1') mx,my=geom.GetX(), geom.GetY() #coord in map units #Convert from map to pixel coordinates. #Only works for geotransforms with no rotation. px = int((mx - gt[0]) / gt[1]) #x pixel py = int((my - gt[3]) / gt[5]) #y pixel intval=rb.ReadAsArray(px,py,1,1) li_values.append([feat_id, intval[0]]) print ("Writing " + i + " values to csv...") #input to the output folder for generated csv files csvoutpath = r"C:\Oluchi\Irrigation model\Maize" with open(csvoutpath + "\\" + i.split('.')[0] + i.split('.')[1] + '.csv', 'w') as csvfile: wr = csv.writer(csvfile) wr.writerows(li_values) ``` ## Merge csv files with crop ``` #Import data messagebox.showinfo('Agrodem Prepping', 'Open the csv file you in which you exported the geodataframe previously') file = filedialog.askopenfilename() agrodem_input = pd.read_csv(file) csv_files = [] print ("Reading csv files...") for i in os.listdir(csvoutpath): if i.endswith('.csv'): csv_files.append(i) for i in csv_files: print('Reading...'+ i) df_csv = pd.read_csv(csvoutpath + "//" + i, index_col=None, header=None) df_csv.iloc[:,1] = df_csv.iloc[:,1].astype(str) df_csv.iloc[:,1] = df_csv.iloc[:,1].str.replace('[','') df_csv.iloc[:,1] = df_csv.iloc[:,1].str.replace(']','') columnName = i.split('.')[0] print("Merging..." 
+ columnName) agrodem_input[columnName] = df_csv.iloc[:,1] # Define output path # Overwriting the csv file path = r"N:\Agrodem\Irrigation_model\Output_data\agrodem_input" shpname = "Cassava_Moz_1km_2030_SG_downscaled_SW.csv" #drybeans crop_gdf.to_csv(os.path.join(path,"{c}".format(c=shpname))) ``` ### Alternative way of extraction raster value to point (long run) ``` # Seetting rasters path #set_path_4rasters = r"N:\Agrodem\Irrigation_model\Input_data\Supporting_Layers" #for i in os.listdir(set_path_4rasters): # if i.endswith('.tif'): # #Check if this keeps the raster name as found with the .tif extension # columName = i[:-4] # print (columName) # print ("Extracting " + columName + " values to points...") # maize_gdf[columName] = point_query(maize_gdf, set_path_4rasters + "\\" + i) agrodem_input.columns ``` ### Updated names of input files for 30s rasters ``` # Renaming columns as input file requires agrodem_input.rename(columns={'wc20_30s_prec_01': 'prec_1', 'wc20_30s_prec_02': 'prec_2', 'wc20_30s_prec_03': 'prec_3', 'wc20_30s_prec_04': 'prec_4', 'wc20_30s_prec_05': 'prec_5', 'wc20_30s_prec_06': 'prec_6', 'wc20_30s_prec_07': 'prec_7', 'wc20_30s_prec_08': 'prec_8', 'wc20_30s_prec_09': 'prec_9', 'wc20_30s_prec_10': 'prec_10', 'wc20_30s_prec_11': 'prec_11', 'wc20_30s_prec_12': 'prec_12', 'wc20_30s_srad_01': 'srad_1', 'wc20_30s_srad_02': 'srad_2', 'wc20_30s_srad_03': 'srad_3', 'wc20_30s_srad_04': 'srad_4', 'wc20_30s_srad_05': 'srad_5', 'wc20_30s_srad_06': 'srad_6', 'wc20_30s_srad_07': 'srad_7', 'wc20_30s_srad_08': 'srad_8', 'wc20_30s_srad_09': 'srad_9', 'wc20_30s_srad_10': 'srad_10', 'wc20_30s_srad_11': 'srad_11', 'wc20_30s_srad_12': 'srad_12', 'wc20_30s_tavg_01': 'tavg_1', 'wc20_30s_tavg_02': 'tavg_2', 'wc20_30s_tavg_03': 'tavg_3', 'wc20_30s_tavg_04': 'tavg_4', 'wc20_30s_tavg_05': 'tavg_5', 'wc20_30s_tavg_06': 'tavg_6', 'wc20_30s_tavg_07': 'tavg_7', 'wc20_30s_tavg_08': 'tavg_8', 'wc20_30s_tavg_09': 'tavg_9', 'wc20_30s_tavg_10': 'tavg_10', 'wc20_30s_tavg_11': 
'tavg_11', 'wc20_30s_tavg_12': 'tavg_12', 'wc20_30s_tmax_01': 'tmax_1', 'wc20_30s_tmax_02': 'tmax_2', 'wc20_30s_tmax_03': 'tmax_3', 'wc20_30s_tmax_04': 'tmax_4', 'wc20_30s_tmax_05': 'tmax_5', 'wc20_30s_tmax_06': 'tmax_6', 'wc20_30s_tmax_07': 'tmax_7', 'wc20_30s_tmax_08': 'tmax_8', 'wc20_30s_tmax_09': 'tmax_9', 'wc20_30s_tmax_10': 'tmax_10', 'wc20_30s_tmax_11': 'tmax_11', 'wc20_30s_tmax_12': 'tmax_12', 'wc20_30s_tmin_01': 'tmin_1', 'wc20_30s_tmin_02': 'tmin_2', 'wc20_30s_tmin_03': 'tmin_3', 'wc20_30s_tmin_04': 'tmin_4', 'wc20_30s_tmin_05': 'tmin_5', 'wc20_30s_tmin_06': 'tmin_6', 'wc20_30s_tmin_07': 'tmin_7', 'wc20_30s_tmin_08': 'tmin_8', 'wc20_30s_tmin_09': 'tmin_9', 'wc20_30s_tmin_10': 'tmin_10', 'wc20_30s_tmin_11': 'tmin_11', 'wc20_30s_tmin_12': 'tmin_12', 'wc20_30s_wind_01': 'wind_1', 'wc20_30s_wind_02': 'wind_2', 'wc20_30s_wind_03': 'wind_3', 'wc20_30s_wind_04': 'wind_4', 'wc20_30s_wind_05': 'wind_5', 'wc20_30s_wind_06': 'wind_6', 'wc20_30s_wind_07': 'wind_7', 'wc20_30s_wind_08': 'wind_8', 'wc20_30s_wind_09': 'wind_9', 'wc20_30s_wind_10': 'wind_10', 'wc20_30s_wind_11': 'wind_11', 'wc20_30s_wind_12': 'wind_12', 'gyga_af_agg_erzd_tawcpf23mm__m_1kmtif': 'awsc', 'Surface_Water_Suitability_Moz' : 'sw_suit', 'elevationtif': 'elevation', 'WTDtif':'gw_depth'}, inplace=True) agrodem_input.columns droping_cols = ["Unnamed: 0","geometry"] agrodem_input.drop(droping_cols, axis=1, inplace=True) ``` ## Exporting gdf into csv (or shapefile, gpkg as needed) ``` #gpkg #agrodem_input.to_file("Zambezia_1km.gpkg", layer='Maize_Inputfile', driver="GPKG") #shp #agrodem_input.to_file("Moz_250m_Maize_190920.shp") # Define output path path = r"C:\Oluchi\Irrigation model\Output_data\agrodem_input\Final_input_files" csvname = "agrodem_input_Maize.csv" #maize agrodem_input.to_csv(os.path.join(path,"{c}".format(c=csvname)), index=False) ```
github_jupyter
# Self-Driving Car Engineer Nanodegree ## Project: **Finding Lane Lines on the Road** *** In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below. Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right. In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project. --- Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image. **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".** --- **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. 
You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.** --- <figure> <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" /> <figcaption> <p></p> <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> </figcaption> </figure> <p></p> <figure> <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" /> <figcaption> <p></p> <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p> </figcaption> </figure> **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** ## Import Packages ``` #importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 %matplotlib inline ``` ## Read in an Image ``` #reading in an image image = mpimg.imread('test_images/solidWhiteRight.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') ``` ## Ideas for Lane Detection Pipeline **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:** `cv2.inRange()` for color selection `cv2.fillPoly()` for regions selection `cv2.line()` to draw lines on an image given endpoints `cv2.addWeighted()` to coadd / overlay two images `cv2.cvtColor()` to grayscale or change color `cv2.imwrite()` to output images to file `cv2.bitwise_and()` to apply a mask to an image **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** ## Helper Functions Below are some helper functions to help get you started. They should look familiar from the lesson! 
``` import math def grayscale(img): """Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray')""" return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold) def gaussian_blur(img, kernel_size): """Applies a Gaussian Noise kernel""" return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. `vertices` should be a numpy array of integer points. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, vertices, ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_lines(img, lines, color=[255, 0, 0], thickness=2): """ NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. 
Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below """ for line in lines: for x1,y1,x2,y2 in line: cv2.line(img, (x1, y1), (x2, y2), color, thickness) def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap): """ `img` should be the output of a Canny transform. Returns an image with hough lines drawn. """ lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap) line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) draw_lines(line_img, lines) return line_img # Python 3 has support for cool math symbols. def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! """ return cv2.addWeighted(initial_img, α, img, β, γ) ``` ## Test Images Build your pipeline to work on the images in the directory "test_images" **You should make sure your pipeline works well on these images before you try the videos.** ``` import os os.listdir("test_images/") ``` ## Build a Lane Finding Pipeline Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report. Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters. ``` # TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images_output directory. 
gray = grayscale(image) plt.imshow(gray, cmap='gray') kernel_size = 3 blur_gray = gaussian_blur(gray, kernel_size) low_threshold = 100 high_threshold = 150 canny_out = canny(blur_gray, low_threshold, high_threshold) plt.imshow(canny_out, cmap='Greys_r') # give the vertices of polygon in an array form ysize = image.shape[0] xsize = image.shape[1] vertices = np.array([[[100, ysize], [450,325],[525,325], [850,ysize]]], dtype=np.int32) #Region of interest masked_image = region_of_interest(canny_out, vertices) plt.imshow(masked_image, cmap='Greys_r') #Hough transforms img = masked_image rho = 1 theta = np.pi/180 threshold = 10 min_line_len = 10 max_line_gap = 2 line_img = hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap) plt.imshow(line_img) lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),min_line_len, max_line_gap) weighted_image = weighted_img(line_img, image, α=0.8, β=1., γ=0.) plt.imshow(weighted_image) draw_lines(weighted_image, lines, color=[255, 0, 0], thickness=5) plt.imshow(weighted_image) ``` ## Test on Videos You know what's cooler than drawing lanes over images? Drawing lanes over video! We can test our solution on two provided videos: `solidWhiteRight.mp4` `solidYellowLeft.mp4` **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** **If you get an error that looks like this:** ``` NeedDownloadError: Need ffmpeg exe. 
You can download it by calling: imageio.plugins.ffmpeg.download() ``` **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.** ``` # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(image): # NOTE: The output you return should be a color image (3 channel) for processing video below # TODO: put your pipeline here, # you should return the final output (image where lines are drawn on lanes) return result ``` Let's try the one with the solid white lane on the right first ... ``` white_output = 'test_videos_output/solidWhiteRight.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5) clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4") white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!! %time white_clip.write_videofile(white_output, audio=False) ``` Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice. 
``` HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(white_output)) ``` ## Improve the draw_lines() function **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".** **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.** Now for the one with the solid yellow lane on the left. This one's more tricky! 
``` yellow_output = 'test_videos_output/solidYellowLeft.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5) clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4') yellow_clip = clip2.fl_image(process_image) %time yellow_clip.write_videofile(yellow_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(yellow_output)) ``` ## Writeup and Submission If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file. ## Optional Challenge Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project! 
``` challenge_output = 'test_videos_output/challenge.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5) clip3 = VideoFileClip('test_videos/challenge.mp4') challenge_clip = clip3.fl_image(process_image) %time challenge_clip.write_videofile(challenge_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(challenge_output)) ```
github_jupyter
<a href="https://colab.research.google.com/github/KristynaPijackova/Radio-Modulation-Recognition-Networks/blob/main/Radio_Modulation_Recognition_Networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Radio Modulation Recognition Networks --- **Author: Kristyna Pijackova** --- This notebook contains code for my [bachelor thesis](https://www.vutbr.cz/studenti/zav-prace/detail/133594) in the academic year 2020/2021. --- **The code structure is following:** * **Imports** - Import needed libraries * **Defined Functions** - Functions defined for an easier manipulation with the data later on * **Accessing the datasets** - you may skip this part and download the datasets elsewhere if you please * **Loading Data** - Load the data and divide them into training, validation and test sets * **Deep Learning Part** -Contains the architectures, which are prepared to be trained and evaluated * **Load Trained Model** - Optionaly you can download the CGDNN model and see how it does on the corresponding dataset * **Layer Visualization** - A part of code which was written to visualize the activation maps of the convolutional and recurrent layers * **Plotting** - You can plot the confusion matrices in this part --- **Quick guide to running the document:** Open [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb#recent=true) and go to 'GitHub' bookmark. Insert the link to the Github repository. This should open the code for you and allow you to run and adjust it. 
* Use `up` and `down` keys to move in the notebook * Use `ctrl+enter` to run cell or choose 'Run All' in Runtime to run the whole document at once * If you change something in specific cell, it's enough to re-run just the cell to save the changes * Hide/show sections of the code with the arrows at side, which are next to some cell code * In the top left part yoz can click on the Content icon, which will allow you to navigate easier through this notebook # Imports Import needed libraries ``` from scipy.io import loadmat from pandas import factorize import pickle import numpy as np import random from scipy import signal from matplotlib import pyplot as plt from sklearn.metrics import confusion_matrix import seaborn as sns from tensorflow.keras.utils import to_categorical import tensorflow as tf from tensorflow import keras from tensorflow.keras import backend as K from tensorflow.keras import layers from tensorflow.keras.utils import plot_model ``` Mount to Google Drive (optional) ``` # Mounting your Google Drive # from google.colab import drive # drive.mount('/content/gdrive', force_remount=True) # root_dir = "/content/gdrive/My Drive/" ``` # Defined functions for easier work with data ## Functions to load datasets ``` # VUT Dataset def load_VUT_dataset(dataset_location): """ Load dataset and extract needed data Input: dataset_location: specify where the file is stored and its name Output: SNR: list of the SNR range in dataset [-20 to 18] X: array of the measured I/Q data [num_of_samples, 128, 2] modulations: list of the modulations in this dataset one_hot: one_hot encoded data - the other maps the order of the mods lbl_SNR: list of each snr (for plotting) """ # Load the dataset stored as .mat with loadmat fuction from scipy.io # from scipy.io import loadmat dataset = loadmat(dataset_location) # Point to wanted data SNR = dataset['SNR'] X = dataset['X'] mods = dataset['mods'] one_hot = dataset['one_hot'] # Transpose the structure of X from [:,2,128] to [:,128,2] X = 
np.transpose(X[:,:,:],(0,2,1)) # Change the type and structure of output SNR and mods to lists SNRs = [] SNR = np.reshape(SNR,-1) for i in range(SNR.shape[0]): snr = SNR[:][i].tolist() SNRs.append(snr) modulations = [] mods = np.reshape(mods,-1) for i in range(mods.shape[0]): mod = mods[i][0].tolist() modulations.append(mod) # Assign SNR value to each vector repeat_n = X.shape[0]/len(mods)/len(SNR) repeat_n_mod = len(mods) lbl_SNR = np.tile(np.repeat(SNR, repeat_n), repeat_n_mod) # X = tf.convert_to_tensor(X, dtype=tf.float32) # one_hot = tf.convert_to_tensor(one_hot, dtype=tf.float32) return SNRs, X, modulations, one_hot, lbl_SNR # RadioML2016.10a/10b or MIGOU MOD def load_dataset(dataset_location): """ Load dataset and extract needed data Input: dataset_location: specify where the file is stored and its name Output: snrs: list of the SNR range in dataset [-20 to 18] X: array of the measured I/Q data [num_of_samples, 128, 2] modulations: list of the modulations in this dataset one_hot_encode: one_hot encoded data - the other maps the order of the mods lbl_SNR: list of each snr (for plotting) """ snrs,mods = map(lambda j: sorted(list(set(map(lambda x: x[j], dataset_location.keys())))), [1,0]) X = []; I = []; Q = []; lbl = []; for mod in mods: for snr in snrs: X.append(dataset_location[(mod,snr)]) for i in range(dataset_location[(mod,snr)].shape[0]): lbl.append((mod,snr)) X = np.vstack(X); lbl=np.vstack(lbl) X = np.transpose(X[:,:,:],(0,2,1)) # One-hot-encoding Y = []; for i in range(len(lbl)): mod = (lbl[i,0]) Y.append(mod) mapping = {} for x in range(len(mods)): mapping[mods[x]] = x ## integer representation for x in range(len(Y)): Y[x] = mapping[Y[x]] one_hot_encode = to_categorical(Y) # Assign SNR value to each vector repeat_n = X.shape[0]/len(mods)/len(snrs) repeat_n_mod = len(mods) lbl_SNR = np.tile(np.repeat(snrs, repeat_n), repeat_n_mod) return snrs, X, mods, one_hot_encode, lbl_SNR # RML2016.10b / just for the way it is saved in my GoogleDrive def 
load_RMLb_dataset(X, lbl): mods = np.unique(lbl[:,0]) snrs = np.unique(lbl[:,1]) snrs = list(map(int, snrs)) snrs.sort() # One-hot encoding Y = []; for i in range(len(lbl)): mod = (lbl[i,0]) Y.append(mod) mapping = {} for x in range(len(mods)): mapping[mods[x]] = x ## integer representation for x in range(len(Y)): Y[x] = mapping[Y[x]] one_hot_encode = to_categorical(Y) # Assign SNR value to each vector repeat_n = X.shape[0]/len(mods)/len(snrs) repeat_n_mod = len(mods) lbl_SNR = np.tile(np.repeat(snrs, repeat_n), repeat_n_mod) X = X return snrs, X, mods, one_hot_encode, lbl_SNR ``` ## Functions to handle the datasets ``` def train_test_valid_split(X, one_hot, train_split=0.7, valid_split=0.15, test_split=0.15): """ Train-Test split the data Input: X: X data one_hot: Y data encoded to one_hot train_split (default 0.7) valid_split (default 0.15) test_split (default 0.15) train_split : valid_split : test_split - ratio for splitting the dataset NOTE: the ratio split must be a sum of 1! Output: train_idx: indexes from X assinged to train data valid_idx: indexes from X assinged to validation data test_idx: indexes from X assinged to test data X_train: X data assigned for training X_valid: X data assigned for validation X_test: X data assigned for testing Y_train: one-hot encoded Y data assigned for training Y_valid: one-hot encoded Y data assigned for validation Y_test: one-hot encoded Y data assigned for testing """ # Set random seed np.random.seed(42) random.seed(42) # Get the number of samples n_examples = X.shape[0] n_train = int(n_examples * train_split) n_valid = int(n_examples * valid_split) n_test = int(n_examples * test_split) # Get indexes of train data train_idx = np.random.choice(range(0, n_examples), size=n_train, replace=False) # Left indexes for valid and test sets left_idx= list(set(range(0, n_examples)) - set(train_idx)) # Get indexes for the left indexes of the X data val = np.random.choice(range(0, (n_valid+n_test)), size=(n_valid), replace=False) test 
= list(set(range(0, len(left_idx))) - set(val)) # Assign indeces for validation to left indexes valid_idx = [] for i in val: val_idx = left_idx[i] valid_idx.append(val_idx) # Get the test set as the rest indexes test_idx = [] for i in test: tst_idx = left_idx[i] test_idx.append(tst_idx) # Shuffle the valid_idx and test_idx random.shuffle(valid_idx) random.shuffle(test_idx) # Assing the indexes to the X and Y data to create train and test sets X_train = X[train_idx] X_valid = X[valid_idx] X_test = X[test_idx] Y_train = one_hot[train_idx] Y_valid = one_hot[valid_idx] Y_test = one_hot[test_idx] return train_idx, valid_idx, test_idx, X_train, X_valid, X_test, Y_train, Y_valid, Y_test def normalize_data(X_train, X_valid, X_test): # mean-std normalization mean = X_train[:,:,:].mean(axis=0) X_train[:,:,:] -= mean std = X_train[:,:,:].std(axis=0) X_train[:,:,:] /= std X_valid[:,:,:] -= mean X_valid[:,:,:] /= std X_test[:,:,:] -= mean X_test[:,:,:] /= std return X_train, X_valid, X_test def return_indices_of_a(a, b): """ Compare two lists a, b for same items and return indeces of the item in list a a: List of items, its indeces will be returned b: List of items to search for in list a Credit: https://stackoverflow.com/users/97248/pts ; https://stackoverflow.com/questions/10367020/compare-two-lists-in-python-and-return-indices-of-matched-values """ b_set = set(b) return [i for i, v in enumerate(a) if v in b_set] ``` ## Functions for plotting ``` def show_confusion_matrix(validations, predictions, matrix_snr, save=False): """ Plot confusion matrix validations: True Y labels predictions: Predicted Y labels of your model matrix_snr: SNR information for plot's titel """ cm = confusion_matrix(validations, predictions) # Normalise cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] fig, ax = plt.subplots(figsize=(10,10)) sns.heatmap(cmn, cmap='Blues', annot=True, fmt='.2f', xticklabels=mods, yticklabels=mods) sns.set(font_scale=1.3) if matrix_snr == None: plt.title("Confusion 
Matrix") else: plt.title("Confusion Matrix \n" + str(matrix_snr) + "dB") plt.ylabel('True Label') plt.xlabel('Predicted Label') if save == True: plt.savefig(base_dir + 'Own_dataset/' + str(matrix_snr) + '.png') plt.show(block=False) def All_SNR_show_confusion_matrix(X_test, save=False): """ Plot confusion matrix of all SNRs in one X_test: X_test data """ prediction = model.predict(X_test) Y_Pred = []; Y_Test = []; for i in range(len(prediction[:,0])): Y_Pred.append(np.argmax(prediction[i,:])) Y_Test.append(np.argmax(Y_test[i])) show_confusion_matrix(Y_Pred, Y_Test, None, save) def SNR_show_confusion_matrix(in_snr, lbl_SNR, X_test, save=False): """ Plot confusion matrices of chosen SNRs in_snr: must be list of SNRs X_test: X_test data """ for snr in in_snr: matrix_snr = snr m_snr = matrix_snr; Y_Pred = []; Y_Test = []; Y_Pred_SNR = []; Y_Test_SNR = []; matrix_snr_index = []; prediction = model.predict(X_test) for i in range(len(prediction[:,0])): Y_Pred.append(np.argmax(prediction[i,:])) Y_Test.append(np.argmax(Y_test[i])) for i in range(len(lbl_SNR)): if int(lbl_SNR[i]) == m_snr: matrix_snr_index.append(i) indeces_of_Y_test = return_indices_of_a(test_idx, matrix_snr_index) for i in indeces_of_Y_test: Y_Pred_SNR.append(Y_Pred[i]) Y_Test_SNR.append(Y_Test[i]) show_confusion_matrix(Y_Pred_SNR, Y_Test_SNR, matrix_snr, save) def plot_split_distribution(mods, Y_train, Y_valid, Y_test): x = np.arange(len(mods)) # the label locations width = 1 # the width of the bars fig, ax = plt.subplots() bar1 = ax.bar(x-width*0.3, np.count_nonzero(Y_train == 1, axis=0), width*0.3, label = "Train" ) bar2 = ax.bar(x , np.count_nonzero(Y_valid == 1, axis=0), width*0.3, label = "Valid" ) bar3 = ax.bar(x+width*0.3, np.count_nonzero(Y_test == 1, axis=0), width*0.3, label = "Test" ) # Add some text for labels, title and custom x-axis tick labels, etc. 
ax.set_ylabel('Distribution') ax.set_title('Distribution overview of splitted dataset') ax.set_xticks(x) ax.set_xticklabels(mods) ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=5) def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 0), # 3 points vertical offset textcoords="offset points", ha='center', va='bottom') # autolabel(bar1) # autolabel(bar2) # autolabel(bar3) # fig.tight_layout() return plt.show() def SNR_accuracy(in_snr, name): """ Computes accuracies of chosen SNRs individualy in_snr: must be list of SNRs """ acc = [] for snr in in_snr: acc_snr = snr idx_acc_snr = [] for i in range(len(test_idx)): if int(lbl_SNR[test_idx[i]]) == int(acc_snr): idx_acc_snr.append(i) acc_X_test = X_test[idx_acc_snr] # acc_X_f_test = X_f_test[idx_acc_snr] acc_Y_test = Y_test[idx_acc_snr] print('\nSNR ' + str(acc_snr) + 'dB:') accuracy_snr = model.evaluate([acc_X_test], acc_Y_test, batch_size=32, verbose=2) acc.append(accuracy_snr) acc = np.vstack(acc) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) plt.plot(SNR, (acc[:,1]*100), 'steelblue', marker='.', markersize= 15, label = name, linestyle = '-',) ax.legend(loc=4, prop={'size': 25}) x_major_ticks = np.arange(-20, 19, 2 ) ax.set_xticks(x_major_ticks) y_major_ticks = np.arange(0, 101, 10 ) y_minor_ticks = np.arange(0, 101, 2) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.tick_params(axis='both', which='major', labelsize=20) ax.grid(which='both',color='lightgray', linestyle='-') ax.grid(which='minor', alpha=0.2) ax.grid(which='major', alpha=0.5) plt.xlim(-20, 18) plt.ylim(0,100) plt.title("Classification Accuracy",fontsize=20) plt.ylabel('Accuracy (%)',fontsize=20) plt.xlabel('SNR (dB)',fontsize=20) # plt.savefig(base_dir + name + '.png') plt.show() return acc[:,1] ``` ## 
Functions for visualization of layers ``` def layer_overview(model): """ Offers overview of the model's layers and theirs outputs model: specify trained model you want to have overview of """ # Names and outputs from layers layer_names = [layer.name for layer in model.layers] layer_outputs = [layer.output for layer in model.layers[:]] return layer_names, layer_outputs def model_visualization(nth_layer, nth_test_idx, mods, model, plot_sample = False, plot_activations = True, plot_feature_maps = True): """ The function provised overview of activation of specific layer and its feature maps. nth_layer: enter number which corresponds with the position of wanted layer nth_test_idx: enter number pointing at the test indexes from earlier mods: provide variable which holds listed modulations model: specify which trained model to load plot_sample = False: set to true to plot sample data plot_activations = True: plots activation of chosen layer plot_feature_maps = True: plots feature map of chosen layer """ # Sample data for visualization test_sample = X_test[nth_test_idx,:,:] # shape [128,2] test_sample = test_sample[None] # change to needed [1,128,2] SNR = lbl_SNR[test_idx[nth_test_idx]] mod = one_hot[test_idx[nth_test_idx]] f, u = factorize(mods) mod = mod.dot(u) # Names and outputs from layers layer_names = [layer.name for layer in model.layers] layer_outputs = [layer.output for layer in model.layers[:]] ## Activations ## # define activation model activation_model = tf.keras.models.Model(model.input, layer_outputs) # get the activations of chosen test sample activations = activation_model.predict(test_sample) ## Feature-maps ## # define feature maps model feature_maps_model = tf.keras.models.Model(model.inputs, model.layers[4].output) # get the activated features feature_maps = feature_maps_model.predict(test_sample) # Plot sample if plot_sample == True: plt.plot(test_sample[0,:,:]) plt.title(mod + ' ' + str(SNR) + 'dB') plt.show() # Plot activations if plot_activations 
== True: activation_layer = activations[nth_layer] activation_layer = np.transpose(activation_layer[:,:,:],(0,2,1)) # reshape fig, ax = plt.subplots(figsize=(20,10)) ax.matshow(activation_layer[0,:,:], cmap='viridis') # plt.matshow(activation_layer[0,:,:], cmap='viridis') plt.title('Activation of layer ' + layer_names[nth_layer]) ax.grid(False) ax.set_xlabel('Lenght of sequence') ax.set_ylabel('Filters') fig.show() plt.savefig(base_dir + 'activations.png') plt.savefig(base_dir + 'activations.svg') # Plot feature maps if plot_feature_maps == True: n_filters = int(feature_maps.shape[2]/2); ix = 1 fig = plt.figure(figsize=(25,15)) for _ in range(n_filters): for _ in range(2): # specify subplot and turn of axis ax =fig.add_subplot(n_filters, 5, ix) # ax = plt.subplot(n_filters, 5, ix, ) ax.set_xticks([]) ax.set_yticks([]) # plot filter channel in grayscale ax.plot(feature_maps[0, :, ix-1]) ix += 1 # show the figure fig.show() plt.savefig(base_dir + 'feature_map.png') plt.savefig(base_dir + 'feature_map.svg') ``` ## Transformer ``` def position_encoding_init(n_position, emb_dim): ''' Init the sinusoid position encoding table ''' # keep dim 0 for padding token position encoding zero vector position_enc = np.array([ [pos / np.power(10000, 2 * (j // 2) / emb_dim) for j in range(emb_dim)] if pos != 0 else np.zeros(emb_dim) for pos in range(n_position)]) position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1 return position_enc # Transformer Block class TransformerBlock(layers.Layer): def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1): super(TransformerBlock, self).__init__() self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim) self.ffn = keras.Sequential( [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),] ) self.layernorm1 = layers.LayerNormalization(epsilon=1e-6) self.layernorm2 = layers.LayerNormalization(epsilon=1e-6) self.dropout1 = 
layers.Dropout(rate) self.dropout2 = layers.Dropout(rate) def call(self, inputs, training): attn_output = self.att(inputs, inputs) attn_output = self.dropout1(attn_output, training=training) out1 = self.layernorm1(inputs + attn_output) ffn_output = self.ffn(out1) ffn_output = self.dropout2(ffn_output, training=training) return self.layernorm2(out1 + ffn_output) ``` # Access the datasets With the following cells, you can easily access the datasets. However, if you end up using them for your work, do not forget to credit the original authors! More info is provided for each of them below. ``` # Uncomment the following line, if needed, to download the datasets # !conda install -y gdown ``` ## RadioML Datasets * O'shea, Timothy J., and Nathan West. "Radio machine learning dataset generation with gnu radio." Proceedings of the GNU Radio Conference. Vol. 1. No. 1. 2016. * The datasets are available at: https://www.deepsig.ai/datasets * All datasets provided by Deepsig Inc. are licensed under the Creative Commons Attribution - [NonCommercial - ShareAlike 4.0 License (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/). 
Both datasets are left unchanged, however, the RadioML2016.10b version is not stored as the original data, but is already splitted into X and labels ``` # RadioML2016.10a stored as the original pkl file !gdown --id 1aus-u2xSKETW9Yv5Q-QG9tz9Xnbj5yHV dataset_pkl = open('RML2016.10a_dict.pkl','rb') RML_dataset_location = pickle.load(dataset_pkl, encoding='bytes') # RadioML2016.10b stored in X.pkl and label.pkl !gdown --id 10OdxNvtSbOm58t-MMHZcmSMqzEWDSpAr !gdown --id 1-MvVKNmTfqyfYD_usvAfEcizzBX0eEpE RMLb_X_data_file = open('X.pkl','rb') RMLb_labels_file = open('labels.pkl', 'rb') RMLb_X = pickle.load(RMLb_X_data_file, encoding='bytes') RMLb_lbl = pickle.load(RMLb_labels_file, encoding='ascii') ``` ## Migou-Mod Dataset * Utrilla, Ramiro (2020), “MIGOU-MOD: A dataset of modulated radio signals acquired with MIGOU, a low-power IoT experimental platform”, Mendeley Data, V1, doi: 10.17632/fkwr8mzndr.1 * The dataset is available at: https://data.mendeley.com/datasets/fkwr8mzndr/1 * The dataset is licensed under the Creative Commons Attribution - [NonCommercial - ShareAlike 4.0 License (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/). The following version of the dataset contain only a fraction of the original samples (550,000 samples compared to 8.8 million samples in the original dataset) ``` # Migou-Mod Dataset - 550,000 samples !gdown --id 1-CIL3bD4o9ylBkD0VZkGd5n1-8_RTRvs MIGOU_dataset_pkl = open('dataset_25.pkl','rb') MIGOU_dataset_location = pickle.load(MIGOU_dataset_pkl, encoding='bytes') ``` ## VUT Dataset This dataset was generated in MATLAB with 1000 samples per SNR value and each modulation type. It includes three QAM modulation schemes and further OFDM, GFDM, and FBMC modulations which are not included in previous datasets. To mimic the RadioML dataset, the data are represented as 2x128 vectors of I/Q signals in the SNR range from -20 dB to 18 dB. 
``` # VUT Dataset !gdown --id 1G5WsgUze8qfuSzy6Edg_4qRIiAx_YUc4 VUT_dataset_location = 'NEW_Dataset_05_02_2021.mat' ``` # Load the data ## VUT Dataset ``` SNR, X, mods, one_hot, lbl_SNR = load_VUT_dataset(VUT_dataset_location) train_idx, valid_idx, test_idx, X_train, X_valid, X_test, Y_train, Y_valid, Y_test = train_test_valid_split(X, one_hot, train_split=0.7, valid_split=0.15, test_split=0.15) plot_split_distribution(mods, Y_train, Y_valid, Y_test) ``` ## DeepSig Dataset ``` # 10a # SNR, X, modulations, one_hot, lbl_SNR = load_dataset(RML_dataset_location) # 10b SNR, X, modulations, one_hot, lbl_SNR = load_RMLb_dataset(RMLb_X, RMLb_lbl) mods = [] for i in range(len(modulations)): modu = modulations[i].decode('utf-8') mods.append(modu) train_idx, valid_idx, test_idx, X_train, X_valid, X_test, Y_train, Y_valid, Y_test = train_test_valid_split(X, one_hot, train_split=0.7, valid_split=0.15, test_split=0.15) plot_split_distribution(mods, Y_train, Y_valid, Y_test) # X_train, X_valid, X_test = normalize_data(X_train, X_valid, X_test) ``` ## MIGOU-MOD ``` SNR, X, mods, one_hot, lbl_SNR = load_dataset(MIGOU_dataset_location) train_idx, valid_idx, test_idx, X_train, X_valid, X_test, Y_train, Y_valid, Y_test = train_test_valid_split(X, one_hot, train_split=0.7, valid_split=0.15, test_split=0.15) plot_split_distribution(mods, Y_train, Y_test, Y_test) ``` # Architectures for training ## CNN ``` cnn_in = keras.layers.Input(shape=(128,2)) cnn = keras.layers.ZeroPadding1D(padding=4)(cnn_in) cnn = keras.layers.Conv1D(filters=50, kernel_size=8, activation='relu')(cnn) cnn = keras.layers.MaxPool1D(pool_size=2)(cnn) cnn = keras.layers.Conv1D(filters=50, kernel_size=8, activation='relu')(cnn) cnn = keras.layers.MaxPool1D(pool_size=2)(cnn) cnn = keras.layers.Conv1D(filters=50, kernel_size=4, activation='relu')(cnn) cnn = keras.layers.Dropout(rate=0.6)(cnn) cnn = keras.layers.MaxPool1D(pool_size=2)(cnn) cnn = keras.layers.Flatten()(cnn) cnn = keras.layers.Dense(70, 
activation='selu')(cnn) cnn_out = keras.layers.Dense(len(mods), activation='softmax')(cnn) model_cnn = keras.models.Model(cnn_in, cnn_out) callbacks = [ keras.callbacks.ModelCheckpoint( "cnn_model.h5", save_best_only=True, monitor="val_loss"), keras.callbacks.ReduceLROnPlateau( monitor="val_loss", factor=0.3, patience=3, min_lr=0.00007), keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, verbose=1)] optimizer = keras.optimizers.Adam(learning_rate=0.0007) model_cnn.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # model_cldnn.summary() tf.keras.backend.clear_session() history = model_cnn.fit(X_train, Y_train, batch_size=128, epochs=4, verbose=2, validation_data= (X_valid, Y_valid), callbacks=callbacks) model = keras.models.load_model("cnn_model.h5") test_loss, test_acc = model.evaluate(X_test, Y_test) print("Test accuracy", test_acc) print("Test loss", test_loss) SNR_accuracy(SNR, 'CNN') ``` ## CLDNN ``` layer_in = keras.layers.Input(shape=(128,2)) layer = keras.layers.Conv1D(filters=64, kernel_size=8, activation='relu')(layer_in) layer = keras.layers.MaxPool1D(pool_size=2)(layer) layer = keras.layers.LSTM(64, return_sequences=True,)(layer) layer = keras.layers.Dropout(0.4)(layer) layer = keras.layers.LSTM(64, return_sequences=True,)(layer) layer = keras.layers.Dropout(0.4)(layer) layer = keras.layers.Flatten()(layer) layer_out = keras.layers.Dense(len(mods), activation='softmax')(layer) model_cldnn = keras.models.Model(layer_in, layer_out) optimizer = keras.optimizers.Adam(learning_rate=0.0007) callbacks = [ keras.callbacks.ModelCheckpoint( "cldnn_model.h5", save_best_only=True, monitor="val_loss"), keras.callbacks.ReduceLROnPlateau( monitor="val_loss", factor=0.4, patience=5, min_lr=0.000007), keras.callbacks.EarlyStopping(monitor="val_loss", patience=8, verbose=1)] model_cldnn.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # model_cldnn.summary() tf.keras.backend.clear_session() 
history = model_cldnn.fit(X_train, Y_train, batch_size=128, epochs=100, verbose=2, validation_data= (X_valid, Y_valid), callbacks=callbacks) # history = model_iq.fit(X_train, Y_train, batch_size=128, epochs=100, verbose=2, validation_split=0.15, callbacks=callbacks) model = keras.models.load_model("cldnn_model.h5") test_loss, test_acc = model.evaluate(X_test, Y_test) print("Test accuracy", test_acc) print("Test loss", test_loss) SNR_accuracy(SNR, 'CLDNN') ``` ## GGDNN ``` layer_in = keras.layers.Input(shape=(128,2)) layer = keras.layers.Conv1D(filters=80, kernel_size=(12), activation='relu')(layer_in) layer = keras.layers.MaxPool1D(pool_size=(2))(layer) layer = keras.layers.GRU(40, return_sequences=True)(layer) layer = keras.layers.GaussianDropout(0.4)(layer) layer = keras.layers.GRU(40, return_sequences=True)(layer) layer = keras.layers.GaussianDropout(0.4)(layer) layer = keras.layers.Flatten()(layer) layer_out = keras.layers.Dense(10, activation='softmax')(layer) model_CGDNN = keras.models.Model(layer_in, layer_out) optimizer = keras.optimizers.Adam(learning_rate=0.002) callbacks = [ keras.callbacks.ModelCheckpoint( "cgdnn_model.h5", save_best_only=True, monitor="val_loss"), keras.callbacks.ReduceLROnPlateau( monitor="val_loss", factor=0.4, patience=4, min_lr=0.000007), keras.callbacks.EarlyStopping(monitor="val_loss", patience=10, verbose=1)] model_CGDNN.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # model_CGDNN.summary() tf.keras.backend.clear_session() history = model_CGDNN.fit(X_train, Y_train, batch_size=128, epochs=100, verbose=2, validation_data=(X_valid,Y_valid), callbacks=callbacks) model = keras.model_CGDNN.load_model("cgdnn_model.h5") test_loss, test_acc = model.evaluate(X_test, Y_test) print("Test accuracy", test_acc) print("Test loss", test_loss) SNR_accuracy(SNR, 'CLGDNN') ``` ## MCTransformer ``` embed_dim = 64 # Embedding size for each token num_heads = 4 # Number of attention heads ff_dim = 16 # Hidden layer 
size in feed forward network inside transformer inputs = keras.layers.Input(shape=(128,2)) x = keras.layers.Conv1D(filters=embed_dim, kernel_size=8, activation='relu')(inputs) x = keras.layers.MaxPool1D(pool_size=2)(x) x = keras.layers.LSTM(embed_dim, return_sequences=True,)(x) x = keras.layers.Dropout(0.4)(x) pos_emb = position_encoding_init(60,64) x_pos = x+pos_emb transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim) x = transformer_block(x_pos) x = layers.GlobalAveragePooling1D()(x) x = layers.Dropout(0.1)(x) x = layers.Dense(20, activation="relu")(x) x = layers.Dropout(0.1)(x) outputs = layers.Dense(len(mods), activation="softmax")(x) model_MCT = keras.Model(inputs=inputs, outputs=outputs) # model_MCT.summary() optimizer = keras.optimizers.SGD(learning_rate=0.03) model_MCT.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) history = model_MCT.fit(X_train, Y_train, batch_size=16, epochs=20, validation_data= (X_valid, Y_valid)) ``` Uncomment and lower the learning rate, if the validation loss doesn't improve. 
``` # optimizer = keras.optimizers.SGD(learning_rate=0.01) # model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # history = model.fit(X_train, Y_train, batch_size=16, epochs=10, validation_data= (X_valid, Y_valid)) # optimizer = keras.optimizers.SGD(learning_rate=0.005) # model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # history = model.fit(X_train, Y_train, batch_size=16, epochs=10, validation_data= (X_valid, Y_valid)) # optimizer = keras.optimizers.SGD(learning_rate=0.001) # model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # history = model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_data= (X_valid, Y_valid)) test_loss, test_acc = model_MCT.evaluate(X_test, Y_test) print("Test accuracy", test_acc) print("Test loss", test_loss) SNR_accuracy(SNR, 'MCT') ``` # Load saved CGDNN models Download the models ``` # # RadioML2016.10a # !gdown --id 1h0iVzR0qEPEwcUEPKM3hBGF46uXQEs_l # # RadioML2016.10b # !gdown --id 1XCPOHF8ZeSC61qR1hrFKhgUxPHbpHg6R # # Migou-Mod Dataset # !gdown --id 1s4Uz5KlkLVO9lQyrJwVTW_754RNkigoC # # VUT Dataset # !gdown --id 1DWr1uDzz7m7rEfcKWXZXJpJ692EC0vBw ``` Uncomment wanted model Don't forget you also need to load the right dataset before predicting ``` # RadioML2016.10a # model = tf.keras.models.load_model("cgd_model_10a.h5") # RadioML2016.10b # model = tf.keras.models.load_model("cgd_model_10b.h5") # Migou-Mod Dataset # model = tf.keras.models.load_model("CGD_MIGOU.h5") # VUT Dataset # model = tf.keras.models.load_model("CGD_VUT.h5") # model.summary() # prediction = model.predict([X_test[:,:,:]]) # Y_Pred = []; Y_Test = []; Y_Pred_SNR = []; Y_Test_SNR = []; # for i in range(len(prediction[:,0])): # Y_Pred.append(np.argmax(prediction[i,:])) # Y_Test.append(np.argmax(Y_test[i])) # Y_Pred[:20], Y_Test[:20] ``` # Visualize activation and feature map ``` model_visualization(1,9000, mods, model) ``` # Plot Confusion 
Matrix ``` All_SNR_show_confusion_matrix([X_test], save=False) SNR_show_confusion_matrix(mods, lbl_SNR[:], X_test, save=False) ```
github_jupyter
``` ## Hash Table ``` ### 1_TWO SUM ``` # interesting way is dictionary comprehension dict = dict((key, value) for (key, value) in iterable) dict = dict(key:value for key in iterable) # two way hash: # remeber the number sum combinations def twoSum( nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ num_dict={} #t_minus_n = [target - i for i in nums] if nums.count(target/2)==2: result = [i for i,j in enumerate(nums) if j==target/2] else: comb = list(zip(nums,[target - i for i in nums])) #(1,2)!=(2,1) so we can use to see if both 2,1 in the nums list # like the parentheses question print(comb) for num in comb: # tuple can be hashed(used as keys), not list if num[0]!=num[1]: num_dict[num]=num_dict.get(num,0)+1 result = [i for i,j in enumerate(comb) if num_dict.get(j[::-1],0)==1] # need to check if they have opposit # only can enumerate give all the index return result # one pass hash - keeping remeber the position, until onece trace back def twoSum( nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ num_dict = {} for key,value in enumerate(nums): com = target - value print(com) print(num_dict) if num_dict.__contains__(com): print(num_dict) return [num_dict.get(com), key] # at this time key is the current key # the next time would be a trigger to extract the previous one num_dict[value] = key # if not exist put down the position def twoSum(self, nums, target): num_dict={} for pos, num in enumerate(nums): com = target - num if num_dict.get(com,None)!=None: return [num_dict[com],pos] num_dict[num]=pos ``` ### 136._SingleNum ``` # using set def singleNumber(nums): return 2*sum(set(nums))-sum(nums) lists=[4,1,2,1,2] singleNumber(lists) def singleNumber2(nums): res = 0 for num in nums: res = res^ num print(res) return res # https://stackoverflow.com/questions/14526584/what-does-the-xor-operator-do # the number who are not single would be take out later as a mask # only left the single number singleNumber2(nums) 
``` ### xor ^ is the Python bitwise XOR operator. XOR stands for exclusive OR. It is used in cryptography because it let's you 'flip' the bits using a mask in a reversable operation: ``` print(10 ^ 5) print(15 ^ 5) ``` where 5 is the mask; (input XOR mask) **XOR mask gives you the input again**. ### 217._ contains duplicate ``` class Solution: def containsDuplicate(self, nums): """ :type nums: List[int] :rtype: bool """ num_dict={} for num in nums: if num in num_dict: return True else: num_dict[num]=1 return False class Solution: def containsDuplicate(self, nums): """ :type nums: List[int] :rtype: bool """ return len(set(nums))!= len(nums) ``` ### 219._ contains duplicate II This one has some misleading message. The closest numbers should have the distance less or equal than k. Not all the distances between duplicates should at most k ``` def containsNearbyDuplicate(self, nums, k): """ :type nums: List[int] :type k: int :rtype: bool """ if nums==[]: return False if len(nums)==len(set(nums)): return False num_dict={i:[] for i in nums} min_dist= len(nums) for i in range(0,len(nums)): key= nums[i] num_dict[key].append(i) if len(num_dict[key])>1: min_dist=min(min_dist,(num_dict[key])[-1]-num_dict[key][-2]) return min_dist<=k ``` The first solution has a big disadvantage: 1. Need to create a dict with all the key into it, not necessary 2. we only need check dist(two close duplicated number)<=k, So we don't actually need remeber the failed index, we just need to calculate distance with the nearest one. 
So I did 2nd: ``` def containsNearbyDuplicate(nums, k): """ :type nums: List[int] :type k: int :rtype: bool """ if nums==[]: return False if len(nums)==len(set(nums)): return False num_dict={} for i in range(0,len(nums)): key= nums[i] if (key in num_dict) and (i -num_dict[key]<=k): return True else: num_dict[key] = i # only remeber the lastest one return False containsNearbyDuplicate([1,2,3,1,1], 1) ``` For solution 2, we don't need to retrive all the num[i], we can use enumerate, don't need else, return will break it: ``` def containsNearbyDuplicate(nums, k): """ :type nums: List[int] :type k: int :rtype: bool """ if nums==[] or len(nums)==len(set(nums)) or k<0: return False # use the condition to eliminate all unexpect situation num_dict={} for i,num in enumerate(nums): if (num in num_dict) and (i -num_dict[num]<=k): return True num_dict[num] = i # only remeber the lastest one return False ``` Truns out mine(solution 1) is faster..., reduced the loop key. So I modified my solution 1 to, make it simpler and elegant: ``` def containsNearbyDuplicate(self, nums, k): """ :type nums: List[int] :type k: int :rtype: bool """ if nums==[] or len(nums)==len(set(nums)) or k<0: return False num_dict={i:[] for i in nums} min_dist= len(nums) for i in range(0,len(nums)): key= nums[i] num_dict[key].append(i) if len(num_dict[key])>1: min_dist=min(min_dist,(num_dict[key])[-1]-num_dict[key][-2]) return min_dist<=k ``` ### 242._ Valid Anagram ``` # using array/set def isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ if len(s)!=len(t): return False if set(s)!=set(t): return False return sorted(s)==sorted(t) # using hash table1 def isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ if len(s)!=len(t): return False if set(s)!=set(t): return False s_dict,t_dict={},{} for char in s: s_dict[char] = s_dict.get(char,0)+1 for char in t: t_dict[char] = t_dict.get(char,0)+1 return s_dict==t_dict # using hash table2 - not quite fast.. 
since we rewrite the same dict def isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ if len(s)!=len(t) or set(s)!=set(t): return False char_dict = {} for char in s: char_dict [char] = char_dict.get(char,0)+1 for char in t: char_dict[char] = char_dict[char]-1 return all(value == 0 for value in char_dict.values()) ## Turns out the best solution using ... count... so I tried two: def isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ return all(s.count(c) == t.count(c) for c in "abcdefghijklmnopqrstuvwxyz") ## give an early break if count doesn't match def isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ l = "abcdefghijklmnopqrstuvwxyz" for c in l: if s.count(c) != t.count(c): return False return True ``` ### all(iterable) The all() method returns: True - If all elements in an iterable are true False - If any element in an iterable is false https://stackoverflow.com/questions/35253971/how-to-check-if-all-values-of-a-dictionary-are-0-in-python ### str.count("substr")
github_jupyter
``` %reload_ext autoreload %autoreload 2 from fastai.gen_doc.gen_notebooks import * from pathlib import Path ``` ### To update this notebook Run `tools/sgen_notebooks.py Or run below: You need to make sure to refresh right after ``` import glob for f in Path().glob('*.ipynb'): generate_missing_metadata(f) ``` # Metadata generated below ``` update_nb_metadata('torch_core.ipynb', summary='Basic functions using pytorch', title='torch_core') update_nb_metadata('gen_doc.convert2html.ipynb', summary='Converting the documentation notebooks to HTML pages', title='gen_doc.convert2html') update_nb_metadata('metrics.ipynb', summary='Useful metrics for training', title='metrics') update_nb_metadata('callbacks.fp16.ipynb', summary='Training in mixed precision implementation', title='callback.fp16') update_nb_metadata('callbacks.general_sched.ipynb', summary='Implementation of a flexible training API', title='callbacks.general_sched') update_nb_metadata('text.ipynb', keywords='fastai', summary='Application to NLP, including ULMFiT fine-tuning', title='text') update_nb_metadata('callback.ipynb', summary='Implementation of the callback system', title='callback') update_nb_metadata('tabular.models.ipynb', keywords='fastai', summary='Model for training tabular/structured data', title='tabular.model') update_nb_metadata('callbacks.mixup.ipynb', summary='Implementation of mixup', title='callbacks.mixup') update_nb_metadata('applications.ipynb', summary='Types of problems you can apply the fastai library to', title='applications') update_nb_metadata('vision.data.ipynb', summary='Basic dataset for computer vision and helper function to get a DataBunch', title='vision') update_nb_metadata('overview.ipynb', summary='Overview of the core modules', title='overview') update_nb_metadata('training.ipynb', keywords='fastai', summary='Overview of fastai training modules, including Learner, metrics, and callbacks', title='training') update_nb_metadata('text.transform.ipynb', summary='NLP data 
processing; tokenizes text and creates vocab indexes', title='text.transform') update_nb_metadata('jekyll_metadata.ipynb') update_nb_metadata('collab.ipynb', summary='Application to collaborative filtering', title='collab') update_nb_metadata('text.learner.ipynb', summary='Easy access of language models and ULMFiT', title='text.learner') update_nb_metadata('gen_doc.nbdoc.ipynb', summary='Helper function to build the documentation', title='gen_doc.nbdoc') update_nb_metadata('vision.learner.ipynb', summary='`Learner` support for computer vision', title='vision.learner') update_nb_metadata('core.ipynb', summary='Basic helper functions for the fastai library', title='core') update_nb_metadata('fastai_typing.ipynb', keywords='fastai', summary='Type annotations names', title='fastai_typing') update_nb_metadata('gen_doc.gen_notebooks.ipynb', summary='Generation of documentation notebook skeletons from python module', title='gen_doc.gen_notebooks') update_nb_metadata('basic_train.ipynb', summary='Learner class and training loop', title='basic_train') update_nb_metadata('gen_doc.ipynb', keywords='fastai', summary='Documentation modules overview', title='gen_doc') update_nb_metadata('callbacks.rnn.ipynb', summary='Implementation of a callback for RNN training', title='callbacks.rnn') update_nb_metadata('callbacks.one_cycle.ipynb', summary='Implementation of the 1cycle policy', title='callbacks.one_cycle') update_nb_metadata('tta.ipynb', summary='Module brings TTA (Test Time Functionality) to the `Learner` class. 
Use `learner.TTA()` instead', title='tta') update_nb_metadata('vision.ipynb', summary='Application to Computer Vision', title='vision') update_nb_metadata('vision.transform.ipynb', summary='List of transforms for data augmentation in CV', title='vision.transform') update_nb_metadata('callbacks.lr_finder.ipynb', summary='Implementation of the LR Range test from Leslie Smith', title='callbacks.lr_finder') update_nb_metadata('text.data.ipynb', summary='Basic dataset for NLP tasks and helper functions to create a DataBunch', title='text.data') update_nb_metadata('text.models.ipynb', summary='Implementation of the AWD-LSTM and the RNN models', title='text.models') update_nb_metadata('tabular.data.ipynb', summary='Base class to deal with tabular data and get a DataBunch', title='tabular.data') update_nb_metadata('callbacks.ipynb', keywords='fastai', summary='Callbacks implemented in the fastai library', title='callbacks') update_nb_metadata('train.ipynb', summary='Extensions to Learner that easily implement Callback', title='train') update_nb_metadata('callbacks.hooks.ipynb', summary='Implement callbacks using hooks', title='callbacks.hooks') update_nb_metadata('text.models.qrnn.ipynb') update_nb_metadata('vision.image.ipynb', summary='Image class, variants and internal data augmentation pipeline', title='vision.image') update_nb_metadata('vision.models.unet.ipynb', summary='Dynamic Unet that can use any pretrained model as a backbone.', title='vision.models.unet') update_nb_metadata('vision.models.ipynb', keywords='fastai', summary='Overview of the models used for CV in fastai', title='vision.models') update_nb_metadata('gen_doc.sgen_notebooks.ipynb', keywords='fastai', summary='Script to generate notebooks and update html', title='gen_doc.sgen_notebooks') update_nb_metadata('tabular.transform.ipynb', summary='Transforms to clean and preprocess tabular data', title='tabular.transform') update_nb_metadata('data.ipynb', summary='Basic classes to contain the data for model 
training.', title='data') update_nb_metadata('index.ipynb', keywords='fastai', toc='false') update_nb_metadata('layers.ipynb', summary='Provides essential functions to building and modifying `Model` architectures.', title='layers') update_nb_metadata('tabular.ipynb', keywords='fastai', summary='Application to tabular/structured data', title='tabular') update_nb_metadata('tmp.ipynb') update_nb_metadata('Untitled.ipynb') ```
github_jupyter
# Building your Recurrent Neural Network - Step by Step Welcome to Course 5's first assignment! In this assignment, you will implement key components of a Recurrent Neural Network in numpy. Recurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have "memory". They can read inputs $x^{\langle t \rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a unidirectional RNN to take information from the past to process later inputs. A bidirectional RNN can take context from both the past and the future. **Notation**: - Superscript $[l]$ denotes an object associated with the $l^{th}$ layer. - Superscript $(i)$ denotes an object associated with the $i^{th}$ example. - Superscript $\langle t \rangle$ denotes an object at the $t^{th}$ time-step. - **Sub**script $i$ denotes the $i^{th}$ entry of a vector. Example: - $a^{(2)[3]<4>}_5$ denotes the activation of the 2nd training example (2), 3rd layer [3], 4th time step <4>, and 5th entry in the vector. #### Pre-requisites * We assume that you are already familiar with `numpy`. * To refresh your knowledge of numpy, you can review course 1 of this specialization "Neural Networks and Deep Learning". * Specifically, review the week 2 assignment ["Python Basics with numpy (optional)"](https://www.coursera.org/learn/neural-networks-deep-learning/item/Zh0CU). #### Be careful when modifying the starter code * When working on graded functions, please remember to only modify the code that is between the ```Python #### START CODE HERE ``` and ```Python #### END CODE HERE ``` * In particular, Be careful to not modify the first line of graded routines. These start with: ```Python # GRADED FUNCTION: routine_name ``` * The automatic grader (autograder) needs these to locate the function. * Even a change in spacing will cause issues with the autograder. 
* It will return 'failed' if these are modified or missing.
In the lessons, we saw that a single training example $x^{(i)}$ consists of multiple time steps $T_x$.
* $n_{y}$: number of units in the vector representing the prediction. * $m$: number of examples in a mini-batch. * $T_{y}$: number of time steps in the prediction. * For a single time step $t$, a 2D slice $\hat{y}^{\langle t \rangle}$ has shape $(n_{y}, m)$. * In the code, the variable names are: - `y_pred`: $\hat{y}$ - `yt_pred`: $\hat{y}^{\langle t \rangle}$ Here's how you can implement an RNN: **Steps**: 1. Implement the calculations needed for one time-step of the RNN. 2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time. ## 1.1 - RNN cell A recurrent neural network can be seen as the repeated use of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell. <img src="images/rnn_step_forward_figure2_v3a.png" style="width:700px;height:300px;"> <caption><center> **Figure 2**: Basic RNN cell. Takes as input $x^{\langle t \rangle}$ (current input) and $a^{\langle t - 1\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\langle t \rangle}$ which is given to the next RNN cell and also used to predict $\hat{y}^{\langle t \rangle}$ </center></caption> #### rnn cell versus rnn_cell_forward * Note that an RNN cell outputs the hidden state $a^{\langle t \rangle}$. * The rnn cell is shown in the figure as the inner box which has solid lines. * The function that we will implement, `rnn_cell_forward`, also calculates the prediction $\hat{y}^{\langle t \rangle}$ * The rnn_cell_forward is shown in the figure as the outer box that has dashed lines. **Exercise**: Implement the RNN-cell described in Figure (2). **Instructions**: 1. Compute the hidden state with tanh activation: $a^{\langle t \rangle} = \tanh(W_{aa} a^{\langle t-1 \rangle} + W_{ax} x^{\langle t \rangle} + b_a)$. 2. 
Using your new hidden state $a^{\langle t \rangle}$, compute the prediction $\hat{y}^{\langle t \rangle} = softmax(W_{ya} a^{\langle t \rangle} + b_y)$. We provided the function `softmax`. 3. Store $(a^{\langle t \rangle}, a^{\langle t-1 \rangle}, x^{\langle t \rangle}, parameters)$ in a `cache`. 4. Return $a^{\langle t \rangle}$ , $\hat{y}^{\langle t \rangle}$ and `cache` #### Additional Hints * [numpy.tanh](https://www.google.com/search?q=numpy+tanh&rlz=1C5CHFA_enUS854US855&oq=numpy+tanh&aqs=chrome..69i57j0l5.1340j0j7&sourceid=chrome&ie=UTF-8) * We've created a `softmax` function that you can use. It is located in the file 'rnn_utils.py' and has been imported. * For matrix multiplication, use [numpy.dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) ``` # GRADED FUNCTION: rnn_cell_forward def rnn_cell_forward(xt, a_prev, parameters): """ Implements a single forward step of the RNN-cell as described in Figure (2) Arguments: xt -- your input data at timestep "t", numpy array of shape (n_x, m). 
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m) parameters -- python dictionary containing: Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x) Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a) Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a) ba -- Bias, numpy array of shape (n_a, 1) by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1) Returns: a_next -- next hidden state, of shape (n_a, m) yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m) cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters) """ # Retrieve parameters from "parameters" Wax = parameters["Wax"] Waa = parameters["Waa"] Wya = parameters["Wya"] ba = parameters["ba"] by = parameters["by"] ### START CODE HERE ### (≈2 lines) # compute next activation state using the formula given above a_next = None # compute output of the current cell using the formula given above yt_pred = None ### END CODE HERE ### # store values you need for backward propagation in cache cache = (a_next, a_prev, xt, parameters) return a_next, yt_pred, cache np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, yt_pred_tmp, cache_tmp = rnn_cell_forward(xt_tmp, a_prev_tmp, parameters_tmp) print("a_next[4] = \n", a_next_tmp[4]) print("a_next.shape = \n", a_next_tmp.shape) print("yt_pred[1] =\n", yt_pred_tmp[1]) print("yt_pred.shape = \n", yt_pred_tmp.shape) ``` **Expected Output**: ```Python a_next[4] = [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978 -0.18887155 0.99815551 0.6531151 0.82872037] a_next.shape = (5, 10) 
yt_pred[1] = [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212 0.36920224 0.9966312 0.9982559 0.17746526] yt_pred.shape = (2, 10) ``` ## 1.2 - RNN forward pass - A recurrent neural network (RNN) is a repetition of the RNN cell that you've just built. - If your input sequence of data is 10 time steps long, then you will re-use the RNN cell 10 times. - Each cell takes two inputs at each time step: - $a^{\langle t-1 \rangle}$: The hidden state from the previous cell. - $x^{\langle t \rangle}$: The current time-step's input data. - It has two outputs at each time step: - A hidden state ($a^{\langle t \rangle}$) - A prediction ($y^{\langle t \rangle}$) - The weights and biases $(W_{aa}, b_{a}, W_{ax}, b_{x})$ are re-used each time step. - They are maintained between calls to rnn_cell_forward in the 'parameters' dictionary. <img src="images/rnn_forward_sequence_figure3_v3a.png" style="width:800px;height:180px;"> <caption><center> **Figure 3**: Basic RNN. The input sequence $x = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$. </center></caption> **Exercise**: Code the forward propagation of the RNN described in Figure (3). **Instructions**: * Create a 3D array of zeros, $a$ of shape $(n_{a}, m, T_{x})$ that will store all the hidden states computed by the RNN. * Create a 3D array of zeros, $\hat{y}$, of shape $(n_{y}, m, T_{x})$ that will store the predictions. - Note that in this case, $T_{y} = T_{x}$ (the prediction and input have the same number of time steps). * Initialize the 2D hidden state `a_next` by setting it equal to the initial hidden state, $a_{0}$. * At each time step $t$: - Get $x^{\langle t \rangle}$, which is a 2D slice of $x$ for a single time step $t$. 
# GRADED FUNCTION: rnn_forward

def rnn_forward(x, a0, parameters):
    """
    Implement the forward propagation of the recurrent neural network described in Figure (3).

    Arguments:
    x -- Input data for every time-step, of shape (n_x, m, T_x).
    a0 -- Initial hidden state, of shape (n_a, m)
    parameters -- python dictionary containing:
                  Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
                  Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
                  Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
                  ba -- Bias numpy array of shape (n_a, 1)
                  by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)

    Returns:
    a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
    y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
    caches -- tuple of values needed for the backward pass, contains (list of caches, x)
    """
    # Initialize "caches" which will contain the list of all caches
    caches = []

    # Retrieve dimensions from shapes of x and parameters["Wya"]
    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wya"].shape

    # initialize "a" and "y_pred" with zeros
    a = np.zeros((n_a, m, T_x))
    y_pred = np.zeros((n_y, m, T_x))

    # Initialize a_next with the initial hidden state a0
    a_next = a0

    # loop over all time-steps of the input 'x'
    for t in range(T_x):
        # 2D slice of x at time step t, shape (n_x, m)
        xt = x[:, :, t]
        # one cell step: update hidden state, compute the prediction, get the cache
        a_next, yt_pred, cache = rnn_cell_forward(xt, a_next, parameters)
        # save the new "next" hidden state and the prediction at position t
        a[:, :, t] = a_next
        y_pred[:, :, t] = yt_pred
        # keep the cache for backpropagation
        caches.append(cache)

    # store values needed for backward propagation
    caches = (caches, x)

    return a, y_pred, caches


# Smoke test -- requires rnn_cell_forward defined earlier in the notebook.
np.random.seed(1)
x_tmp = np.random.randn(3, 10, 4)
a0_tmp = np.random.randn(5, 10)
parameters_tmp = {}
parameters_tmp['Waa'] = np.random.randn(5, 5)
parameters_tmp['Wax'] = np.random.randn(5, 3)
parameters_tmp['Wya'] = np.random.randn(2, 5)
parameters_tmp['ba'] = np.random.randn(5, 1)
parameters_tmp['by'] = np.random.randn(2, 1)
a_tmp, y_pred_tmp, caches_tmp = rnn_forward(x_tmp, a0_tmp, parameters_tmp)
print("a[4][1] = \n", a_tmp[4][1])
print("a.shape = \n", a_tmp.shape) print("y_pred[1][3] =\n", y_pred_tmp[1][3]) print("y_pred.shape = \n", y_pred_tmp.shape) print("caches[1][1][3] =\n", caches_tmp[1][1][3]) print("len(caches) = \n", len(caches_tmp)) ``` **Expected Output**: ```Python a[4][1] = [-0.99999375 0.77911235 -0.99861469 -0.99833267] a.shape = (5, 10, 4) y_pred[1][3] = [ 0.79560373 0.86224861 0.11118257 0.81515947] y_pred.shape = (2, 10, 4) caches[1][1][3] = [-1.1425182 -0.34934272 -0.20889423 0.58662319] len(caches) = 2 ``` Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. #### Situations when this RNN will perform better: - This will work well enough for some applications, but it suffers from the vanishing gradient problems. - The RNN works best when each output $\hat{y}^{\langle t \rangle}$ can be estimated using "local" context. - "Local" context refers to information that is close to the prediction's time step $t$. - More formally, local context refers to inputs $x^{\langle t' \rangle}$ and predictions $\hat{y}^{\langle t \rangle}$ where $t'$ is close to $t$. In the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps. ## 2 - Long Short-Term Memory (LSTM) network The following figure shows the operations of an LSTM-cell. <img src="images/LSTM_figure4_v3a.png" style="width:500;height:400px;"> <caption><center> **Figure 4**: LSTM-cell. This tracks and updates a "cell state" or memory variable $c^{\langle t \rangle}$ at every time-step, which can be different from $a^{\langle t \rangle}$. Note, the $softmax^{*}$ includes a dense layer and softmax</center></caption> Similar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a "for-loop" to have it process an input with $T_x$ time-steps. 
### Overview of gates and states #### - Forget gate $\mathbf{\Gamma}_{f}$ * Let's assume we are reading words in a piece of text, and plan to use an LSTM to keep track of grammatical structures, such as whether the subject is singular ("puppy") or plural ("puppies"). * If the subject changes its state (from a singular word to a plural word), the memory of the previous state becomes outdated, so we "forget" that outdated state. * The "forget gate" is a tensor containing values that are between 0 and 1. * If a unit in the forget gate has a value close to 0, the LSTM will "forget" the stored state in the corresponding unit of the previous cell state. * If a unit in the forget gate has a value close to 1, the LSTM will mostly remember the corresponding value in the stored state. ##### Equation $$\mathbf{\Gamma}_f^{\langle t \rangle} = \sigma(\mathbf{W}_f[\mathbf{a}^{\langle t-1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_f)\tag{1} $$ ##### Explanation of the equation: * $\mathbf{W_{f}}$ contains weights that govern the forget gate's behavior. * The previous time step's hidden state $[a^{\langle t-1 \rangle}$ and current time step's input $x^{\langle t \rangle}]$ are concatenated together and multiplied by $\mathbf{W_{f}}$. * A sigmoid function is used to make each of the gate tensor's values $\mathbf{\Gamma}_f^{\langle t \rangle}$ range from 0 to 1. * The forget gate $\mathbf{\Gamma}_f^{\langle t \rangle}$ has the same dimensions as the previous cell state $c^{\langle t-1 \rangle}$. * This means that the two can be multiplied together, element-wise. * Multiplying the tensors $\mathbf{\Gamma}_f^{\langle t \rangle} * \mathbf{c}^{\langle t-1 \rangle}$ is like applying a mask over the previous cell state. * If a single value in $\mathbf{\Gamma}_f^{\langle t \rangle}$ is 0 or close to 0, then the product is close to 0. * This keeps the information stored in the corresponding unit in $\mathbf{c}^{\langle t-1 \rangle}$ from being remembered for the next time step. 
* Similarly, if one value is close to 1, the product is close to the original value in the previous cell state. * The LSTM will keep the information from the corresponding unit of $\mathbf{c}^{\langle t-1 \rangle}$, to be used in the next time step. ##### Variable names in the code The variable names in the code are similar to the equations, with slight differences. * `Wf`: forget gate weight $\mathbf{W}_{f}$ * `bf`: forget gate bias $\mathbf{b}_{f}$ * `ft`: forget gate $\Gamma_f^{\langle t \rangle}$ #### Candidate value $\tilde{\mathbf{c}}^{\langle t \rangle}$ * The candidate value is a tensor containing information from the current time step that **may** be stored in the current cell state $\mathbf{c}^{\langle t \rangle}$. * Which parts of the candidate value get passed on depends on the update gate. * The candidate value is a tensor containing values that range from -1 to 1. * The tilde "~" is used to differentiate the candidate $\tilde{\mathbf{c}}^{\langle t \rangle}$ from the cell state $\mathbf{c}^{\langle t \rangle}$. ##### Equation $$\mathbf{\tilde{c}}^{\langle t \rangle} = \tanh\left( \mathbf{W}_{c} [\mathbf{a}^{\langle t - 1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_{c} \right) \tag{3}$$ ##### Explanation of the equation * The 'tanh' function produces values between -1 and +1. ##### Variable names in the code * `cct`: candidate value $\mathbf{\tilde{c}}^{\langle t \rangle}$ #### - Update gate $\mathbf{\Gamma}_{i}$ * We use the update gate to decide what aspects of the candidate $\tilde{\mathbf{c}}^{\langle t \rangle}$ to add to the cell state $c^{\langle t \rangle}$. * The update gate decides what parts of a "candidate" tensor $\tilde{\mathbf{c}}^{\langle t \rangle}$ are passed onto the cell state $\mathbf{c}^{\langle t \rangle}$. * The update gate is a tensor containing values between 0 and 1. 
* When a unit in the update gate is close to 1, it allows the value of the candidate $\tilde{\mathbf{c}}^{\langle t \rangle}$ to be passed onto the cell state $\mathbf{c}^{\langle t \rangle}$ * When a unit in the update gate is close to 0, it prevents the corresponding value in the candidate from being passed onto the cell state. * Notice that we use the subscript "i" and not "u", to follow the convention used in the literature. ##### Equation $$\mathbf{\Gamma}_i^{\langle t \rangle} = \sigma(\mathbf{W}_i[a^{\langle t-1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_i)\tag{2} $$ ##### Explanation of the equation * Similar to the forget gate, here $\mathbf{\Gamma}_i^{\langle t \rangle}$, the sigmoid produces values between 0 and 1. * The update gate is multiplied element-wise with the candidate, and this product ($\mathbf{\Gamma}_{i}^{\langle t \rangle} * \tilde{c}^{\langle t \rangle}$) is used in determining the cell state $\mathbf{c}^{\langle t \rangle}$. ##### Variable names in code (Please note that they're different than the equations) In the code, we'll use the variable names found in the academic literature. These variables don't use "u" to denote "update". * `Wi` is the update gate weight $\mathbf{W}_i$ (not "Wu") * `bi` is the update gate bias $\mathbf{b}_i$ (not "bu") * `it` is the update gate $\mathbf{\Gamma}_i^{\langle t \rangle}$ (not "ut") #### - Cell state $\mathbf{c}^{\langle t \rangle}$ * The cell state is the "memory" that gets passed onto future time steps. * The new cell state $\mathbf{c}^{\langle t \rangle}$ is a combination of the previous cell state and the candidate value. 
##### Equation $$ \mathbf{c}^{\langle t \rangle} = \mathbf{\Gamma}_f^{\langle t \rangle}* \mathbf{c}^{\langle t-1 \rangle} + \mathbf{\Gamma}_{i}^{\langle t \rangle} *\mathbf{\tilde{c}}^{\langle t \rangle} \tag{4} $$ ##### Explanation of equation * The previous cell state $\mathbf{c}^{\langle t-1 \rangle}$ is adjusted (weighted) by the forget gate $\mathbf{\Gamma}_{f}^{\langle t \rangle}$ * and the candidate value $\tilde{\mathbf{c}}^{\langle t \rangle}$, adjusted (weighted) by the update gate $\mathbf{\Gamma}_{i}^{\langle t \rangle}$ ##### Variable names and shapes in the code * `c`: cell state, including all time steps, $\mathbf{c}$ shape $(n_{a}, m, T)$ * `c_next`: new (next) cell state, $\mathbf{c}^{\langle t \rangle}$ shape $(n_{a}, m)$ * `c_prev`: previous cell state, $\mathbf{c}^{\langle t-1 \rangle}$, shape $(n_{a}, m)$ #### - Output gate $\mathbf{\Gamma}_{o}$ * The output gate decides what gets sent as the prediction (output) of the time step. * The output gate is like the other gates. It contains values that range from 0 to 1. ##### Equation $$ \mathbf{\Gamma}_o^{\langle t \rangle}= \sigma(\mathbf{W}_o[\mathbf{a}^{\langle t-1 \rangle}, \mathbf{x}^{\langle t \rangle}] + \mathbf{b}_{o})\tag{5}$$ ##### Explanation of the equation * The output gate is determined by the previous hidden state $\mathbf{a}^{\langle t-1 \rangle}$ and the current input $\mathbf{x}^{\langle t \rangle}$ * The sigmoid makes the gate range from 0 to 1. ##### Variable names in the code * `Wo`: output gate weight, $\mathbf{W_o}$ * `bo`: output gate bias, $\mathbf{b_o}$ * `ot`: output gate, $\mathbf{\Gamma}_{o}^{\langle t \rangle}$ #### - Hidden state $\mathbf{a}^{\langle t \rangle}$ * The hidden state gets passed to the LSTM cell's next time step. * It is used to determine the three gates ($\mathbf{\Gamma}_{f}, \mathbf{\Gamma}_{u}, \mathbf{\Gamma}_{o}$) of the next time step. * The hidden state is also used for the prediction $y^{\langle t \rangle}$. 
##### Equation $$ \mathbf{a}^{\langle t \rangle} = \mathbf{\Gamma}_o^{\langle t \rangle} * \tanh(\mathbf{c}^{\langle t \rangle})\tag{6} $$ ##### Explanation of equation * The hidden state $\mathbf{a}^{\langle t \rangle}$ is determined by the cell state $\mathbf{c}^{\langle t \rangle}$ in combination with the output gate $\mathbf{\Gamma}_{o}$. * The cell state is passed through the "tanh" function to rescale values between -1 and +1. * The output gate acts like a "mask" that either preserves the values of $\tanh(\mathbf{c}^{\langle t \rangle})$ or keeps those values from being included in the hidden state $\mathbf{a}^{\langle t \rangle}$ ##### Variable names and shapes in the code * `a`: hidden state, including time steps. $\mathbf{a}$ has shape $(n_{a}, m, T_{x})$ * `a_prev`: hidden state from previous time step. $\mathbf{a}^{\langle t-1 \rangle}$ has shape $(n_{a}, m)$ * `a_next`: hidden state for next time step. $\mathbf{a}^{\langle t \rangle}$ has shape $(n_{a}, m)$ #### - Prediction $\mathbf{y}^{\langle t \rangle}_{pred}$ * The prediction in this use case is a classification, so we'll use a softmax. The equation is: $$\mathbf{y}^{\langle t \rangle}_{pred} = \textrm{softmax}(\mathbf{W}_{y} \mathbf{a}^{\langle t \rangle} + \mathbf{b}_{y})$$ ##### Variable names and shapes in the code * `y_pred`: prediction, including all time steps. $\mathbf{y}_{pred}$ has shape $(n_{y}, m, T_{x})$. Note that $(T_{y} = T_{x})$ for this example. * `yt_pred`: prediction for the current time step $t$. $\mathbf{y}^{\langle t \rangle}_{pred}$ has shape $(n_{y}, m)$ ### 2.1 - LSTM cell **Exercise**: Implement the LSTM cell described in the Figure (4). **Instructions**: 1. Concatenate the hidden state $a^{\langle t-1 \rangle}$ and input $x^{\langle t \rangle}$ into a single matrix: $$concat = \begin{bmatrix} a^{\langle t-1 \rangle} \\ x^{\langle t \rangle} \end{bmatrix}$$ 2. Compute all the formulas 1 through 6 for the gates, hidden state, and cell state. 3. 
# GRADED FUNCTION: lstm_cell_forward


def _sigmoid(z):
    """Element-wise logistic sigmoid; mirrors rnn_utils.sigmoid."""
    return 1 / (1 + np.exp(-z))


def _softmax(z):
    """Column-wise numerically stable softmax; mirrors rnn_utils.softmax."""
    e = np.exp(z - np.max(z, axis=0, keepdims=True))
    return e / e.sum(axis=0, keepdims=True)


def lstm_cell_forward(xt, a_prev, c_prev, parameters):
    """
    Implement a single forward step of the LSTM-cell as described in Figure (4).

    Arguments:
    xt -- your input data at timestep "t", numpy array of shape (n_x, m).
    a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
    c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
    parameters -- python dictionary containing:
                  Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
                  bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
                  Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
                  bi -- Bias of the update gate, numpy array of shape (n_a, 1)
                  Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
                  bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
                  Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
                  bo -- Bias of the output gate, numpy array of shape (n_a, 1)
                  Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
                  by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)

    Returns:
    a_next -- next hidden state, of shape (n_a, m)
    c_next -- next memory state, of shape (n_a, m)
    yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
    cache -- tuple of values needed for the backward pass, contains
             (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    Note: ft/it/ot stand for the forget/update/output gates, cct stands for the
    candidate value (c tilde), c stands for the cell state (memory).
    """
    # Retrieve parameters from "parameters"
    Wf = parameters["Wf"]  # forget gate weight
    bf = parameters["bf"]
    Wi = parameters["Wi"]  # update gate weight (notice the variable name)
    bi = parameters["bi"]  # (notice the variable name)
    Wc = parameters["Wc"]  # candidate value weight
    bc = parameters["bc"]
    Wo = parameters["Wo"]  # output gate weight
    bo = parameters["bo"]
    Wy = parameters["Wy"]  # prediction weight
    by = parameters["by"]

    # Retrieve dimensions from shapes of xt and Wy
    n_x, m = xt.shape
    n_y, n_a = Wy.shape

    # Concatenate a_prev and xt into one (n_a + n_x, m) matrix
    concat = np.concatenate((a_prev, xt), axis=0)

    # Gates, candidate, cell state and hidden state (equations 1-6)
    ft = _sigmoid(np.dot(Wf, concat) + bf)    # forget gate
    it = _sigmoid(np.dot(Wi, concat) + bi)    # update gate
    cct = np.tanh(np.dot(Wc, concat) + bc)    # candidate value
    c_next = ft * c_prev + it * cct           # cell state
    ot = _sigmoid(np.dot(Wo, concat) + bo)    # output gate
    a_next = ot * np.tanh(c_next)             # hidden state

    # Prediction of the LSTM cell
    yt_pred = _softmax(np.dot(Wy, a_next) + by)

    # store values needed for backward propagation in cache
    cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    return a_next, c_next, yt_pred, cache


# Smoke test -- compare the printed values with the expected output below.
np.random.seed(1)
xt_tmp = np.random.randn(3, 10)
a_prev_tmp = np.random.randn(5, 10)
c_prev_tmp = np.random.randn(5, 10)
parameters_tmp = {}
parameters_tmp['Wf'] = np.random.randn(5, 5 + 3)
parameters_tmp['bf'] = np.random.randn(5, 1)
parameters_tmp['Wi'] = np.random.randn(5, 5 + 3)
parameters_tmp['bi'] = np.random.randn(5, 1)
parameters_tmp['Wo'] = np.random.randn(5, 5 + 3)
parameters_tmp['bo'] = np.random.randn(5, 1)
parameters_tmp['Wc'] = np.random.randn(5, 5 + 3)
parameters_tmp['bc'] = np.random.randn(5, 1)
parameters_tmp['Wy'] = np.random.randn(2, 5)
parameters_tmp['by'] = np.random.randn(2, 1)
np.random.randn(2,1) a_next_tmp, c_next_tmp, yt_tmp, cache_tmp = lstm_cell_forward(xt_tmp, a_prev_tmp, c_prev_tmp, parameters_tmp) print("a_next[4] = \n", a_next_tmp[4]) print("a_next.shape = ", a_next_tmp.shape) print("c_next[2] = \n", c_next_tmp[2]) print("c_next.shape = ", c_next_tmp.shape) print("yt[1] =", yt_tmp[1]) print("yt.shape = ", yt_tmp.shape) print("cache[1][3] =\n", cache_tmp[1][3]) print("len(cache) = ", len(cache_tmp)) ``` **Expected Output**: ```Python a_next[4] = [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482 0.76566531 0.34631421 -0.00215674 0.43827275] a_next.shape = (5, 10) c_next[2] = [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942 0.76449811 -0.0981561 -0.74348425 -0.26810932] c_next.shape = (5, 10) yt[1] = [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381 0.00943007 0.12666353 0.39380172 0.07828381] yt.shape = (2, 10) cache[1][3] = [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874 0.07651101 -1.03752894 1.41219977 -0.37647422] len(cache) = 10 ``` ### 2.2 - Forward pass for LSTM Now that you have implemented one step of an LSTM, you can now iterate this over this using a for-loop to process a sequence of $T_x$ inputs. <img src="images/LSTM_rnn.png" style="width:500;height:300px;"> <caption><center> **Figure 5**: LSTM over multiple time-steps. </center></caption> **Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps. **Instructions** * Get the dimensions $n_x, n_a, n_y, m, T_x$ from the shape of the variables: `x` and `parameters`. * Initialize the 3D tensors $a$, $c$ and $y$. - $a$: hidden state, shape $(n_{a}, m, T_{x})$ - $c$: cell state, shape $(n_{a}, m, T_{x})$ - $y$: prediction, shape $(n_{y}, m, T_{x})$ (Note that $T_{y} = T_{x}$ in this example). - **Note** Setting one variable equal to the other is a "copy by reference". In other words, don't do `c = a', otherwise both these variables point to the same underlying variable. 
# GRADED FUNCTION: lstm_forward

def lstm_forward(x, a0, parameters):
    """
    Implement the forward propagation of the recurrent neural network using an
    LSTM-cell described in Figure (4).

    Arguments:
    x -- Input data for every time-step, of shape (n_x, m, T_x).
    a0 -- Initial hidden state, of shape (n_a, m)
    parameters -- python dictionary containing:
                  Wf, bf -- forget gate weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                  Wi, bi -- update gate weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                  Wc, bc -- candidate ("tanh") weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                  Wo, bo -- output gate weight/bias, shapes (n_a, n_a + n_x) and (n_a, 1)
                  Wy, by -- prediction weight/bias, shapes (n_y, n_a) and (n_y, 1)

    Returns:
    a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
    y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
    c -- The value of the cell state, numpy array of shape (n_a, m, T_x)
    caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)
    """
    # Initialize "caches", which will track the list of all the caches
    caches = []

    # local alias in case Wy is needed directly below
    Wy = parameters['Wy']

    # Retrieve dimensions from shapes of x and parameters['Wy']
    n_x, m, T_x = x.shape
    n_y, n_a = Wy.shape

    # initialize "a", "c" and "y" with zeros (three distinct arrays -- no aliasing)
    a = np.zeros((n_a, m, T_x))
    c = np.zeros((n_a, m, T_x))
    y = np.zeros((n_y, m, T_x))

    # Initialize a_next and c_next; c_next is its own array, NOT a slice of c
    a_next = a0
    c_next = np.zeros((n_a, m))

    # loop over all time-steps
    for t in range(T_x):
        # 2D slice 'xt' from the 3D input 'x' at time step 't'
        xt = x[:, :, t]
        # update hidden state, memory state, compute the prediction, get the cache
        a_next, c_next, yt, cache = lstm_cell_forward(xt, a_next, c_next, parameters)
        # save the hidden state, cell state and prediction at position t
        a[:, :, t] = a_next
        c[:, :, t] = c_next
        y[:, :, t] = yt
        # keep the cache for backpropagation
        caches.append(cache)

    # store values needed for backward propagation
    caches = (caches, x)

    return a, y, c, caches


# Smoke test -- requires lstm_cell_forward defined earlier in the notebook.
np.random.seed(1)
x_tmp = np.random.randn(3, 10, 7)
a0_tmp = np.random.randn(5, 10)
parameters_tmp = {}
parameters_tmp['Wf'] = np.random.randn(5, 5 + 3)
parameters_tmp['bf'] = np.random.randn(5, 1)
parameters_tmp['Wi'] = np.random.randn(5, 5 + 3)
parameters_tmp['bi'] = np.random.randn(5, 1)
parameters_tmp['Wo'] = np.random.randn(5, 5 + 3)
parameters_tmp['bo'] = np.random.randn(5, 1)
parameters_tmp['Wc'] = np.random.randn(5, 5 + 3)
parameters_tmp['bc'] = np.random.randn(5, 1)
parameters_tmp['Wy'] = np.random.randn(2, 5)
parameters_tmp['by'] = np.random.randn(2, 1)
a_tmp, y_tmp, c_tmp, caches_tmp = lstm_forward(x_tmp, a0_tmp, parameters_tmp)
print("a[4][3][6] = ", a_tmp[4][3][6])
print("a.shape = ", a_tmp.shape)
print("y[1][4][3] =", y_tmp[1][4][3])
print("y.shape = ", y_tmp.shape)
print("caches[1][1][1] =\n", caches_tmp[1][1][1])
print("c[1][2][1]", c_tmp[1][2][1])
print("len(caches) = ", len(caches_tmp))

# Expected output:
# a[4][3][6] =  0.172117767533
# a.shape =  (5, 10, 7)
# y[1][4][3] = 0.95087346185
# y.shape =  (2, 10, 7)
# c[1][2][1] -0.855544916718
# len(caches) =  2
## 3 - Backpropagation in recurrent neural networks (OPTIONAL / UNGRADED) In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers do not need to bother with the details of the backward pass. If however you are an expert in calculus and want to see the details of backprop in RNNs, you can work through this optional portion of the notebook. When in an earlier [course](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/0VSHe/derivatives-with-a-computation-graph) you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in recurrent neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are quite complicated and we did not derive them in lecture. However, we will briefly present them below. Note that this notebook does not implement the backward path from the Loss 'J' backwards to 'a'. This would have included the dense layer and softmax which are a part of the forward path. This is assumed to be calculated elsewhere and the result passed to rnn_backward in 'da'. It is further assumed that loss has been adjusted for batch size (m) and division by the number of examples is not required here. This section is optional and ungraded. It is more difficult and has fewer details regarding its implementation. This section only implements key elements of the full path. ### 3.1 - Basic RNN backward pass We will start by computing the backward pass for the basic RNN-cell and then in the following sections, iterate through the cells. <img src="images/rnn_backward_overview_3a_1.png" style="width:500;height:300px;"> <br> <caption><center> **Figure 6**: RNN-cell's backward pass. 
Just like in a fully-connected neural network, the derivative of the cost function $J$ backpropagates through the time steps of the RNN by following the chain-rule from calculus. Internal to the cell, the chain-rule is also used to calculate $(\frac{\partial J}{\partial W_{ax}},\frac{\partial J}{\partial W_{aa}},\frac{\partial J}{\partial b})$ to update the parameters $(W_{ax}, W_{aa}, b_a)$. The operation can utilize the cached results from the forward path. </center></caption> Recall from lecture, the shorthand for the partial derivative of cost relative to a variable is dVariable. For example, $\frac{\partial J}{\partial W_{ax}}$ is $dW_{ax}$. This will be used throughout the remaining sections. <img src="images/rnn_cell_backward_3a_c.png" style="width:800;height:500px;"> <br> <caption><center> **Figure 7**: This implementation of rnn_cell_backward does **not** include the output dense layer and softmax which are included in rnn_cell_forward. $da_{next}$ is $\frac{\partial{J}}{\partial a^{\langle t \rangle}}$ and includes loss from previous stages and current stage output logic. The addition shown in green will be part of your implementation of rnn_backward. </center></caption> ##### Equations To compute the rnn_cell_backward you can utilize the following equations. It is a good exercise to derive them by hand. Here, $*$ denotes element-wise multiplication while the absence of a symbol indicates matrix multiplication. 
\begin{align} \displaystyle a^{\langle t \rangle} &= \tanh(W_{ax} x^{\langle t \rangle} + W_{aa} a^{\langle t-1 \rangle} + b_{a})\tag{-} \\[8pt] \displaystyle \frac{\partial \tanh(x)} {\partial x} &= 1 - \tanh^2(x) \tag{-} \\[8pt] \displaystyle {dW_{ax}} &= (da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) )) x^{\langle t \rangle T}\tag{1} \\[8pt] \displaystyle dW_{aa} &= (da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) )) a^{\langle t-1 \rangle T}\tag{2} \\[8pt] \displaystyle db_a& = \sum_{batch}( da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) ))\tag{3} \\[8pt] \displaystyle dx^{\langle t \rangle} &= { W_{ax}}^T (da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) ))\tag{4} \\[8pt] \displaystyle da_{prev} &= { W_{aa}}^T(da_{next} * ( 1-\tanh^2(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a}) ))\tag{5} \end{align} #### Implementing rnn_cell_backward The results can be computed directly by implementing the equations above. However, the above can optionally be simplified by computing 'dz' and utlilizing the chain rule. This can be further simplified by noting that $\tanh(W_{ax}x^{\langle t \rangle}+W_{aa} a^{\langle t-1 \rangle} + b_{a})$ was computed and saved in the forward pass. To calculate dba, the 'batch' above is a sum across all 'm' examples (axis= 1). Note that you should use the keepdims = True option. It may be worthwhile to review Course 1 [Derivatives with a computational graph](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/0VSHe/derivatives-with-a-computation-graph) through [Backpropagation Intuition](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/6dDj7/backpropagation-intuition-optional), which decompose the calculation into steps using the chain rule. 
Matrix vector derivatives are described [here](http://cs231n.stanford.edu/vecDerivs.pdf), though the equations above incorporate the required transformations. Note rnn_cell_backward does __not__ include the calculation of loss from $y \langle t \rangle$, this is incorporated into the incoming da_next. This is a slight mismatch with rnn_cell_forward which includes a dense layer and softmax. Note: in the code: $\displaystyle dx^{\langle t \rangle}$ is represented by dxt, $\displaystyle d W_{ax}$ is represented by dWax, $\displaystyle da_{prev}$ is represented by da_prev, $\displaystyle dW_{aa}$ is represented by dWaa, $\displaystyle db_{a}$ is represented by dba, dz is not derived above but can optionally be derived by students to simplify the repeated calculations. ``` def rnn_cell_backward(da_next, cache): """ Implements the backward pass for the RNN-cell (single time-step). Arguments: da_next -- Gradient of loss with respect to next hidden state cache -- python dictionary containing useful values (output of rnn_cell_forward()) Returns: gradients -- python dictionary containing: dx -- Gradients of input data, of shape (n_x, m) da_prev -- Gradients of previous hidden state, of shape (n_a, m) dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x) dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a) dba -- Gradients of bias vector, of shape (n_a, 1) """ # Retrieve values from cache (a_next, a_prev, xt, parameters) = cache # Retrieve values from parameters Wax = parameters["Wax"] Waa = parameters["Waa"] Wya = parameters["Wya"] ba = parameters["ba"] by = parameters["by"] ### START CODE HERE ### # compute the gradient of the loss with respect to z (optional) (≈1 line) dz = None # compute the gradient of the loss with respect to Wax (≈2 lines) dxt = None dWax = None # compute the gradient with respect to Waa (≈2 lines) da_prev = None dWaa = None # compute the gradient with respect to b (≈1 line) dba = None ### END CODE HERE ### # Store the 
gradients in a python dictionary gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba} return gradients np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, yt_tmp, cache_tmp = rnn_cell_forward(xt_tmp, a_prev_tmp, parameters_tmp) da_next_tmp = np.random.randn(5,10) gradients_tmp = rnn_cell_backward(da_next_tmp, cache_tmp) print("gradients[\"dxt\"][1][2] =", gradients_tmp["dxt"][1][2]) print("gradients[\"dxt\"].shape =", gradients_tmp["dxt"].shape) print("gradients[\"da_prev\"][2][3] =", gradients_tmp["da_prev"][2][3]) print("gradients[\"da_prev\"].shape =", gradients_tmp["da_prev"].shape) print("gradients[\"dWax\"][3][1] =", gradients_tmp["dWax"][3][1]) print("gradients[\"dWax\"].shape =", gradients_tmp["dWax"].shape) print("gradients[\"dWaa\"][1][2] =", gradients_tmp["dWaa"][1][2]) print("gradients[\"dWaa\"].shape =", gradients_tmp["dWaa"].shape) print("gradients[\"dba\"][4] =", gradients_tmp["dba"][4]) print("gradients[\"dba\"].shape =", gradients_tmp["dba"].shape) ``` **Expected Output**: <table> <tr> <td> **gradients["dxt"][1][2]** = </td> <td> -1.3872130506 </td> </tr> <tr> <td> **gradients["dxt"].shape** = </td> <td> (3, 10) </td> </tr> <tr> <td> **gradients["da_prev"][2][3]** = </td> <td> -0.152399493774 </td> </tr> <tr> <td> **gradients["da_prev"].shape** = </td> <td> (5, 10) </td> </tr> <tr> <td> **gradients["dWax"][3][1]** = </td> <td> 0.410772824935 </td> </tr> <tr> <td> **gradients["dWax"].shape** = </td> <td> (5, 3) </td> </tr> <tr> <td> **gradients["dWaa"][1][2]** = </td> <td> 1.15034506685 </td> </tr> <tr> <td> **gradients["dWaa"].shape** = </td> <td> (5, 5) </td> </tr> <tr> <td> **gradients["dba"][4]** = </td> <td> [ 0.20023491] </td> 
</tr> <tr> <td> **gradients["dba"].shape** = </td> <td> (5, 1) </td> </tr> </table> #### Backward pass through the RNN Computing the gradients of the cost with respect to $a^{\langle t \rangle}$ at every time-step $t$ is useful because it is what helps the gradient backpropagate to the previous RNN-cell. To do so, you need to iterate through all the time steps starting at the end, and at each step, you increment the overall $db_a$, $dW_{aa}$, $dW_{ax}$ and you store $dx$. **Instructions**: Implement the `rnn_backward` function. Initialize the return variables with zeros first and then loop through all the time steps while calling the `rnn_cell_backward` at each timestep, update the other variables accordingly. * Note that this notebook does not implement the backward path from the Loss 'J' backwards to 'a'. * This would have included the dense layer and softmax which are a part of the forward path. * This is assumed to be calculated elsewhere and the result passed to rnn_backward in 'da'. * You must combine this with the loss from the previous stages when calling rnn_cell_backward (see figure 7 above). * It is further assumed that loss has been adjusted for batch size (m). * Therefore, division by the number of examples is not required here. ``` def rnn_backward(da, caches): """ Implement the backward pass for a RNN over an entire sequence of input data. Arguments: da -- Upstream gradients of all hidden states, of shape (n_a, m, T_x) caches -- tuple containing information from the forward pass (rnn_forward) Returns: gradients -- python dictionary containing: dx -- Gradient w.r.t.
the input data, numpy-array of shape (n_x, m, T_x) da0 -- Gradient w.r.t the initial hidden state, numpy-array of shape (n_a, m) dWax -- Gradient w.r.t the input's weight matrix, numpy-array of shape (n_a, n_x) dWaa -- Gradient w.r.t the hidden state's weight matrix, numpy-arrayof shape (n_a, n_a) dba -- Gradient w.r.t the bias, of shape (n_a, 1) """ ### START CODE HERE ### # Retrieve values from the first cache (t=1) of caches (≈2 lines) (caches, x) = None (a1, a0, x1, parameters) = None # Retrieve dimensions from da's and x1's shapes (≈2 lines) n_a, m, T_x = None n_x, m = None # initialize the gradients with the right sizes (≈6 lines) dx = None dWax = None dWaa = None dba = None da0 = None da_prevt = None # Loop through all the time steps for t in reversed(range(None)): # Compute gradients at time step t. # Remember to sum gradients from the output path (da) and the previous timesteps (da_prevt) (≈1 line) gradients = None # Retrieve derivatives from gradients (≈ 1 line) dxt, da_prevt, dWaxt, dWaat, dbat = gradients["dxt"], gradients["da_prev"], gradients["dWax"], gradients["dWaa"], gradients["dba"] # Increment global derivatives w.r.t parameters by adding their derivative at time-step t (≈4 lines) dx[:, :, t] = None dWax += None dWaa += None dba += None # Set da0 to the gradient of a which has been backpropagated through all time-steps (≈1 line) da0 = None ### END CODE HERE ### # Store the gradients in a python dictionary gradients = {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa,"dba": dba} return gradients np.random.seed(1) x_tmp = np.random.randn(3,10,4) a0_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wax'] = np.random.randn(5,3) parameters_tmp['Waa'] = np.random.randn(5,5) parameters_tmp['Wya'] = np.random.randn(2,5) parameters_tmp['ba'] = np.random.randn(5,1) parameters_tmp['by'] = np.random.randn(2,1) a_tmp, y_tmp, caches_tmp = rnn_forward(x_tmp, a0_tmp, parameters_tmp) da_tmp = np.random.randn(5, 10, 4) gradients_tmp = 
rnn_backward(da_tmp, caches_tmp) print("gradients[\"dx\"][1][2] =", gradients_tmp["dx"][1][2]) print("gradients[\"dx\"].shape =", gradients_tmp["dx"].shape) print("gradients[\"da0\"][2][3] =", gradients_tmp["da0"][2][3]) print("gradients[\"da0\"].shape =", gradients_tmp["da0"].shape) print("gradients[\"dWax\"][3][1] =", gradients_tmp["dWax"][3][1]) print("gradients[\"dWax\"].shape =", gradients_tmp["dWax"].shape) print("gradients[\"dWaa\"][1][2] =", gradients_tmp["dWaa"][1][2]) print("gradients[\"dWaa\"].shape =", gradients_tmp["dWaa"].shape) print("gradients[\"dba\"][4] =", gradients_tmp["dba"][4]) print("gradients[\"dba\"].shape =", gradients_tmp["dba"].shape) ``` **Expected Output**: <table> <tr> <td> **gradients["dx"][1][2]** = </td> <td> [-2.07101689 -0.59255627 0.02466855 0.01483317] </td> </tr> <tr> <td> **gradients["dx"].shape** = </td> <td> (3, 10, 4) </td> </tr> <tr> <td> **gradients["da0"][2][3]** = </td> <td> -0.314942375127 </td> </tr> <tr> <td> **gradients["da0"].shape** = </td> <td> (5, 10) </td> </tr> <tr> <td> **gradients["dWax"][3][1]** = </td> <td> 11.2641044965 </td> </tr> <tr> <td> **gradients["dWax"].shape** = </td> <td> (5, 3) </td> </tr> <tr> <td> **gradients["dWaa"][1][2]** = </td> <td> 2.30333312658 </td> </tr> <tr> <td> **gradients["dWaa"].shape** = </td> <td> (5, 5) </td> </tr> <tr> <td> **gradients["dba"][4]** = </td> <td> [-0.74747722] </td> </tr> <tr> <td> **gradients["dba"].shape** = </td> <td> (5, 1) </td> </tr> </table> ## 3.2 - LSTM backward pass ### 3.2.1 One Step backward The LSTM backward pass is slightly more complicated than the forward pass. <img src="images/LSTM_cell_backward_rev3a_c2.png" style="width:500;height:400px;"> <br> <caption><center> **Figure 8**: lstm_cell_backward. Note the output functions, while part of the lstm_cell_forward, are not included in lstm_cell_backward </center></caption> The equations for the LSTM backward pass are provided below. 
(If you enjoy calculus exercises feel free to try deriving these from scratch yourself.) ### 3.2.2 gate derivatives Note the location of the gate derivatives ($\gamma$..) between the dense layer and the activation function (see graphic above). This is convenient for computing parameter derivatives in the next step. \begin{align} d\gamma_o^{\langle t \rangle} &= da_{next}*\tanh(c_{next}) * \Gamma_o^{\langle t \rangle}*\left(1-\Gamma_o^{\langle t \rangle}\right)\tag{7} \\[8pt] dp\widetilde{c}^{\langle t \rangle} &= \left(dc_{next}*\Gamma_u^{\langle t \rangle}+ \Gamma_o^{\langle t \rangle}* (1-\tanh^2(c_{next})) * \Gamma_u^{\langle t \rangle} * da_{next} \right) * \left(1-\left(\widetilde c^{\langle t \rangle}\right)^2\right) \tag{8} \\[8pt] d\gamma_u^{\langle t \rangle} &= \left(dc_{next}*\widetilde{c}^{\langle t \rangle} + \Gamma_o^{\langle t \rangle}* (1-\tanh^2(c_{next})) * \widetilde{c}^{\langle t \rangle} * da_{next}\right)*\Gamma_u^{\langle t \rangle}*\left(1-\Gamma_u^{\langle t \rangle}\right)\tag{9} \\[8pt] d\gamma_f^{\langle t \rangle} &= \left(dc_{next}* c_{prev} + \Gamma_o^{\langle t \rangle} * (1-\tanh^2(c_{next})) * c_{prev} * da_{next}\right)*\Gamma_f^{\langle t \rangle}*\left(1-\Gamma_f^{\langle t \rangle}\right)\tag{10} \end{align} ### 3.2.3 parameter derivatives $ dW_f = d\gamma_f^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{11} $ $ dW_u = d\gamma_u^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{12} $ $ dW_c = dp\widetilde c^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{13} $ $ dW_o = d\gamma_o^{\langle t \rangle} \begin{bmatrix} a_{prev} \\ x_t\end{bmatrix}^T \tag{14}$ To calculate $db_f, db_u, db_c, db_o$ you just need to sum across all 'm' examples (axis= 1) on $d\gamma_f^{\langle t \rangle}, d\gamma_u^{\langle t \rangle}, dp\widetilde c^{\langle t \rangle}, d\gamma_o^{\langle t \rangle}$ respectively. Note that you should have the `keepdims = True` option. 
$\displaystyle db_f = \sum_{batch}d\gamma_f^{\langle t \rangle}\tag{15}$ $\displaystyle db_u = \sum_{batch}d\gamma_u^{\langle t \rangle}\tag{16}$ $\displaystyle db_c = \sum_{batch}dp\widetilde c^{\langle t \rangle}\tag{17}$ $\displaystyle db_o = \sum_{batch}d\gamma_o^{\langle t \rangle}\tag{18}$ Finally, you will compute the derivative with respect to the previous hidden state, previous memory state, and input. $ da_{prev} = W_f^T d\gamma_f^{\langle t \rangle} + W_u^T d\gamma_u^{\langle t \rangle}+ W_c^T dp\widetilde c^{\langle t \rangle} + W_o^T d\gamma_o^{\langle t \rangle} \tag{19}$ Here, to account for concatenation, the weights for equations 19 are the first n_a, (i.e. $W_f = W_f[:,:n_a]$ etc...) $ dc_{prev} = dc_{next}*\Gamma_f^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} * (1- \tanh^2(c_{next}))*\Gamma_f^{\langle t \rangle}*da_{next} \tag{20}$ $ dx^{\langle t \rangle} = W_f^T d\gamma_f^{\langle t \rangle} + W_u^T d\gamma_u^{\langle t \rangle}+ W_c^T dp\widetilde c^{\langle t \rangle} + W_o^T d\gamma_o^{\langle t \rangle}\tag{21} $ where the weights for equation 21 are from n_a to the end, (i.e. $W_f = W_f[:,n_a:]$ etc...) **Exercise:** Implement `lstm_cell_backward` by implementing equations $7-21$ below. Note: In the code: $d\gamma_o^{\langle t \rangle}$ is represented by `dot`, $dp\widetilde{c}^{\langle t \rangle}$ is represented by `dcct`, $d\gamma_u^{\langle t \rangle}$ is represented by `dit`, $d\gamma_f^{\langle t \rangle}$ is represented by `dft` ``` def lstm_cell_backward(da_next, dc_next, cache): """ Implement the backward pass for the LSTM-cell (single time-step). Arguments: da_next -- Gradients of next hidden state, of shape (n_a, m) dc_next -- Gradients of next cell state, of shape (n_a, m) cache -- cache storing information from the forward pass Returns: gradients -- python dictionary containing: dxt -- Gradient of input data at time-step t, of shape (n_x, m) da_prev -- Gradient w.r.t.
the previous hidden state, numpy array of shape (n_a, m) dc_prev -- Gradient w.r.t. the previous memory state, of shape (n_a, m) dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x) dWo -- Gradient w.r.t. the weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x) dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1) dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1) dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1) dbo -- Gradient w.r.t. biases of the output gate, of shape (n_a, 1) """ # Retrieve information from "cache" (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache ### START CODE HERE ### # Retrieve dimensions from xt's and a_next's shape (≈2 lines) n_x, m = None n_a, m = None # Compute gates related derivatives; their values can be found by looking carefully at equations (7) to (10) (≈4 lines) dot = None dcct = None dit = None dft = None # Compute parameters related derivatives. Use equations (11)-(18) (≈8 lines) dWf = None dWi = None dWc = None dWo = None dbf = None dbi = None dbc = None dbo = None # Compute derivatives w.r.t previous hidden state, previous memory state and input. Use equations (19)-(21).
(≈3 lines) da_prev = None dc_prev = None dxt = None ### END CODE HERE ### # Save gradients in dictionary gradients = {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi, "dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo} return gradients np.random.seed(1) xt_tmp = np.random.randn(3,10) a_prev_tmp = np.random.randn(5,10) c_prev_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wf'] = np.random.randn(5, 5+3) parameters_tmp['bf'] = np.random.randn(5,1) parameters_tmp['Wi'] = np.random.randn(5, 5+3) parameters_tmp['bi'] = np.random.randn(5,1) parameters_tmp['Wo'] = np.random.randn(5, 5+3) parameters_tmp['bo'] = np.random.randn(5,1) parameters_tmp['Wc'] = np.random.randn(5, 5+3) parameters_tmp['bc'] = np.random.randn(5,1) parameters_tmp['Wy'] = np.random.randn(2,5) parameters_tmp['by'] = np.random.randn(2,1) a_next_tmp, c_next_tmp, yt_tmp, cache_tmp = lstm_cell_forward(xt_tmp, a_prev_tmp, c_prev_tmp, parameters_tmp) da_next_tmp = np.random.randn(5,10) dc_next_tmp = np.random.randn(5,10) gradients_tmp = lstm_cell_backward(da_next_tmp, dc_next_tmp, cache_tmp) print("gradients[\"dxt\"][1][2] =", gradients_tmp["dxt"][1][2]) print("gradients[\"dxt\"].shape =", gradients_tmp["dxt"].shape) print("gradients[\"da_prev\"][2][3] =", gradients_tmp["da_prev"][2][3]) print("gradients[\"da_prev\"].shape =", gradients_tmp["da_prev"].shape) print("gradients[\"dc_prev\"][2][3] =", gradients_tmp["dc_prev"][2][3]) print("gradients[\"dc_prev\"].shape =", gradients_tmp["dc_prev"].shape) print("gradients[\"dWf\"][3][1] =", gradients_tmp["dWf"][3][1]) print("gradients[\"dWf\"].shape =", gradients_tmp["dWf"].shape) print("gradients[\"dWi\"][1][2] =", gradients_tmp["dWi"][1][2]) print("gradients[\"dWi\"].shape =", gradients_tmp["dWi"].shape) print("gradients[\"dWc\"][3][1] =", gradients_tmp["dWc"][3][1]) print("gradients[\"dWc\"].shape =", gradients_tmp["dWc"].shape) print("gradients[\"dWo\"][1][2] =", gradients_tmp["dWo"][1][2]) 
print("gradients[\"dWo\"].shape =", gradients_tmp["dWo"].shape) print("gradients[\"dbf\"][4] =", gradients_tmp["dbf"][4]) print("gradients[\"dbf\"].shape =", gradients_tmp["dbf"].shape) print("gradients[\"dbi\"][4] =", gradients_tmp["dbi"][4]) print("gradients[\"dbi\"].shape =", gradients_tmp["dbi"].shape) print("gradients[\"dbc\"][4] =", gradients_tmp["dbc"][4]) print("gradients[\"dbc\"].shape =", gradients_tmp["dbc"].shape) print("gradients[\"dbo\"][4] =", gradients_tmp["dbo"][4]) print("gradients[\"dbo\"].shape =", gradients_tmp["dbo"].shape) ``` **Expected Output**: <table> <tr> <td> **gradients["dxt"][1][2]** = </td> <td> 3.23055911511 </td> </tr> <tr> <td> **gradients["dxt"].shape** = </td> <td> (3, 10) </td> </tr> <tr> <td> **gradients["da_prev"][2][3]** = </td> <td> -0.0639621419711 </td> </tr> <tr> <td> **gradients["da_prev"].shape** = </td> <td> (5, 10) </td> </tr> <tr> <td> **gradients["dc_prev"][2][3]** = </td> <td> 0.797522038797 </td> </tr> <tr> <td> **gradients["dc_prev"].shape** = </td> <td> (5, 10) </td> </tr> <tr> <td> **gradients["dWf"][3][1]** = </td> <td> -0.147954838164 </td> </tr> <tr> <td> **gradients["dWf"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dWi"][1][2]** = </td> <td> 1.05749805523 </td> </tr> <tr> <td> **gradients["dWi"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dWc"][3][1]** = </td> <td> 2.30456216369 </td> </tr> <tr> <td> **gradients["dWc"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dWo"][1][2]** = </td> <td> 0.331311595289 </td> </tr> <tr> <td> **gradients["dWo"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dbf"][4]** = </td> <td> [ 0.18864637] </td> </tr> <tr> <td> **gradients["dbf"].shape** = </td> <td> (5, 1) </td> </tr> <tr> <td> **gradients["dbi"][4]** = </td> <td> [-0.40142491] </td> </tr> <tr> <td> **gradients["dbi"].shape** = </td> <td> (5, 1) </td> </tr> <tr> <td> **gradients["dbc"][4]** = </td> <td> [ 0.25587763] </td> </tr> <tr> <td> 
**gradients["dbc"].shape** = </td> <td> (5, 1) </td> </tr> <tr> <td> **gradients["dbo"][4]** = </td> <td> [ 0.13893342] </td> </tr> <tr> <td> **gradients["dbo"].shape** = </td> <td> (5, 1) </td> </tr> </table> ### 3.3 Backward pass through the LSTM RNN This part is very similar to the `rnn_backward` function you implemented above. You will first create variables of the same dimension as your return variables. You will then iterate over all the time steps starting from the end and call the one step function you implemented for LSTM at each iteration. You will then update the parameters by summing them individually. Finally return a dictionary with the new gradients. **Instructions**: Implement the `lstm_backward` function. Create a for loop starting from $T_x$ and going backward. For each step call `lstm_cell_backward` and update the your old gradients by adding the new gradients to them. Note that `dxt` is not updated but is stored. ``` def lstm_backward(da, caches): """ Implement the backward pass for the RNN with LSTM-cell (over a whole sequence). Arguments: da -- Gradients w.r.t the hidden states, numpy-array of shape (n_a, m, T_x) caches -- cache storing information from the forward pass (lstm_forward) Returns: gradients -- python dictionary containing: dx -- Gradient of inputs, of shape (n_x, m, T_x) da0 -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m) dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x) dWo -- Gradient w.r.t. the weight matrix of the save gate, numpy array of shape (n_a, n_a + n_x) dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1) dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1) dbc -- Gradient w.r.t. 
biases of the memory gate, of shape (n_a, 1) dbo -- Gradient w.r.t. biases of the save gate, of shape (n_a, 1) """ # Retrieve values from the first cache (t=1) of caches. (caches, x) = caches (a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0] ### START CODE HERE ### # Retrieve dimensions from da's and x1's shapes (≈2 lines) n_a, m, T_x = None n_x, m = None # initialize the gradients with the right sizes (≈12 lines) dx = None da0 = None da_prevt = None dc_prevt = None dWf = None dWi = None dWc = None dWo = None dbf = None dbi = None dbc = None dbo = None # loop back over the whole sequence for t in reversed(range(None)): # Compute all gradients using lstm_cell_backward gradients = None # Store or add the gradient to the parameters' previous step's gradient da_prevt = None dc_prevt = None dx[:,:,t] = None dWf += None dWi += None dWc += None dWo += None dbf += None dbi += None dbc += None dbo += None # Set the first activation's gradient to the backpropagated gradient da_prev. da0 = None ### END CODE HERE ### # Store the gradients in a python dictionary gradients = {"dx": dx, "da0": da0, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi, "dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo} return gradients np.random.seed(1) x_tmp = np.random.randn(3,10,7) a0_tmp = np.random.randn(5,10) parameters_tmp = {} parameters_tmp['Wf'] = np.random.randn(5, 5+3) parameters_tmp['bf'] = np.random.randn(5,1) parameters_tmp['Wi'] = np.random.randn(5, 5+3) parameters_tmp['bi'] = np.random.randn(5,1) parameters_tmp['Wo'] = np.random.randn(5, 5+3) parameters_tmp['bo'] = np.random.randn(5,1) parameters_tmp['Wc'] = np.random.randn(5, 5+3) parameters_tmp['bc'] = np.random.randn(5,1) parameters_tmp['Wy'] = np.zeros((2,5)) # unused, but needed for lstm_forward parameters_tmp['by'] = np.zeros((2,1)) # unused, but needed for lstm_forward a_tmp, y_tmp, c_tmp, caches_tmp = lstm_forward(x_tmp, a0_tmp, parameters_tmp) da_tmp = np.random.randn(5, 10, 4) gradients_tmp = lstm_backward(da_tmp, 
caches_tmp) print("gradients[\"dx\"][1][2] =", gradients_tmp["dx"][1][2]) print("gradients[\"dx\"].shape =", gradients_tmp["dx"].shape) print("gradients[\"da0\"][2][3] =", gradients_tmp["da0"][2][3]) print("gradients[\"da0\"].shape =", gradients_tmp["da0"].shape) print("gradients[\"dWf\"][3][1] =", gradients_tmp["dWf"][3][1]) print("gradients[\"dWf\"].shape =", gradients_tmp["dWf"].shape) print("gradients[\"dWi\"][1][2] =", gradients_tmp["dWi"][1][2]) print("gradients[\"dWi\"].shape =", gradients_tmp["dWi"].shape) print("gradients[\"dWc\"][3][1] =", gradients_tmp["dWc"][3][1]) print("gradients[\"dWc\"].shape =", gradients_tmp["dWc"].shape) print("gradients[\"dWo\"][1][2] =", gradients_tmp["dWo"][1][2]) print("gradients[\"dWo\"].shape =", gradients_tmp["dWo"].shape) print("gradients[\"dbf\"][4] =", gradients_tmp["dbf"][4]) print("gradients[\"dbf\"].shape =", gradients_tmp["dbf"].shape) print("gradients[\"dbi\"][4] =", gradients_tmp["dbi"][4]) print("gradients[\"dbi\"].shape =", gradients_tmp["dbi"].shape) print("gradients[\"dbc\"][4] =", gradients_tmp["dbc"][4]) print("gradients[\"dbc\"].shape =", gradients_tmp["dbc"].shape) print("gradients[\"dbo\"][4] =", gradients_tmp["dbo"][4]) print("gradients[\"dbo\"].shape =", gradients_tmp["dbo"].shape) ``` **Expected Output**: <table> <tr> <td> **gradients["dx"][1][2]** = </td> <td> [0.00218254 0.28205375 -0.48292508 -0.43281115] </td> </tr> <tr> <td> **gradients["dx"].shape** = </td> <td> (3, 10, 4) </td> </tr> <tr> <td> **gradients["da0"][2][3]** = </td> <td> 0.312770310257 </td> </tr> <tr> <td> **gradients["da0"].shape** = </td> <td> (5, 10) </td> </tr> <tr> <td> **gradients["dWf"][3][1]** = </td> <td> -0.0809802310938 </td> </tr> <tr> <td> **gradients["dWf"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dWi"][1][2]** = </td> <td> 0.40512433093 </td> </tr> <tr> <td> **gradients["dWi"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dWc"][3][1]** = </td> <td> -0.0793746735512 </td> </tr> 
<tr> <td> **gradients["dWc"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dWo"][1][2]** = </td> <td> 0.038948775763 </td> </tr> <tr> <td> **gradients["dWo"].shape** = </td> <td> (5, 8) </td> </tr> <tr> <td> **gradients["dbf"][4]** = </td> <td> [-0.15745657] </td> </tr> <tr> <td> **gradients["dbf"].shape** = </td> <td> (5, 1) </td> </tr> <tr> <td> **gradients["dbi"][4]** = </td> <td> [-0.50848333] </td> </tr> <tr> <td> **gradients["dbi"].shape** = </td> <td> (5, 1) </td> </tr> <tr> <td> **gradients["dbc"][4]** = </td> <td> [-0.42510818] </td> </tr> <tr> <td> **gradients["dbc"].shape** = </td> <td> (5, 1) </td> </tr> <tr> <td> **gradients["dbo"][4]** = </td> <td> [ -0.17958196] </td> </tr> <tr> <td> **gradients["dbo"].shape** = </td> <td> (5, 1) </td> </tr> </table> ### Congratulations ! Congratulations on completing this assignment. You now understand how recurrent neural networks work! Let's go on to the next exercise, where you'll use an RNN to build a character-level language model.
github_jupyter
# Comparison FFTConv & SpatialConv In this notebook, we compare the speed and the error of utilizing fft and spatial convolutions. In particular, we will: * Perform a forward and backward pass on a small network utilizing different types of convolution. * Analyze their speed and their error response w.r.t. spatial convolutions. Let's go! First, we import some packages: ``` # Append .. to path import os,sys ckconv_source = os.path.join(os.getcwd(), '..') if ckconv_source not in sys.path: sys.path.append(ckconv_source) import torch import ckconv import matplotlib.pyplot as plt causal_fftconv = ckconv.nn.functional.causal_fftconv causal_conv = ckconv.nn.functional.causal_conv ``` First we create a (long) input signal and define the convolutional kernels. ``` input_size = 2000 no_channels = 20 batch_size = 3 # Input signal signal = torch.randn(batch_size, no_channels, input_size).cuda() signal.normal_(0, 0.01) # Conv. kernels: kernel1 = torch.nn.Parameter(torch.randn(20, 20, input_size)).cuda() kernel2 = torch.nn.Parameter(torch.randn(20, 20, input_size)).cuda() kernel3 = torch.nn.Parameter(torch.randn(20, 20, input_size)).cuda() kernel1.data.normal_(0, 0.01) kernel2.data.normal_(0, 0.01) kernel3.data.normal_(0, 0.01) print() ``` Now, we perform the forward pass: ``` # With spatialconv y1 = torch.relu(causal_conv(signal, kernel1)) y2 = torch.relu(causal_conv(y1, kernel2)) y3 = causal_conv(y2, kernel3) # With fftconv (double) y1_dfft = torch.relu(causal_fftconv(signal, kernel1, double_precision=True)) y2_dfft = torch.relu(causal_fftconv(y1_dfft, kernel2, double_precision=True)) y3_dfft = causal_fftconv(y2_dfft, kernel3, double_precision=True) # With fftconv (float) y1_fft = torch.relu(causal_fftconv(signal, kernel1, double_precision=False)) y2_fft = torch.relu(causal_fftconv(y1_fft, kernel2, double_precision=False)) y3_fft = causal_fftconv(y2_fft, kernel3, double_precision=False) plt.figure(figsize=(6.4,5)) plt.title('Result Conv. 
Network with Spatial Convolutions') plt.plot(y3.detach().cpu().numpy()[0, 0, :]) plt.show() fig, axs = plt.subplots(1, 2,figsize=(15,5)) axs[0].set_title('Spatial - FFT (Float precision)') axs[0].plot(y3.detach().cpu().numpy()[0, 0, :] - y3_fft.detach().cpu().numpy()[0, 0, :]) axs[1].set_title('Spatial - FFT (Double precision)') axs[1].plot(y3.detach().cpu().numpy()[0, 0, :] - y3_dfft.detach().cpu().numpy()[0, 0, :]) plt.show() print('Abs Error Mean. Float: {} , Double: {}'.format(torch.abs(y3 - y3_fft).mean(), torch.abs(y3 - y3_dfft).mean())) print('Abs Error Std Dev. Float: {} , Double: {}'.format(torch.abs(y3 - y3_fft).std(), torch.abs(y3 - y3_dfft).std())) ``` We observe that the error is very small. ### Speed analysis Now, we analyze their speed: ``` # With spatialconv with torch.autograd.profiler.profile(use_cuda=True) as prof: y1 = torch.relu(causal_conv(signal, kernel1)) y2 = torch.relu(causal_conv(y1, kernel2)) y3 = causal_conv(y2, kernel3) y3 = y3.sum() y3.backward() print(prof) # Self CPU time total: 103.309ms # CUDA time total: 103.847ms # With fft and double precision with torch.autograd.profiler.profile(use_cuda=True) as prof: y1_dfft = torch.relu(causal_fftconv(signal, kernel1, double_precision=True)) y2_dfft = torch.relu(causal_fftconv(y1_dfft, kernel2, double_precision=True)) y3_dfft = causal_fftconv(y2_dfft, kernel3, double_precision=True) y3_dfft = y3_dfft.sum() y3_dfft.backward() print(prof) # Self CPU time total: 32.416ms # CUDA time total: 31.895ms # With fft and float precision with torch.autograd.profiler.profile(use_cuda=True) as prof: y1_fft = torch.relu(causal_fftconv(signal, kernel1, double_precision=False)) y2_fft = torch.relu(causal_fftconv(y1_fft, kernel2, double_precision=False)) y3_fft = causal_fftconv(y2_fft, kernel3, double_precision=False) y3_fft = y3_fft.sum() y3_fft.backward() print(prof) # Self CPU time total: 12.797ms # CUDA time total: 13.138ms ``` We see that whilst the error is minimal, the gains in speed are extreme (10 
times faster for kernels and inputs of size 2000).
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 预创建的 Estimators <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://tensorflow.google.cn/beta/tutorials/estimators/premade_estimators"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" />在 tensorFlow.google.cn 上查看</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/zh-cn/beta/tutorials/estimators/premade_estimators.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" />在 Google Colab 中运行</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/zh-cn/beta/tutorials/estimators/premade_estimators.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" />在 GitHub 上查看源代码</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/zh-cn/beta/tutorials/estimators/premade_estimators.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载 notebook</a> </td> </table> Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的 [官方英文文档](https://www.tensorflow.org/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到 [tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入 [docs-zh-cn@tensorflow.org Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。 本教程将向您展示如何使用 Estimators 解决 Tensorflow 
中的鸢尾花(Iris)分类问题。Estimator 是 Tensorflow 完整模型的高级表示,它被设计用于轻松扩展和异步训练。更多细节请参阅 [Estimators](https://tensorflow.google.cn/guide/estimators)。 请注意,在 Tensorflow 2.0 中,[Keras API](https://tensorflow.google.cn/guide/keras) 可以完成许多相同的任务,而且被认为是一个更易学习的API。如果您刚刚开始入门,我们建议您从 Keras 开始。有关 Tensorflow 2.0 中可用高级API的更多信息,请参阅 [Keras标准化](https://medium.com/tensorflow/standardizing-on-keras-guidance-on-high-level-apis-in-tensorflow-2-0-bad2b04c819a)。 ## 首先要做的事 为了开始,您将首先导入 Tensorflow 和一系列您需要的库。 ``` from __future__ import absolute_import, division, print_function, unicode_literals try: # Colab only %tensorflow_version 2.x except Exception: pass import tensorflow as tf import pandas as pd ``` ## 数据集 本文档中的示例程序构建并测试了一个模型,该模型根据[花萼](https://en.wikipedia.org/wiki/Sepal)和[花瓣](https://en.wikipedia.org/wiki/Petal)的大小将鸢尾花分成三种物种。 您将使用鸢尾花数据集训练模型。该数据集包括四个特征和一个[标签](https://developers.google.com/machine-learning/glossary/#label)。这四个特征确定了单个鸢尾花的以下植物学特征: * 花萼长度 * 花萼宽度 * 花瓣长度 * 花瓣宽度 根据这些信息,您可以定义一些有用的常量来解析数据: ``` CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species'] SPECIES = ['Setosa', 'Versicolor', 'Virginica'] ``` 接下来,使用 Keras 与 Pandas 下载并解析鸢尾花数据集。注意为训练和测试保留不同的数据集。 ``` train_path = tf.keras.utils.get_file( "iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv") test_path = tf.keras.utils.get_file( "iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv") train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0) test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0) ``` 通过检查数据您可以发现有四列浮点型特征和一列 int32 型标签。 ``` train.head() ``` 对于每个数据集都分割出标签,模型将被训练来预测这些标签。 ``` train_y = train.pop('Species') test_y = test.pop('Species') # 标签列现已从数据中删除 train.head() ``` ## Estimator 编程概述 现在您已经设定好了数据,您可以使用 Tensorflow Estimator 定义模型。Estimator 是从 `tf.estimator.Estimator` 中派生的任何类。Tensorflow提供了一组`tf.estimator`(例如,`LinearRegressor`)来实现常见的机器学习算法。此外,您可以编写您自己的[自定义 
# FIX: this cell uses np.array but the notebook's import cell only imports
# tensorflow and pandas — numpy must be imported or the cell raises NameError.
import numpy as np


def input_evaluation_set():
    """Toy example of the (features, labels) format an input function yields.

    Returns:
        features: dict mapping each feature name to an array of values,
            one entry per example.
        labels: array holding one integer class label (0-2) per example.
    """
    features = {'SepalLength': np.array([6.4, 5.0]),
                'SepalWidth': np.array([2.8, 2.3]),
                'PetalLength': np.array([5.6, 3.3]),
                'PetalWidth': np.array([2.2, 1.0])}
    labels = np.array([2, 1])
    return features, labels


def input_fn(features, labels, training=True, batch_size=256):
    """An input function for training or evaluating.

    Args:
        features: dict (or DataFrame) of feature-name -> values.
        labels: array-like of class labels aligned with `features`.
        training: if True, shuffle and repeat indefinitely so the
            Estimator can run for an arbitrary number of steps.
        batch_size: number of examples per emitted batch.

    Returns:
        A batched `tf.data.Dataset` of (features, labels) pairs.
    """
    # Convert the in-memory inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))

    # Shuffle and repeat only in training mode; evaluation runs one epoch.
    if training:
        dataset = dataset.shuffle(1000).repeat()

    return dataset.batch(batch_size)
鸢尾花问题是一个经典的分类问题。幸运的是,Tensorflow 提供了几个预创建的 Estimator 分类器,其中包括:

* `tf.estimator.DNNClassifier` 用于多类别分类的深度模型
* `tf.estimator.DNNLinearCombinedClassifier` 用于广度与深度模型
* `tf.estimator.LinearClassifier` 用于基于线性模型的分类器

对于鸢尾花问题,`tf.estimator.DNNClassifier` 似乎是最好的选择。您可以这样实例化该 Estimator:
predictions = classifier.predict(
    input_fn=lambda: input_fn(predict_x))

# `predict` yields one dict per example; show each predicted species,
# its probability, and the species we expected for that example.
for prediction, expected_species in zip(predictions, expected):
    predicted_class = prediction['class_ids'][0]
    confidence = prediction['probabilities'][predicted_class]

    message = 'Prediction is "{}" ({:.1f}%), expected "{}"'
    print(message.format(SPECIES[predicted_class],
                         100 * confidence,
                         expected_species))
github_jupyter
## [Experiments] Uncertainty Sampling with a 1D Gaussian Process as model First, we define a prior probablility for a model. The GaussianRegressor approximates this model using an optimization method (probably similar to EM) for a given data input. The resulting model has a mean and a certainty. We use these to determine the next data point that should be labeled and critizise the data set. ``` %matplotlib inline from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel) import math import numpy as np from matplotlib import pyplot as plt size = 100 kernel = 1.0 * RBF(length_scale=1.0,length_scale_bounds=(1e-1,10.0)) gp = GaussianProcessRegressor(kernel=kernel) # plot prior probability of model plt.figure(figsize=(8, 8)) plt.subplot(2, 1, 1) X_ = np.linspace(0, 5, size) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) plt.plot(X_, y_mean, 'k', lw=3, zorder=9) plt.fill_between(X_, y_mean - y_std, y_mean + y_std, alpha=0.2, color='k') y_samples = gp.sample_y(X_[:, np.newaxis], 10) plt.plot(X_, y_samples, lw=1) plt.xlim(0, 5) plt.ylim(-3, 3) plt.title("Prior (kernel: %s)" % kernel, fontsize=12) # Generate data and fit GP rng = np.random.RandomState(4) X = np.linspace(0, 5, 100)[:, np.newaxis] y = np.sin((X[:, 0] - 2.5) ** 2) budget = 10 requested_X = [] requested_y = [] # init model with random data point start = np.random.choice(np.arange(size)) requested_X.append(X[start]) requested_y.append(y[start]) gp.fit(requested_X, requested_y) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) for index in range(2,10): max_std = np.unravel_index(np.argmax(y_std, axis=None), y_std.shape) requested_X.append(X[max_std]) requested_y.append(y[max_std]) gp.fit(requested_X, requested_y) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) plt.plot(X_, y_mean, 'k', lw=3, zorder=9) plt.fill_between(X_, y_mean - y_std, y_mean + 
y_std, alpha=0.2, color='k') y_samples = gp.sample_y(X_[:, np.newaxis], 7) plt.plot(X_, y_samples, lw=1) plt.plot(X_, y, lw=2,color='b',zorder =8, dashes=[1,1],) plt.scatter(requested_X, requested_y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0)) plt.xlim(0, 5) plt.ylim(-3, 3) plt.title("%s examles: Posterior (kernel: %s)\n Log-Likelihood: %.3f" % (index, gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)), fontsize=12) plt.show() ``` Note how the new data point we aquired after 9 iterations completely changed the certainty about our model.
github_jupyter
# Probability Distribution: In [probability theory](https://en.wikipedia.org/wiki/Probability_theory) and [statistics](https://en.wikipedia.org/wiki/statistics), a probability distribution is a [mathematical function](https://en.wikipedia.org/wiki/Function_(mathematics)) that, stated in simple terms, can be thought of as providing the probabilities of occurrence of different possible outcomes in an experiment. In more technical terms, the probability distribution is a description of a random phenomenon in terms of the probabilities of events. Examples of random phenomena can include the results of an experiment or survey. A probability distribution is defined in terms of an underlying sample space, which is the set of all possible outcomes of the random phenomenon being observed. ### Discrete and Continuous Distributions Probability distributions are generally divided into two classes. A __discrete probability distribution__ (applicable to the scenarios where the set of possible outcomes is discrete, such as a coin toss or a roll of dice) can be encoded by a discrete list of the probabilities of the outcomes, known as a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function). On the other hand, a __continuous probability distribution__ (applicable to the scenarios where the set of possible outcomes can take on values in a continuous range (e.g. real numbers), such as the temperature on a given day) is typically described by probability density functions (with the probability of any individual outcome actually being 0). Such distributions are generally described with the help of [probability density functions](https://en.wikipedia.org/wiki/Probability_density_function). 
### In this notebook, we discuss about most important distributions * **Bernoulli distribution** * **Binomial distribution** * **Poisson distribution** * **Normal distribution** #### Some Essential Terminologies * __Mode__: for a discrete random variable, the value with highest probability (the location at which the probability mass function has its peak); for a continuous random variable, a location at which the probability density function has a local peak. * __Support__: the smallest closed set whose complement has probability zero. * __Head__: the range of values where the pmf or pdf is relatively high. * __Tail__: the complement of the head within the support; the large set of values where the pmf or pdf is relatively low. * __Expected value or mean__: the weighted average of the possible values, using their probabilities as their weights; or the continuous analog thereof. * __Median__: the value such that the set of values less than the median, and the set greater than the median, each have probabilities no greater than one-half. * __Variance__: the second moment of the pmf or pdf about the mean; an important measure of the dispersion of the distribution. * __Standard deviation__: the square root of the variance, and hence another measure of dispersion. * __Symmetry__: a property of some distributions in which the portion of the distribution to the left of a specific value is a mirror image of the portion to its right. * __Skewness__: a measure of the extent to which a pmf or pdf "leans" to one side of its mean. The third standardized moment of the distribution. * __Kurtosis__: a measure of the "fatness" of the tails of a pmf or pdf. The fourth standardized moment of the distribution. 
## Bernoulli distribution
The Binomial Distribution can instead be thought of as the sum of outcomes of an event following a Bernoulli distribution.
The probability of getting exactly $k$ successes in $n$ trials is given by the probability mass function: $${\Pr(k;n,p)=\Pr(X=k)={n \choose k}p^{k}(1-p)^{n-k}}$$ for k = 0, 1, 2, ..., n, where $${\displaystyle {\binom {n}{k}}={\frac {n!}{k!(n-k)!}}}$$ ``` from scipy.stats import binom ``` #### Generate random variates 8 coins are flipped (or 1 coin is flipped 8 times), each with probability of success (1) of 0.25 This trial/experiment is repeated for 10 times ``` k=binom.rvs(8,0.25,size=10) print("Number of success for each trial:",k) print("Average of the success:", np.mean(k)) sns.distplot(binom.rvs(n=10, p=0.5, size=1000), hist=True, kde=False) plt.show() print("A fair coin is spinning 5 times\n"+"-"*35) pr=0.5 # Fair coin toss probability n=5 mean, var, skew, kurt = binom.stats(n=n,p=pr, moments='mvsk') print("Mean:",mean) print("Variance:",var) print("Skew:",skew) print("Kurtosis:",kurt) print("\nNow a biased coin is spinning 5 times...\n"+"-"*45) pr=0.7 # Biased coin toss probability n=5 mean, var, skew, kurt = binom.stats(n=n,p=pr, moments='mvsk') print("Mean:",mean) print("Variance:",var) print("Skew:",skew) print("Kurtosis:",kurt) ``` #### Standard deviation, mean, median ``` n=5 pr=0.7 print("\n{} biased coins with likelihood {} are spinning...\n".format(n,pr)+"-"*50) print("Std. 
# Compare binomial pmfs for three (n, p) parameter pairs on one grid of k.
n = 40
pr = 0.5
rv = binom(n, pr)
x = np.arange(0, 41, 1)
pmf1 = rv.pmf(x)

n = 40
pr = 0.15
rv = binom(n, pr)
pmf2 = rv.pmf(x)

n = 50
pr = 0.6
rv = binom(n, pr)
pmf3 = rv.pmf(x)

plt.figure(figsize=(12, 6))
plt.title("Probability mass function: $\\binom{n}{k}\, p^k (1-p)^{n-k}$\n",
          fontsize=20)
plt.scatter(x, pmf1)
plt.scatter(x, pmf2)
plt.scatter(x, pmf3, c='k')
# FIX: the second curve is computed with p=0.15, but the legend previously
# labelled it "p=0.3" — the label now matches the plotted parameters.
plt.legend(["$n=40, p=0.5$", "$n=40, p=0.15$", "$n=50, p=0.6$"], fontsize=15)
plt.xlabel("Number of successful trials ($k$)", fontsize=15)
plt.ylabel("Probability of success", fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.grid(True)
plt.show()
= k × (k − 1) × (k − 2) × … × 2 × 1 is the factorial of k. #### Generate random variates ``` la=5 r = poisson.rvs(mu=la, size=20) print("Random variates with lambda={}: {}".format(la,r)) la=0.5 r = poisson.rvs(mu=la, size=20) print("Random variates with lambda={}: {}".format(la,r)) data_poisson = poisson.rvs(mu=3, size=10000) sns.distplot(data_poisson, kde=False) plt.show() print("For small lambda\n"+"-"*25) la=0.5 mean, var, skew, kurt = poisson.stats(mu=la, moments='mvsk') print("Mean:",mean) print("Variance:",var) print("Skew:",skew) print("Kurtosis:",kurt) print("\nNow for large lambda\n"+"-"*30) la=5 mean, var, skew, kurt = poisson.stats(mu=la, moments='mvsk') print("Mean:",mean) print("Variance:",var) print("Skew:",skew) print("Kurtosis:",kurt) ``` #### Standard deviation, mean, median ``` la=5 print("For lambda = {}\n-------------------------".format(la)) print("Std. dev:",poisson.std(mu=la)) print("Mean:",poisson.mean(mu=la)) print("Median:",poisson.median(mu=la)) ``` #### For the complete list of functions and methods please [see this link](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html#scipy.stats.poisson). ## Normal (Gaussian) distribution In probability theory, the normal (or Gaussian or Gauss or Laplace–Gauss) distribution is a very common continuous probability distribution. Normal distributions are important in statistics and are often used in the natural and social sciences to represent real-valued random variables whose distributions are not known. A random variable with a Gaussian distribution is said to be normally distributed and is called a normal deviate. The normal distribution is useful because of the **[central limit theorem](https://en.wikipedia.org/wiki/Central_limit_theorem)**. 
In its most general form, under some conditions (which include finite variance), it states that **averages of samples of observations of random variables independently drawn from independent distributions converge in distribution to the normal**, that is, they become normally distributed when the number of observations is sufficiently large. Physical quantities that are expected to be the sum of many independent processes (such as measurement errors) often have distributions that are nearly normal. Moreover, many results and methods (such as propagation of uncertainty and least squares parameter fitting) can be derived analytically in explicit form when the relevant variables are normally distributed. ### PDF The probability density function (PDF) is given by, $$ f(x\mid \mu ,\sigma ^{2})={\frac {1}{\sqrt {2\pi \sigma ^{2}}}}e^{-{\frac {(x-\mu )^{2}}{2\sigma ^{2}}}} $$ where, - $\mu$ is the mean or expectation of the distribution (and also its median and mode), - $\sigma$ is the standard deviation, and $\sigma^2$ is the variance. 
from scipy.stats import norm

# Evaluate the standard-normal PDF on a grid and plot it directly.
x = np.linspace(-3, 3, num=100)
constant = 1.0 / np.sqrt(2 * np.pi)
pdf_normal_distribution = constant * np.exp((-x**2) / 2.0)
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(x, pdf_normal_distribution)
ax.set_ylim(0)
ax.set_title('Normal Distribution', size=20)
ax.set_ylabel('Probability Density', size=20)

# Draw samples and compare their histogram with the analytic density curve.
mu, sigma = 0.5, 0.1
s = np.random.normal(mu, sigma, 1000)

# FIX: `normed=True` was deprecated and then removed from Matplotlib;
# `density=True` is the replacement (the histograms below already use it).
count, bins, ignored = plt.hist(s, 20, density=True)

# Overlay the closed-form normal pdf for the same (mu, sigma).
plt.plot(bins,
         1 / (sigma * np.sqrt(2 * np.pi)) *
         np.exp(-(bins - mu)**2 / (2 * sigma**2)),
         linewidth=3, color="y")
plt.show()

# Histograms of normals with different means and variances, overlaid.
a1 = np.random.normal(loc=0, scale=np.sqrt(0.2), size=100000)
a2 = np.random.normal(loc=0, scale=1.0, size=100000)
a3 = np.random.normal(loc=0, scale=np.sqrt(5), size=100000)
a4 = np.random.normal(loc=-2, scale=np.sqrt(0.5), size=100000)

plt.figure(figsize=(8, 5))
plt.hist(a1, density=True, bins=100, color='blue', alpha=0.5)
plt.hist(a2, density=True, bins=100, color='red', alpha=0.5)
plt.hist(a3, density=True, bins=100, color='orange', alpha=0.5)
plt.hist(a4, density=True, bins=100, color='green', alpha=0.5)
plt.xlim(-7, 7)
plt.show()
github_jupyter
<h1>02 Pandas</h1> $\newcommand{\Set}[1]{\{#1\}}$ $\newcommand{\Tuple}[1]{\langle#1\rangle}$ $\newcommand{\v}[1]{\pmb{#1}}$ $\newcommand{\cv}[1]{\begin{bmatrix}#1\end{bmatrix}}$ $\newcommand{\rv}[1]{[#1]}$ $\DeclareMathOperator{\argmax}{arg\,max}$ $\DeclareMathOperator{\argmin}{arg\,min}$ $\DeclareMathOperator{\dist}{dist}$ $\DeclareMathOperator{\abs}{abs}$ ``` %load_ext autoreload %autoreload 2 %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt ``` <h1>Series</h1> <p> A Series is like a 1D array. The values in the Series have an index, which, by default, uses consecutive integers from 0. </p> ``` s = pd.Series([2, 4, -12, 0, 2]) s ``` <p> You can get its shape and dtype as we did with numpy arrays: </p> ``` s.shape s.dtype ``` <p> You can get the values as a numpy array: </p> ``` s.values ``` <p> You can access by index and by slicing, as in Python: </p> ``` s[3] s[1:3] s[1:] ``` <p> A nice feature is Boolean indexing, where you extract values using a list of Booleans (not square brackets twice) and it returns the values that correspond to the Trues in the list: </p> ``` s[[True, True, False, False, True]] ``` <p> Operators are vectorized, similar to numpy: </p> ``` s * 2 s > 0 ``` <p> The next example is neat. It combines a vectorized operator with the idea of Boolean indexing: </p> ``` s[s > 0] ``` <p> There are various methods, as you would expect, many building out from numpy e.g.: </p> ``` s.sum() s.mean() s.unique() s.value_counts() ``` <p> One method is astype, which can do data type conversions: </p> ``` s.astype(float) ``` <h1>DataFrame</h1> <p> A DataFrame is a table of data, comprising rows and columns. The rows and columns both have an index. If you want more dimensions (we won't), then they support hierarchical indexing. </p> <p> There are various ways of creating a DataFrame, e.g. 
supply to its constructor a dictionary of equal-sized lists: </p> ``` df = pd.DataFrame({'a' : [1, 2, 3], 'b' : [4, 5, 6], 'c' : [7, 8, 9]}) df ``` <p> The keys of the dictionary became the column index, and it assigned integers to the other index. </p> <p> But, instead of looking at all the possible ways of doing this, we'll be reading the data in from a CSV file. We will assume that the first line of the file contains headers. These become the column indexes. </p> ``` df = pd.read_csv('../datasets/dataset_stop_and_searchA.csv') df ``` <p> Notice when the CSV file has an empty value (a pair of consecutive commas), then Pandas treats this as NaN, which is a float. </p> <p> A useful method at this point is describe: </p> ``` df.describe(include='all') ``` <p> We can also get the column headers, row index, shape and dtypes (not dtype): </p> ``` df.columns df.index df.shape df.dtypes ``` <p> You can retrieve a whole column, as a Series, using column indexing: </p> ``` df['Suspect-ethnicity'] ``` <p> Now you have a Series, you might use the unique or value_counts methods that we looked at earlier. </p> ``` df['Suspect-ethnicity'].unique() df['Suspect-ethnicity'].value_counts() ``` <p> If you ask for more than one column, then you must give them as a list (note the nested brackets). Then, the result is not a Series, but a DataFrame: </p> ``` df[['Suspect-ethnicity', 'Officer-ethnicity']] ``` <p> How do we get an individual row? The likelihood of wanting this in this module is small. </p> <p> If you do need to get an individual row, you cannot do indexing using square brackets, because that notation is for columns. </p> <p> The iloc and loc methods are probably what you would use. iloc retrieves by position. So df.iloc[0] retrieves the first row. loc, on the other hand, retrieves by label, so df.loc[0] retrieves the row whose label in the row index is 0. Confusing, huh? Ordinarily, they'll be the same. 
</p> ``` df.iloc[4] df.loc[4] ``` <p> But sometimes the position and the label in the row index will not correspond. This can happen, for example, after shuffling the rows of the DataFrame or after deleting a row (see example later). </p> <p> In any case, we're much more likely to want to select several rows (hence a DataFrame) using Boolean indexing, defined by a Boolean expression. We use a Boolean expression that defines a Series and then use that to index the DataFrame. </p> <p> As an example, here's a Boolean expression: </p> ``` df['Officer-ethnicity'] == 'Black' ``` <p> And here we use that Boolean expression to extract rows: </p> ``` df[df['Officer-ethnicity'] == 'Black'] ``` <p> In our Boolean expressions, we can do and, or and not (&, |, ~), but note that this often requires extra parentheses, e.g. </p> ``` df[(df['Officer-ethnicity'] == 'Black') & (df['Object-of-search'] == 'Stolen goods')] ``` <p> We can use this idea to delete rows. </p> <p> We use Boolean indexing as above to select the rows we want to keep. Then we assign that dataframe back to the original variable. </p> <p> For example, let's delete all male suspects, in other words, keep all female suspects: </p> ``` df = df[df['Gender'] == 'Female'].copy() df ``` <p> This example also illustrates the point from earlier about the difference between position (iloc) and label in the row index (loc). </p> ``` df.iloc[0] df.loc[0] # raises an exception df.iloc[11] # raises an exception df.loc[11] ``` <p> This is often a source of errors when writing Pandas. So one tip is, whenever you perform an operation that has the potential to change the row index, then reset the index so that it corresponds to the positions: </p> ``` df.reset_index(drop=True, inplace=True) df ``` <p> Deleting columns can be done in the same way as we deleted rows, i.e. 
We may see an example of this and a few other methods in our lectures and future labs.
github_jupyter
纵向 $d$ 表示单词数,横向 $n$ 表示文档数,即 $d = \text{vocab size}$
##### 15 ![15](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0015.jpg) ##### 16 ![16](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0016.jpg) ##### 17 ![17](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0017.jpg) ##### 18 ![18](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0018.jpg) ##### 19 ![19](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0019.jpg) ##### 20 ![20](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0020.jpg) ##### 21 ![21](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0021.jpg) ##### 22 ![22](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0022.jpg) ##### 23 ![23](http://7xqhfk.com1.z0.glb.clouddn.com/dl-waterloo/lec03/0023.jpg)
github_jupyter
# Traffic Light Classifier --- In this project, you’ll use your knowledge of computer vision techniques to build a classifier for images of traffic lights! You'll be given a dataset of traffic light images in which one of three lights is illuminated: red, yellow, or green. In this notebook, you'll pre-process these images, extract features that will help us distinguish the different types of images, and use those features to classify the traffic light images into three classes: red, yellow, or green. The tasks will be broken down into a few sections: 1. **Loading and visualizing the data**. The first step in any classification task is to be familiar with your data; you'll need to load in the images of traffic lights and visualize them! 2. **Pre-processing**. The input images and output labels need to be standardized. This way, you can analyze all the input images using the same classification pipeline, and you know what output to expect when you eventually classify a *new* image. 3. **Feature extraction**. Next, you'll extract some features from each image that will help distinguish and eventually classify these images. 4. **Classification and visualizing error**. Finally, you'll write one function that uses your features to classify *any* traffic light image. This function will take in an image and output a label. You'll also be given code to determine the accuracy of your classification model. 5. **Evaluate your model**. To pass this project, your classifier must be >90% accurate and never classify any red lights as green; it's likely that you'll need to improve the accuracy of your classifier by changing existing features or adding new features. I'd also encourage you to try to get as close to 100% accuracy as possible! 
Here are some sample images from the dataset (from left to right: red, green, and yellow traffic lights): <img src="images/all_lights.png" width="50%" height="50%"> --- ### *Here's what you need to know to complete the project:* Some template code has already been provided for you, but you'll need to implement additional code steps to successfully complete this project. Any code that is required to pass this project is marked with **'(IMPLEMENTATION)'** in the header. There are also a couple of questions about your thoughts as you work through this project, which are marked with **'(QUESTION)'** in the header. Make sure to answer all questions and to check your work against the [project rubric](https://review.udacity.com/#!/rubrics/1213/view) to make sure you complete the necessary classification steps! Your project submission will be evaluated based on the code implementations you provide, and on two main classification criteria. Your complete traffic light classifier should have: 1. **Greater than 90% accuracy** 2. ***Never* classify red lights as green** # 1. Loading and Visualizing the Traffic Light Dataset This traffic light dataset consists of 1484 number of color images in 3 categories - red, yellow, and green. As with most human-sourced data, the data is not evenly distributed among the types. There are: * 904 red traffic light images * 536 green traffic light images * 44 yellow traffic light images *Note: All images come from this [MIT self-driving car course](https://selfdrivingcars.mit.edu/) and are licensed under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).* ### Import resources Before you get started on the project code, import the libraries and resources that you'll need. 
``` import cv2 # computer vision library import helpers # helper functions import random import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg # for loading in images %matplotlib inline ``` ## Training and Testing Data All 1484 of the traffic light images are separated into training and testing datasets. * 80% of these images are training images, for you to use as you create a classifier. * 20% are test images, which will be used to test the accuracy of your classifier. * All images are pictures of 3-light traffic lights with one light illuminated. ## Define the image directories First, we set some variables to keep track of where our images are stored: IMAGE_DIR_TRAINING: the directory where our training image data is stored IMAGE_DIR_TEST: the directory where our test image data is stored ``` # Image data directories IMAGE_DIR_TRAINING = "traffic_light_images/training/" IMAGE_DIR_TEST = "traffic_light_images/test/" ``` ## Load the datasets These first few lines of code will load the training traffic light images and store all of them in a variable, `IMAGE_LIST`. This list contains the images and their associated label ("red", "yellow", "green"). You are encouraged to take a look at the `load_dataset` function in the helpers.py file. This will give you a good idea about how lots of image files can be read in from a directory using the [glob library](https://pymotw.com/2/glob/). The `load_dataset` function takes in the name of an image directory and returns a list of images and their associated labels. For example, the first image-label pair in `IMAGE_LIST` can be accessed by index: ``` IMAGE_LIST[0][:]```. ``` # Using the load_dataset function in helpers.py # Load training data IMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TRAINING) ``` ## Visualize the Data The first steps in analyzing any dataset are to 1. load the data and 2. look at the data. 
Seeing what it looks like will give you an idea of what to look for in the images, what kind of noise or inconsistencies you have to deal with, and so on. This will help you understand the image dataset, and **understanding a dataset is part of making predictions about the data**. --- ### Visualize the input images Visualize and explore the image data! Write code to display an image in `IMAGE_LIST`: * Display the image * Print out the shape of the image * Print out its corresponding label See if you can display at least one of each type of traffic light image – red, green, and yellow — and look at their similarities and differences. ``` plt.imshow(IMAGE_LIST[750][0]) plt.show() ## TODO: Write code to display an image in IMAGE_LIST (try finding a yellow traffic light!) print(IMAGE_LIST[750][0].shape) print(IMAGE_LIST[750][1]) ## TODO: Print out 1. The shape of the image and 2. The image's label # The first image in IMAGE_LIST is displayed below (without information about shape or label) selected_image = IMAGE_LIST[0][0] plt.imshow(selected_image) plt.show() ``` # 2. Pre-process the Data After loading in each image, you have to standardize the input and output! ### Input This means that every input image should be in the same format, of the same size, and so on. We'll be creating features by performing the same analysis on every picture, and for a classification task like this, it's important that **similar images create similar features**! ### Output We also need the output to be a label that is easy to read and easy to compare with other labels. It is good practice to convert categorical data like "red" and "green" to numerical data. A very common classification output is a 1D list that is the length of the number of classes - three in the case of red, yellow, and green lights - with the values 0 or 1 indicating which class a certain image is. 
For example, since we have three classes (red, yellow, and green), we can make a list with the order: [red value, yellow value, green value]. In general, order does not matter, we choose the order [red value, yellow value, green value] in this case to reflect the position of each light in descending vertical order. A red light should have the label: [1, 0, 0]. Yellow should be: [0, 1, 0]. Green should be: [0, 0, 1]. These labels are called **one-hot encoded labels**. *(Note: one-hot encoding will be especially important when you work with [machine learning algorithms](https://machinelearningmastery.com/how-to-one-hot-encode-sequence-data-in-python/)).* <img src="images/processing_steps.png" width="80%" height="80%"> --- <a id='task2'></a> ### (IMPLEMENTATION): Standardize the input images * Resize each image to the desired input size: 32x32px. * (Optional) You may choose to crop, shift, or rotate the images in this step as well. It's very common to have square input sizes that can be rotated (and remain the same size), and analyzed in smaller, square patches. It's also important to make all your images the same size so that they can be sent through the same pipeline of classification steps! ``` # This function should take in an RGB image and return a new, standardized version def standardize_input(image): standard_im = cv2.resize(image, (32,32)) return standard_im plt.imshow(IMAGE_LIST[0][0]) plt.show() plt.imshow(standardize_input(IMAGE_LIST[0][0])) ``` ## Standardize the output With each loaded image, we also specify the expected output. For this, we use **one-hot encoding**. * One-hot encode the labels. To do this, create an array of zeros representing each class of traffic light (red, yellow, green), and set the index of the expected class number to 1. Since we have three classes (red, yellow, and green), we have imposed an order of: [red value, yellow value, green value]. 
To one-hot encode, say, a yellow light, we would first initialize an array to [0, 0, 0] and change the middle value (the yellow value) to 1: [0, 1, 0]. --- <a id='task3'></a> ### (IMPLEMENTATION): Implement one-hot encoding ``` def one_hot_encode(label): ## TODO: Create a one-hot encoded label that works for all classes of traffic lights one_hot_encoded = [0,0,0] if(label == 'red'): one_hot_encoded[0] = 1 elif(label == 'yellow'): one_hot_encoded[1] = 1 elif(label == 'green'): one_hot_encoded[2] = 1 return one_hot_encoded ``` ### Testing as you Code After programming a function like this, it's a good idea to test it, and see if it produces the expected output. **In general, it's good practice to test code in small, functional pieces, after you write it**. This way, you can make sure that your code is correct as you continue to build a classifier, and you can identify any errors early on so that they don't compound. All test code can be found in the file `test_functions.py`. You are encouraged to look through that code and add your own testing code if you find it useful! One test function you'll find is: `test_one_hot(self, one_hot_function)` which takes in one argument, a one_hot_encode function, and tests its functionality. If your one_hot_label code does not work as expected, this test will print out an error message that will tell you a bit about why your code failed. Once your code works, this should print out TEST PASSED. ``` # Importing the tests import test_functions tests = test_functions.Tests() # Test for one_hot_encode function tests.test_one_hot(one_hot_encode) ``` ## Construct a `STANDARDIZED_LIST` of input images and output labels. This function takes in a list of image-label pairs and outputs a **standardized** list of resized images and one-hot encoded labels. This uses the functions you defined above to standardize the input and output, so those functions must be complete for this standardization to work! 
``` def standardize(image_list): # Empty image data array standard_list = [] # Iterate through all the image-label pairs for item in image_list: image = item[0] label = item[1] # Standardize the image standardized_im = standardize_input(image) # One-hot encode the label one_hot_label = one_hot_encode(label) # Append the image, and it's one hot encoded label to the full, processed list of image data standard_list.append((standardized_im, one_hot_label)) return standard_list # Standardize all training images STANDARDIZED_LIST = standardize(IMAGE_LIST) ``` ## Visualize the standardized data Display a standardized image from STANDARDIZED_LIST and compare it with a non-standardized image from IMAGE_LIST. Note that their sizes and appearance are different! ``` ## TODO: Display a standardized image and its label plt.imshow(IMAGE_LIST[0][0]) plt.show() image = standardize(IMAGE_LIST) plt.imshow(image[0][0]) plt.show() ``` # 3. Feature Extraction You'll be using what you know about color spaces, shape analysis, and feature construction to create features that help distinguish and classify the three types of traffic light images. You'll be tasked with creating **one feature** at a minimum (with the option to create more). The required feature is **a brightness feature using HSV color space**: 1. A brightness feature. - Using HSV color space, create a feature that helps you identify the 3 different classes of traffic light. - You'll be asked some questions about what methods you tried to locate this traffic light, so, as you progress through this notebook, always be thinking about your approach: what works and what doesn't? 2. (Optional): Create more features! Any more features that you create are up to you and should improve the accuracy of your traffic light classification algorithm! One thing to note is that, to pass this project you must **never classify a red light as a green light** because this creates a serious safety risk for a self-driving car. 
To avoid this misclassification, you might consider adding another feature that specifically distinguishes between red and green lights. These features will be combined near the end of this notebook to form a complete classification algorithm. ## Creating a brightness feature There are a number of ways to create a brightness feature that will help you characterize images of traffic lights, and it will be up to you to decide on the best procedure to complete this step. You should visualize and test your code as you go. Pictured below is a sample pipeline for creating a brightness feature (from left to right: standardized image, HSV color-masked image, cropped image, brightness feature): <img src="images/feature_ext_steps.png" width="70%" height="70%"> ## RGB to HSV conversion Below, a test image is converted from RGB to HSV colorspace and each component is displayed in an image. ``` # Convert and image to HSV colorspace # Visualize the individual color channels image_num = 0 test_im = STANDARDIZED_LIST[image_num][0] test_label = STANDARDIZED_LIST[image_num][1] # Convert to HSV hsv = cv2.cvtColor(test_im, cv2.COLOR_RGB2HSV) # Print image label print('Label [red, yellow, green]: ' + str(test_label)) # HSV channels h = hsv[:,:,0] s = hsv[:,:,1] v = hsv[:,:,2] # Plot the original image and the three channels f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('Standardized image') ax1.imshow(test_im) ax2.set_title('H channel') ax2.imshow(h, cmap='gray') ax3.set_title('S channel') ax3.imshow(s, cmap='gray') ax4.set_title('V channel') ax4.imshow(v, cmap='gray') ``` --- <a id='task7'></a> ### (IMPLEMENTATION): Create a brightness feature that uses HSV color space Write a function that takes in an RGB image and returns a 1D feature vector and/or single value that will help classify an image of a traffic light. The only requirement is that this function should apply an HSV colorspace transformation, the rest is up to you. 
From this feature, you should be able to estimate an image's label and classify it as either a red, green, or yellow traffic light. You may also define helper functions if they simplify your code. ``` ## This feature should use HSV colorspace values def create_feature(rgb_image): hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV) # apply red mask on the upper, yellow mask on the middle and the green mask on the lower low_s = int(np.mean(hsv[:,:,1])) low_v = int(np.mean(hsv[:,:,2])) high_v = 255 high_h = 255 red_lower = np.array([150,low_s,low_v]) red_upper = np.array([180, high_h, high_v]) green_lower = np.array([75, low_s, low_v]) green_upper = np.array([100, high_h, high_v]) yellow_lower = np.array([15, low_s, low_v]) yellow_upper = np.array([70, high_h,high_v]) red_mask = cv2.inRange(hsv, red_lower, red_upper) red_count = np.count_nonzero(red_mask) yellow_mask = cv2.inRange(hsv, yellow_lower, yellow_upper) yellow_count = np.count_nonzero(yellow_mask) green_mask = cv2.inRange(hsv, green_lower, green_upper) green_count = np.count_nonzero(green_mask) return [red_count, yellow_count, green_count] ``` ## (QUESTION 1): How do the features you made help you distinguish between the 3 classes of traffic light images? **Answer:** * converted the image to hsv color space to extract the features. * The hue component of the image indicates which color and as the traffic signal pixels have higher value component. * After tweaking with the thresholds for the mask, set the one resulting in maximum accuracy. * the mask is created and the number of non zero pixels in the mask is counted. * the mask (Red, green or Yellow) containing the maximum number of non zero components is sent to the one hot encoder. * The one hot encoder returns the encoded value of the color. # 4. 
Classification and Visualizing Error Using all of your features, write a function that takes in an RGB image and, using your extracted features, outputs whether a light is red, green or yellow as a one-hot encoded label. This classification function should be able to classify any image of a traffic light! You are encouraged to write any helper functions or visualization code that you may need, but for testing the accuracy, make sure that this `estimate_label` function returns a one-hot encoded label. --- <a id='task8'></a> ### (IMPLEMENTATION): Build a complete classifier ``` # This function should take in RGB image input # Analyze that image using your feature creation code and output a one-hot encoded label def estimate_label(rgb_image): standardized_image = standardize_input(rgb_image) rgb_count = create_feature(standardized_image) predicted_label = [] if(rgb_count[2] > rgb_count[1]) and (rgb_count[2] > rgb_count[0]): predicted_label = one_hot_encode('green') elif(rgb_count[1] > rgb_count[0]) and (rgb_count[1] > rgb_count[2]): predicted_label = one_hot_encode('yellow') else: predicted_label = one_hot_encode('red') return predicted_label ``` ## Testing the classifier Here is where we test your classification algorithm using our test set of data that we set aside at the beginning of the notebook! This project will be complete once you've programmed a "good" classifier. A "good" classifier in this case should meet the following criteria (and once it does, feel free to submit your project): 1. Get above 90% classification accuracy. 2. Never classify a red light as a green light. ### Test dataset Below, we load in the test dataset, standardize it using the `standardize` function you defined above, and then **shuffle** it; this ensures that order will not play a role in testing accuracy. 
``` # Using the load_dataset function in helpers.py # Load test data TEST_IMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TEST) # Standardize the test data STANDARDIZED_TEST_LIST = standardize(TEST_IMAGE_LIST) # Shuffle the standardized test data random.shuffle(STANDARDIZED_TEST_LIST) ``` ## Determine the Accuracy Compare the output of your classification algorithm (a.k.a. your "model") with the true labels and determine the accuracy. This code stores all the misclassified images, their predicted labels, and their true labels, in a list called `MISCLASSIFIED`. This code is used for testing and *should not be changed*. ``` # Constructs a list of misclassified images given a list of test images and their labels # This will throw an AssertionError if labels are not standardized (one-hot encoded) def get_misclassified_images(test_images): # Track misclassified images by placing them into a list misclassified_images_labels = [] # Iterate through all the test images # Classify each image and compare to the true label for image in test_images: # Get true data im = image[0] true_label = image[1] assert(len(true_label) == 3), "The true_label is not the expected length (3)." # Get predicted label from your classifier predicted_label = estimate_label(im) assert(len(predicted_label) == 3), "The predicted_label is not the expected length (3)." 
# Compare true and predicted labels if(predicted_label != true_label): # If these labels are not equal, the image has been misclassified misclassified_images_labels.append((im, predicted_label, true_label)) # Return the list of misclassified [image, predicted_label, true_label] values return misclassified_images_labels # Find all misclassified images in a given test set MISCLASSIFIED = get_misclassified_images(STANDARDIZED_TEST_LIST) # Accuracy calculations total = len(STANDARDIZED_TEST_LIST) num_correct = total - len(MISCLASSIFIED) accuracy = num_correct/total print('Accuracy: ' + str(accuracy)) print("Number of misclassified images = " + str(len(MISCLASSIFIED)) +' out of '+ str(total)) ``` --- <a id='task9'></a> ### Visualize the misclassified images Visualize some of the images you classified wrong (in the `MISCLASSIFIED` list) and note any qualities that make them difficult to classify. This will help you identify any weaknesses in your classification algorithm. ``` # Visualize misclassified example(s) for i in MISCLASSIFIED: plt.imshow(i[0]) plt.show() print(i[1], i[2]) ``` --- <a id='question2'></a> ## (Question 2): After visualizing these misclassifications, what weaknesses do you think your classification algorithm has? Please note at least two. **Answer:** * Applying the filter on the whole image results in considering the background component. Some images have dominant backgrounds, hence resulting in misclassification. * Some images have an arrow signal, which results in fewer pixels. Hence with a very small background, there are chances of misclassification. ## Test if you classify any red lights as green **To pass this project, you must not classify any red lights as green!** Classifying red lights as green would cause a car to drive through a red traffic light, so this red-as-green error is very dangerous in the real world. The code below lets you test to see if you've misclassified any red lights as green in the test set. 
**This test assumes that `MISCLASSIFIED` is a list of tuples with the order: [misclassified_image, predicted_label, true_label].** Note: this is not an all-encompassing test, but it's a good indicator that, if you pass, you are on the right track! This iterates through your list of misclassified examples and checks to see if any red traffic lights have been mistakenly labelled [0, 0, 1] (green). ``` # Importing the tests import test_functions tests = test_functions.Tests() if(len(MISCLASSIFIED) > 0): # Test code for one_hot_encode function tests.test_red_as_green(MISCLASSIFIED) else: print("MISCLASSIFIED may not have been populated with images.") ```
github_jupyter
# Tutorial 01: Running Sumo Simulations This tutorial walks through the process of running non-RL traffic simulations in Flow. Simulations of this form act as non-autonomous baselines and depict the behavior of human dynamics on a network. Similar simulations may also be used to evaluate the performance of hand-designed controllers on a network. This tutorial focuses primarily on the former use case, while an example of the latter may be found in `exercise07_controllers.ipynb`. In this exercise, we simulate an initially perturbed single-lane ring road. We witness in simulation that as time advances the initial perturbations do not dissipate, but instead propagate and expand until vehicles are forced to periodically stop and accelerate. For more information on this behavior, we refer the reader to the following article [1]. ## 1. Components of a Simulation All simulations, both in the presence and absence of RL, require two components: a *scenario*, and an *environment*. Scenarios describe the features of the transportation network used in simulation. This includes the positions and properties of nodes and edges constituting the lanes and junctions, as well as properties of the vehicles, traffic lights, inflows, etc. in the network. Environments, on the other hand, initialize, reset, and advance simulations, and act as the primary interface between the reinforcement learning algorithm and the scenario. Moreover, custom environments may be used to modify the dynamical features of a scenario. ## 2. Setting up a Scenario Flow contains a plethora of pre-designed scenarios used to replicate highways, intersections, and merges in both closed and open settings. All these scenarios are located in flow/scenarios. In order to recreate a ring road network, we begin by importing the scenario `LoopScenario`. 
``` from flow.scenarios.loop import LoopScenario ``` This scenario, as well as all other scenarios in Flow, is parametrized by the following arguments: * name * vehicles * net_params * initial_config * traffic_lights These parameters allow a single scenario to be recycled for a multitude of different network settings. For example, `LoopScenario` may be used to create ring roads of variable length with a variable number of lanes and vehicles. ### 2.1 Name The `name` argument is a string variable depicting the name of the scenario. This has no effect on the type of network created. ``` name = "ring_example" ``` ### 2.2 VehicleParams The `VehicleParams` class stores state information on all vehicles in the network. This class is used to identify the dynamical behavior of a vehicle and whether it is controlled by a reinforcement learning agent. Moreover, information pertaining to the observations and reward function can be collected from various get methods within this class. The initial configuration of this class describes the number of vehicles in the network at the start of every simulation, as well as the properties of these vehicles. We begin by creating an empty `VehicleParams` object. ``` from flow.core.params import VehicleParams vehicles = VehicleParams() ``` Once this object is created, vehicles may be introduced using the `add` method. This method specifies the types and quantities of vehicles at the start of a simulation rollout. For a description of the various arguments associated with the `add` method, we refer the reader to the following documentation ([VehicleParams.add](https://flow.readthedocs.io/en/latest/flow.core.html?highlight=vehicleparam#flow.core.params.VehicleParams)). When adding vehicles, their dynamical behaviors may be specified either by the simulator (default), or by user-generated models. For longitudinal (acceleration) dynamics, several prominent car-following models are implemented in Flow. 
For this example, the acceleration behavior of all vehicles will be defined by the Intelligent Driver Model (IDM) [2]. ``` from flow.controllers.car_following_models import IDMController ``` Another controller we define is for the vehicle's routing behavior. For closed networks where the route for any vehicle is repeated, the `ContinuousRouter` controller is used to perpetually reroute all vehicles to the initial set route. ``` from flow.controllers.routing_controllers import ContinuousRouter ``` Finally, we add 22 vehicles of type "human" with the above acceleration and routing behavior into the `Vehicles` class. ``` vehicles.add("human", acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=22) ``` ### 2.3 NetParams `NetParams` are network-specific parameters used to define the shape and properties of a network. Unlike most other parameters, `NetParams` may vary drastically depending on the specific network configuration, and accordingly most of its parameters are stored in `additional_params`. In order to determine which `additional_params` variables may be needed for a specific scenario, we refer to the `ADDITIONAL_NET_PARAMS` variable located in the scenario file. ``` from flow.scenarios.loop import ADDITIONAL_NET_PARAMS print(ADDITIONAL_NET_PARAMS) ``` Importing the `ADDITIONAL_NET_PARAMS` dict from the ring road scenario, we see that the required parameters are: * **length**: length of the ring road * **lanes**: number of lanes * **speed_limit**: speed limit for all edges * **resolution**: resolution of the curves on the ring. Setting this value to 1 converts the ring to a diamond. At times, other inputs may be needed from `NetParams` to recreate proper network features/behavior. These requirements can be found in the scenario's documentation. For the ring road, no attributes are needed aside from the `additional_params` terms. 
Furthermore, for this exercise, we use the scenario's default parameters when creating the `NetParams` object. ``` from flow.core.params import NetParams net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) ``` ### 2.4 InitialConfig `InitialConfig` specifies parameters that affect the positioning of vehicles in the network at the start of a simulation. These parameters can be used to limit the edges and number of lanes vehicles originally occupy, and provide a means of adding randomness to the starting positions of vehicles. In order to introduce a small initial disturbance to the system of vehicles in the network, we set the `perturbation` term in `InitialConfig` to 1m. ``` from flow.core.params import InitialConfig initial_config = InitialConfig(spacing="uniform", perturbation=1) ``` ### 2.5 TrafficLightParams `TrafficLightParams` are used to describe the positions and types of traffic lights in the network. These inputs are outside the scope of this tutorial, and instead are covered in `exercise06_traffic_lights.ipynb`. For our example, we create an empty `TrafficLightParams` object, thereby ensuring that none are placed on any nodes. ``` from flow.core.params import TrafficLightParams traffic_lights = TrafficLightParams() ``` ## 3. Setting up an Environment Several environments in Flow exist to train autonomous agents of different forms (e.g. autonomous vehicles, traffic lights) to perform a variety of different tasks. These environments are often scenario or task specific; however, some can be deployed on an ambiguous set of scenarios as well. One such environment, `AccelEnv`, may be used to train a variable number of vehicles in a fully observable network with a *static* number of vehicles. ``` from flow.envs.loop.loop_accel import AccelEnv ``` Although we will not be training any autonomous agents in this exercise, the use of an environment allows us to view the cumulative reward simulation rollouts receive in the absence of autonomy. 
Environments in Flow are parametrized by three components: * `EnvParams` * `SumoParams` * `Scenario` ### 3.1 SumoParams `SumoParams` specifies simulation-specific variables. These variables include the length of a simulation step (in seconds) and whether to render the GUI when running the experiment. For this example, we consider a simulation step length of 0.1s and activate the GUI. Another useful parameter is `emission_path`, which is used to specify the path where the emissions output will be generated. They contain a lot of information about the simulation, for instance the position and speed of each car at each time step. If you do not specify any emission path, the emission file will not be generated. More on this in Section 5. ``` from flow.core.params import SumoParams sumo_params = SumoParams(sim_step=0.1, render=True, emission_path='data') ``` ### 3.2 EnvParams `EnvParams` specify environment and experiment-specific parameters that either affect the training process or the dynamics of various components within the scenario. Much like `NetParams`, the attributes associated with this parameter are mostly environment specific, and can be found in the environment's `ADDITIONAL_ENV_PARAMS` dictionary. ``` from flow.envs.loop.loop_accel import ADDITIONAL_ENV_PARAMS print(ADDITIONAL_ENV_PARAMS) ``` Importing the `ADDITIONAL_ENV_PARAMS` variable, we see that it consists of only one entry, "target_velocity", which is used when computing the reward function associated with the environment. We use this default value when generating the `EnvParams` object. ``` from flow.core.params import EnvParams env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) ``` ## 4. Setting up and Running the Experiment Once the inputs to the scenario and environment classes are ready, we are ready to set up an `Experiment` object. 
``` from flow.core.experiment import Experiment ``` These objects may be used to simulate rollouts in the absence of reinforcement learning agents, as well as acquire behaviors and rewards that may be used as a baseline with which to compare the performance of the learning agent. In this case, we choose to run our experiment for one rollout consisting of 3000 steps (300 s). **Note**: When executing the below code, remember to click on the <img style="display:inline;" src="img/play_button.png"> Play button after the GUI is rendered. ``` # create the scenario object scenario = LoopScenario(name="ring_example", vehicles=vehicles, net_params=net_params, initial_config=initial_config, traffic_lights=traffic_lights) # create the environment object env = AccelEnv(env_params, sumo_params, scenario) # create the experiment object exp = Experiment(env) # run the experiment for a set number of rollouts / time steps _ = exp.run(1, 3000, convert_to_csv=True) ``` As we can see from the above simulation, the initial perturbations in the network propagate and intensify, eventually leading to the formation of stop-and-go waves after approximately 180s. ## 5. Visualizing Post-Simulation Once the simulation is done, a .xml file will be generated in the location of the specified `emission_path` in `SumoParams` (assuming this parameter has been specified) under the name of the scenario. In our case, this is: ``` import os emission_location = os.path.join(exp.env.sim_params.emission_path, exp.env.scenario.name) print(emission_location + '-emission.xml') ``` The .xml file contains various vehicle-specific parameters at every time step. This information is transferred to a .csv file if the `convert_to_csv` parameter in `exp.run()` is set to True. 
This file looks as follows: ``` import pandas as pd pd.read_csv(emission_location + '-emission.csv') ``` As you can see, each row contains vehicle information for a certain vehicle (specified under the *id* column) at a certain time (specified under the *time* column). These information can then be used to plot various representations of the simulation, examples of which can be found in the `flow/visualize` folder. ## 6. Modifying the Simulation This tutorial has walked you through running a single lane ring road experiment in Flow. As we have mentioned before, these simulations are highly parametrizable. This allows us to try different representations of the task. For example, what happens if no initial perturbations are introduced to the system of homogenous human-driven vehicles? ``` initial_config = InitialConfig() ``` In addition, how does the task change in the presence of multiple lanes where vehicles can overtake one another? ``` net_params = NetParams( additional_params={ 'length': 230, 'lanes': 2, 'speed_limit': 30, 'resolution': 40 } ) ``` Feel free to experiment with all these problems and more! ## Bibliography [1] Sugiyama, Yuki, et al. "Traffic jams without bottlenecks—experimental evidence for the physical mechanism of the formation of a jam." New journal of physics 10.3 (2008): 033001. [2] Treiber, Martin, Ansgar Hennecke, and Dirk Helbing. "Congested traffic states in empirical observations and microscopic simulations." Physical review E 62.2 (2000): 1805.
github_jupyter
### Data Frame Plots documentation: http://pandas.pydata.org/pandas-docs/stable/visualization.html ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.style.use('ggplot') ``` The plot method on Series and DataFrame is just a simple wrapper around plt.plot() If the index consists of dates, it calls gcf().autofmt_xdate() to try to format the x-axis nicely as show in the plot window. ``` ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = ts.cumsum() ts.plot() plt.show() ``` On DataFrame, plot() is a convenience to plot all of the columns, and include a legend within the plot. ``` df = pd.DataFrame(np.random.randn(1000, 4), index=pd.date_range('1/1/2016', periods=1000), columns=list('ABCD')) df = df.cumsum() plt.figure() df.plot() plt.show() ``` You can plot one column versus another using the x and y keywords in plot(): ``` df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum() df3['A'] = pd.Series(list(range(len(df)))) df3.plot(x='A', y='B') plt.show() df3.tail() ``` ### Plots other than line plots Plotting methods allow for a handful of plot styles other than the default Line plot. These methods can be provided as the kind keyword argument to plot(). 
These include: - ‘bar’ or ‘barh’ for bar plots - ‘hist’ for histogram - ‘box’ for boxplot - ‘kde’ or 'density' for density plots - ‘area’ for area plots - ‘scatter’ for scatter plots - ‘hexbin’ for hexagonal bin plots - ‘pie’ for pie plots For example, a bar plot can be created the following way: ``` plt.figure() df.iloc[5].plot(kind='bar') plt.axhline(0, color='k') plt.show() df.iloc[5] ``` ### stacked bar chart ``` df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd']) df2.plot.bar(stacked=True) plt.show() ``` ### horizontal bar chart ``` df2.plot.barh(stacked=True) plt.show() ``` ### box plot ``` df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E']) df.plot.box() plt.show() ``` ### area plot ``` df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd']) df.plot.area() plt.show() ``` ### Plotting with Missing Data Pandas tries to be pragmatic about plotting DataFrames or Series that contain missing data. Missing values are dropped, left out, or filled depending on the plot type. | Plot Type | NaN Handling | | |----------------|-------------------------|---| | Line | Leave gaps at NaNs | | | Line (stacked) | Fill 0’s | | | Bar | Fill 0’s | | | Scatter | Drop NaNs | | | Histogram | Drop NaNs (column-wise) | | | Box | Drop NaNs (column-wise) | | | Area | Fill 0’s | | | KDE | Drop NaNs (column-wise) | | | Hexbin | Drop NaNs | | | Pie | Fill 0’s | | If any of these defaults are not what you want, or if you want to be explicit about how missing values are handled, consider using fillna() or dropna() before plotting. ### density plot ``` ser = pd.Series(np.random.randn(1000)) ser.plot.kde() plt.show() ``` ### lag plot Lag plots are used to check if a data set or time series is random. Random data should not exhibit any structure in the lag plot. Non-random structure implies that the underlying data are not random. 
``` from pandas.plotting import lag_plot plt.figure() data = pd.Series(0.1 * np.random.rand(1000) + 0.9 * np.sin(np.linspace(-99 * np.pi, 99 * np.pi, num=1000))) lag_plot(data) plt.show() ``` ### matplotlib gallery documentation: http://matplotlib.org/gallery.html
github_jupyter
<a href="http://landlab.github.io"><img style="float: left" src="../media/landlab_header.png"></a> # The deAlmeida Overland Flow Component <hr> <small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small> <hr> This notebook illustrates running the deAlmeida overland flow component in an extremely simple-minded way on a real topography, then shows it creating a flood sequence along an inclined surface with an oscillating water surface at one end. First, import what we'll need: ``` from landlab.components.overland_flow import OverlandFlow from landlab.plot.imshow import imshow_grid from landlab.plot.colors import water_colormap from landlab import RasterModelGrid from landlab.io.esri_ascii import read_esri_ascii from matplotlib.pyplot import figure import numpy as np from time import time %matplotlib inline ``` Pick the initial and run conditions ``` run_time = 100 # duration of run, (s) h_init = 0.1 # initial thin layer of water (m) n = 0.01 # roughness coefficient, (s/m^(1/3)) g = 9.8 # gravity (m/s^2) alpha = 0.7 # time-step factor (nondimensional; from Bates et al., 2010) u = 0.4 # constant velocity (m/s, de Almeida et al., 2012) run_time_slices = (10, 50, 100) ``` Elapsed time starts at 1 second. This prevents errors when setting our boundary conditions. ``` elapsed_time = 1.0 ``` Use Landlab methods to import an ARC ascii grid, and load the data into the field that the component needs to look at to get the data. This loads the elevation data, z, into a "field" in the grid itself, defined on the nodes. 
``` rmg, z = read_esri_ascii('Square_TestBasin.asc', name='topographic__elevation') rmg.set_closed_boundaries_at_grid_edges(True, True, True, True) # un-comment these two lines for a "real" DEM #rmg, z = read_esri_ascii('hugo_site.asc', name='topographic__elevation') #rmg.status_at_node[z<0.0] = rmg.BC_NODE_IS_CLOSED ``` We can get at this data with this syntax: ``` np.all(rmg.at_node['topographic__elevation'] == z) ``` Note that the boundary conditions for this grid mainly got handled with the final line of those three, but for the sake of completeness, we should probably manually "open" the outlet. We can find and set the outlet like this: ``` my_outlet_node = 100 # This DEM was generated using Landlab and the outlet node ID was known rmg.status_at_node[my_outlet_node] = rmg.BC_NODE_IS_FIXED_VALUE ``` Now initialize a couple more grid fields that the component is going to need: ``` rmg.add_zeros('surface_water__depth', at='node') # water depth (m) rmg.at_node['surface_water__depth'] += h_init ``` Let's look at our watershed topography ``` imshow_grid(rmg, 'topographic__elevation') #, vmin=1650.0) ``` Now instantiate the component itself ``` of = OverlandFlow( rmg, steep_slopes=True ) #for stability in steeper environments, we set the steep_slopes flag to True ``` Now we're going to run the loop that drives the component: ``` while elapsed_time < run_time: # First, we calculate our time step. dt = of.calc_time_step() # Now, we can generate overland flow. of.overland_flow() # Increased elapsed time print('Elapsed time: ', elapsed_time) elapsed_time += dt imshow_grid(rmg, 'surface_water__depth', cmap='Blues') ``` Now let's get clever, and run a set of time slices: ``` elapsed_time = 1. for t in run_time_slices: while elapsed_time < t: # First, we calculate our time step. dt = of.calc_time_step() # Now, we can generate overland flow. 
of.overland_flow() # Increased elapsed time elapsed_time += dt figure(t) imshow_grid(rmg, 'surface_water__depth', cmap='Blues') ``` ### Click here for more <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">Landlab tutorials</a>
github_jupyter
# Modes of the Ball-Channel Pendulum Linear Model ``` import numpy as np import numpy.linalg as la import matplotlib.pyplot as plt from resonance.linear_systems import BallChannelPendulumSystem %matplotlib widget ``` A (almost) premade system is available in `resonance`. The only thing missing is the function that calculates the canonical coefficients. ``` sys = BallChannelPendulumSystem() sys.constants sys.states def can_coeffs(mp, mb, l, g, r): M = np.array([[mp * l**2 + mb * r**2, -mb * r**2], [-mb * r**2, mb * r**2]]) C = np.zeros((2, 2)) K = np.array([[g * l * mp, g * mb * r], [g * mb * r, g * mb * r]]) return M, C, K sys.canonical_coeffs_func = can_coeffs ``` Once the system is completely defined the mass, damping, and stiffness matrices can be calculated and inspected: ``` M, C, K = sys.canonical_coefficients() M C K ``` ## Convert to mass normalized form (calculate $\tilde{\mathbf{K}}$) First calculate the Cholesky lower triangular decomposition matrix of $\mathbf{M}$, which is symmetric and postive definite. ``` L = la.cholesky(M) L ``` The transpose can be computed with `np.transpose()`, `L.transpose()` or `L.T` for short: ``` np.transpose(L) L.transpose() L.T ``` Check that $\mathbf{L}\mathbf{L}^T$ returns $M$. Note that in Python the `@` operator is used for matrix multiplication. The `*` operator will do elementwise multiplication. ``` L @ L.T ``` `inv()` computes the inverse, giving $\left(\mathbf{L}^T\right)^{-1}$: ``` la.inv(L.T) ``` $\mathbf{L}^{-1}\mathbf{M}\left(\mathbf{L}^T\right)^{-1} = \mathbf{I}$. Note that the off-diagonal terms are very small numbers. The reason these are not precisely zero is due to floating point arithmetic and the associated truncation errors. ``` la.inv(L) @ M @ la.inv(L.T) ``` $\tilde{\mathbf{K}} = \mathbf{L}^{-1}\mathbf{K}\left(\mathbf{L}^T\right)^{-1}$. Note that this matrix is symmetric. It is guaranteed to be symmetric if $\mathbf{K}$ is symmetric. 
``` Ktilde = la.inv(L) @ K @ la.inv(L.T) Ktilde ``` The entries of $\tilde{\mathbf{K}}$ can be accessed as so: ``` k11 = Ktilde[0, 0] k12 = Ktilde[0, 1] k21 = Ktilde[1, 0] k22 = Ktilde[1, 1] ``` # Calculate the eigenvalues of $\tilde{\mathbf{K}}$ The eigenvalues of this 2 x 2 matrix are found by forming the characteristic equation from: $$\textrm{det}\left(\tilde{\mathbf{K}} - \lambda \mathbf{I}\right) = 0$$ and solving the resulting quadratic polynomial for its roots, which are the eigenvalues. ``` lam1 = (k11 + k22) / 2 + np.sqrt((k11 + k22)**2 - 4 * (k11 * k22 - k12*k21)) / 2 lam1 lam2 = (k11 + k22) / 2 - np.sqrt((k11 + k22)**2 - 4 * (k11 * k22 - k12*k21)) / 2 lam2 ``` # Calculate the eigenfrequencies of the system $\omega_i = \sqrt{\lambda_i}$ ``` omega1 = np.sqrt(lam1) omega1 omega2 = np.sqrt(lam2) omega2 ``` And in Hertz: ``` fn1 = omega1/2/np.pi fn1 fn2 = omega2/2/np.pi fn2 ``` # Calculate the eigenvectors of $\tilde{\mathbf{K}}$ The eigenvectors can be found by substituting the value for $\lambda$ into: $$\tilde{\mathbf{K}}\hat{q}_0 = \lambda \hat{q}_0$$ and solving for $\hat{q}_0$. ``` v1 = np.array([-k12 / (k11 - lam1), 1]) v2 = np.array([-k12 / (k11 - lam2), 1]) ``` Check that they are orthogonal, i.e. the dot product should be zero. ``` np.dot(v1, v2) ``` The `norm()` function calculates the Euclidean norm, i.e. the vector's magnitude and the vectors can be normalized like so: ``` v1_hat = v1 / np.linalg.norm(v1) v2_hat = v2 / np.linalg.norm(v2) v1_hat v2_hat np.linalg.norm(v1_hat) ``` For any size $\tilde{\mathbf{K}}$ the `eig()` function can be used to calculate the eigenvalues and the normalized eigenvectors with one function call: ``` evals, evecs = np.linalg.eig(Ktilde) evals evecs ``` The columns of `evecs` correspond to the entries of `evals`. ``` P = evecs P ``` If P contains columns that are orthnormal, then $\mathbf{P}^T \mathbf{P} = \mathbf{I}$. 
Check this with: ``` P.T @ P ``` $\mathbf{P}$ can be used to find the matrix $\Lambda$ that decouples the differential equations. ``` Lam = P.T @ Ktilde @ P Lam ``` # Formulate solution to ODEs (simulation) The trajectory of the coordinates can be found with: $$ \bar{c}(t) = \sum_{i=1}^n c_i \sin(\omega_i t + \phi_i) \bar{u}_i $$ where $$ \phi_i = \arctan \frac{\omega_i \hat{q}_{0i}^T \bar{q}(0)}{\hat{q}_{0i}^T \dot{\bar{q}}(0)} $$ and $$ c_i = \frac{\hat{q}^T_{0i} \bar{q}(0)}{\sin\phi_i} $$ $c_i$ are the modal participation factors and reflect what propotional of each mode is excited given specific initial conditions. If the initial conditions are the eigenmode, $\bar{u}_i$, the all but the $i$th $c_i$ will be zero. A matrix $\mathbf{S} = \left(\mathbf{L}^T\right)^{-1} = \begin{bmatrix}\bar{u}_1 \quad \bar{u}_2\end{bmatrix}$ can be computed such that the columns are $\bar{u}_i$. ``` S = la.inv(L.T) @ P S u1 = S[:, 0] u2 = S[:, 1] u1 u2 ``` Define the initial coordinates as a scalar factor of the second eigenvector, which sets these values to small angles. ``` c0 = S[:, 1] / 400 np.rad2deg(c0) ``` Set the initial speeds to zero: ``` s0 = np.zeros(2) s0 ``` The initial mass normalized coordinates and speeds are then: ``` q0 = L.T @ c0 q0 qd0 = L.T @ s0 qd0 ``` Calculate the modal freqencies in radians per second. ``` ws = np.sqrt(evals) ws ``` The phase shifts for each mode can be found. Note that it is important to use `arctan2()` so that the quadrant and thus sign of the arc tangent is properly handled. $$ \phi_i = \arctan \frac{\omega_i \hat{q}_{0i}^T \bar{q}(0)}{\hat{q}_{0i}^T \dot{\bar{q}}(0)} $$ ``` phi1 = np.arctan2(ws * P[:, 0] @ q0, P[:, 0] @ qd0) phi1 phi2 = np.arctan2(ws * P[:, 1] @ q0, P[:, 1] @ qd0) phi2 ``` All $\phi$'s can be calculated in one line using NumPy's broadcasting feature: ``` phis = np.arctan2(ws * P.T @ q0, P.T @ qd0) phis ``` The phase shifts for this particular initial condition are $\pm90$ degrees. 
``` np.rad2deg(phis) ``` Now calculate the modal participation factors. $$ c_i = \frac{\hat{q}^T_{0i} \bar{q}(0)}{\sin\phi_i} $$ ``` cs = P.T @ q0 / np.sin(phis) cs ``` Note that the first participation factor is zero. This is because we've set the initial coordinate to be a scalar function of the second eigenvector. ## Simulate ``` t = np.linspace(0, 5, num=500) cs[1] * np.sin(ws[1] * t) ``` The following line will give an error because the dimensions of `u1` are not compatible with the dimensions of the preceding portion. It is possible for a single line to work like this if you take advatnage of NumPy's broadcasting rules. See https://scipy-lectures.org/intro/numpy/operations.html#broadcasting for more info. The `tile()` function is used to repeat `u1` as many times as needed. ``` # cs[1] * np.sin(ws[1] * t) * u1 c1 = cs[1] * np.sin(ws[1] * t) * np.tile(u1, (len(t), 1)).T c1.shape ``` `tile()` can be used to create a 2 x 1000 vector that repeats the vector $\hat{u}_i$ allowing a single line to calculate the mode contribution. Now use a loop to calculate the contribution of each mode and build the summation of contributions from each mode: ``` ct = np.zeros((2, len(t))) # 2 x m array to hold coordinates as a function of time for ci, wi, phii, ui in zip(cs, ws, phis, S.T): print(ci, wi, phii, ui) ct += ci * np.sin(wi * t + phii) * np.tile(ui, (len(t), 1)).T def sim(c0, s0, t): """Returns the time history of the coordinate vector, c(t) given the initial state and time. 
Parameters ========== c0 : ndarray, shape(n,) s0 : ndarray, shape(n,) t : ndarray, shape(m,) Returns ======= c(t) : ndarray, shape(n, m) """ q0 = L.T @ c0 qd0 = L.T @ s0 ws = np.sqrt(evals) phis = np.arctan2(ws * P.T @ q0, P.T @ qd0) cs = P.T @ q0 / np.sin(phis) c = np.zeros((2, 1000)) for ci, wi, phii, ui in zip(cs, ws, phis, S.T): c += ci * np.sin(wi * t + phii) * np.tile(ui, (len(t), 1)).T return c ``` Simulate and plot the first mode: ``` t = np.linspace(0, 5, num=1000) c0 = S[:, 0] / np.max(S[:, 0]) * np.deg2rad(10) s0 = np.zeros(2) fig, ax = plt.subplots() ax.plot(t, np.rad2deg(sim(c0, s0, t).T)) ax.set_xlabel('Time [s]') ax.set_ylabel('Angle [deg]') ax.legend([r'$\theta$', r'$\phi$']) ``` Simulate and plot the second mode: ``` t = np.linspace(0, 5, num=1000) c0 = S[:, 1] / np.max(S[:, 1]) * np.deg2rad(10) s0 = np.zeros(2) fig, ax = plt.subplots() ax.plot(t, np.rad2deg(sim(c0, s0, t).T)) ax.set_xlabel('Time [s]') ax.set_ylabel('Angle [deg]') ax.legend([r'$\theta$', r'$\phi$']) ``` Compare this to the free response from the system: ``` sys.coordinates['theta'] = c0[0] sys.coordinates['phi'] = c0[1] sys.speeds['alpha'] = 0 sys.speeds['beta'] = 0 traj = sys.free_response(5.0) traj[['theta', 'phi']].plot() sys.animate_configuration(fps=30, repeat=False) ``` Simulate with arbitrary initial conditions. ``` sys.coordinates['theta'] = np.deg2rad(12.0) sys.coordinates['phi'] = np.deg2rad(3.0) traj = sys.free_response(5.0) traj[['theta', 'phi']].plot() ```
github_jupyter
# Accessing WordNet through the NLTK interface >- [Accessing WordNet](#Accessing-WordNet) > > >- [WN-based Semantic Similarity](#WN-based-Semantic-Similarity) --- ## Accessing WordNet WordNet 3.0 can be accessed from NLTK by calling the appropriate NLTK corpus reader ``` from nltk.corpus import wordnet as wn ``` ### Retrieving Synsets The easiest way to retrieve synsets is by submitting the relevant lemma to the `synsets()` method, that returns the list of all the synsets containing it: ``` print(wn.synsets('dog')) ``` The optional paramater `pos` allows you to constrain the search to a given part of speech - available options: `wn.NOUN`, `wn.VERB`, `wn.ADJ`, `wn.ADV` ``` # let's ignore the verbal synsets from our previous results print(wn.synsets('dog', pos = wn.NOUN)) ``` You can use the `synset()` method together with the notation `lemma.pos.number` (e.g. `dog.n.01`) to access a given synset ``` # retrive the gloss of a given synset wn.synset('dog.n.01').definition() # let's see some examples wn.synset('dog.n.01').examples() ``` Did anyone notice something weird in these results? Why did I get `frank.n.02`? ``` # let's retrieve the lemmas associated with a given synset wn.synset('frank.n.02').lemmas() ``` What's the definition? ``` wn.synset('frank.n.02').definition() ``` The notation `lemmas.pos.number` is used to identify the **name** of the synset, that is the unique id that is used to store it in the semantic resources - note that it is different from the notation used to refer to synset lemmas, e.g. `frank.n.02.frank` ``` wn.synset('frank.n.02').name() ``` Applied to our original query... ``` # synsets for a given word wn.synsets('dog', pos = wn.NOUN) # synonyms for a particular meaning of a word wn.synset('dog.n.01').lemmas() wn.synset('dog.n.01').definition() wn.synset('dog.n.03').lemmas() wn.synset('dog.n.03').definition() ``` **Q. How are the senses in WordNet ordered?** A. *WordNet senses are ordered using sparse data from semantically tagged text. 
The order of the senses is given simply so that some of the most common uses are listed above others (and those for which there is no data are randomly ordered). The sense numbers and ordering of senses in WordNet should be considered random for research purposes.* (source: the [FAQ section](https://wordnet.princeton.edu/frequently-asked-questions) of the official WordNet web page) Finally, the method `all_synsets()` allows you to retrieve all the synsets in the resource: ``` for synset in list(wn.all_synsets())[:10]: print(synset) ``` ... again, you can use the optional `pos` paramter to constrain your search: ``` for synset in list(wn.all_synsets(wn.ADV))[:10]: print(synset) ``` ### Retrieving Semantic and Lexical Relations #### the Nouns sub-net NLTK makes it easy to explore the WordNet hierarchy. The `hyponyms()` method allows you to retrieve all the immediate hyponyms of our target synset ``` wn.synset('dog.n.01').hyponyms() ``` to move in the opposite direction (i.e. towards more general synsets) we can use: - either the `hypernyms()` method to retrieve the immediate hypernym (or hypernyms in the following case) ``` wn.synset('dog.n.01').hypernyms() ``` - or the `hypernym_paths()` method to retrieve all the hyperonymyc chain **up to the root node** ``` wn.synset('dog.n.01').hypernym_paths() ``` Another important semantic relation for the nouns sub-net is **meronymy**, that links an object (holonym) with its parts (meronym). 
There are three semantic relations of this kind in WordNet: - **Part meronymy**: the relation between an object and its separable components: ``` wn.synset('tree.n.01').part_meronyms() ``` - **Substance meronymy**: the relation between an object and the substance it is made of ``` wn.synset('tree.n.01').substance_meronyms() ``` - **Member meronymy**: the relation between a group and its members ``` wn.synset('tree.n.01').member_holonyms() ``` **Instances** do not have hypernyms, but **instance_hypernyms**: ``` # amsterdam is a national capital vs *Amsterdam is a kind of a national capital wn.synset('amsterdam.n.01').instance_hypernyms() wn.synset('amsterdam.n.01').hypernyms() ``` #### the Verbs sub-net Moving in the Verbs sub-net, the **troponymy** relation can be navigated by using the same methods used to navigate the nominal hyperonymyc relations ``` wn.synset('sleep.v.01').hypernyms() wn.synset('sleep.v.01').hypernym_paths() ``` The other central relation in the organization of the verbs is the **entailment** one: ``` wn.synset('eat.v.01').entailments() ``` #### Adjective clusters Adjectives are organized in clusters of **satellites** adjectives (labeled as `lemma.s.number`) connected to a central adjective (labeled as `lemma.a.number`) by means of the **similar_to** relation ``` # a satellite adjective is linked just to one central adjective wn.synset('quick.s.01').similar_tos() # a central adjective is linked to many satellite adjectives wn.synset('fast.a.01').similar_tos() ``` The **lemmas** of the central adjective of each cluster, moreover, are connected to their **antonyms**, that is to lemmas that have the opposite meaning ``` wn.lemma('fast.a.01.fast').antonyms() ``` But take note: ``` try: wn.synset('fast.a.01').antonyms() except AttributeError: print("antonymy is a LEXICAL relation, it cannot involve synsets") ``` ## WN-based Semantic Similarity Simulating the human ability to estimate semantic distances between concepts is crucial for: - 
Psycholinguistics: for long time the study of human semantic memory has been tied to the study of concepts similarity - Natural Language Processing: for any task that requires some sort of semantic comprehensions ### Classes of Semantic Distance Measures #### Relatedness - two concepts are related if **a relation of any sort** holds between them - information can be extracted from: - semantic networks - dictionaries - corpora #### Similarity - it is a special case of relatedness - the relation holding between two concepts **by virtue of their ontological status**, i.e. by virtue of their taxonomic positions (Resnik, 1995) - car - bicycle - \*car - fuel - information can be extracted from - hierarchical networks - taxnomies ### WordNet-based Similarity Measures ``` dog = wn.synset('dog.n.01') cat = wn.synset('cat.n.01') hit = wn.synset('hit.v.01') slap = wn.synset('slap.v.01') fish = wn.synset('fish.n.01') bird = wn.synset('bird.n.01') ``` #### Path Length-based measures These measures are based on $pathlen(c_1, c_2)$: - i.e. the number of arc in the shorted path connecting two nodes $c_1$ and $c_2$ ![alt text](images/pathlen.png) you can use the `shortest_path_distance()` method to count the number of arcs ``` fish.shortest_path_distance(bird) dog.shortest_path_distance(cat) ``` When two notes belongs to different sub-nets, it does not return any values... ``` print(dog.shortest_path_distance(hit)) ``` ... 
unless you simulate the existance of a **dummy root** by setting the `simulate_root` option to `True` ``` print(dog.shortest_path_distance(hit, simulate_root = True)) ``` This is quite handy expecially when working on the **verb sub-net** that **do not have a unique root node** (differently to what happens in the nouns sub-net) ``` print(hit.shortest_path_distance(slap)) print(hit.shortest_path_distance(slap, simulate_root = True)) ``` **Simple Path Length**: $$sim_{simple}(c_1,c_2) = \frac{1}{pathlen(c_1,c_2) + 1}$$ use the `path_similarity()` method to calculate this measure ``` dog.path_similarity(cat) ``` **Leacock & Chodorow (1998)** $$sim_{L\&C}(c_1,c_2) = -log \left(\frac{pathlen(c_1,c_2)}{2 \times D}\right)$$ where $D$ is the maximum depth of the taxonomy - as a consequence, $2 \times D$ is the maximum possible pathlen ``` dog.lch_similarity(cat) ``` you cannot compare synset belonging to different pos ``` try: dog.lch_similarity(hit) except Exception as e: print(e) ``` #### Wu & Palmer (1994) This measure is based on the notion of **Least Common Subsumer** - i.e. the lowest node that dominates both synsets, e.g. 
`LCS({fish}, {bird}) = {vertebrate, craniate}` ![alt text](images/lcs.png) NLTK allows you to use the `lowest_common_hypernyms()` method to identify the Least Common Subsumer of two nodes ``` dog.lowest_common_hypernyms(cat) ``` If necessary, use option `simulate_root` to simulate the existance onf a dummy root: ``` print(hit.lowest_common_hypernyms(slap, simulate_root = True)) ``` Wu & Palmer (1998) proposed to measure the semantic simliiarity between concepts by contrasting the depth of the LCS with the depths of the nodes: $$sim_{W\&P(c_1, c_2)} = \frac{2 \times depth(LCS(c_1, c_2))}{depth(c_1) + depth(c_2)}$$ where $depth(s)$ is the number of arcs between the root node and the node $s$ the minimum and the maximum depths of each node can be calculated with the `min_depth()` and `max_depth()` modules ``` print(dog.min_depth(), dog.max_depth()) ``` ...and the `wup_similarity()` (authors' names) method to calculate this measure (option `simulate_root` available) ``` print(dog.wup_similarity(cat)) ``` #### Information Content-based measures - the **Information Content** of a concept $C$ is the probability of a randomly selected word to be an instance of the concept $C$ (i.e. the synset $c$ or one of its hyponyms) $$IC(C) = -log(P(C))$$ - Following Resnik (1995), corpus frequencies can be used to estimate this probability $$P(C) = \frac{freq(C)}{N} = \frac{\sum_{w \in words(c)}count(w)}{N}$$ - $words(c)$ = set of words that are hierarchically included by $C$ (i.e. its hyponyms) - N = number of corpus tokens for which there is a representation in WordNet A fragment of the WN nominal hierarchy, in which each node has been labeled with its $P(C)$ (from Lin, 1998) ![alt text](images/ic.png) **Resnik (1995)** $$sim_{resnik}(c_1,c_2) = IC(LCS(c_1,c_2)) = -log(P(LCS(c_1,c_2)))$$ Several Information Content dictionaries are available in NLTK... 
``` from nltk.corpus import wordnet_ic # the IC estimated from the brown corpus brown_ic = wordnet_ic.ic('ic-brown.dat') # the IC estimated from the semcor semcor_ic = wordnet_ic.ic('ic-semcor.dat') ``` ... or it can be estimated form an available corpus ``` from nltk.corpus import genesis genesis_ic = wn.ic(genesis, False, 0.0) ``` Note that these calculation of the resnick measure depends on the corpus used to generate the information content ``` print(dog.res_similarity(cat, ic = brown_ic)) print(dog.res_similarity(cat, ic = semcor_ic)) print(dog.res_similarity(cat, ic = genesis_ic)) ``` **Lin (1998)** $$sim_{lin}(c_1,c_2) = \frac{log(P(common(c_1,c_2)))}{log(P(description(c_1,c_2)))} = \frac{2 \times IC(LCS(c_1,c_2))}{IC(c_1) + IC(c_2)}$$ - $common(c_1,c_2)$ = the information that is common between $c_1$ and $c_2$ - $description(c_1,c_2)$ = the information that is needed to describe $c_1$ and $c_2$ ``` print(dog.lin_similarity(cat, ic = brown_ic)) print(dog.lin_similarity(cat, ic = semcor_ic)) print(dog.lin_similarity(cat, ic = genesis_ic)) ``` **Jiang & Conrath (1997)** $$sim_{J\&C}(c_1,c_2) = \frac{1}{dist(c_1,c_2)} = \frac{1}{IC(c_1) + IC(c_2) - 2 \times IC(LCS(c_1, c_2))}$$ ``` print(dog.jcn_similarity(cat, ic = brown_ic)) print(dog.jcn_similarity(cat, ic = semcor_ic)) print(dog.jcn_similarity(cat, ic = genesis_ic)) ``` ### WordNet-based Relatedness Measures #### The Lesk algorithm (1986) - *“how to tell a pine cone from an ice cream cone”* - Lesk's intuition: let's have a look at the dictionary glosses pine [1]: *kind of **evergreen tree** with needle-shaped leaves* pine [2]: *waste away through sorrow or illness* cone [1]: *solid body which narrows to a point* cone [2]: *something of this shape wheter solid or hollow* cone [3]: *fruit of certain **evergreen tree**.* #### Extended Lesk (Banerjee and Pedersen, 2003) Glosses overlap score = sum of $n^2$, where $n$ is the length in words of each locution shared by two glosses - in what follows the gloss 
overlap score is $1^2 + 3^2$ `{chest of drawers, chest, bureau, dresser}` : *a **piece of furniture** with drawers for keeping **clothes**.* `{wardrobe, closet, press}` : *a tall **piece of furniture** that provides storage space for **clothes**.* This measure takes into consideration also che glosses of the synsets that are related to the target synsets by one of an apriori specified set of relations RELS: $$sim_{eLesk}(c_1, c_2) = \sum_{r,q \in RELS}overlap\ (gloss(r(c_1)),\ gloss(q(c_2)))$$ --- #### Now, here's a challenge for you... Let's suppose you have a list of word pair and that you want to measure their similarity by using WordNet. Your immediate problem is polisemy: a single word may refer to multiple concepts, so that a lemma may appear in more WordNet synsets. **Can you think of a way to deal with this issue** other that relying on some existing WSD tool? (TIP: *can you think of a way of filtering out some senses and/or combining multiple similarity scores in order to derive an unique word pair similarity score?*) ---
github_jupyter
Steane code fault tolerance encoding scheme b ======================================= 1. Set up two logical zero for Steane code based on the parity matrix in the book by Nielsen MA, Chuang IL. Quantum Computation and Quantum Information, 10th Anniversary Edition. Cambridge University Press; 2016. p. 474 2. Set up fault tolerance as per scheme (b) from Goto H. Minimizing resource overheads for fault-tolerant preparation of encoded states of the Steane code. Sci Rep. 2016 Jan 27;6:19578. 3. Find out if this scheme has a tolerance. Import the necessary function modules, including the SteaneCodeLogicalQubit class. The methods of this class are called in this notebook. ``` from qiskit import( QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer ) from qiskit.providers.aer.noise import NoiseModel from qiskit.providers.aer.noise.errors import pauli_error, depolarizing_error from circuits import SteaneCodeLogicalQubit from helper_functions import ( get_noise, count_valid_output_strings, string_reverse, process_FT_results, mean_of_list, calculate_standard_error, get_parity_check_matrix, get_codewords ) ``` Define constants so the process flow can be controlled from one place: ``` SINGLE_GATE_ERRORS = ['x', 'y', 'z', 'h', 's', 'sdg'] TWO_GATE_ERRORS = ['cx', 'cz'] NOISE = True #Test with noise SHOTS = 250000 #Number of shots to run MEASURE_NOISE = 0.0046 #Measurement noise not relevant SINGLE_GATE_DEPOLARISING = 0.000366 #Single gate noise TWO_GATE_DEPOLARISING = 0.022 ITERATIONS = 1 POST_SELECTION = True SIMULATOR = Aer.get_backend('qasm_simulator') ``` We specify the parity check matrix, since this defines the Steane code. It is validated before the logical qubit is initiated to check that it is orthogonal to the valid codewords. 
``` parity_check_matrix = get_parity_check_matrix() print(parity_check_matrix) codewords = get_codewords() print(codewords) if NOISE: noise_model = get_noise(MEASURE_NOISE, SINGLE_GATE_DEPOLARISING, TWO_GATE_DEPOLARISING, SINGLE_GATE_ERRORS, TWO_GATE_ERRORS ) rejected_accum = 0 accepted_accum = 0 valid_accum = 0 invalid_accum = 0 results = [] for iteration in range(ITERATIONS): qubit = SteaneCodeLogicalQubit(2, parity_check_matrix, codewords, ancilla = False, fault_tolerant_b = True, data_rounds = 3 ) qubit.set_up_logical_zero(0) for i in range(3): qubit.barrier() qubit.set_up_logical_zero(1) qubit.barrier() qubit.logical_gate_CX(0, 1) qubit.barrier() qubit.logical_measure_data_FT(logical_qubit = 1, measure_round = i + 1) qubit.barrier() qubit.logical_measure_data(0) if NOISE: result = execute(qubit, SIMULATOR, noise_model = noise_model, shots = SHOTS).result() else: result = execute(qubit, SIMULATOR, shots = SHOTS).result() counts = result.get_counts(qubit) error_rate, rejected, accepted, valid, invalid = process_FT_results(counts, codewords, verbose = True, data_start = 3, data_meas_qubits = 1, data_meas_repeats = 3, data_meas_strings = codewords, post_selection = POST_SELECTION ) rejected_accum = rejected + rejected_accum accepted_accum = accepted_accum + accepted valid_accum = valid_accum + valid invalid_accum = invalid_accum + invalid results.append(error_rate) mean_error_rate = mean_of_list(results) outside_accum = accepted_accum - valid_accum - invalid_accum standard_deviation, standard_error = calculate_standard_error(results) print(f'There are {rejected_accum} strings rejected and {accepted_accum} strings submitted for validation') print(f'Of these {accepted_accum} strings processed there are {valid_accum} valid strings and {invalid_accum} invalid_strings') if POST_SELECTION: print(f'There are {outside_accum} strings outside the codeword') print(f'The error rate is {mean_error_rate:.6f} and the standard error is {standard_error:.6f} ') 
# Render the full fault-tolerant encoding circuit built above and save it as
# an image; `fold = 43` wraps the (very wide) circuit after 43 columns so the
# drawing fits the page. Uses the matplotlib ('mpl') backend of Qiskit's draw.
qubit.draw(output='mpl', filename = './circuits/Steane_code_circuit_encoding_FTb.jpg', fold = 43)
```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style('whitegrid') plt.style.use("fivethirtyeight") %matplotlib inline # For reading stock data from yahoo from pandas_datareader.data import DataReader # For time stamps from datetime import datetime tech_list = ['AAPL', 'GOOG', 'MSFT', 'AMZN'] # Set up End and Start times for data grab end = datetime.now() start = datetime(end.year - 1, end.month, end.day) #For loop for grabing yahoo finance data and setting as a dataframe for stock in tech_list: # Set DataFrame as the Stock Ticker globals()[stock] = DataReader(stock, 'yahoo', start, end) company_list = [AAPL, GOOG, MSFT, AMZN] company_name = ["APPLE", "GOOGLE", "MICROSOFT", "AMAZON"] for company, com_name in zip(company_list, company_name): company["company_name"] = com_name df = pd.concat(company_list, axis=0) df.tail(10) plt.figure(figsize=(15, 6)) plt.subplots_adjust(top=1.25, bottom=1.2) for i, company in enumerate(company_list, 1): plt.subplot(2, 2, i) company['Adj Close'].plot() plt.ylabel('Adj Close') plt.xlabel(None) plt.title(f"Closing Price of {tech_list[i - 1]}") plt.tight_layout() end_date = datetime(2021,1,1) cut_date = datetime(2020,1,1) frac_missing = 0.3 df = DataReader('AAPL', data_source='yahoo', start='2012-01-01', end=end_date) df def delete_10(col): return col df_missing = df.copy().apply(delete_10, axis=0) df.isnull().sum() plt.figure(figsize=(16,6)) plt.title('Close Price History') plt.plot(df['Close'], 'o', markersize = 0.5) plt.xlabel('Date', fontsize=18) plt.ylabel('Close Price USD ($)', fontsize=18) plt.show() plt.figure(figsize=(16,6)) plt.title('Close Price History') plt.plot(df_missing['Close'], 'o', markersize = 0.5) plt.xlabel('Date', fontsize=18) plt.ylabel('Close Price USD ($)', fontsize=18) plt.show() # Select the variable of interest df_closed = df['Close'] # Define the X and Y vectors x = np.arange(0,df_closed.shape[0],1) y = df_closed.tolist() # Plot plt.figure(figsize = 
(16,6)) plt.plot(x,y, 'ok', markersize = 0.5) def stock(variable): index = np.floor(variable) difference = int((round(variable - index,1)*100)) values = np.linspace(y[index],y[index+1],100) value = values[difference] return value x_normalized = np.linspace(-1,1, len(x)) x_normalized df_missing['Close'] tointclose=df_missing['Close'] tointclose import pandas as pd import sympy as sym import numpy import matplotlib.pyplot as plt from pylab import mpl import numpy as np import pandas as pd import math class Interpolation: def __init__(self, data : pd.DataFrame): self.data = data self.x = np.arange(0,self.data.shape[0],1) self.y = self.data.tolist() self.xx=x.tolist() k=len(y) i=0 self.lista_x=[] self.lista_y=[] while (i<k): if pd.isna(self.y[i]): self.lista_y.append(self.y.pop(i)) self.lista_x.append(self.xx.pop(i)) k=len(self.y) i=0 else: i+=1 def newton_interpolation(self,X,Y,x): sum=Y[0] temp=np.zeros((len(X),len(X))) # Asignar la primera línea for i in range(0,len(X)): temp[i,0]=Y[i] temp_sum=1.0 for i in range(1,len(X)): #x polinomio temp_sum=temp_sum*(x-X[i-1]) # Calcular diferencia de medias for j in range(i,len(X)): temp[j,i]=(temp[j,i-1]-temp[j-1,i-1])/(X[j]-X[j-i]) sum+=temp_sum*temp[i,i] return sum def lagrange_interpolation(self,xi,fi): n = len(xi) x = sym.Symbol('x') polinomio = 0 divisorL = np.zeros(n, dtype = float) for i in range(0,n,1): # Termino de Lagrange numerador = 1 denominador = 1 for j in range(0,n,1): if (j!=i): numerador = numerador*(x-xi[j]) denominador = denominador*(xi[i]-xi[j]) terminoLi = numerador/denominador polinomio = polinomio + terminoLi*fi[i] divisorL[i] = denominador # simplifica el polinomio polisimple = polinomio.expand() # para evaluación numérica px = sym.lambdify(x,polisimple) # Puntos para la gráfica muestras = 101 a = np.min(xi) b = np.max(xi) pxi = np.linspace(a,b,muestras) pfi = px(pxi) return polisimple obj_interpolar=Interpolation(tointclose) 
funcion=obj_interpolar.lagrange_interpolation(obj_interpolar.xx[1:5],obj_interpolar.y[1:5]) min_pol = 3 max_pol = 6 interval = [] intervals = [] def cut_interval(interval): first_interval = interval[:min_pol+1] second_interval = [] done = False for x in interval[min_pol+1:]: if x != first_interval[-1] + 1 and not done: first_interval.append(x) else: second_interval.append(x) done = True return first_interval, second_interval for i, x in enumerate(obj_interpolar.xx): # Llenar la lista hasta su maximo if len(interval) == max_pol: intervals.append(interval) interval = [] # Si el intervalo esta vacio elif interval == []: # Verificar si lista de intervalos esta vacia if intervals != []: if x != intervals[-1][-1] + 1: first_interval, second_interval = cut_interval(intervals[-1]) intervals[-1] = first_interval interval = second_interval if len(interval) < max_pol: interval.append(x) else: intervals.append(interval) interval = [x] # Se puede agregar con tranquilidad, no hay discontinuidades else: interval.append(x) # Si esta vacia simplemente adicione elementos al intervalo actual # ya que esta construyendo el primer interbalo else: interval.append(x) # Si no esta vacio y no ha alzanzado el maximo adicionar hasta llegar a su # maximo else: interval.append(x) print(intervals) interval_tuples = [(interval[0], interval[-1]) for interval in intervals] interval_tuples cont = 0 last = -1 malos = 0 algo_pasa = [] for i, inter in enumerate(interval_tuples): print(inter) if obj_interpolar.xx.index(inter[1])-obj_interpolar.xx.index(inter[0]) > max_pol: cont += 1 if last != inter[0]-1: print('ALGO PASA') algo_pasa.append(i) malos +=1 last = inter[1] print(cont) print(malos) print(algo_pasa) # replacements = [] # for i in algo_pasa: # first_interval = intervals[i-1] # second_interval = intervals[i] # joint = first_interval + second_interval # new_intervals = [] # new_interval = [] # for i, x in enumerate(joint): # if len(new_interval) >= min_pol: # if new_intervals == []: # if x == 
new_interval[-1] + 1: # new_intervals.append(new_interval) # new_interval = [] # else: # new_interval.append(x) # else: # if x == new_interval[-1]+1: # new_intervals.append(new_interval) # new_interval = [] # else: # new_interval.append(x) # else: # new_interval.append(x) # if i == len(joint)-1: # new_intervals.append(new_interval) # replacements.extend(new_intervals) # replacements x_interpolate = [] y_interpolate = [] intervals = interval_tuples l=sym.Symbol('x') cont = 0 for inter in intervals: print(inter) beg = obj_interpolar.xx.index(inter[0]) end = obj_interpolar.xx.index(inter[1]) datos_interpolar_x = obj_interpolar.xx[beg:end+1] datos_interpolar_y = obj_interpolar.y[beg:end+1] funcion = obj_interpolar.lagrange_interpolation(datos_interpolar_x,datos_interpolar_y) for x in np.arange(datos_interpolar_x[0],datos_interpolar_x[-1]+1, 0.5): # print(x) cont += 1 x_interpolate.append(x) y = funcion.subs(l,x) y_interpolate.append(y) print(cont) plt.figure(figsize = (10,10)) plt.plot(x_interpolate[:2000],y_interpolate[:2000], 'k-', markersize = 0.5, label = 'Interpolacion') plt.plot(range(0,2000),df_closed.iloc[:2000], 'r-', markersize = 0.5, label = 'Real') plt.legend(loc = 'best') plt.show() plt.figure(figsize = (10,10)) plt.plot(x_interpolate,y_interpolate, 'k-', markersize = 0.5, label = 'Interpolacion') plt.plot(range(0,len(df_closed)),df_closed, 'r-', markersize = 0.5, label = 'Real') plt.legend(loc = 'best') plt.show() len(x_interpolate) tointclose.tail() len(x_interpolate) len(y_interpolate) dato_original dato_interpolado obj_interpolar.newton_interpolation(obj_interpolar.xx[1:5],obj_interpolar.y[1:5],obj_interpolar.lista_x[0]) #1. Randomly construct data import numpy as np x=range (10) y=np.random.randint (10, size=10) #2. Draw the original image import matplotlib as mpl import matplotlib.pyplot as plt %matplotlib inline #jupyter notebook import scipy from scipy.interpolate import splrep #with scipy library plt.plot (x, y) plt.show () #3. 
Draw a smooth curve from scipy.interpolate import splrep #Interpolation method, 50 represents the number of interpolation,Number>= number of actual data,Generally speaking, the greater the number of differences,The smoother the curve x_new=np.linspace (min (x), max (x), 50) y_smooth=splrep (x, y, x_new) plt.plot (x_new, y_smooth) plt.show () ```
github_jupyter
``` from pytorch_h5dataset.benchmark import Benchmarker, BenchmarkDataset from pytorch_h5dataset import H5DataLoader from torch.utils.data import DataLoader from torch import nn, float32, as_tensor from torch.nn import MSELoss from time import time from numpy import prod import seaborn as sns from matplotlib import pyplot as plt import psutil import platform cpu_count = 0 if platform.system() == 'Windows' else psutil.cpu_count() benchmarkdataset = BenchmarkDataset(dataset_root="H:/Datasets/coco2017") batch_size = 100 epochs = 100 device = 'cuda:0' benchmarker1 = Benchmarker() dataLoader1 = benchmarker1.decorate_iterator_class(H5DataLoader)(dataset=benchmarkdataset.h5dataset, device=device, batch_size=batch_size, return_meta_indices=True, pin_memory=True, num_workers=cpu_count) benchmarker2 = Benchmarker() dataloader2 = benchmarker2.decorate_iterator_class(DataLoader)(benchmarkdataset.imageFolderDataset, batch_size=batch_size, num_workers=cpu_count, pin_memory=True) criterion = MSELoss() for benchmarker, dataloader in ((benchmarker1, dataLoader1) , (benchmarker2,dataloader2)): benchmarker.reset_benchmarker() model = nn.Linear(3 * 244 * 244, 1000).to(device) sum_loss = 0 num_out = 0 t0 = time() for e in range(epochs): print('\r',e, end='') for sample, label in dataloader: if isinstance(label, tuple): label = label[0] x = sample.to(device).view(sample.size(0),-1) y = as_tensor(label.view(-1), dtype=float32,device=device).requires_grad_(True) y_out = model(x).argmax(1).float() num_out += prod(y_out.shape) loss = criterion(y, y_out) loss = loss.sum() sum_loss += loss.item() loss.backward() print(f"Time for {epochs} epochs was {time() - t0}") print(x.min(),x.max()) print(loss, num_out) #del dataloader, x,y, model, loss, sum_loss df = benchmarker1.get_stats_df() sns.set() legend = ['proc_cpu_util','proc_disk_io_bytes_read','proc_disk_io_count_read','proc_cpu_time_user', 'proc_mem_bytes_vms', 'sys_net_io_bytes_recv'] plt.rcParams["figure.figsize"] = (15,5) for col in 
legend: plt.plot(df[col]/ max(abs(df[col]))) plt.legend(legend) plt.show() legend = ['proc_cpu_util','proc_disk_io_bytes_read_acc','proc_disk_io_count_read_acc','proc_cpu_time_user_acc', 'proc_mem_bytes_vms_acc', 'sys_net_io_bytes_recv_acc'] plt.rcParams["figure.figsize"] = (15,5) for col in legend: plt.plot(df[col]) plt.legend(legend) plt.show() df = benchmarker2.get_stats_df() sns.set() legend = ['proc_cpu_util','proc_disk_io_bytes_read','proc_disk_io_count_read','proc_cpu_time_user', 'proc_mem_bytes_vms', 'sys_net_io_bytes_recv'] plt.rcParams["figure.figsize"] = (15,5) for col in legend: plt.plot(df[col]/ max(abs(df[col]))) plt.legend(legend) plt.show() legend = ['proc_cpu_util','proc_disk_io_bytes_read_acc','proc_disk_io_count_read_acc','proc_cpu_time_user_acc', 'proc_mem_bytes_vms_acc', 'sys_net_io_bytes_recv_acc'] plt.rcParams["figure.figsize"] = (15,5) for col in legend: plt.plot(df[col]) plt.legend(legend) plt.show() ```
github_jupyter
``` # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import random as rnd from random import seed from random import gauss import seaborn as sns#Understanding my variables import matplotlib.pyplot as plt %matplotlib inline # machine learning from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session test_data = pd.read_csv("/kaggle/input/titanic/test.csv") train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.shape test_data.shape train_data.columns test_data.columns train_data.head() test_data.head() graphshow_traindata = train_data.drop(['PassengerId'], axis=1) plt.figure(figsize=(10,10)) sns.lineplot(data=graphshow_traindata) test_data.isnull().values.sum() train_data.isnull().values.sum() test_data.columns[test_data.isna().any()].tolist() train_data.columns[train_data.isna().any()].tolist() test_data.nunique(axis=0) train_data.nunique(axis=0) sns.countplot('Survived',data=train_data) plt.show() train_data.describe().apply(lambda s: s.apply(lambda x: format(x, 'f'))) 
train_data.describe(include=['O'])
test_data.describe().apply(lambda s: s.apply(lambda x: format(x, 'f')))
test_data.describe(include=['O'])
# Correlation heatmap of the numeric training columns.
corr = train_data.corr()
sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, annot=True, cmap=sns.diverging_palette(220, 20, as_cmap=True))
sns.pairplot(train_data)
train_data['Age'].plot(kind='hist', bins=20, figsize=(12,6), facecolor='grey',edgecolor='black')
train_data['Pclass'].plot(kind='hist', bins=5, figsize=(2,2), facecolor='grey',edgecolor='black')
train_data['Fare'].plot(kind='hist', bins=20, figsize=(10,5), facecolor='grey',edgecolor='black')
# Survival rate by passenger class.
firstclass = train_data.loc[train_data.Pclass == 1]["Survived"]
rate_firstclass = sum(firstclass)/len(firstclass)
secondclass = train_data.loc[train_data.Pclass == 2]["Survived"]
rate_secondclass = sum(secondclass)/len(secondclass)
thirdclass = train_data.loc[train_data.Pclass == 3]["Survived"]
rate_thirdclass = sum(thirdclass)/len(thirdclass)
print("% of First class who survived:", rate_firstclass)
print("% of Second class who survived:", rate_secondclass)
# BUG FIX: the original printed "% of First class" next to rate_thirdclass.
print("% of Third class who survived:", rate_thirdclass)
pd.crosstab([train_data.Pclass],train_data.Survived,margins=True).style.background_gradient('Greens')
# Survival rate by port of embarkation (Q=Queenstown, S=Southampton, C=Cherbourg).
embarkedq = train_data.loc[train_data.Embarked == "Q"]["Survived"]
rate_embarkedq = sum(embarkedq)/len(embarkedq)
embarkeds = train_data.loc[train_data.Embarked == "S"]["Survived"]
rate_embarkeds = sum(embarkeds)/len(embarkeds)
embarkedc = train_data.loc[train_data.Embarked == "C"]["Survived"]
rate_embarkedc = sum(embarkedc)/len(embarkedc)
print("% of Queenstown passengers who survived:", rate_embarkedq)
# BUG FIX: the original swapped the Cherbourg/Southampton labels against the
# rate variables ("S" is Southampton, "C" is Cherbourg).
print("% of Southampton passengers who survived:", rate_embarkeds)
print("% of Cherbourg passengers who survived:", rate_embarkedc)
pd.crosstab([train_data.Embarked],train_data.Survived,margins=True).style.background_gradient('Greens')
# Survival rate by sex (continues on the next cell for males).
women = train_data.loc[train_data.Sex == 'female']["Survived"]
rate_women = sum(women)/len(women)
men = train_data.loc[train_data.Sex == 'male']["Survived"] rate_men = sum(men)/len(men) print("% of women who survived:", rate_women) print("% of men who survived:", rate_men) pd.crosstab([train_data.Sex,train_data.Survived],train_data.Pclass,margins=True).style.background_gradient(cmap='Greens') train_data.columns[train_data.isna().any()].tolist() g = sns.FacetGrid(train_data, col='Survived') g.map(plt.hist, 'Age', bins=20) sns.violinplot("Sex","Age", hue="Survived", data=train_data,split=True) sns.violinplot("Pclass","Age", hue="Survived", data=train_data,split=True) grid = sns.FacetGrid(train_data, row='Embarked', height=2.2, aspect=1.6) grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex') grid.add_legend() age_guess = train_data["Age"].mean() train_data["Age"].fillna(age_guess, inplace = True) test_data["Age"].fillna(age_guess, inplace = True) train_data.Age.unique() mostCommonPort = train_data.Embarked.dropna().mode()[0] train_data["Embarked"].fillna(mostCommonPort, inplace = True) test_data["Embarked"].fillna(mostCommonPort, inplace = True) train_data = train_data.drop(['Ticket', 'Cabin'], axis=1) test_data = test_data.drop(['Ticket', 'Cabin'], axis=1) y = train_data["Survived"] features = ["Pclass", "Sex", "Embarked", "Age"] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) X.shape, y.shape, X_test.shape rdm_Forest = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) rdm_Forest.fit(X, y) predictionsForest = rdm_Forest.predict(X_test) decision_tree = DecisionTreeClassifier() decision_tree.fit(X, y) predictionsDecisionTree = decision_tree.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictionsDecisionTree}) output.to_csv('my_submission_DecisionTree.csv', index=False) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictionsForest}) output.to_csv('my_submission_Forest.csv', index=False) print("Your submission was successfully saved!") 
```
github_jupyter
#Design a Deep Neural Network using Keras and pyTorch ``` import keras print(keras.__version__) import torch print(torch.__version__) ``` ##Tensors and Attributes ``` data = torch.tensor([[1,2,3],[4,5,6]]) print(data.shape) print(data.dtype) #dimesnion along each axis print(data.ndim) #number of axes print(data.device) import numpy as np data_np = np.array([[1,2,3],[4,5,6]]) print(data_np.shape) print(data_np.dtype) print(data_np.ndim) ``` ##Special Tensors ``` np_zeros = np.zeros((3,4),dtype='uint8') np_ones = np.ones((3,4)) np_rand = np.random.rand(3,4) # 3,4 indicates the shape of the resulting array np_arr = np.array([[1,2],[3,4],[5,6]]) print(np_zeros.shape) print(np_zeros.dtype) print(np_ones.shape) print(np_arr.shape) print(np_arr.ndim) print(np_arr.dtype) shape = (2,3) rand_tensor = torch.rand(shape) ones_tensor = torch.ones(shape) zeros_tensor = torch.zeros(shape) print(rand_tensor.shape) print(rand_tensor.ndim) print(rand_tensor.dtype) ``` ##Indexing and Slicing ``` tensor = torch.ones((5, 4)) tensor[:,1] = 0 print(tensor) np_ones = np.ones((5,4)) np_ones[:,1] = 0 print(np_ones) print(np_ones.shape) torch.mean(tensor,dim=0) np.mean(np_ones,axis=0) tensor = tensor +5 print(tensor) tensor = tensor * 5 print(tensor) W = torch.ones([1,4]) y = torch.matmul(W , torch.transpose(tensor,0,1)) print(y) np_ones = np_ones * 5 W_np = np.ones((1,4)) y_np = np.dot(W,np.transpose(np_ones)) print(y_np) ``` #Designing a Feed Forward Neural Network ## Checking for arbitrary values of w Simple Function y = w * x x single dimensional Tensor y single scalar value for wach sample ``` import numpy as np from matplotlib import pyplot as plt x_data =[1.0,2.0,3.0] y_data =[2.0,3.0,4.0] w = 1.0 def forward(x): return w*x def loss(x,y): y_pred = forward(x) return (y_pred-y) * (y_pred - y) mse_list = [] w_list = [] for w in np.arange(0.0,4.1,0.1): l=0; for x,y in zip(x_data,y_data): l = l+loss(x,y) mse_list.append(l/3) w_list.append(w) #print(mse_list) #print(w_list) 
plt.plot(w_list,mse_list) plt.xlabel('Parameter(w)') plt.ylabel('Loss (mse)') plt.show() ``` ## Gradient Descent Computation ``` import numpy as np from matplotlib import pyplot as plt x_data =[1.0,2.0,3.0] y_data =[2.0,3.0,4.0] w = 1.0 def forward(x): return w*x def loss(x,y): y_pred = forward(x) return (y_pred-y) * (y_pred - y) def gradient(x,y,w): return 2*x*(w*x-y) epochs=[] loss_epoch=[] for epoch in range(100): for(x,y) in zip(x_data,y_data): grad = gradient(x,y,w) w = w - grad*0.01 l = loss(x,y) #print(str(epoch)+":"+str(l)) epochs.append(epoch) loss_epoch.append(l) plt.plot(epochs,loss_epoch) plt.xlabel('Epochs') plt.ylabel('Loss Value') plt.show() print(w) ``` # Keras Workflow ``` import numpy as np from keras import models from keras import layers # Define Data x_data =np.array([1.0,2.0,3.0]) y_data =np.array([2.0,3.0,4.0]) #Define layers in the model model = models.Sequential() model.add(layers.Dense(1, use_bias=False, input_shape=(1,))) print(model.summary()) #Configure the learning process from keras import optimizers import keras model.compile(optimizer='sgd', loss='mse', metrics=keras.metrics.MeanSquaredError()) #Iterate the training data using fit hist = model.fit(x_data.reshape(3,1),y_data.reshape(3,1),batch_size=1,epochs=100) for layer in model.layers: print(layer.get_weights()) for key,value in hist.history.items(): print(key) hist.history['loss'] plt.plot(hist.history['loss']) plt.plot(hist.history['mean_squared_error'],'g*') plt.show() model.predict([[8.1]]) for layer in model.layers: print(layer.get_weights()) ``` # PyTorch Workflow ``` # This is formatted as code ``` Autograd and NN in pyTorch ``` import torch a = torch.tensor([2.,3.], requires_grad=True) #requires_grad is not set, gradient will noot be computed for that tensor b = torch.tensor([6.,3.], requires_grad=True) Q = 3*a**3 - b**2 #Explixitly mention the gradient computation external_grad = torch.tensor([1.,1.]) # Number of input values for which gradient to be computed 
Q.backward(gradient=external_grad) # Computes gradients and store the information in the tensors grad attribute print(9*a**2 == a.grad) print(-2*b == b.grad) import torch # Define Data x_data =np.array([1.0,2.0,3.0]) y_data =np.array([2.0,3.0,4.0]) x_data = x_data.reshape((3,1)) y_data = y_data.reshape((3,1)) x_tensor = torch.from_numpy(x_data) y_tensor = torch.from_numpy(y_data) #nn depends on autograd to define models and differentiate them. An nn.Module contains layers, and a method forward(input) that returns the output. #Define architecture import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() # an affine operation: y = Wx + b self.fc1 = nn.Linear(1, 1,bias=False) # 13 feature dimension def forward(self, x): x = self.fc1(x) return x model = Net() print(model) params = list(model.parameters()) # Each layer contains Weight and Biases print(len(params)) print(params[0].shape) print(params[0].requires_grad) print(params[0].dtype) print(type(params[0])) # Configure the training process Criterion = torch.nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(),lr=0.01) #Train_Model epoch_list=[] loss_list=[] for epoch in range(100): out = model(x_tensor.float()) loss = Criterion(out, y_tensor.float() ) optimizer.zero_grad() loss.backward() optimizer.step() epoch_list.append(epoch) loss_list.append(loss) from matplotlib import pyplot as plt plt.plot(epoch_list,loss_list) plt.xlabel('Epochs') plt.ylabel('Loss') plt.show() print(params[0]) ``` # Regression Example > Predict Median Price of home based on certain statistics about the area ## Keras Model It takes as an input a two dimensional array with 404 samples and 13 features Predicts a single sclar value for y ``` from keras.datasets import boston_housing (train_data,train_targets) , (test_data,test_targets) = boston_housing.load_data() print(train_data.shape) print(test_data.shape) train_data.mean(axis=0) #Preparing Data mean = 
train_data.mean(axis=0) train_data -=mean std = train_data.std(axis=0) train_data /=std test_data -=mean test_data /=std print(test_targets) def build_model(): model = models.Sequential() model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],))) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(1)) model.compile(optimizer='rmsprop',loss='mse',metrics=['mse']) return model model = build_model() history = model.fit(train_data, train_targets, epochs=100, batch_size=4) model.evaluate(test_data,test_targets) model.predict(test_data[5:7]) test_targets[6] ``` ## pyTorch Model ``` #nn depends on autograd to define models and differentiate them. An nn.Module contains layers, and a method forward(input) that returns the output. import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() # an affine operation: y = Wx + b self.fc1 = nn.Linear(13, 64) # 13 feature dimension self.fc2 = nn.Linear(64, 64) self.fc3 = nn.Linear(64,1) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x model = Net() print(model) params = list(model.parameters()) # Each layer contains Weight and Biases print(len(params)) print(params[0].shape) print(params[0].requires_grad) print(params[0].dtype) print(type(params[0])) tensor_train_data = torch.from_numpy(train_data) tensor_train_targets = torch.from_numpy(train_targets) tensor_train_targets= tensor_train_targets.view((404,1)) out = model(tensor_train_data.float()) print(out.shape) Criterion = torch.nn.MSELoss() optimizer = torch.optim.RMSprop(model.parameters()) loss = Criterion(out, tensor_train_targets.float() ) dataset = torch.utils.data.TensorDataset(tensor_train_data, tensor_train_targets) trainloader = torch.utils.data.DataLoader(dataset, batch_size=4,shuffle=True) for epoch in range(100): # loop over the dataset multiple times running_loss = 0.0 for i, data in 
enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs =model(inputs.float()) loss = Criterion(outputs, labels.float()) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if i % 101 == 100: # print every 2000 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 101)) running_loss = 0.0 print('Finished Training') tensor_test_data = torch.from_numpy(test_data) tensor_test_data = tensor_test_data.float() print(model(tensor_test_data[5])) print(test_targets[5]) ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from datetime import time import geopandas as gpd from shapely.geometry import Point, LineString, shape ``` ## Load Data ``` df = pd.read_csv(r'..\data\processed\trips_custom_variables.csv', dtype = {'VORIHORAINI':str, 'VDESHORAFIN':str}, parse_dates = ['start_time','end_time']) etap = pd.read_excel (r'..\data\raw\EDM2018XETAPAS.xlsx') df.set_index(["ID_HOGAR", "ID_IND", "ID_VIAJE"], inplace =True) etap.set_index(["ID_HOGAR", "ID_IND", "ID_VIAJE"], inplace =True) legs = df.join(etap, rsuffix = "_etap") # select only public transport trips legs = legs[legs.mode_simple == "public transport"] codes = pd.read_csv(r'..\data\processed\codes_translated.csv', dtype = {'CODE': float}) stops = gpd.read_file(r'..\data\raw\public_transport_madrid\madrid_crtm_stops.shp') legs_start_end = legs.sort_values("ID_ETAPA").groupby(["ID_HOGAR", "ID_IND", "ID_VIAJE"]).agg( {"C2SEXO": "first","ESUBIDA": "first", "ESUBIDA_cod": "first", "EBAJADA": "last", "EBAJADA_cod": "last", "N_ETAPAS_POR_VIAJE": "first", "VORIHORAINI": "first", "duration":"first", "DANNO": "first", "DMES": "first", "DDIA":"first"}) legs_start_end= legs_start_end[legs_start_end.ESUBIDA_cod.notna()] legs_start_end= legs_start_end[legs_start_end.EBAJADA_cod.notna()] ``` ### Preprocessing ``` # stops["id_custom"] = stops.stop_id.str.split("_").apply(lambda x: x[len(x)-1]) # s = stops.reset_index().set_index(["id_custom", "stop_name"])[["geometry"]] # Problem: match not working properly: id_custom multiple times within df_stations. 
For names not a match for every start / end stops_unique_name = stops.drop_duplicates("stop_name").set_index("stop_name") df_stations = legs_start_end.join(stops_unique_name, on ='ESUBIDA', how= "inner") df_stations = df_stations.join(stops_unique_name, how= "inner", on ='EBAJADA', lsuffix = "_dep", rsuffix = "_arrival") #df_stations["line"] = df_stations.apply(lambda x: LineString([x.geometry_dep, x.geometry_arrival]), axis = 1) #df_stations = gpd.GeoDataFrame(df_stations, geometry = df_stations.line) # df_stations[["VORIHORAINI", "VDESHORAFIN", "start_time", "end_time", "duration", "DANNO", "DMES", "DDIA", "activity_simple", "motive_simple", "daytime", "speed", "C2SEXO", "EDAD_FIN", "ESUBIDA", "ESUBIDA_cod", "EBAJADA", "EBAJADA_cod", "geometry_dep", "geometry_arrival"]].to_csv( # r'..\data\processed\public_transport_georeferenced.csv') #df_stations[["activity_simple", "motive_simple", "daytime", "speed", "C2SEXO", "EDAD_FIN", "ESUBIDA", "ESUBIDA_cod", "EBAJADA", "EBAJADA_cod", "geometry"]].to_file( # r'..\data\processed\public_transport_georeferenced.geojson', driver = "GeoJSON") ``` ### (use preprocessed data) ``` # df_stations = pd.read_csv(r'..\data\processed\public_transport_georeferenced.csv', dtype = {'VORIHORAINI':str, 'VDESHORAFIN':str, 'geometry_dep':'geometry'}) ``` ### counts for Flowmap ``` # todo: add linestring again for flowmap counts = df_stations.groupby(["ESUBIDA", "EBAJADA", "activity_simple", "C2SEXO"]).agg({"ID_ETAPA": "count", "ELE_G_POND_ESC2" : "sum", "geometry": "first"}) counts.rename({"ELE_G_POND_ESC2": "weighted_count"}, axis = 1, inplace = True) df_counts = gpd.GeoDataFrame(counts, geometry = "geometry") df_counts.to_file( r'..\data\processed\trip_counts_georef.geojson', driver = "GeoJSON") counts.shape counts_gender = df_stations.groupby(["ESUBIDA", "EBAJADA", "C2SEXO"]).agg({"ID_ETAPA": "count", "ELE_G_POND_ESC2" : "sum", "geometry": "first"}) counts_gender.rename({"ELE_G_POND_ESC2": "weighted_count"}, axis = 1, inplace = True) 
df_counts_gender = gpd.GeoDataFrame(counts_gender, geometry = "geometry") df_counts_gender.to_file( r'..\data\processed\trip_counts_gender_georef.geojson', driver = "GeoJSON") counts_activity = df_stations.groupby(["ESUBIDA", "EBAJADA", "activity_simple"]).agg({"ID_ETAPA": "count", "ELE_G_POND_ESC2" : "sum", "geometry": "first"}) counts_activity.rename({"ELE_G_POND_ESC2": "weighted_count"}, axis = 1, inplace = True) df_counts_activity = gpd.GeoDataFrame(counts_activity, geometry = "geometry") df_counts_activity.to_file( r'..\data\processed\trip_counts_activity_georef.geojson', driver = "GeoJSON") counts_motive = df_stations.groupby(["ESUBIDA", "EBAJADA", "motive_simple"]).agg({"ID_ETAPA": "count", "ELE_G_POND_ESC2" : "sum", "geometry": "first"}) counts_motive.rename({"ELE_G_POND_ESC2": "weighted_count"}, axis = 1, inplace = True) df_counts_motive = gpd.GeoDataFrame(counts_motive, geometry = "geometry") df_counts_motive.to_file( r'..\data\processed\trip_counts_motive_georef.geojson', driver = "GeoJSON") ``` ### comparison to car ``` import herepy routingApi = herepy.RoutingApi('i5L1qsCmPo7AkwqhCWGA9J2QKnuC-TSI9KNWBqEkdIk') # time and speed df_stations['start_time'] = pd.to_datetime(df_stations.VORIHORAINI, format = '%H%M') # df_stations['end_time'] = pd.to_datetime(df_stations.VDESHORAFIN, format = '%H%M', errors = 'coerce') # df_stations['duration'] = df_stations.end_time - df_stations.start_time df_stations["formatted_time"] = df_stations.DANNO.astype(str) + '-' + df_stations.DMES.astype(str).str.zfill(2) + '-' + df_stations.DDIA.astype(str).str.zfill(2) + 'T'+ df_stations.VORIHORAINI.str.slice(0,2) + ":" + df_stations.VORIHORAINI.str.slice(2,4) + ':00' df_stations["car_traveltime"] = None df_stations["pt_traveltime"] = None df_unique_routes = df_stations.drop_duplicates(["ESUBIDA", "EBAJADA", "geometry_dep", "geometry_arrival"]).copy() df_unique_routes.reset_index(drop = True, inplace = True) for i in range (len(df_unique_routes)): 
if(df_unique_routes.car_traveltime.notna()[i]): continue if i % 1000 == 0: print(i) try: resp_car = routingApi.car_route([df_unique_routes.iloc[i, ].geometry_dep.y, df_unique_routes.iloc[i, ].geometry_dep.x], [df_unique_routes.iloc[i, ].geometry_arrival.y, df_unique_routes.iloc[i, ].geometry_arrival.x], [herepy.RouteMode.car, herepy.RouteMode.fastest], departure = df_unique_routes.loc[i, "formatted_time"]) df_unique_routes.loc[i, "car_traveltime"] = resp_car.response["route"][0]["summary"]["travelTime"] except: print('car no route found, id:', i) df_unique_routes.loc[i, "car_traveltime"] = None try: resp_pt = routingApi.public_transport([df_unique_routes.iloc[i, ].geometry_dep.y, df_unique_routes.iloc[i, ].geometry_dep.x], [df_unique_routes.iloc[i, ].geometry_arrival.y, df_unique_routes.iloc[i, ].geometry_arrival.x], True, modes = [herepy.RouteMode.publicTransport, herepy.RouteMode.fastest], departure = df_unique_routes.loc[i, "formatted_time"]) df_unique_routes.loc[i, "pt_traveltime"] = resp_pt.response["route"][0]["summary"]["travelTime"] except: print('pt no route found, id:', i) df_unique_routes.loc[i, "pt_traveltime"] = None df_unique_routes[df_unique_routes.pt_traveltime.isna()].shape df_unique_routes[df_unique_routes.car_traveltime.isna()].shape df_unique_routes.to_csv(r'..\data\processed\unique_routings_run2_2.csv') df_unique_routes["car_traveltime_min"] = df_unique_routes.car_traveltime / 60 df_unique_routes["pt_traveltime_min"] = df_unique_routes.pt_traveltime / 60 df_stations = df_stations.join(df_unique_routes.set_index(["ESUBIDA", "EBAJADA"])[["car_traveltime_min", "pt_traveltime_min"]], on = ["ESUBIDA", "EBAJADA"]) df_stations = df_stations.join(legs["C2SEXO"],how = "left") df_stations = df_stations.join(legs["age_group"],how = "left") #days, seconds = df_stations.duration.dt.days, df_stations.duration.dt.seconds #df_stations["minutes"] = seconds % 3600 df_stations.drop_duplicates(inplace = True) df_stations["tt_ratio"] = None 
df_stations.loc[df_stations.pt_traveltime_min != 0, "tt_ratio"] = df_stations[df_stations.pt_traveltime_min != 0].pt_traveltime_min / df_stations[df_stations.pt_traveltime_min != 0].car_traveltime_min df_stations.loc[df_stations.car_traveltime_min != 0, "tt_ratio_duration"] = df_stations[df_stations.car_traveltime_min != 0].duration / df_stations[df_stations.car_traveltime_min != 0].car_traveltime_min df_stations[["start_time", "duration", "car_traveltime_min", "pt_traveltime_min", "tt_ratio", "tt_ratio_duration", "age_group"]] df_stations.tt_ratio = df_stations.tt_ratio.astype(float) df_stations.tt_ratio_duration = df_stations.tt_ratio_duration.astype(float) df_stations.groupby(["age_group", "C2SEXO"]).tt_ratio_duration.describe() df_stations.groupby(["age_group", "C2SEXO"]).tt_ratio.describe() ```
github_jupyter
# Variational Quantum Eigensolver - Ground State Energy for $LiH$ Molecule using the RY ansatz ``` import numpy as np # Importing standard Qiskit libraries from qiskit import QuantumCircuit, transpile, IBMQ from qiskit.tools.jupyter import * from qiskit.visualization import * from ibm_quantum_widgets import * from qiskit.providers.aer import QasmSimulator, StatevectorSimulator # Loading your IBM Quantum account(s) provider = IBMQ.load_account() # Chemistry Drivers from qiskit_nature.drivers.second_quantization.pyscfd import PySCFDriver from qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer from qiskit.opflow.primitive_ops import Z2Symmetries # Electroinic structure problem from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem # Qubit converter from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter # Mappers from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper # Initial state from qiskit_nature.circuit.library import HartreeFock # Variational form - circuit from qiskit.circuit.library import TwoLocal # Optimizer from qiskit.algorithms.optimizers import COBYLA, SLSQP, SPSA # Eigen Solvers # NumPy Minimum Eigen Solver from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory # ground state from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver # VQE Solver from qiskit.algorithms import VQE ``` Backend ``` qasm_sim = QasmSimulator() state_sim = StatevectorSimulator() ``` Drivers Below we set up a PySCF driver for LIH molecule at equilibrium bond length 1.5474 Angstrom ``` def exact_diagonalizer(es_problem, qubit_converter): solver = NumPyMinimumEigensolverFactory() calc = GroundStateEigensolver(qubit_converter, solver) result = calc.solve(es_problem) return result def get_mapper(mapper_str: str): if mapper_str == "jw": 
mapper = JordanWignerMapper() elif mapper_str == "pa": mapper = ParityMapper() elif mapper_str == "bk": mapper = BravyiKitaevMapper() return mapper def initial_state_preparation(mapper_str: str = "jw"): molecule = "Li 0.0 0.0 0.0; H 0.0 0.0 1.5474" driver = PySCFDriver(atom=molecule) qmolecule = driver.run() transformer = FreezeCoreTransformer() qmolecule = transformer.transform(qmolecule) es_problem = ElectronicStructureProblem(driver) # generating second_quzntized operators second_q_ops = es_problem.second_q_ops() # Hamiltonian main_op = second_q_ops[0] # return tuple of number of particles if available num_particles = es_problem.num_particles # return the number of spin orbitals num_spin_orbitals = es_problem.num_spin_orbitals mapper = get_mapper(mapper_str) qubit_converter = QubitConverter(mapper=mapper, two_qubit_reduction=True, z2symmetry_reduction=[1, 1]) # Qubit Hamiltonian qubit_op = qubit_converter.convert(main_op, num_particles=num_particles) return (qubit_op, num_particles, num_spin_orbitals, qubit_converter, es_problem) qubit_op, num_particles, num_spin_orbitals, qubit_converter, es_problem = initial_state_preparation("pa") init_state = HartreeFock(num_spin_orbitals, num_particles, qubit_converter) init_state.barrier() init_state.draw("mpl", initial_state=True).savefig("ry_vqe_lih_init_state.png", dpi=300) init_state.draw("mpl", initial_state=True) # Setting up TwoLocal for our ansatz ansatz_type = "RY" # Single qubit rotations that are placed on all qubits with independent parameters rotation_blocks = ["ry"] # Entangling gates entanglement_blocks = "cx" # How the qubits are entangled? 
entanglement = 'linear' # Repetitions of rotation_blocks + entanglement_blocks with independent parameters repetitions = 1 # Skipoing the final rotation_blocks layer skip_final_rotation_layer = False ansatz = TwoLocal( qubit_op.num_qubits, rotation_blocks, entanglement_blocks, reps=repetitions, entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer, # insert_barriers=True ) # Add the initial state ansatz.compose(init_state, front=True, inplace=True) ansatz.draw(output="mpl", initial_state=True).savefig("ry_vqe_lih_ansatz.png", dpi=300) ansatz.draw(output="mpl", initial_state=True) ansatz.decompose().draw(output="mpl", initial_state=True).savefig("ry_vqe_lih_ansatz_decomposed.png", dpi=300) ansatz.decompose().draw(output="mpl", initial_state=True) optimizer = COBYLA(maxiter=10000) ``` ## Solver ### Exact Eigensolver using NumPyMinimumEigensolver ``` result_exact = exact_diagonalizer(es_problem, qubit_converter) exact_energy = np.real(result_exact.eigenenergies[0]) print("Exact Electronic Energy: {:.4f} Eh\n\n".format(exact_energy)) print("Results:\n\n", result_exact) ``` ### VQE Solver ``` from IPython.display import display, clear_output def callback(eval_count, parameters, mean, std): # overwrites same line when printing display("Evaluation: {},\tEnergy: {},\tStd: {}".format(eval_count, mean, std)) clear_output(wait=True) counts.append(eval_count) values.append(mean) params.append(parameters) deviation.append(std) counts = [] values = [] params = [] deviation = [] # Set initial parameters of the ansatz # we choose a fixed small displacement try: initial_point = [0.01] * len(ansatz.ordered_parameters) except: initial_point = [0.01] * ansatz.num_parameters algorithm = VQE( ansatz, optimizer=optimizer, quantum_instance=state_sim, callback=callback, initial_point=initial_point ) result = algorithm.compute_minimum_eigenvalue(qubit_op) print(result) # Storing results in a dictionary from qiskit.transpiler import PassManager from 
qiskit.transpiler.passes import Unroller # Unroller transpile our circuit into CNOTs and U gates pass_ = Unroller(['u', 'cx']) pm = PassManager(pass_) ansatz_tp = pm.run(ansatz) cnots = ansatz_tp.count_ops()['cx'] score = cnots accuracy_threshold = 4.0 # in mHa energy = result.optimal_value # if ansatz_type == "TwoLocal": result_dict = { 'optimizer': optimizer.__class__.__name__, 'mapping': qubit_converter.mapper.__class__.__name__, 'ansatz': ansatz.__class__.__name__, 'rotation blocks': rotation_blocks, 'entanglement_blocks': entanglement_blocks, 'entanglement': entanglement, 'repetitions': repetitions, 'skip_final_rotation_layer': skip_final_rotation_layer, 'energy (Ha)': energy, 'error (mHa)': (energy-exact_energy)*1000, 'pass': (energy-exact_energy)*1000 <= accuracy_threshold, '# of parameters': len(result.optimal_point), 'final parameters': result.optimal_point, '# of evaluations': result.optimizer_evals, 'optimizer time': result.optimizer_time, '# of qubits': int(qubit_op.num_qubits), '# of CNOTs': cnots, 'score': score} # Plotting the results import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 1, figsize=(19.20, 10.80)) plt.rc('font', size=14) plt.rc('axes', labelsize=14) plt.rc('xtick', labelsize=14) plt.rc('ytick', labelsize=14) plt.rc('legend', fontsize=14) # ax.set_facecolor("#293952") ax.set_xlabel('Iterations') ax.set_ylabel('Energy (Eh)') ax.grid() fig.text(0.7, 0.75, f'VQE Energy: {result.optimal_value:.4f} Eh\nExact Energy: {exact_energy:.4f} Eh\nScore: {score:.0f}') plt.title(f"Ground State Energy of LiH using RY VQE Ansatz\nOptimizer: {result_dict['optimizer']} \n Mapper: {result_dict['mapping']}\nVariational Form: {result_dict['ansatz']} - RY") ax.plot(counts, values) ax.axhline(exact_energy, linestyle='--') # fig_title = f"\ # {result_dict['optimizer']}-\ # {result_dict['mapping']}-\ # {result_dict['ansatz']}-\ # Energy({result_dict['energy (Ha)']:.3f})-\ # Score({result_dict['score']:.0f})\ # .png" fig.savefig("ry_vqe_lih_fig", dpi=300) # 
Displaying and saving the data
import pandas as pd

# One-row DataFrame built from the run's result dictionary (result_dict is
# assembled in the previous cell) so the VQE run metadata can be inspected
# and saved in tabular form.
result_df = pd.DataFrame.from_dict([result_dict])
# Notebook cell output: show only the configuration and scoring columns.
result_df[['optimizer','ansatz', '# of qubits', 'error (mHa)', 'pass', 'score','# of parameters','rotation blocks', 'entanglement_blocks', 'entanglement', 'repetitions']]
```
github_jupyter
``` # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # AI Platform (Unified) client library: AutoML text sentiment analysis model for online prediction <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/showcase_automl_text_sentiment_analysis_online.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/showcase_automl_text_sentiment_analysis_online.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> ## Overview This tutorial demonstrates how to use the AI Platform (Unified) Python client library to create text sentiment analysis models and do online prediction using Google Cloud's [AutoML](https://cloud.google.com/ai-platform-unified/docs/start/automl-users). ### Dataset The dataset used for this tutorial is the [Crowdflower Claritin-Twitter dataset](https://data.world/crowdflower/claritin-twitter) from [data.world Datasets](https://data.world). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. 
### Objective In this tutorial, you create an AutoML text sentiment analysis model and deploy for online prediction from a Python script using the AI Platform (Unified) client library. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console. The steps performed include: - Create a AI Platform (Unified) `Dataset` resource. - Train the model. - View the model evaluation. - Deploy the `Model` resource to a serving `Endpoint` resource. - Make a prediction. - Undeploy the `Model`. ### Costs This tutorial uses billable components of Google Cloud (GCP): * AI Platform (Unified) * Cloud Storage Learn about [AI Platform (Unified) pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ## Installation Install the latest version of AI Platform (Unified) client library. ``` import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" ! pip3 install -U google-cloud-aiplatform $USER_FLAG ``` Install the latest GA version of *google-cloud-storage* library as well. ``` ! pip3 install -U google-cloud-storage $USER_FLAG ``` ### Restart the kernel Once you've installed the AI Platform (Unified) client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. ``` import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. 
[Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the AI Platform (Unified) APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in AI Platform (Unified) Notebooks. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for AI Platform (Unified). We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with AI Platform (Unified). Not all regions provide support for all AI Platform (Unified) services. 
For the latest support per region, see the [AI Platform (Unified) locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using AI Platform (Unified) Notebooks**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. **Click Create service account**. In the **Service account name** field, enter a name, and click **Create**. In the **Grant this service account access to project** section, click the Role drop-down list. Type "AI Platform (Unified)" into the filter box, and select **AI Platform (Unified) Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. ``` import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. 
# If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants #### Import AI Platform (Unified) client library Import the AI Platform (Unified) client library into our Python environment. ``` import os import sys import time import google.cloud.aiplatform_v1 as aip from google.protobuf import json_format from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Struct, Value ``` #### AI Platform (Unified) constants Setup up the following constants for AI Platform (Unified): - `API_ENDPOINT`: The AI Platform (Unified) API service endpoint for dataset, model, job, pipeline and endpoint services. - `PARENT`: The AI Platform (Unified) location root path for dataset, model, job, pipeline and endpoint resources. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # AI Platform (Unified) location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### AutoML constants Set constants unique to AutoML datasets and training: - Dataset Schemas: Tells the `Dataset` resource service which type of dataset it is. - Data Labeling (Annotations) Schemas: Tells the `Dataset` resource service how the data is labeled (annotated). - Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for. 
``` # Text Dataset type DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml" # Text Labeling type LABEL_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_sentiment_io_format_1.0.0.yaml" # Text Training task TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_sentiment_1.0.0.yaml" ``` # Tutorial Now you are ready to start creating your own AutoML text sentiment analysis model. ## Set up clients The AI Platform (Unified) client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the AI Platform (Unified) server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. - Dataset Service for `Dataset` resources. - Model Service for `Model` resources. - Pipeline Service for training. - Endpoint Service for deployment. - Prediction Service for serving. ``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_dataset_client(): client = aip.DatasetServiceClient(client_options=client_options) return client def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_pipeline_client(): client = aip.PipelineServiceClient(client_options=client_options) return client def create_endpoint_client(): client = aip.EndpointServiceClient(client_options=client_options) return client def create_prediction_client(): client = aip.PredictionServiceClient(client_options=client_options) return client clients = {} clients["dataset"] = create_dataset_client() clients["model"] = create_model_client() clients["pipeline"] = create_pipeline_client() clients["endpoint"] = create_endpoint_client() clients["prediction"] = create_prediction_client() for client in clients.items(): print(client) ``` ## Dataset Now that your clients are ready, your first step in training a 
model is to create a managed dataset instance, and then upload your labeled data to it. ### Create `Dataset` resource instance Use the helper function `create_dataset` to create the instance of a `Dataset` resource. This function does the following: 1. Uses the dataset client service. 2. Creates an AI Platform (Unified) `Dataset` resource (`aip.Dataset`), with the following parameters: - `display_name`: The human-readable name you choose to give it. - `metadata_schema_uri`: The schema for the dataset type. 3. Calls the client dataset service method `create_dataset`, with the following parameters: - `parent`: The AI Platform (Unified) location root path for your `Database`, `Model` and `Endpoint` resources. - `dataset`: The AI Platform (Unified) dataset object instance you created. 4. The method returns an `operation` object. An `operation` object is how AI Platform (Unified) handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning. You can use the `operation` object to get status on the operation (e.g., create `Dataset` resource) or to cancel the operation, by invoking an operation method: | Method | Description | | ----------- | ----------- | | result() | Waits for the operation to complete and returns a result object in JSON format. | | running() | Returns True/False on whether the operation is still running. | | done() | Returns True/False on whether the operation is completed. | | canceled() | Returns True/False on whether the operation was canceled. | | cancel() | Cancels the operation (this may take up to 30 seconds). 
| ``` TIMEOUT = 90 def create_dataset(name, schema, labels=None, timeout=TIMEOUT): start_time = time.time() try: dataset = aip.Dataset( display_name=name, metadata_schema_uri=schema, labels=labels ) operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset) print("Long running operation:", operation.operation.name) result = operation.result(timeout=TIMEOUT) print("time:", time.time() - start_time) print("response") print(" name:", result.name) print(" display_name:", result.display_name) print(" metadata_schema_uri:", result.metadata_schema_uri) print(" metadata:", dict(result.metadata)) print(" create_time:", result.create_time) print(" update_time:", result.update_time) print(" etag:", result.etag) print(" labels:", dict(result.labels)) return result except Exception as e: print("exception:", e) return None result = create_dataset("claritin-" + TIMESTAMP, DATA_SCHEMA) ``` Now save the unique dataset identifier for the `Dataset` resource instance you created. ``` # The full unique ID for the dataset dataset_id = result.name # The short numeric ID for the dataset dataset_short_id = dataset_id.split("/")[-1] print(dataset_id) ``` ### Data preparation The AI Platform (Unified) `Dataset` resource for text has a couple of requirements for your text data. - Text examples must be stored in a CSV or JSONL file. #### CSV For text sentiment analysis, the CSV file has a few requirements: - No heading. - First column is the text example or Cloud Storage path to text file. - Second column the label (i.e., sentiment). - Third column is the maximum sentiment value. For example, if the range is 0 to 3, then the maximum value is 3. #### Location of Cloud Storage training data. Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage. 
``` IMPORT_FILE = "gs://cloud-samples-data/language/claritin.csv" SENTIMENT_MAX = 4 ``` #### Quick peek at your data You will use a version of the Crowdflower Claritin-Twitter dataset that is stored in a public Cloud Storage bucket, using a CSV index file. Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows. ``` if "IMPORT_FILES" in globals(): FILE = IMPORT_FILES[0] else: FILE = IMPORT_FILE count = ! gsutil cat $FILE | wc -l print("Number of Examples", int(count[0])) print("First 10 rows") ! gsutil cat $FILE | head ``` ### Import data Now, import the data into your AI Platform (Unified) Dataset resource. Use this helper function `import_data` to import the data. The function does the following: - Uses the `Dataset` client. - Calls the client method `import_data`, with the following parameters: - `name`: The human readable name you give to the `Dataset` resource (e.g., claritin). - `import_configs`: The import configuration. - `import_configs`: A Python list containing a dictionary, with the key/value entries: - `gcs_sources`: A list of URIs to the paths of the one or more index files. - `import_schema_uri`: The schema identifying the labeling type. The `import_data()` method returns a long running `operation` object. This will take a few minutes to complete. If you are in a live tutorial, this would be a good time to ask questions, or take a personal break. 
```
def import_data(dataset, gcs_sources, schema):
    """Import Cloud Storage data into the given `Dataset` resource.

    Args:
        dataset: Fully qualified AI Platform (Unified) dataset identifier.
        gcs_sources: List of Cloud Storage URIs of the index file(s).
        schema: Import schema URI identifying the labeling type.

    Returns:
        The long-running operation object, or None if the call failed.
    """
    config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}]
    # Bug fix: use the `dataset` parameter instead of the global `dataset_id`
    # the original read, so the helper works for any dataset passed in.
    print("dataset:", dataset)
    start_time = time.time()
    try:
        operation = clients["dataset"].import_data(
            name=dataset, import_configs=config
        )
        print("Long running operation:", operation.operation.name)
        result = operation.result()
        print("result:", result)
        print("time:", int(time.time() - start_time), "secs")
        print("error:", operation.exception())
        print("meta :", operation.metadata)
        print(
            "after: running:",
            operation.running(),
            "done:",
            operation.done(),
            "cancelled:",
            operation.cancelled(),
        )
        return operation
    except Exception as e:
        print("exception:", e)
        return None


import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA)
```

## Train the model

Now train an AutoML text sentiment analysis model using your AI Platform (Unified) `Dataset` resource. To train the model, do the following steps:

1. Create an AI Platform (Unified) training pipeline for the `Dataset` resource.
2. Execute the pipeline to start the training.

### Create a training pipeline

You may ask, what do we use a pipeline for? You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:

1. Being reusable for subsequent training jobs.
2. Can be containerized and run as a batch job.
3. Can be distributed.
4. All the steps are associated with the same pipeline job for tracking progress.

Use this helper function `create_pipeline`, which takes the following parameters:

- `pipeline_name`: A human readable name for the pipeline job.
- `model_name`: A human readable name for the model.
- `dataset`: The AI Platform (Unified) fully qualified dataset identifier.
- `schema`: The dataset labeling (annotation) training schema.
- `task`: A dictionary describing the requirements for the training job.
The helper function calls the `Pipeline` client service'smethod `create_pipeline`, which takes the following parameters: - `parent`: The AI Platform (Unified) location root path for your `Dataset`, `Model` and `Endpoint` resources. - `training_pipeline`: the full specification for the pipeline training job. Let's look now deeper into the *minimal* requirements for constructing a `training_pipeline` specification: - `display_name`: A human readable name for the pipeline job. - `training_task_definition`: The dataset labeling (annotation) training schema. - `training_task_inputs`: A dictionary describing the requirements for the training job. - `model_to_upload`: A human readable name for the model. - `input_data_config`: The dataset specification. - `dataset_id`: The AI Platform (Unified) dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier. - `fraction_split`: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML. ``` def create_pipeline(pipeline_name, model_name, dataset, schema, task): dataset_id = dataset.split("/")[-1] input_config = { "dataset_id": dataset_id, "fraction_split": { "training_fraction": 0.8, "validation_fraction": 0.1, "test_fraction": 0.1, }, } training_pipeline = { "display_name": pipeline_name, "training_task_definition": schema, "training_task_inputs": task, "input_data_config": input_config, "model_to_upload": {"display_name": model_name}, } try: pipeline = clients["pipeline"].create_training_pipeline( parent=PARENT, training_pipeline=training_pipeline ) print(pipeline) except Exception as e: print("exception:", e) return None return pipeline ``` ### Construct the task requirements Next, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the `task` field takes a Google protobuf Struct, which is very similar to a Python dictionary. 
Use the `json_format.ParseDict` method for the conversion. The minimal fields we need to specify are: - `sentiment_max`: The maximum value for the sentiment (e.g., 4). Finally, create the pipeline by calling the helper function `create_pipeline`, which returns an instance of a training pipeline object. ``` PIPE_NAME = "claritin_pipe-" + TIMESTAMP MODEL_NAME = "claritin_model-" + TIMESTAMP task = json_format.ParseDict( { "sentiment_max": SENTIMENT_MAX, }, Value(), ) response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task) ``` Now save the unique identifier of the training pipeline you created. ``` # The full unique ID for the pipeline pipeline_id = response.name # The short numeric ID for the pipeline pipeline_short_id = pipeline_id.split("/")[-1] print(pipeline_id) ``` ### Get information on a training pipeline Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter: - `name`: The AI Platform (Unified) fully qualified pipeline identifier. When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`. 
``` def get_training_pipeline(name, silent=False): response = clients["pipeline"].get_training_pipeline(name=name) if silent: return response print("pipeline") print(" name:", response.name) print(" display_name:", response.display_name) print(" state:", response.state) print(" training_task_definition:", response.training_task_definition) print(" training_task_inputs:", dict(response.training_task_inputs)) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", dict(response.labels)) return response response = get_training_pipeline(pipeline_id) ``` # Deployment Training the above model may take upwards of 180 minutes time. Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified AI Platform (Unified) Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`. ``` while True: response = get_training_pipeline(pipeline_id, True) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_id = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: raise Exception("Training Job Failed") else: model_to_deploy = response.model_to_upload model_to_deploy_id = model_to_deploy.name print("Training Time:", response.end_time - response.start_time) break time.sleep(60) print("model to deploy:", model_to_deploy_id) ``` ## Model information Now that your model is trained, you can get some information on your model. ## Evaluate the Model resource Now find out how good the model service believes your model is. 
As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model. ### List evaluations for all slices Use this helper function `list_model_evaluations`, which takes the following parameter: - `name`: The AI Platform (Unified) fully qualified model identifier for the `Model` resource. This helper function uses the model client service's `list_model_evaluations` method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric. For each evaluation -- you probably only have one, we then print all the key names for each metric in the evaluation, and for a small set (`meanAbsoluteError` and `precision`) you will print the result. ``` def list_model_evaluations(name): response = clients["model"].list_model_evaluations(parent=name) for evaluation in response: print("model_evaluation") print(" name:", evaluation.name) print(" metrics_schema_uri:", evaluation.metrics_schema_uri) metrics = json_format.MessageToDict(evaluation._pb.metrics) for metric in metrics.keys(): print(metric) print("meanAbsoluteError", metrics["meanAbsoluteError"]) print("precision", metrics["precision"]) return evaluation.name last_evaluation = list_model_evaluations(model_to_deploy_id) ``` ## Deploy the `Model` resource Now deploy the trained AI Platform (Unified) `Model` resource you created with AutoML. This requires two steps: 1. Create an `Endpoint` resource for deploying the `Model` resource to. 2. Deploy the `Model` resource to the `Endpoint` resource. ### Create an `Endpoint` resource Use this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter: - `display_name`: A human readable name for the `Endpoint` resource. 
The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter: - `display_name`: A human readable name for the `Endpoint` resource. Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the AI Platform (Unified) fully qualified identifier for the `Endpoint` resource: `response.name`. ``` ENDPOINT_NAME = "claritin_endpoint-" + TIMESTAMP def create_endpoint(display_name): endpoint = {"display_name": display_name} response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint) print("Long running operation:", response.operation.name) result = response.result(timeout=300) print("result") print(" name:", result.name) print(" display_name:", result.display_name) print(" description:", result.description) print(" labels:", result.labels) print(" create_time:", result.create_time) print(" update_time:", result.update_time) return result result = create_endpoint(ENDPOINT_NAME) ``` Now get the unique identifier for the `Endpoint` resource you created. ``` # The full unique ID for the endpoint endpoint_id = result.name # The short numeric ID for the endpoint endpoint_short_id = endpoint_id.split("/")[-1] print(endpoint_id) ``` ### Compute instance scaling You have several choices on scaling the compute instances for handling your online prediction requests: - Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. - Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. 
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them. - Auto Scaling: The online prediction requests are split across a scaleable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions. The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request. ``` MIN_NODES = 1 MAX_NODES = 1 ``` ### Deploy `Model` resource to the `Endpoint` resource Use this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters: - `model`: The AI Platform (Unified) fully qualified model identifier of the model to upload (deploy) from the training pipeline. - `deploy_model_display_name`: A human readable name for the deployed model. - `endpoint`: The AI Platform (Unified) fully qualified endpoint identifier to deploy the model to. The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters: - `endpoint`: The AI Platform (Unified) fully qualified `Endpoint` resource identifier to deploy the `Model` resource to. - `deployed_model`: The requirements specification for deploying the model. - `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. 
- If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100. Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields: - `model`: The AI Platform (Unified) fully qualified model identifier of the (upload) model to deploy. - `display_name`: A human readable name for the deployed model. - `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production. - `automatic_resources`: This refers to how many redundant compute instances (replicas). For this example, we set it to one (no replication). #### Traffic Split Let's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance. Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only get's say 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. #### Response The method returns a long running operation `response`. 
We will wait synchronously for the operation to complete by calling `response.result()`, which will block until the model is deployed.
- `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed. - `parameters_dict`: Additional filtering parameters for serving prediction results. This function calls the prediction client service's `predict` method with the following parameters: - `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed. - `instances`: A list of instances (text files) to predict. - `parameters`: Additional filtering parameters for serving prediction results. *Note*, text models do not support additional parameters. #### Request The format of each instance is: { 'content': text_item } Since the `predict()` method can take multiple items (instances), you send your single test item as a list of one test item. As a final step, you package the instances list into Google's protobuf format -- which is what you pass to the `predict()` method. #### Response The `response` object returns a list, where each element in the list corresponds to the corresponding text in the request. You will see in the output for each prediction -- in our case there is just one: - The sentiment rating ``` def predict_item(data, endpoint, parameters_dict): parameters = json_format.ParseDict(parameters_dict, Value()) # The format of each instance should conform to the deployed model's prediction input schema. 
instances_list = [{"content": data}] instances = [json_format.ParseDict(s, Value()) for s in instances_list] response = clients["prediction"].predict( endpoint=endpoint, instances=instances, parameters=parameters ) print("response") print(" deployed_model_id:", response.deployed_model_id) predictions = response.predictions print("predictions") for prediction in predictions: print(" prediction:", dict(prediction)) return response response = predict_item(test_item, endpoint_id, None) ``` ## Undeploy the `Model` resource Now undeploy your `Model` resource from the serving `Endpoint` resoure. Use this helper function `undeploy_model`, which takes the following parameters: - `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to. - `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to. This function calls the endpoint client service's method `undeploy_model`, with the following parameters: - `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed. - `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed. - `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource. Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}. 
``` def undeploy_model(deployed_model_id, endpoint): response = clients["endpoint"].undeploy_model( endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={} ) print(response) undeploy_model(deployed_model_id, endpoint_id) ``` # Cleaning up To clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the AI Platform (Unified) fully qualified identifier for the dataset try: if delete_dataset and "dataset_id" in globals(): clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the AI Platform (Unified) fully qualified identifier for the pipeline try: if delete_pipeline and "pipeline_id" in globals(): clients["pipeline"].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the AI Platform (Unified) fully qualified identifier for the model try: if delete_model and "model_to_deploy_id" in globals(): clients["model"].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the AI Platform (Unified) fully qualified identifier for the endpoint try: if delete_endpoint and "endpoint_id" in globals(): clients["endpoint"].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the AI Platform (Unified) fully qualified identifier for the batch job try: if delete_batchjob and "batch_job_id" in globals(): 
clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete the custom job using the AI Platform (Unified) fully qualified identifier for the custom job try: if delete_customjob and "job_id" in globals(): clients["job"].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the AI Platform (Unified) fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and "hpt_job_id" in globals(): clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME ```
github_jupyter
(ch:trainingModels)= # 모델 훈련 **감사의 글** 자료를 공개한 저자 오렐리앙 제롱과 강의자료를 지원한 한빛아카데미에게 진심어린 감사를 전합니다. **소스코드** 본문 내용의 일부를 파이썬으로 구현한 내용은 [(구글코랩) 모델 훈련](https://colab.research.google.com/github/codingalzi/handson-ml3/blob/master/notebooks/code_training_models.ipynb)에서 확인할 수 있다. **주요 내용** * 선형 회귀 모델 구현 * 선형대수 활용 * 경사하강법 활용 * 경사하강법 종류 * 배치 경사하강법 * 미니배치 경사하강법 * 확률적 경사하강법(SGD) * 다항 회귀: 비선형 회귀 모델 * 학습 곡선: 과소, 과대 적합 감지 * 모델 규제: 과대 적합 방지 * 로지스틱 회귀와 소프트맥스 회귀: 분류 모델 **목표** 모델 훈련의 기본 작동 과정과 원리를 살펴보며, 이를 통해 다음 사항들에 대한 이해를 넓힌다. - 적정 모델 선택 - 적정 훈련 알고리즘 선택 - 적정 하이퍼파라미터 선택 - 디버깅과 오차 분석 - 신경망 구현 및 훈련 과정 이해 ## 선형 회귀 **선형 회귀 예제: 1인당 GDP와 삶의 만족도** {numref}`%s 절 <sec:model_based_learning>`에서 1인당 GDP와 삶의 만족도 사이의 관계를 다음 1차 함수로 표현할 수 있었다. $$(\text{삶의만족도}) = \theta_0 + \theta_1\cdot (\text{1인당GDP})$$ 즉, 1인당 GDP가 주어지면 위 함수를 이용하여 삶의 만족도를 예측하였다. 주어진 1인당 GDP를 **입력 특성**<font size="2">input feature</font> $x$, 예측된 삶의 만족도를 **예측값** $\hat y$ 라 하면 다음 식으로 변환된다. $$\hat y = \theta_0 + \theta_1\cdot x_1$$ 절편 $\theta_0$ 와 기울기 $\theta_1$ 은 (선형) 모델의 **파라미터**<font size="2">weight parameter</font>이다. 머신러닝에서는 절편은 **편향**<font size="2">bias</font>, 기울기는 **가중치**<font size="2">weight</font> 라 부른다. 따라서 1인당 GDP와 삶의 만족도 사이의 선형 관계를 모델로 구현하려면 적절한 하나의 편향과 하나의 가중치, 즉 총 2개의 파라미터를 결정해야 한다. **선형 회귀 예제: 캘리포니아 주택 가격 예측** 반면에 {numref}`%s 장 <ch:end2end>`의 캘리포니아 주택 가격 예측 선형 회귀 모델은 24개의 입력 특성을 사용하는 다음 함수를 이용한다. $$\hat y = \theta_0 + \theta_1\cdot x_1 + \cdots + \theta_{24}\cdot x_{24}$$ * $\hat y$: 예측값 * $x_i$: 구역의 $i$ 번째 특성값(위도, 경도, 중간소득, 가구당 인원 등등등) * $\theta_0$: 편향 * $\theta_i$: $i$ 번째 특성에 대한 (가중치) 파라미터, 단 $i > 0$. 따라서 캘리포니아의 구역별 중간 주택 가격을 예측하는 선형 회귀 모델을 구하려면 적절한 하나의 편향과 24개의 가중치, 즉 총 25개의 파라미터를 결정해야 한다. **선형 회귀 함수** 이를 일반화하면 다음과 같다. $$\hat y = \theta_0 + \theta_1\cdot x_1 + \cdots + \theta_{n}\cdot x_{n}$$ * $\hat y$: 예측값 * $n$: 특성 수 * $x_i$: 구역의 $i$ 번째 특성값 * $\theta_0$: 편향 * $\theta_j$: $j$ 번째 특성에 대한 (가중치) 파라미터(단, $1 \le j \le n$) 일반적으로 선형 회귀 모델을 구현하려면 한 개의 편향과 $n$ 개의 가중치, 즉 총 $(1+n)$ 개의 파라미터를 결정해야 한다. 
**벡터 표기법** 예측값을 벡터의 **내적**<font size="2">inner product</font>으로 표현할 수 있다. $$ \hat y = h_\theta (\mathbf{x}) = \mathbf{\theta} \cdot \mathbf{x} $$ * $h_\theta(\cdot)$: 예측 함수, 즉 모델의 `predict()` 메서드. * $\mathbf{x} = (1, x_1, \dots, x_n)$ * $\mathbf{\theta} = (\theta_0, \theta_1, \dots, \theta_n)$ **2D 어레이 표기법** 머신러닝에서는 훈련 샘플을 나타내는 입력 벡터와 파라미터 벡터를 일반적으로 아래 모양의 행렬로 나타낸다. $$ \mathbf{x}= \begin{bmatrix} 1 \\ x_1 \\ \vdots \\ x_n \end{bmatrix}, \qquad \mathbf{\theta}= \begin{bmatrix} \theta_0\\ \theta_1 \\ \vdots \\ \theta_n \end{bmatrix} $$ 따라서 예측값은 다음과 같이 행렬 연산으로 표기된다. 단, $A^T$ 는 행렬 $A$의 전치행렬을 가리킨다. $$ \hat y = h_\theta (\mathbf{x}) = \mathbf{\theta}^{T} \mathbf{x} $$ **선형 회귀 모델의 행렬 연산 표기법** $\mathbf{X}$가 전체 입력 데이터셋, 즉 전체 훈련셋을 가리키는 (m, 1+n) 모양의 2D 어레이, 즉 행렬이라 하자. - $m$: 훈련셋의 크기. - $n$: 특성 수 그러면 $\mathbf{X}$ 는 다음과 같이 표현된다. 단, $\mathbf{x}_j^{(i)}$ 는 $i$-번째 입력 샘플의 $j$-번째 특성값을 가리킨다. $$ \mathbf{X}= \begin{bmatrix} [1, \mathbf{x}_1^{(1)}, \dots, \mathbf{x}_n^{(1)}] \\ \vdots \\ [1, \mathbf{x}_1^{(m)}, \dots, \mathbf{x}_n^{(m)}] \\ \end{bmatrix} $$ 결론적으로 모든 입력값에 대한 예측값을 하나의 행렬식으로 표현하면 다음과 같다. $$ \begin{bmatrix} \hat y_1 \\ \vdots \\ \hat y_m \end{bmatrix} = \begin{bmatrix} [1, \mathbf{x}_1^{(1)}, \dots, \mathbf{x}_n^{(1)}] \\ \vdots \\ [1, \mathbf{x}_1^{(m)}, \dots, \mathbf{x}_n^{(m)}] \\ \end{bmatrix} \,\, \begin{bmatrix} \theta_0\\ \theta_1 \\ \vdots \\ \theta_n \end{bmatrix} $$ 간략하게 줄이면 다음과 같다. $$ \hat{\mathbf y} = \mathbf{X}\, \mathbf{\theta} $$ 위 식에 사용된 기호들의 의미와 어레이 모양은 다음과 같다. | 데이터 | 어레이 기호 | 어레이 모양(shape) | |:-------------:|:-------------:|:---------------:| | 예측값 | $\hat{\mathbf y}$ | $(m, 1)$ | | 훈련셋 | $\mathbf X$ | $(m, 1+n)$ | | 파라미터 | $\mathbf{\theta}$ | $(1+n, 1)$ | **비용함수: 평균 제곱 오차(MSE)** 회귀 모델은 훈련 중에 **평균 제곱 오차**<font size="2">mean squared error</font>(MSE)를 이용하여 성능을 평가한다. 
$$ \mathrm{MSE}(\mathbf{\theta}) := \mathrm{MSE}(\mathbf X, h_{\mathbf{\theta}}) = \frac 1 m \sum_{i=1}^{m} \big(\mathbf{\theta}^{T}\, \mathbf{x}^{(i)} - y^{(i)}\big)^2 $$ 최종 목표는 훈련셋이 주어졌을 때 $\mathrm{MSE}(\mathbf{\theta})$가 최소가 되도록 하는 $\mathbf{\theta}$를 찾는 것이다. * 방식 1: 정규방정식 또는 특이값 분해(SVD) 활용 * 드물지만 수학적으로 비용함수를 최소화하는 $\mathbf{\theta}$ 값을 직접 계산할 수 있는 경우 활용 * 계산복잡도가 $O(n^2)$ 이상인 행렬 연산을 수행해야 함. * 따라서 특성 수($n$)이 큰 경우 메모리 관리 및 시간복잡도 문제때문에 비효율적임. * 방식 2: 경사하강법 * 특성 수가 매우 크거나 훈련 샘플이 너무 많아 메모리에 한꺼번에 담을 수 없을 때 적합 * 일반적으로 선형 회귀 모델 훈련에 적용되는 기법 ### 정규 방정식 비용함수를 최소화 하는 $\theta$를 정규 방정식<font size="2">normal equation</font>을 이용하여 아래와 같이 바로 계산할 수 있다. 단, $\mathbf{X}^T\, \mathbf{X}$ 의 역행렬이 존재해야 한다. $$ \hat{\mathbf{\theta}} = (\mathbf{X}^T\, \mathbf{X})^{-1}\, \mathbf{X}^T\, \mathbf{y} $$ ### `LinearRegression` 클래스 **SVD(특잇값 분해) 활용** 그런데 행렬 연산과 역행렬 계산은 계산 복잡도가 $O(n^{2.4})$ 이상이며 항상 역행렬 계산이 가능한 것도 아니다. 반면에, **특잇값 분해**를 활용하여 얻어지는 **무어-펜로즈(Moore-Penrose) 유사 역행렬** $\mathbf{X}^+$은 항상 존재하며 계산 복잡도가 $O(n^2)$ 로 보다 빠른 계산을 지원한다. 또한 다음이 성립한다. $$ \hat{\mathbf{\theta}} = \mathbf{X}^+\, \mathbf{y} $$ **`LinearRegression` 모델** 사이킷런의 `LinearRegression` 모델은 특잇값 분해와 무어-펜로즈 유사 역행렬을 이용하여 최적의 $\hat \theta$ 를 계산한다. (sec:gradient-descent)= ## 경사하강법 훈련 세트를 이용한 훈련 과정 중에 가중치 파라미터를 조금씩 반복적으로 조정한다. 이때 비용 함수의 크기를 줄이는 방향으로 조정한다. **경사하강법**<font size="2">gradient descent</font>(GD) 이해를 위해 다음 개념들을 충분히 이해하고 있어야 한다. **최적 학습 모델** 비용 함수를 최소화하는 또는 효용 함수를 최대화하는 파라미터를 사용하는 모델이며, 최종적으로 훈련시킬 대상이다. **파라미터<font size="2">parameter</font>** 선형 회귀 모델에 사용되는 편향과 가중치 파라미터처럼 모델 훈련중에 학습되는 파라미터를 가리킨다. **비용 함수<font size="2">cost function</font>** 평균 제곱 오차(MSE)처럼 모델이 얼마나 나쁜가를 측정하는 함수다. **전역 최솟값<font size="2">global minimum</font>** 비용 함수의 전역 최솟값이다. **비용 함수의 그레이디언트 벡터** MSE를 비용함수로 사용하는 경우 $\textrm{MSE}(\mathbf{\theta})$ 함수의 $\mathbf{\mathbf{\theta}}$ 에 대한 그레이디언트<font size="2">gradient</font> 벡터를 사용한다. 
$$ \nabla_\mathbf{\theta} \textrm{MSE}(\mathbf{\theta}) = \begin{bmatrix} \frac{\partial}{\partial \mathbf{\theta}_0} \textrm{MSE}(\mathbf{\theta}) \\ \frac{\partial}{\partial \mathbf{\theta}_1} \textrm{MSE}(\mathbf{\theta}) \\ \vdots \\ \frac{\partial}{\partial \mathbf{\theta}_n} \textrm{MSE}(\mathbf{\theta}) \end{bmatrix} $$ **학습률($\eta$)** 훈련 과정에서의 비용함수의 파라미터($\mathbf{\theta}$)를 조정할 때 사용하는 조정 비율이다. **에포크<font size="2">epoch</font>** 훈련셋에 포함된 모든 데이터를 대상으로 예측값을 계산하는 과정을 가리킨다. **허용오차<font size="2">tolerance</font>** 비용함수의 값이 허용오차보다 작아지면 훈련을 종료시킨다. **배치 크기<font size="2">batch size</font>** 파라미터를 업데이트하기 위해, 즉 그레이디언트 벡터를 계산하기 위해 사용되는 훈련 데이터의 개수이다. **하이퍼파라미터<font size="2">hyperparameter</font>** 학습률, 에포크, 허용오차, 배치 크기 처럼 모델을 지정할 때 사용되는 값을 나타낸다. ### 선형 회귀 모델과 경사하강법 선형회귀 모델 파라미터를 조정하는 과정을 이용하여 경사하강법의 기본 아이디어를 설명한다. 먼저 $\mathrm{MSE}(\mathbf{\theta})$ 는 $\mathbf{\theta}$ 에 대한 2차 함수임에 주의한다. 여기서는 $\mathbf{\theta}$ 가 하나의 파라미터로 구성되었다고 가정한다. 따라서 $\mathrm{MSE}(\mathbf{\theta})$의 그래프는 포물선이 된다. $$ \mathrm{MSE}(\mathbf{\theta}) = \frac 1 m \sum_{i=1}^{m} \big(\mathbf{\theta}^{T}\, \mathbf{x}^{(i)} - y^{(i)}\big)^2 $$ $\mathrm{MSE}(\mathbf{\theta})$의 그레이디언트 벡터는 다음과 같다. $$ \nabla_\theta \textrm{MSE}(\theta) = \frac{2}{m}\, \mathbf{X}^T\, (\mathbf{X}\, \theta^T - \mathbf y) $$ 경사하강법은 다음 과정으로 이루어진다. 1. $\mathbf{\theta}$를 임의의 값으로 지정한 후 훈련을 시작한다. 1. 아래 단계를 $\textrm{MSE}(\theta)$ 가 허용오차보다 적게 작아지는 단계까지 반복한다. * 지정된 수의 훈련 샘플을 이용한 학습. * $\mathrm{MSE}(\mathbf{\theta})$ 계산. * 이전 $\mathbf{\theta}$에서 $\nabla_\mathbf{\theta} \textrm{MSE}(\mathbf{\theta})$ 와 학습률 $\eta$를 곱한 값 빼기.<br><br> $$ \theta^{(\text{new})} = \theta^{(\text{old})}\, -\, \eta\cdot \nabla_\theta \textrm{MSE}(\theta^{(\text{old})}) $$ 위 수식은 산에서 가장 경사가 급한 길을 따를 때 가장 빠르게 하산하는 원리와 동일하다. 이유는 해당 지점에서 그레이디언트 벡터를 계산하면 정상으로 가는 가장 빠른 길을 안내할 것이기에 그 반대방향으로 움직여야 하기 때문이다. :::{admonition} 벡터의 방향과 크기 :class: info 모든 벡터는 방향과 크기를 갖는다. 
<div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/vector01.png" width="200"/></div> 그레이디언트 벡터 또한 방향과 크기에 대한 정보를 제공하며, 그레이디언트가 가리키는 방향의 __반대 방향__으로 움직이면 빠르게 전역 최솟값에 접근한다. 이는 아래 그림이 표현하듯이 산에서 가장 경사가 급한 길을 따를 때 가장 빠르게 하산하는 원리와 동일하다. 이유는 해당 지점에서 그레이디언트 벡터를 계산하면 정상으로 가는 가장 빠른 길을 안내할 것이기에 그 반대방향으로 움직여야 하기 때문이다. 아래 그림은 경사하강법을 담당하는 여러 알고리즘을 비교해서 보여준다. <table> <tr> <td style="padding:1px"> <figure> <img src="https://ruder.io/content/images/2016/09/contours_evaluation_optimizers.gif" style="width:90%" title="SGD without momentum"> <figcaption>SGD optimization on loss surface contours</figcaption> </figure> </td> <td style="padding:1px"> <figure> <img src="https://ruder.io/content/images/2016/09/saddle_point_evaluation_optimizers.gif" style="width:90%" title="SGD without momentum"> <figcaption>SGD optimization on saddle point</figcaption> </figure> </td> </tr> </table> **그림 출처:** [An overview of gradient descent optimization algorithms](https://ruder.io/optimizing-gradient-descent/index.html) ::: **학습률의 중요성** 선형 회귀 모델은 적절할 학습률로 훈련될 경우 빠른 시간에 비용 함수의 최솟값에 도달한다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-01.png" width="500"/></div> 반면에 학습률이 너무 작거나 크면 비용 함수의 전역 최솟값에 수렴하지 않을 수 있다. - 학습률이 너무 작은 경우: 비용 함수가 전역 최소값에 너무 느리게 수렴. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-02.png" width="500"/></div> * 학습률이 너무 큰 경우: 비용 함수가 수렴하지 않음. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-03.png" width="500"/></div> 선형 회귀가 아닌 경우에는 시작점에 따라 지역 최솟값에 수렴하거나 정체될 수 있음을 아래 그림이 잘 보여준다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-04.png" width="500"/></div> **특성 스케일링의 중요성** 특성들의 스켈일을 통일시키면 보다 빠른 학습이 이루어지는 이유를 아래 그림이 설명한다. 
* 왼편 그림: 두 특성의 스케일이 동일하게 조정된 경우 비용 함수의 최솟값으로 최단거리로 수렴한다. * 오른편 그림: 두 특성의 스케일이 다른 경우 비용 함수의 최솟값으로 보다 먼 거리를 지나간다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-04a.png" width="500"/></div> ### 경사하강법 종류 모델을 지정할 때 지정하는 배치 크기에 따라 세 종류로 나뉜다. **참고:** 지정된 배치 크기의 샘플에 대해 예측을 한 후에 경사하강법을 이용하여 파라미터를 조정하는 단계를 스텝<font size="2">step</font>이라 하며, 다음이 성립힌다. 스텝 크기 = (훈련 샘플 수) / (배치 크기) 예를 들어, 훈련 세트의 크기가 1,000이고 배치 크기가 10이면, 에포크 당 100번의 스텝이 실행된다. #### 배치 경사하강법 에포크마다 그레이디언트를 계산하여 파라미터를 조정한다. 즉, 배치의 크기가 전체 훈련셋의 크기와 같고 따라서 스텝의 크기는 1이다. 단점으로 훈련 세트가 크면 그레이디언트를 계산하는 데에 많은 시간과 메모리가 필요해지는 문제가 있다. 이와 같은 이유로 인해 사이킷런은 배치 경사하강법을 지원하지 않는다. **학습율과 경사하강법의 관계** 학습률에 따라 파라미터($\theta$)의 수렴 여부와 속도가 달라진다. 최적의 학습률은 그리드 탐색 등을 이용하여 찾아볼 수 있다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-04b.png" width="700"/></div> **에포크 수와 허용오차** 에포크 수는 크게 설정한 후 허용오차를 지정하여 학습 시간을 제한할 필요가 있다. 이유는 포물선의 최솟점에 가까워질 수록 그레이디언트 벡터의 크기가 0에 수렴하기 때문이다. 허용오차와 에포크 수는 서로 반비례의 관계이다. 예를 들어, 허용오차를 1/10로 줄이려면 에포크 수를 10배 늘려야한다. #### 확률적 경사하강법(SGD) 배치 크기가 1이다. 즉, 하나의 스텝에 하나의 훈련 셈플에 대한 예측값을 실행한 후에 그 결과를 이용하여 그레이디언트를 계산하고 파라미터를 조정한다. 샘플은 무작위로 선택된다. 따라서 경우에 따라 하나의 에포크에서 여러 번 선택되거나 전혀 선택되지 않는 샘플이 존재할 수도 있지만, 이는 별 문제가 되지 않는다. 확률적 경사하강법<font size="2">stochastic graidient descent</font>(SGD)을 이용하면 계산량이 상대적으로 적어 아주 큰 훈련 세트를 다룰 수 있으며, 따라서 외부 메모리(out-of-core) 학습에 활용될 수 있다. 또한 파라미터 조정이 불안정하게 이뤄질 수 있기 때문에 지역 최솟값에 상대적으로 덜 민감하다. 반면에 동일한 이유로 경우에 따라 전역 최솟값에 수렴하지 못하고 주변을 맴돌 수도 있다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-04c.png" width="300"/></div> 아래 그림은 처음 20 단계 동안의 SGD 학습 과정을 보여주는데, 모델이 수렴하지 못함을 확인할 수 있다. 
<div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-04d.png" width="500"/></div> **독립 항등 분포** 확률적 경사하강법을 적용하려면 훈련셋이 독립 항등 분포<font size="2">independently and identically distributed</font>(iid)를 따르도록 해야 한다. 이를 위해 매 에포크마다 훈련 셋을 무작위로 섞는 방법이 일반적으로 사용된다. **학습 스케줄<font size="2">learning schedule</font>** 요동치는 파라미터를 제어하기 위해 학습률을 학습 과정 동안 천천히 줄어들게 만드는 기법을 의미한다. 일반적으로 훈련이 지속될 수록 학습률을 조금씩 줄이며, 에포크 수, 훈련 샘플 수, 학습되는 샘플의 인덱스를 이용하여 지정한다. **사이킷런의 `SGDRegressor` 클래스** 확률적 경사하강법을 기본적으로 지원한다. ```python SGDRegressor(max_iter=1000, tol=1e-5, penalty=None, eta0=0.01, n_iter_no_change=100, random_state=42) ``` * `max_iter=1000`: 최대 에포크 수 * `tol=1e-3`: 허용오차 * `eta0=0.1`: 학습 스케줄 함수에 사용되는 매개 변수. 일종의 학습률. * `penalty=None`: 규제 사용 여부 결정(추후 설명). 여기서는 사용하지 않음. #### 미니 배치 경사하강법 배치 크기가 2에서 수백 사이로 정해지며, 최적의 배치 크기는 경우에 따라 다르다. 배치 크기를 어느 정도 크게 하면 확률적 경사하강법(SGD) 보다 파라미터의 움직임이 덜 불규칙적이 되며, 배치 경사하강법보다 빠르게 학습한다. 반면에 SGD에 비해 지역 최솟값에 수렴할 위험도가 보다 커진다. **경사하강법 비교** 배치 GD, 미니 배치 GD, SGD의 순서대로 최적의 파라미터 값에 수렴할 확률이 높다. 훈련 시간 또한 동일한 순서대로 오래 걸린다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-05.png" width="500"/></div> **선형 회귀 알고리즘 비교** | 알고리즘 | 많은 샘플 수 | 외부 메모리 학습 | 많은 특성 수 | 하이퍼 파라미터 수 | 스케일 조정 | 사이킷런 지원 | |:--------:|:---------:|:---------:|:---------:|:---------:|:---------:|:---------:| | 정규방정식 | 빠름 | 지원 안됨 | 느림 | 0 | 불필요 | 지원 없음 | | SVD | 빠름 | 지원 안됨 | 느림 | 0 | 불필요 | LinearRegression | | 배치 GD | 느림 | 지원 안됨 | 빠름 | 2 | 필요 | (?) | | SGD | 빠름 | 지원 | 빠름 | >= 2 | 필요 | SGDRegressor | | 미니배치 GD | 빠름 | 지원 | 빠름 | >=2 | 필요 | 지원 없음 | **참고:** 심층 신경망을 지원하는 텐서플로우<font size="2">Tensorflow</font>는 기본적으로 미니 배치 경사하강법을 지원한다. (sec:poly_reg)= ## 다항 회귀 비선형 데이터를 선형 회귀를 이용하여 학습하는 기법을 **다항 회귀**<font size="2">polynomial regression</font>라 한다. 이때 다항식을 이용하여 새로운 특성을 생성하는 아이디어를 사용한다. **2차 함수 모델를 따르는 데이터셋에 선형 회귀 모델 적용 결과** 아래 그림은 2차 함수의 그래프 형식으로 분포된 데이터셋을 선형 회귀 모델로 학습시킨 결과를 보여준다. 
$$\hat y = \theta_0 + \theta_1\, x_1$$ <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-06.png" width="500"/></div> **2차 함수 모델를 따르는 데이터셋에 2차 다항식 모델 적용 결과** 반면에 아래 그림은 $x_1^2$ 에 해당하는 특성 $x_2$ 를 새로이 추가한 후에 선형 회귀 모델을 학습시킨 결과를 보여준다. $$\hat y = \theta_0 + \theta_1\, x_1 + \theta_2\, x_{2}$$ <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-07.png" width="500"/></div> **사이킷런의 `PolynomialFeatures` 변환기** 활용하고자 하는 다항식에 포함되어야 하는 항목에 해당하는 특성들을 생성하는 변환기이다. ```python PolynomialFeatures(degree=d, include_bias=False) ``` `degree=d`는 몇 차 다항식을 활용할지 지정하는 하이퍼파라미터이다. :::{prf:example} 3차 다항 회귀 :label: exp:3rd_poly_reg 기존에 두 개의 $x_1, x_2$ 두 개의 특성을 갖는 데이터셋에 대해 3차 다항식 모델을 훈련시키고자 하면 $d=3$으로 설정한다. 그러면 $x_1, x_2$ 을 이용한 2차, 3차 다항식에 포함될 항목을 새로운 특성으로 추가해야 한다. 이는 $(x_1+x_2)^2$과 $(x_1+x_2)^3$의 항목에 해당하는 다음 7개의 특성을 추가해야 함을 의미한다. $$x_1^2,\,\, x_1 x_2,\,\, x_2^2,\,\, x_1^3,\,\, x_1^2 x_2,\,\, x_1 x_2^2,\,\, x_2^3$$ ::: ## 학습 곡선 다항 회귀 모델의 차수에 따라 훈련된 모델이 훈련 세트에 과소 또는 과대 적합할 수 있다. 아래 그림이 보여주듯이 선형 모델은 과소 적합되어 있는 반면에 300차 다항 회귀 모델 과대 적합 되어 있다. 그리고 2차 다항 회귀 모델의 일반화 성능이 가장 좋다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-08.png" width="500"/></div> **교차 검증 vs. 학습 곡선** 하지만 일반적으로 몇 차 다항 회귀가 가장 좋은지 미리 알 수 없다. 따라서 다양한 모델을 대상으로 교차 검증을 진행하여 과소 또는 과대 적합 모델을 구별해야 한다. * 과소 적합: 훈련 세트와 교차 검증 점수 모두 낮은 경우 * 과대 적합: 훈련 세트에 대한 검증은 우수하지만 교차 검증 점수가 낮은 경우 다른 검증 방법은 **학습 곡선**<font size='2'>learning curve</font>을 잘 살펴보는 것이다. 학습 곡선은 훈련 세트와 검증 세트에 대한 모델 성능을 비교하는 그래프이며, 학습 곡선의 모양에 따라 과소 적합 또는 과대 적합 여부를 판정할 수 있다. 사이킷런의 `learning_curve()` 함수를 이용하여 학습 곡선을 그릴 수 있다. * x 축: 훈련셋 크기. 전체 훈련셋의 10%에서 출발하여 훈련셋 전체를 대상으로 할 때까지 훈련셋의 크기를 키워가며 교차 검증 진행. * y 축: 교차 검증을 통해 확인된 훈련셋 및 검증셋 대상 RMSE(평균 제곱근 오차). **과소 적합 모델의 학습 곡선 특징** * 훈련셋(빨강)에 대한 성능: 훈련 세트가 커지면서 RMSE 증가하지만 훈련 세트가 어느 정도 커지면 거의 불변. 
* 검증셋(파랑)에 대한 성능: 검증 세트에 대한 성능이 훈련 세트에 대한 성능과 거의 비슷해짐. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-09.png" width="500"/></div> **과대 적합 모델의 학습 곡선 특징** * 훈련셋(빨강)에 대한 성능: 훈련 데이터에 대한 평균 제곱근 오차가 매우 낮음. * 검증셋(파랑)에 대한 성능: 훈련 데이터에 대한 성능과 차이가 어느 정도 이상 벌어짐. * 과대 적합 모델 개선법: 두 그래프가 맞닿을 때까지 훈련 데이터 추가. 하지만 일반적으로 더 많은 훈련 데이터를 구하는 일이 매우 어렵거나 불가능할 수 있음. 아니면 모델에 규제를 가할 수 있음. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-10.png" width="500"/></div> **모델 일반화 오차의 종류** 훈련 후에 새로운 데이터 대한 예측에서 발생하는 오차를 가리키며 세 종류의 오차가 있다. - 편향: 실제로는 2차원 모델인데 1차원 모델을 사용하는 경우처럼 잘못된 가정으로 인해 발생한다. 과소 적합이 발생할 가능성이 매우 높다. - 분산: 모델이 훈련 데이터에 민감하게 반응하는 정도를 가리킨다. 고차 다항 회귀 모델일 수록 분산이 높아질 수 있다. 일반적으로 **자유도**<font size='2'>degree of freedom</font>가 높은 모델일 수록 분산이 커지며, 과대 적합이 발생할 가능성도 매우 높다. - 축소 불가능 오차: 잡음(noise) 등 데이터 자체의 한계로 인해 발생한다. 잡음 등을 제거해야 오차를 줄일 수 있다. :::{prf:example} 편향-분산 트레이드오프 :label: exp:bias_variance 복잡한 모델일 수록 편향을 줄어들지만 분산을 커진다. ::: ## 규제 사용 선형 모델 훈련 중에 과소 적합이 발생하면 보다 복잡한 모델을 선택해야 한다. 반면에 과대 적합이 발생할 경우 먼저 모델에 규제를 가해 과대 적합을 방지하거나 아니면 최소한 과대 적합이 최대한 늦게 발생하도록 유도해야 한다. 모델 규제는 보통 모델의 자유도를 제한하는 방식으로 이루어진다. **자유도**<font size="2">degree of freedom</font>는 모델 결정에 영향을 주는 요소들의 개수이다. 예를 들어 선형 회귀의 경우에는 특성 수가 자유도를 결정하며, 다항 회귀의 경우엔 차수도 자유도에 기여한다. 선형 회귀 모델에 대한 **규제**<font size='2'>regularization</font>는 가중치를 제한하는 방식으로 이루어지며, 방식에 따라 다음 세 가지 선형 회귀 모델이 지정된다. * 릿지 회귀 * 라쏘 회귀 * 엘라스틱 넷 :::{admonition} 주의 :class: warning 규제는 훈련 과정에만 사용된다. 테스트 과정에는 다른 기준으로 성능을 평가한다. * 훈련 과정: 비용 최소화 목표 * 테스트 과정: 최종 목표에 따른 성능 평가. 예를 들어, 분류기의 경우 재현율/정밀도 기준으로 모델의 성능을 평가한다. ::: ### 릿지 회귀<font size='2'>Ridge Regression</font> 다음 비용 함수를 사용하며, 특성 스케일링을 해야 규제의 성능이 좋아진다. $$J(\theta) = \textrm{MSE}(\theta) + \alpha \sum_{i=1}^{n}\theta_i^2$$ * $\alpha$(알파)는 규제의 강도를 지정한다. $\alpha=0$ 이면 규제가 전혀 없는 기본 선형 회귀이다. * $\alpha$ 가 커질 수록 가중치의 역할이 줄어든다. 왜냐하면 비용을 줄이기 위해 보다 작은 가중치를 선호하는 방향으로 훈련되기 때문이다. 
* $\theta_0$ 는 규제하지 않는다. 아래 그림은 릿지 규제를 적용한 적용한 6 개의 경우를 보여준다. - 왼편: 선형 회귀 모델에 세 개의 $\alpha$ 값 적용. - 오른편: 10차 다항 회귀 모델에 세 개의 $\alpha$ 값 적용. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/ridge01.png" width="600"/></div> :::{admonition} 릿지 회귀의 정규 방정식 :class: info $A$ 가 `(n+1)x(n+1)` 모양의 단위 행렬<font size='2'>identity matrix</font>일 때 다음이 성립한다. $$ \hat{\mathbf{\theta}} = (\mathbf{X}^T\, \mathbf{X} + \alpha A)^{-1}\, \mathbf{X}^T\, \mathbf{y} $$ ::: ### 라쏘 회귀<font size='2'>Lasso Regression</font> 다음 비용 함수를 사용한다. $$J(\theta) = \textrm{MSE}(\theta) + \alpha \, \sum_{i=1}^{n}\mid \theta_i\mid$$ * 별로 중요하지 않은 특성에 대해 $\theta_i$가 0에 빠르게 수렴하도록 훈련 중에 유도된다. 이유는 $\mid \theta_i \mid$ 의 미분값이 1또는 -1 이기에 상대적으로 큰 값이기에 파라미터 업데이크 과정에서 보다 작은 $\mid \theta_i \mid$ 가 보다 빠르게 0에 수렴하기 때문이다. * $\alpha$ 와 $\theta_0$ 에 대한 설명은 릿지 회귀의 경우와 동일하다. 아래 그림은 라쏘 규제를 적용한 적용한 6 개의 경우를 보여준다. - 왼편: 선형 회귀 모델에 세 개의 $\alpha$ 값 적용. - 오른편: 10차 다항 회귀 모델에 세 개의 $\alpha$ 값 적용. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/lasso01.png" width="600"/></div> :::{admonition} 주의 사항 :class: warning 라쏘 회귀는 정규 방정식을 지원하지 않는다. ::: ### 엘라스틱 넷<font size='2'>Elastic Net</font> 릿지 회귀와 라쏘 회귀를 절충한 모델이며 다음 비용 함수를 사용한다. $r$ 은 릿지 규제와 라쏘 규제의 사용 비율이다. 단, 규제 강도를 의미하는 `\alpha` 가 각 규제에 가해지는 정도가 다름에 주의한다. $$ J(\theta) = \textrm{MSE}(\theta) + r\cdot \bigg (2 \alpha \, \sum_{i=1}^{n}\mid\theta_i\mid \bigg) + (1-r)\cdot \bigg (\frac{\alpha}{m}\, \sum_{i=1}^{n}\theta_i^2 \bigg ) $$ :::{admonition} 규제 선택 :class: info 약간이라도 규제를 사용해야 하며, 일반적으로 릿지 회귀가 추천된다. 반면에 유용한 속성이 그렇게 많지 않다고 판단되는 경우엔 라쏘 회귀 또는 엘라스틱 넷이 추천된다. 하지만 특성 수가 훈련 샘플 수보다 크거나 특성 몇 개가 강하게 연관되어 있는 경우엔 엘라스틱 넷을 사용해야 한다. ::: (sec:early-stopping)= ### 조기 종료 **조기 종료**<font size='2'>Early Stopping</font>는 모델이 훈련셋에 과대 적합하는 것을 방지하기 위해 훈련을 적절한 시기에 중단시키는 기법이며, 검증 데이터에 대한 손실이 줄어들다가 다시 커지는 순간 훈련을 종료한다. 
확률적 경사하강법, 미니 배치 경사하강법에서는 손실 곡선이 보다 많이 진동하기에 검증 손실이 언제 최소가 되었는지 알기 어렵다. 따라서 한동안 최솟값보다 높게 유지될 때 훈련을 멈추고 기억해둔 최적의 모델로 되돌린다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-11.png" width="500"/></div> ## 로지스틱 회귀 회귀 모델을 분류 모델로 활용할 수 있다. * 이진 분류: 로지스틱 회귀 * 다중 클래스 분류: 소프트맥스 회귀 ### 확률 추정 선형 회귀 모델이 예측한 값에 **시그모이드**<font size='2'>sigmoid</font> 함수를 적용하여 0과 1 사이의 값, 즉 양성일 **확률** $\hat p$ 로 지정한다. $$ \hat p = h_\theta(\mathbf{x}) = \sigma(\mathbf{\theta}^T \, \mathbf{x}) = \sigma(\theta_0 + \theta_1\, x_1 + \cdots + \theta_n\, x_n) $$ **시그모이드 함수** $$\sigma(t) = \frac{1}{1 + \exp(-t)}$$ <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-12.png" width="500"/></div> 로지스틱 회귀 모델의 **예측값**은 다음과 같다. $$ \hat y = \begin{cases} 0 & \text{if}\,\, \hat p < 0.5 \\[1ex] 1 & \text{if}\,\, \hat p \ge 0.5 \end{cases} $$ 즉, 다음이 성립한다. * 양성: $\theta_0 + \theta_1\, x_1 + \cdots + \theta_n\, x_n \ge 0$ * 음성: $\theta_0 + \theta_1\, x_1 + \cdots + \theta_n\, x_n < 0$ ### 훈련과 비용함수 로지스틱 회귀 모델은 양성 샘플에 대해서는 1에 가까운 확률값을, 음성 샘플에 대해서는 0에 가까운 확률값을 내도록 훈련한다. 각 샘플에 대한 비용은 다음과 같다. $$ c(\theta) = \begin{cases} -\log(\,\hat p\,) & \text{$y=1$ 인 경우}\\ -\log(\,1 - \hat p\,) & \text{$y=0$ 인 경우} \end{cases} $$ 양성 샘플에 대해 0에 가까운 값을 예측하거나, 음성 샘플에 대해 1에 가까운 값을 예측하면 위 비용 함수의 값이 무한히 커진다. 모델 훈련은 따라서 전체 훈련셋에 대한 다음 **로그 손실**<font size='2'>log loss</font> 함수는 다음과 같다. $$ J(\theta) = - \frac{1}{m}\, \sum_{i=1}^{m}\, [y^{(i)}\, \log(\,\hat p^{(i)}\,) + (1-y^{(i)})\, \log(\,1 - \hat p^{(i)}\,)] $$ :::{admonition} 로그 손실 함수 이해 :class: info $c(\theta)$ 는 틀린 예측을 하면 손실값이 매우 커진다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-12-10a.png" width="500"/></div> 훈련셋이 가우시안 분포를 따른다는 전제하에 로그 손실 함수를 최소화하면 **최대 우도**<font size='2'>maximal likelihood</font>를 갖는 최적의 모델을 얻을 수 있다는 사실은 수학적으로 증명되었다. 
상세 내용은 [앤드류 응(Andrew Ng) 교수의 Stanford CS229](https://www.youtube.com/watch?v=jGwO_UgTS7I&list=PLoROMvodv4rMiGQp3WXShtMGgzqpfVfbU) 강의에서 들을 수 있다. ::: 최적의 $\theta$ 를 계산하는 정규 방정식은 하지 않는다. 하지만 다행히도 경사하강법은 적용할 수 있으며, 선형 회귀의 경우처럼 적절한 학습률을 사용하면 언제나 최소 비용에 수렴하도록 파라미터가 훈련된다. 참고로 로그 손실 함수의 그레이디이언트 벡터는 선형 회귀의 그것과 매우 유사하며, 다음 편도 함수들로 이루어진다. $$ \dfrac{\partial}{\partial \theta_j} J(\boldsymbol{\theta}) = \dfrac{1}{m}\sum\limits_{i=1}^{m}\left(\mathbf{\sigma(\boldsymbol{\theta}}^T \mathbf{x}^{(i)}) - y^{(i)}\right)\, x_j^{(i)} $$ ### 결정 경계 붓꽃 데이터셋을 이용하여 로지스틱 회귀의 사용법을 살펴 본다. 하나의 붓꽃 샘플은 꽃받침<font size='2'>sepal</font>의 길이와 너비, 꽃입<font size='2'>petal</font>의 길이와 너비 등 총 4개의 특성으로 이루어진다. 타깃값은 0, 1, 2 중에 하나이며 각 숫자는 다음 세 개의 품종을 가리킨다. * 0: Iris-Setosa(세토사) * 1: Iris-Versicolor(버시컬러) * 2: Iris-Virginica(버지니카) <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/iris01.png" width="600"/></div> **버지니카 품종 감지기** 로지스틱 회귀 모델을 이용하여 아이리스 데이터셋을 대상으로 버지니카 품종을 감지하는 이진 분류기를 다음과 같이 훈련시킨다. 단, 문제를 간단하기 만들기 위해 꽃잎의 너비 속성 하나만 이용한다. ```python X = iris.data[["petal width (cm)"]].values y = iris.target_names[iris.target] == 'virginica' X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) log_reg = LogisticRegression(random_state=42) log_reg.fit(X_train, y_train) ``` 훈련 결과 꽃잎의 넙가 약 1.65cm 보다 큰 경우 버지니카 품종일 가능성이 높아짐이 확인된다. 즉, 버지니카 품좀 감지기의 **결정 경계**<font size='2'>decision boundary</font>는 꽃잎 넙 기준으로 약 1.65cm 이다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/iris02.png" width="700"/></div> 아래 그림은 꽃잎의 너비와 길이 두 속성을 이용한 버지니카 품종 감지기가 찾은 결정 경계(검정 파선)를 보여준다. 반면에 다양한 색상의 직선은 버지니카 품종일 가능성을 보여주는 영역을 표시한다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-15.png" width="700"/></div> **로지스틱 회귀 규제** `LogisticRegression` 모델의 하이퍼파라미터 `penalty` 와 `C` 를 이용하여 규제와 규제의 강도를 지정한다. 
* `penalty`: `l1`(라쏘 규제), `l2`(릿지 규제), `elasticnet`(엘라스틱 넷) 방식 중 하나 선택하며, 기본값은 `l2`, 즉, 릿지 규제를 기본으로 적용한다. * `C`: 릿지 또는 라쏘 규제 정도를 지정하는 $\alpha$의 역수에 해당한다. 따라서 0에 가까울 수록 강한 규제를 의미한다. ### 소프트맥스 회귀 로지스틱 회귀 모델을 일반화하여 다중 클래스 분류를 지원하도록 만든 모델이 **소프트맥스 회귀**<font size='2'>Softmax Regression</font>이며, **다항 로지스틱 회귀** 라고도 불린다. **클래스별 확률 예측** 샘플 $\mathbf x$가 주어졌을 때 각각의 분류 클래스 $k$ 에 대한 점수 $s_k(\mathbf x)$를 선형 회귀 방식으로 계산한다. $$ s_k(\mathbf{x}) = \theta_0^{(k)} + \theta_1^{(k)} x_1 + \cdots + \theta_n^{(k)} x_n $$ 이는 $k\, (n+1)$ 개의 파라미터를 학습시켜야 함을 의미한다. 위 식에서 $\theta_i^{(k)}$ 는 분류 클래스 $k$에 대해 필요한 $i$ 번째 속성을 대상으로 파라미터를 가리킨다. 예를 들어, 붓꽃 데이터를 대상으로 하는 경우 최대 15개의 파라미터를 훈련시켜야 한다. $$ \Theta = \begin{bmatrix} \theta_0^{(0)} & \theta_1^{(0)} & \theta_2^{(0)} & \theta_3^{(0)} & \theta_4^{(0)}\\ \theta_0^{(1)} & \theta_1^{(1)} & \theta_2^{(1)} & \theta_3^{(1)} & \theta_4^{(1)}\\ \theta_0^{(2)} & \theta_1^{(2)} & \theta_2^{(2)} & \theta_3^{(2)} & \theta_4^{(2)} \end{bmatrix} $$ 이제 다음 **소프트맥스** 함수를 이용하여 클래스 $k$에 속할 확률 $\hat p_k$ 를 계산한다. 단, $K$ 는 클래스의 개수를 나타낸다. $$ \hat p_k = \frac{\exp(s_k(\mathbf x))}{\sum_{j=1}^{K}\exp(s_j(\mathbf x))} $$ 소프트맥스 회귀 모델은 각 샘플에 대해 추정 확률이 가장 높은 클래스를 선택한다. $$ \hat y = \mathrm{argmax}_k s_k(\mathbf x) $$ :::{admonition} 소프트맥스 회귀와 다중 출력 분류 :class: tip 소프트맥스 회귀는 다중 출력<font size='2'>multioutput</font> 분류를 지원하지 않는다. 예를 들어, 하나의 사진에서 여러 사람의 얼굴을 인식하는 데에 사용할 수 없다. ::: **소프트맥스 회귀의 비용 함수** 각 분류 클래스 $k$에 대한 적절한 가중치 벡터 $\theta_k$를 경사하강법을 이용하여 업데이트 한다. 이를 위해 **크로스 엔트로피**<font size='2'>cross entropy</font>를 비용 함수로 사용한다. $$ J(\Theta) = - \frac{1}{m}\, \sum_{i=1}^{m}\sum_{k=1}^{K} y^{(i)}_k\, \log\big( \hat{p}_k^{(i)}\big) $$ 위 식에서 $y^{(i)}_k$ 는 타깃 확률값을 가리키며, 0 또는 1 중에 하나의 값을 갖는다. $K=2$이면 로지스틱 회귀의 로그 손실 함수와 정확하게 일치한다. 크로스 엔트로피는 주어진 샘플의 타깃 클래스를 제대로 예측하지 못하는 경우 높은 값을 갖는다. 크로스 엔트로피 개념은 정보 이론에서 유래하며, 여기서는 더 이상 설명하지 않는다. **붓꽃 데이터 다중 클래스 분류** 사이킷런의 `LogisticRegression` 예측기를 활용한다. 기본값 `solver=lbfgs` 사용하면 모델이 알아서 다중 클래스 분류를 훈련한다. 
아래 코드는 꽃잎의 길이와 너비 두 특성을 이용하여 세토사, 버시컬러, 버지니카 클래스를 선택하는 모델을 훈련시킨다. ```python X = iris.data[["petal length (cm)", "petal width (cm)"]].values y = iris["target"] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) softmax_reg = LogisticRegression(C=30, random_state=42) softmax_reg.fit(X_train, y_train) ``` 아래 그림은 붓꽃 꽃잎의 너비와 길이를 기준으로 세 개의 품종을 색까로 구분하는 결정 경계를 보여준다. 다양한 색상의 곡선은 버시컬러 품종에 속할 확률의 영력을 보여준다. <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch04/homl04-16.png" width="700"/></div> ## 연습문제 참고: [(실습) 모델 훈련](https://colab.research.google.com/github/codingalzi/handson-ml3/blob/master/practices/practice_training_models.ipynb)
github_jupyter
# OPTIMADE and *pymatgen* # What is *pymatgen*? [*pymatgen*](https://pymatgen.org) is a materials science analysis code written in the Python programming language. It helps power the [Materials Project](https://materialsproject.org)'s high-throughput DFT workflows. It supports integration with a wide variety of simulation codes and can perform many analysis tasks such as the generation of phase diagrams or diffraction patterns. # The motivation behind this tutorial **This tutorial is aimed either at:** * People who are already familiar with using *pymatgen* or the Materials Project * In particular, anyone already using the Materials Project API through the `MPRester`, and who would like to start using the OPTIMADE API in a similar way * People who like using Python and think they might appreciate an interface like the one provided by *pymatgen*. * *pymatgen* provides a lot of input/output routines (such as conversion to CIF, POSCAR, etc.) and analysis tools (such as determination of symmetry, analysis of possible bonds, etc.) that can be performed directly on structures retrieved from OPTIMADE providers. **What this tutorial is not:** * This is not necessarily the way everyone should be accessing OPTIMADE providers! * This tool may be useful to you, or it may not be. There are a lot of good tools available in our community. You are encouraged to try out different tools and find the one that's most useful for your own work. * It is not currently the best way to access OPTIMADE APIs for advanced users. * It is still under development. * It is unit tested against several OPTIMADE providers but **some do not work yet**. * It only currently supports information retrieval from `/v1/structures/` routes. # Pre-requisites This tutorial is aimed at people who already have a basic understanding of Python, including how to import modules, the use of basic data structures like dictionaries and lists, and how to intantiate and use objects. 
If you do not have this understanding of Python, this tutorial may help you become familiar, but you are highly encouraged to follow a dedicated Python course such as those provided by [Software Carpentry](https://software-carpentry.org). # Install pymatgen This tutorial uses the Python programming language. It can be run on any computer with Python installed. For convenience, here we are running in Google's "Colaboratory" notebook environment. Before we begin, we must install the `pymatgen` package: ``` !pip install pymatgen pybtex retrying ``` Next, let us **verify the correct version of *pymatgen* is installed**. This is good practice to do before starting out! For this tutorial we need version 2022.0.14 or above. We also need the `pybtex` package installed. ``` from importlib_metadata import version version("pymatgen") ``` # Import and learn about the `OptimadeRester` The `OptimadeRester` is a class that is designed to retrieve data from an OPTIMADE provider and automatically convert the data into *pymatgen* `Structure` objects. These `Structure` objects are designed as a good intermediate format for crystallographic structure analysis, transformation and input/output. You can read documentation on the `OptimadeRester` here: https://pymatgen.org/pymatgen.ext.optimade.html ``` from pymatgen.ext.optimade import OptimadeRester ``` The first step is to inspect the **documentation** for the `OptimadeRester`. We can run: ``` OptimadeRester? ``` # Understanding "aliases" as shortcuts for accessing given providers ``` OptimadeRester.aliases ``` These aliases are useful since they can provide a quick shorthand for a given database without having to remember a full URL. This list of aliases is updated periodically. However, new OPTIMADE providers can be made available and will be listed at https://providers.optimade.org. The `OptimadeRester` can query the OPTIMADE providers list to refresh the available aliases. 
You can do this as follows, but be aware this might take a few moments: ``` opt = OptimadeRester() opt.refresh_aliases() ``` # Connecting to one or more OPTIMADE providers Let's begin by connecting to the Materials Project (`mp`) and Materials Cloud "3DD" (`mcloud.threedd`) databases. ``` opt = OptimadeRester(["mp", "mcloud.threedd"]) ``` We can find more information about the OPTIMADE providers we are connected to using the `describe()` method. ``` print(opt.describe()) ``` # Query for materials: binary nitrides case study `OptimadeRester` provides an `get_structures` method. **It does not support all features of OPTIMADE filters** but is a good place to get started. For this case study, we will search for materials containing nitrogen and that have two elements. ``` results = opt.get_structures(elements=["N"], nelements=2) ``` We see that the `OptimadeRester` does some of the hard work for us: it automatically retrieves multiple pages of results when many results are available, and also gives us a progress bar. Let us inspect the `results`: ``` type(results) # this method returns a dictionary, so let's examine the keys of this dictionary... results.keys() # we see that the results dictionary is keyed by provider/alias results['mp'].keys() # and these are then keyed by that database's unique identifier ``` So let us inspect one structure as an example: ``` example_structure = results['mp']['mp-804'] print(example_structure) ``` We can then use *pymatgen* to further manipulate these `Structure` objects, for example to calculate the spacegroup or to convert to a CIF: ``` example_structure.get_space_group_info() print(example_structure.to(fmt="cif", symprec=0.01)) ``` # Data analysis This section I will use some code I prepared earlier to summarize the `results` into a tabular format (`DataFrame`). 
``` import pandas as pd records = [] for provider, structures in results.items(): for identifier, structure in structures.items(): records.append({ "provider": provider, "identifier": identifier, "formula": structure.composition.reduced_formula, "spacegroup": structure.get_space_group_info()[0], "a_lattice_param": structure.lattice.a, "volume": structure.volume, }) df = pd.DataFrame(records) df ``` To pick one specific formula as an example, we can use tools from `pandas` to show the spacegroups present for that formula: ``` df[df["formula"] == "GaN"].spacegroup ``` Here, we see that there are a few common high-symmetry spacegroups (such as $P6_3mc$) there are also many low-symmetry structures ($P1$). I know that in this instance, this is because the $P1$ structures are actually amorphous and not crystalline. This highlights the importance of doing appropraiate **data cleaning** on retrieved data. ### Plotting data As a quick example, we can also plot information in our table: ``` import plotly.express as px px.bar(df, x="spacegroup", facet_row="provider") ``` **Remember, there is no single "best database" to use. Every database might be constructed for a specific purpose, subject to different biases, with different data qualities and sources.** The ideal database for one scientist with one application in mind may be different to the ideal database for another scientist with a different application. **The power of OPTIMADE is that you can query across multiple databases!** # Advanced usage: querying using the OPTIMADE filter grammar You can also query using an OPTIMADE filter as defined in the OPTIMADE specification and publication. **This is recommended** for advanced queries to use the full power of OPTIMADE. 
For example, the above query could have equally been performed as: ``` results = opt.get_structures_with_filter('(elements HAS ALL "N") AND (nelements=2)') ``` # Advanced usage: retrieving provider-specific property information The OPTIMADE specification allows for providers to include database-specific information in the returned data, prefixed by namespace. To access this information with *pymatgen* we have to request "snls" (`StructureNL`) instead of "structures". A `StructureNL` is a `Structure` with additional metadata included, such as the URL it was downloaded from and any of this additional database-specific information. ``` results_snls = OptimadeRester("odbx").get_snls(nelements=2) example_snl = results_snls['odbx']['odbx/2'] example_snl.data['_optimade']['_odbx_thermodynamics'] ``` This extra data provided differs from every database, and sometimes from material to material, so some exploration is required! # When Things Go Wrong and How to Get Help Bugs may be present! The `OptimadeRester` is still fairly new. If it does not work it is likely because of either: * A bug in the *pymatgen* code. This may be reported directly to Matthew Horton at mkhorton@lbl.gov or an issue can be opened in the *pymatgen* code repository. Matt apologises in advance if this is the case! * An issue with a provider. This may be because the provider does not yet fully follow the OPTIMADE specification, because the provider is suffering an outage, or because the filters are not yet optimized with that provider. * If this happens, you may try to first increase the `timeout` value to something larger. The default is too low for some providers. * Otherwise, you may want to contact the provider directly, or create a post at the OPTIMADE discussion forum: https://matsci.org/optimade # How to Get Involved New developers are very welcome to add code to *pymatgen*! If you want to get involved, help fix bugs or add new features, your help would be very much appreciated. 
*pymatgen* can only exist and be what it is today thanks to the many efforts of its [development team](https://pymatgen.org/team.html).
github_jupyter
# U.S. Border Patrol Nationwide Apprehensions by Citizenship and Sector **Data Source:** [CBP Apprehensions](https://www.cbp.gov/sites/default/files/assets/documents/2021-Aug/USBORD~3.PDF) <br> **Download the Output:** [here](../data/extracted_data/) ## Overview The source PDF is a large and complex PDF with varying formats across pages. This notebook demonstrates how to extract all data from this PDF into a single structured table. Though not explored in this notebook there are many other PDFs which could be extracted, including many more that CBP posts on their website. This code can be use to extract data from PDFs, and convert them into a more usable format (either within Python, or a csv). **See**: dataset source: https://www.cbp.gov/newsroom/media-resources/stats <br> ## Technical Approach We download our PDF of interest and then use [tabula](https://github.com/chezou/tabula-py) and a good deal of custom Python code to process all pages of the PDF into a single structured table that can be used for further analysis. ## Skills Learned 1. How to download a PDF 2. How to use tabula to extract data from a complex pdf 3. How to deal with errors generated in the extraction process 4. How to clean up and format final output table ## The Code **PLEASE NOTE**: We have made this notebook READ only to ensure you receive all updates we make to it. Do not edit this notebook directly, create a copy instead. To customize and experiment with this notebook: 1. Create a copy: `Select File -> Make a Copy` at the top-left of the notebook 2. Unlock cells in your copy: Press `CMD + A` on your keyboard to select all cells, then click the small unlocked padlock button near the mid-top right of the notebook. 
``` import logging import logging.config from pathlib import Path import pandas as pd import requests import tabula from tabula.io import read_pdf from PyPDF2 import PdfFileReader pd.set_option("max_rows", 400) # Below just limits warnings that can be ignored logging.config.dictConfig( { "version": 1, "disable_existing_loggers": True, } ) ``` --------- # 1. Download PDF Let's first download the [PDF](https://www.cbp.gov/sites/default/files/assets/documents/2021-Aug/USBORD~3.PDF) we want to extract data from. **Below we pass the:** * Path to the pdf file on the internet * What we want to call it * And the folder we want to save the file to ``` def download_pdf(url, name, output_folder): """ Function to download a single pdf file from a provided link. Parameters: url: Url of the file you want to download name: name label you want to apply to the file output_folder: Folder path to savae file Returns: Saves the file to the output directory, function itself returns nothing. Example: download_pdf( 'https://travel.state.gov/content/travel/en/legal/visa-law0/visa-statistics/immigrant-visa-statistics/monthly-immigrant-visa-issuances.html', 'July 2020 - IV Issuances by Post and Visa Class', 'visa_test/' ) """ output_folder = Path(output_folder) response = requests.get(url) if response.status_code == 200: # Write content in pdf file outpath = output_folder / f"{name}.pdf" pdf = open(str(outpath), "wb") pdf.write(response.content) pdf.close() print("File ", f"{name}.pdf", " downloaded") else: print("File ", f"{name}.pdf", " not found.") ``` Now call our function ``` download_pdf( "https://www.cbp.gov/sites/default/files/assets/documents/2021-Aug/USBORD~3.PDF", # <- the url "US Border Patrol Nationwide Apps by Citizenship & Sector", # <- our name for it "../data/raw_source_files/", # <- Output directory ) ``` **We have now downloaded the file locally** We will create variable to store path to local PDF file path ``` pdf_path = "../data/raw_source_files/US Border Patrol 
Nationwide Apps by Citizenship & Sector.pdf" ``` ## 2. Reviewing the PDF and Preparing to Extract Data This file is somewhat hard to extract data from. The columns merged fields and sub headings etc. Also if you scroll through the whole file you will see that the table format changes somewhat. Therefore we are going to hardcode the actual columnns we are interested in. Below we see an image of the first table in the pdf. ![cbp_appr_col_example.png](attachment:cbp_appr_col_example.png) Since it is hard to capture the correct column names, below we create a variable called `cols` where we save the columns names we will use in our table. These columns refer to citizenship of the person, where they were encountered and different aggregations based on border location (SW, North, Coast). ``` cols = [ "citizenship", "bbt", "drt", "elc", "ept", "lrt", "rgv", "sdc", "tca", "yum", "sbo_total", # SBO "blw", "bun", "dtm", "gfn", "hlt", "hvm", "spw", "swb", "nbo_total", "mip", "nll", "rmy", "cbo_total", "total", ] ``` ------- ## 3. Extracting the Data Below we have a bunch of code that will iterate through the PDF pages and extract data. We know this is a lot but suggest reviewing the comments in the code (anything starting with a #) to get a sense of what is going on. **Now run the process** ``` print("*Starting Process") def fix_header_pages(df): df.columns = cols df = df.drop([0, 1], axis=0) return df # List to store the tables we encounter tables = [] # Dataframe to store table segments table_segments = pd.DataFrame() # Start on page 1 (PDF is not zero indexed like python but regular indexed .. 
starts with 1 not 0) start = 1 # Read the pdf with PdfFileReader to get the number of pages stop = PdfFileReader(pdf_path).getNumPages() + 1 # Something to count the number of table swe encounter table_num = -1 for page_num in range(start, stop): print(f" **Processing Page: {page_num} of {stop}") new_table = False # New tables are where a new year starts (2007, 2008, etc) # Extract data using tabula df = read_pdf( pdf_path, pages=f"{page_num}", lattice=True, pandas_options={"header": None} )[0] # If it is AFGHANISTAN we have a new table if "AFGHANISTAN" in df.loc[2][0]: new_table = True table_num += 1 # If CITIZENSHIP is in the first row - its a header not data so we want to remove if "CITIZENSHIP" in df.loc[0][0]: df = fix_header_pages(df) # Mixed formats in this pdf else: df.columns = cols # Check for errors check_for_error = df[df.citizenship.str.isdigit()] if len(check_for_error) > 0: # If there was an error we try to fix it with some special tabula arguments fixed = False missing_country_df = read_pdf( pdf_path, pages=f"{page_num}", stream=True, area=(500, 5.65, 570, 5.65 + 800), pandas_options={"header": None}, )[0] missing_country = missing_country_df.tail(1)[0].squeeze() print( f" *** --> ERROR!! pg:{page_num}, country={missing_country}, review table_num={table_num} in tables (list object) - if not fixed automatically" ) if missing_country_df.shape[1] == df.shape[1]: fixed = True print(" *** --> --> !! 
Success - Likely Fixed Automatically") missing_country_df.columns = cols df.loc[check_for_error.index[0]] = missing_country_df.iloc[-1] if not fixed: df.loc[ check_for_error.index[0], "citizenship" ] = f" *** -->ERROR - {missing_country}" # Check if new table if page_num != start and new_table: tables.append(table_segments) table_segments = df else: table_segments = table_segments.append(df) tables.append(table_segments) tables = [table.reset_index(drop=True) for table in tables if len(table) > 0] print("*Process Complete") ``` ### Manual Fixes Above, we see that there were 3 errors. 1. pg: 35, Syria 2. pg: 37, Ireland 3. pg: 38, Unknown We were able to fix `#2` automatically but `#1` and `#3` need manual correction. If you are wondering why these were not collected correctly it is because on pg 35, 37 and 38 the table is missing a strong black line at the bottom of the table. Tabula uses strong lines to differentiate data from other parts of the pdf. Below we see the pg 35, Syria example. Ireland was fixed automatically by using some different arguments for the python tabula package. In that instance it worked and allowed for automatically correcting the data, for Syria and Unknown though it was not successful. ![cbp_apprehension_missing_bottom.png](attachment:cbp_apprehension_missing_bottom.png) We can examine the actual data by reviweing the table in the `tables` list. ``` example = tables[12].reset_index() example.iloc[117:120] ``` Above we look at table `#12` which referes to FY2018, and specifically the end of page 35 and the beginning of page 36. We see that SYRIA has no information. But if we look at the pdf (see image above) it does have information. Therefore we will have to correct this manually. 
**Below is just a list of values that provides the information that was not collected for Syria on pg 35** ``` syria_correct = [ "SYRIA", 0, 0, 0, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, ] len(syria_correct) ``` **And then the Unknown countries for page 38** ``` unknown_correct = [ "UNNKOWN", 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] len(unknown_correct) ``` **We grab the table and then assign the correct data to that row** Fix Syria ``` # the value assigned to tbl_index corresponds to the table_num value shown in our error message for each country tbl_index = 11 tables[tbl_index].loc[ tables[tbl_index][tables[tbl_index].citizenship.str.contains("SYRIA")].index[0] ] = syria_correct ``` Fix Unkown ``` tbl_index = 12 tables[tbl_index].loc[ tables[tbl_index][tables[tbl_index].citizenship.str.contains("UNKNOWN")].index[0] ] = unknown_correct ``` ----------- ## 4. Clean Up Tables We need to remove commas from numbers and convert string numbers to actual integer values. Below we can see that there are many cell values with `,` present. 
``` tables[0][tables[0].total.str.contains(",")] ``` We will also create a dictionary with the cleaned tables and better labels ``` # Get just the specific station/crossing columns (not totals) station_cols = [ i for i in cols if i not in ["citizenship", "sbo_total", "nbo_total", "cbo_total", "total"] ] total_cols = ["sbo_total", "nbo_total", "cbo_total", "total"] def clean_tables(df): df = df.fillna(0).reset_index(drop=True) df["total"] = [ int(i.replace(",", "")) if isinstance(i, str) else i for i in df["total"] ] for c in station_cols + total_cols: df.loc[:, c] = [ int(i.replace(",", "")) if isinstance(i, str) else i for i in df[c] ] return df data = { f"total_apprehensions_FY{idx+7:02}": clean_tables(df) for idx, df in enumerate(tables) } ``` **Here are the keys in the dictionary - they relate to the specific `FY-Year` of the data** ``` data.keys() ``` **Sanity Check** We can compare the `TOTAL` column to the actual summed row totals to see if the data was extracted correctly ``` table_name = "total_apprehensions_FY19" totals = data[table_name].query('citizenship == "TOTAL"') pd.concat( [data[table_name].query('citizenship != "TOTAL"').sum(axis=0), totals.T], axis=1 ) ``` Looks pretty good! ## Combine the data into a single dataframe We will create a single dataframe but will add two columns, one (`label`) that will store the file key, and two (`year`) the fiscal year. ``` combined = pd.DataFrame() for k in data: tmp = data[k] tmp["label"] = k combined = combined.append(tmp) combined["year"] = combined.label.apply(lambda x: int(f"20{x[-2:]}")) combined combined.citizenship = [str(i) for i in combined.citizenship] ``` **Export file to csv** ``` combined.to_csv("../data/extracted_data/cbp-apprehensions-nov2021.csv") ``` ----------- # Appendix ## Visualizations ### Sample Visualization Now that we have the data in a usable format, we can also visualize the data. One visualization we can make is a graph of apprehensions by citizenship. 
``` pd.pivot( index="year", columns="citizenship", values="total", data=combined[ combined.citizenship.isin( combined.groupby("citizenship") .sum() .sort_values("total", ascending=False) .head(6) .index.tolist() ) ], ).plot( figsize=(15, 8), marker="o", color=["yellow", "red", "blue", "black", "gray", "orange"], title="FY07-19 Total Apprehensions by Citizenship at US Borders", ) ``` # End
github_jupyter
# FAQs for Regression, MAP and MLE * So far we have focused on regression. We began with the polynomial regression example where we have training data $\mathbf{X}$ and associated training labels $\mathbf{t}$ and we use these to estimate weights, $\mathbf{w}$ to fit a polynomial curve through the data: \begin{equation} y(x, \mathbf{w}) = \sum_{j=0}^M w_j x^j \end{equation} * We derived how to estimate the weights using both maximum likelihood estimation (MLE) and maximum a-posteriori estimation (MAP). * Then, last class we said that we can generalize this further using basis functions (instead of only raising x to the jth power): \begin{equation} y(x, \mathbf{w}) = \sum_{j=0}^M w_j \phi_j(x) \end{equation} where $\phi_j(\cdot)$ is any basis function you choose to use on the data. * *Why is regression useful?* * Regression is a common type of machine learning problem where we want to map inputs to a value (instead of a class label). For example, the example we used in our first class was mapping silhouttes of individuals to their age. So regression is an important technique whenever you want to map from a data set to another value of interest. *Can you think of other examples of regression problems?* * *Why would I want to use other basis functions?* * So, we began with the polynomial curve fitting example just so we can have a concrete example to work through but polynomial curve fitting is not the best approach for every problem. You can think of the basis functions as methods to extract useful features from your data. For example, if it is more useful to compute distances between data points (instead of raising each data point to various powers), then you should do that instead! * *Why did we go through all the math derivations? 
You could've just provided the MLE and MAP solution to us since that is all we need in practice to code this up.* * In practice, you may have unique requirements for a particular problem and will need to decide upon and set up a different data likelihood and prior for a problem. For example, we assumed Gaussian noise for our regression example with a Gaussian zero-mean prior on the weights. You may have an application in which you know the noise is Gamma disributed and have other requirements for the weights that you want to incorporate into the prior. Knowing the process used to derive the estimate for weights in this case is a helpful guide for deriving your solution. (Also, on a practical note for the course, stepping through the math served as a quick review of various linear algebra, calculus and statistics topics that will be useful throughout the course.) * *What is overfitting and why is it bad?* * The goal of a supervised machine learning algorithm is to be able to learn a mapping from inputs to desired outputs from training data. When you overfit, you memorize your training data such that you can recreate the samples perfectly. This often comes about when you have a model that is more complex than your underlying true model and/or you do not have the data to support such a complex model. However, you do this at the cost of generalization. When you overfit, you do very well on training data but poorly on test (or unseen) data. So, to have useful trained machine learning model, you need to avoid overfitting. You can avoid overfitting through a number of ways. The methods we discussed in class are using *enough* data and regularization. Overfitting is related to the "bias-variance trade-off" (discussed in section 3.2 of the reading). There is a trade-off between bias and variance. 
Complex models have low bias and high variance (which is another way of saying, they fit the training data very well but may oscillate widely between training data points) where as rigid (not-complex-enough) models have high bias and low variance (they do not oscillate widely but may not fit the training data very well either). * *What is the goal of MLE and MAP?* * MLE and MAP are general approaches for estimating parameter values. For example, you may have data from some unknown distribution that you would like to model as best you can with a Gaussian distribution. You can use MLE or MAP to estimate the Gaussian parameters to fit the data and determine your estimate at what the true (but unknown) distribution is. * *Why would you use MAP over MLE (or vice versa)?* * As we saw in class, MAP is a method to add in other terms to trade off against the data likelihood during optimization. It is a mechanism to incorporate our "prior belief" about the parameters. In our example in class, we used the MAP solution for the weights in regression to help prevent overfitting by imposing the assumptions that the weights should be small in magnitude. When you have enough data, the MAP and the MLE solution converge to the same solution. The amount of data you need for this to occur varies based on how strongly you impose the prior (which is done using the variance of the prior distribution). # Probabilistic Generative Models * So far we have focused on regression. Today we will begin to discuss classification. * Suppose we have training data from two classes, $C_1$ and $C_2$, and we would like to train a classifier to assign a label to incoming test points whether they belong to class 1 or 2. * There are *many* classifiers in the machine learning literature. We will cover a few in this class. Today we will focus on probabilistic generative approaches for classification. 
* A *generative* approach for classification is one in which we estimate the parameters for distributions that generate the data for each class. Then, when we have a test point, we can compute the posterior probability of that point belonging to each class and assign the point to the class with the highest posterior probability. ``` import numpy as np import matplotlib.pyplot as plt from scipy.stats import multivariate_normal %matplotlib inline mean1 = [-1.5, -1] mean2 = [1, 1] cov1 = [[1,0], [0,2]] cov2 = [[2,.1],[.1,.2]] N1 = 250 N2 = 100 def generateData(mean1, mean2, cov1, cov2, N1=100, N2=100): # We are generating data from two Gaussians to represent two classes. # In practice, we would not do this - we would just have data from the problem we are trying to solve. class1X = np.random.multivariate_normal(mean1, cov1, N1) class2X = np.random.multivariate_normal(mean2, cov2, N2) fig = plt.figure() ax = fig.add_subplot(*[1,1,1]) ax.scatter(class1X[:,0], class1X[:,1], c='r') ax.scatter(class2X[:,0], class2X[:,1]) plt.show() return class1X, class2X class1X, class2X = generateData(mean1, mean2,cov1,cov2, N1,N2) ``` In the data we generated above, we have a "red" class and a "blue" class. When we are given a test sample, we will want to assign the label of either red or blue. We can compute the posterior probability for class $C_1$ as follows: \begin{eqnarray} p(C_1 | x) &=& \frac{p(x|C_1)p(C_1)}{p(x)}\\ &=& \frac{p(x|C_1)p(C_1)}{p(x|C_1)p(C_1) + p(x|C_2)p(C_2)}\\ \end{eqnarray} We can similarly compute the posterior probability for class $C_2$: \begin{eqnarray} p(C_2 | x) &=& \frac{p(x|C_2)p(C_2)}{p(x|C_1)p(C_1) + p(x|C_2)p(C_2)}\\ \end{eqnarray} Note that $p(C_1|x) + p(C_2|x) = 1$. So, to train the classifier, what we need is to determine the parametric forms and estimate the parameters for $p(x|C_1)$, $p(x|C_2)$, $p(C_1)$ and $p(C_2)$. For example, we can assume that the data from both $C_1$ and $C_2$ are distributed according to Gaussian distributions. 
In this case, \begin{eqnarray} p(\mathbf{x}|C_k) = \frac{1}{(2\pi)^{D/2}}\frac{1}{|\Sigma_k|^{1/2}}\exp\left\{ - \frac{1}{2} (\mathbf{x}-\mu_k)^T\Sigma_k^{-1}(\mathbf{x}-\mu_k)\right\} \end{eqnarray} where $D$ is the dimensionality of $\mathbf{x}$ (here $D = 2$). Given the assumption of the Gaussian form, how would you estimate the parameter for $p(x|C_1)$ and $p(x|C_2)$? *You can use maximum likelihood estimate for the mean and covariance!* The MLE estimate for the mean of class $C_k$ is: \begin{eqnarray} \mu_{k,MLE} = \frac{1}{N_k} \sum_{n \in C_k} \mathbf{x}_n \end{eqnarray} where $N_k$ is the number of training data points that belong to class $C_k$. The MLE estimate for the covariance of class $C_k$ is: \begin{eqnarray} \Sigma_k = \frac{1}{N_k} \sum_{n \in C_k} (\mathbf{x}_n - \mu_{k,MLE})(\mathbf{x}_n - \mu_{k,MLE})^T \end{eqnarray} We can determine the values for $p(C_1)$ and $p(C_2)$ from the number of data points in each class: \begin{eqnarray} p(C_k) = \frac{N_k}{N} \end{eqnarray} where $N$ is the total number of data points. ``` #Estimate the mean and covariance for each class from the training data mu1 = np.mean(class1X, axis=0) print(mu1) cov1 = np.cov(class1X.T) print(cov1) mu2 = np.mean(class2X, axis=0) print(mu2) cov2 = np.cov(class2X.T) print(cov2) # Estimate the prior for each class pC1 = class1X.shape[0]/(class1X.shape[0] + class2X.shape[0]) print(pC1) pC2 = class2X.shape[0]/(class1X.shape[0] + class2X.shape[0]) print(pC2) #We now have all parameters needed and can compute values for test samples from scipy.stats import multivariate_normal x = np.linspace(-5, 4, 100) y = np.linspace(-6, 6, 100) xm,ym = np.meshgrid(x, y) X = np.dstack([xm,ym]) #look at the pdf for class 1 y1 = multivariate_normal.pdf(X, mean=mu1, cov=cov1) plt.imshow(y1) #look at the pdf for class 2 y2 = multivariate_normal.pdf(X, mean=mu2, cov=cov2); plt.imshow(y2) #Look at the posterior for class 1 pos1 = (y1*pC1)/(y1*pC1 + y2*pC2 ); plt.imshow(pos1) #Look at the posterior for class 2 pos2 = (y2*pC2)/(y1*pC1 + y2*pC2 ); plt.imshow(pos2) #Look
at the decision boundary plt.imshow(pos1>pos2) ``` *How did we come up with using the MLE solution for the mean and variance? How did we determine how to compute $p(C_1)$ and $p(C_2)$? * We can define a likelihood for this problem and maximize it! \begin{eqnarray} p(\mathbf{t}, \mathbf{X}|\pi, \mu_1, \mu_2, \Sigma_1, \Sigma_2) = \prod_{n=1}^N \left[\pi N(x_n|\mu_1, \Sigma_1)\right]^{t_n}\left[(1-\pi)N(x_n|\mu_2, \Sigma_2) \right]^{1-t_n} \end{eqnarray} * *How would we maximize this?* As usual, we would use our "trick" and take the log of the likelihood function. Then, we would take the derivative with respect to each parameter we are interested in, set the derivative to zero, and solve for the parameter of interest. ## Reading Assignment: Read Section 4.2 and Section 2.5.2
github_jupyter
# 7. Overfitting Prevention ## Why do we need to solve overfitting? - To increase the generalization ability of our deep learning algorithms - Able to make predictions well for out-of-sample data ## Overfitting and Underfitting: Examples ![](./images/overfitting.png) - **_This is an example from scikit-learn's website where you can easily (but shouldn't waste time) recreate via matplotlib :)_** #### Degree 1: underfitting - Insufficiently fits data - High training loss - Unable to represent the true function - Bad generalization ability - Low testing accuracy #### Degree 4: "goodfitting" - Sufficiently fits data - Low training loss - Able to represent the true function - Good generalization ability - High testing accuracy #### Degree 15: overfitting - Overfits data - Very low to zero training loss - Unable to represent the true function - Bad generalization ability - Low testing accuracy ## Overfitting and Underfitting: Learning Curves - Separate training/testing datasets - Understand generalization ability through the learning curve ![](./images/overfitting_2.png) #### Underfitting: High Bias - Training/testing errors converged at a high level - More data does not help - Model has insufficient representational capacity $\rightarrow$ unable to represent underlying function - Poor data fit (high training error) - Poor generalization (high testing error) - Solution - Increase model's complexity/capacity - More layers - Larger hidden states #### Overfitting: High Variance - Training/testing errors converged with a large gap between - Excessive data fit (almost 0 training error) - Poor generalization (high testing error) - Solutions - Decrease model complexity - More data #### Goodfitting - Training/testing errors converged with very small gap at a low error level - Good data fit (low training error; not excessively low) - Good generalization (low testing error) ## Solving Overfitting - Data augmentation (more data) - Early stopping - Regularization: any changes to the 
learning algorithm to reduce testing error, not training error - Weight decay (L2 regularization) - Dropout - Batch Normalization ## Overfitting Solution 1: Data Augmentation - Expanding the existing dataset, MNIST (28x28 images) - Works for most if not all image datasets (CIFAR-10, CIFAR-100, SVHN, etc.) ### Centre Crop: 28 pixels ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets from torch.autograd import Variable # Set seed torch.manual_seed(0) ''' STEP 0: CREATE TRANSFORMATIONS ''' transform = transforms.Compose([ transforms.CenterCrop(28), transforms.ToTensor(), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), transform=transform, download=True) train_dataset_orig = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 100 n_iters = 3000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) train_loader_orig = torch.utils.data.DataLoader(dataset=train_dataset_orig, batch_size=batch_size, shuffle=True) import matplotlib.pyplot as plt %matplotlib inline for i, (images, labels) in enumerate(train_loader): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Transformed image') plt.show() if i == 1: break for i, (images, labels) in enumerate(train_loader_orig): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Original image') plt.show() if i == 1: break ``` ### Centre Crop: 22 pixels ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets # Set seed torch.manual_seed(0) ''' STEP 0: CREATE TRANSFORMATIONS ''' transform = transforms.Compose([ 
transforms.CenterCrop(22), transforms.ToTensor(), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), transform=transform, download=True) train_dataset_orig = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 100 n_iters = 3000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) train_loader_orig = torch.utils.data.DataLoader(dataset=train_dataset_orig, batch_size=batch_size, shuffle=True) import matplotlib.pyplot as plt %matplotlib inline for i, (images, labels) in enumerate(train_loader): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Transformed image') plt.show() if i == 1: break for i, (images, labels) in enumerate(train_loader_orig): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Original image') plt.show() if i == 1: break ``` ### Random Crop: 22 pixels ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets # Set seed torch.manual_seed(0) ''' STEP 0: CREATE TRANSFORMATIONS ''' transform = transforms.Compose([ transforms.RandomCrop(22), transforms.ToTensor(), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), transform=transform, download=True) train_dataset_orig = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 100 n_iters = 3000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) train_loader_orig = 
torch.utils.data.DataLoader(dataset=train_dataset_orig, batch_size=batch_size, shuffle=True) import matplotlib.pyplot as plt %matplotlib inline for i, (images, labels) in enumerate(train_loader): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Transformed image') plt.show() if i == 1: break for i, (images, labels) in enumerate(train_loader_orig): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Original image') plt.show() if i == 1: break ``` ### Random Horizontal Flip: p=0.5 ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets # Set seed torch.manual_seed(0) ''' STEP 0: CREATE TRANSFORMATIONS ''' transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ToTensor(), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), transform=transform, download=True) train_dataset_orig = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 100 n_iters = 3000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) train_loader_orig = torch.utils.data.DataLoader(dataset=train_dataset_orig, batch_size=batch_size, shuffle=True) import matplotlib.pyplot as plt %matplotlib inline for i, (images, labels) in enumerate(train_loader): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Transformed image') plt.show() if i == 3: break for i, (images, labels) in enumerate(train_loader_orig): torch.manual_seed(0) # Transformed image plt.imshow(images.numpy()[i][0], cmap='gray') plt.title('Original image') plt.show() if i == 3: break ``` ### Normalization - Not augmentation, but 
required for our initializations to have constant variance (Xavier/He) - We assumed inputs/weights drawn i.i.d. with Gaussian distribution of mean=0 - We can normalize by calculating the mean and standard deviation of each channel - MNIST only 1 channel, black - 1 mean, 1 standard deviation - Once we've the mean/std $\rightarrow$ normalize our images to have zero mean - $X = \frac{X - mean}{std}$ - X: 28 by 28 pixels (1 channel, grayscale) ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets # Set seed torch.manual_seed(0) ''' STEP 0: CREATE TRANSFORMATIONS ''' transform = transforms.Compose([ transforms.ToTensor(), # Normalization always after ToTensor and all transformations transforms.Normalize((0.1307,), (0.3081,)), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), transform=transform, download=True) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 100 n_iters = 3000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) ``` #### How did we get the mean/std? - mean=0.1307 - std=0.3081 ``` print(list(train_dataset.train_data.size())) print(train_dataset.train_data.float().mean()/255) print(train_dataset.train_data.float().std()/255) ``` #### Why divide by 255? 
- 784 inputs: each pixel 28x28 - Each pixel value: 0-255 (single grayscale) - Divide by 255 to have any single pixel value to be within [0,1] $\rightarrow$simple rescaling ### Putting everything together ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets from torch.autograd import Variable # Set seed torch.manual_seed(0) # Scheduler import from torch.optim.lr_scheduler import StepLR ''' STEP 0: CREATE TRANSFORMATIONS ''' train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) mean_mnist = train_dataset.train_data.float().mean()/255 std_mnist = train_dataset.train_data.float().std()/255 transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((mean_mnist,), (std_mnist,)), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), transform=transform, download=True) test_dataset = dsets.MNIST(root='./data', train=False, #transform=transforms.ToTensor(), transform=transform) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 128 n_iters = 10000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) ''' STEP 3: CREATE MODEL CLASS ''' class FeedforwardNeuralNetModel(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim): super(FeedforwardNeuralNetModel, self).__init__() # Linear function self.fc1 = nn.Linear(input_dim, hidden_dim) # Linear weight, W, Y = WX + B nn.init.kaiming_normal_(self.fc1.weight) # Non-linearity self.relu = nn.ReLU() # Linear function (readout) self.fc2 = nn.Linear(hidden_dim, output_dim) nn.init.kaiming_normal_(self.fc2.weight) def forward(self, x): # Linear function out 
= self.fc1(x) # Non-linearity out = self.relu(out) # Linear function (readout) out = self.fc2(out) return out ''' STEP 4: INSTANTIATE MODEL CLASS ''' input_dim = 28*28 hidden_dim = 100 output_dim = 10 model = FeedforwardNeuralNetModel(input_dim, hidden_dim, output_dim) ####################### # USE GPU FOR MODEL # ####################### device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.to(device) ''' STEP 5: INSTANTIATE LOSS CLASS ''' criterion = nn.CrossEntropyLoss() ''' STEP 6: INSTANTIATE OPTIMIZER CLASS ''' learning_rate = 0.1 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True) ''' STEP 7: INSTANTIATE STEP LEARNING SCHEDULER CLASS ''' # step_size: at how many multiples of epoch you decay # step_size = 1, after every 2 epoch, new_lr = lr*gamma # step_size = 2, after every 2 epoch, new_lr = lr*gamma # gamma = decaying factor scheduler = StepLR(optimizer, step_size=1, gamma=0.96) ''' STEP 8: TRAIN THE MODEL ''' iter = 0 for epoch in range(num_epochs): # Decay Learning Rate scheduler.step() # Print Learning Rate print('Epoch:', epoch,'LR:', scheduler.get_lr()) for i, (images, labels) in enumerate(train_loader): # Load images as tensors with gradient accumulation abilities images = images.view(-1, 28*28).requires_grad_().to(device) labels = labels.to(device) # Clear gradients w.r.t. parameters optimizer.zero_grad() # Forward pass to get output/logits outputs = model(images) # Calculate Loss: softmax --> cross entropy loss loss = criterion(outputs, labels) # Getting gradients w.r.t. 
parameters loss.backward() # Updating parameters optimizer.step() iter += 1 if iter % 500 == 0: # Calculate Accuracy correct = 0 total = 0 # Iterate through test dataset for images, labels in test_loader: # Load images and resize images = images.view(-1, 28*28).to(device) # Forward pass only to get logits/output outputs = model(images) # Get predictions from the maximum value _, predicted = torch.max(outputs.data, 1) # Total number of labels total += labels.size(0) # Total correct predictions correct += (predicted.type(torch.FloatTensor).cpu() == labels.type(torch.FloatTensor)).sum() accuracy = 100. * correct.item() / total # Print Loss print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy)) ``` ## Overfitting Solution 2: Early Stopping ![](./images/early_stopping.png) ### How do we do this via PyTorch? 3 Steps. 1. Track validation accuracy 2. Whenever validation accuracy is better, we save the model's parameters 3. Load the model's best parameters to test ``` import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets from torch.autograd import Variable # New import for creating directories in your folder import os # Set seed torch.manual_seed(0) # Scheduler import from torch.optim.lr_scheduler import StepLR ''' CHECK LOG OR MAKE LOG DIRECTORY ''' # This will create a directory if there isn't one to store models if not os.path.isdir('logs'): os.mkdir('logs') ''' STEP 0: CREATE TRANSFORMATIONS ''' train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) mean_mnist = train_dataset.train_data.float().mean()/255 std_mnist = train_dataset.train_data.float().std()/255 transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((mean_mnist,), (std_mnist,)), ]) ''' STEP 1: LOADING DATASET ''' train_dataset = dsets.MNIST(root='./data', train=True, #transform=transforms.ToTensor(), 
transform=transform, download=True) test_dataset = dsets.MNIST(root='./data', train=False, #transform=transforms.ToTensor(), transform=transform) ''' STEP 2: MAKING DATASET ITERABLE ''' batch_size = 128 n_iters = 10000 num_epochs = n_iters / (len(train_dataset) / batch_size) num_epochs = int(num_epochs) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) ''' STEP 3: CREATE MODEL CLASS ''' class FeedforwardNeuralNetModel(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim): super(FeedforwardNeuralNetModel, self).__init__() # Linear function self.fc1 = nn.Linear(input_dim, hidden_dim) # Linear weight, W, Y = WX + B nn.init.kaiming_normal_(self.fc1.weight) # Non-linearity self.relu = nn.ReLU() # Linear function (readout) self.fc2 = nn.Linear(hidden_dim, output_dim) nn.init.kaiming_normal_(self.fc2.weight) def forward(self, x): # Linear function out = self.fc1(x) # Non-linearity out = self.relu(out) # Linear function (readout) out = self.fc2(out) return out ''' STEP 4: INSTANTIATE MODEL CLASS ''' input_dim = 28*28 hidden_dim = 100 output_dim = 10 model = FeedforwardNeuralNetModel(input_dim, hidden_dim, output_dim) ####################### # USE GPU FOR MODEL # ####################### device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.to(device) ''' STEP 5: INSTANTIATE LOSS CLASS ''' criterion = nn.CrossEntropyLoss() ''' STEP 6: INSTANTIATE OPTIMIZER CLASS ''' learning_rate = 0.1 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True) ''' STEP 7: INSTANTIATE STEP LEARNING SCHEDULER CLASS ''' # step_size: at how many multiples of epoch you decay # step_size = 1, after every 2 epoch, new_lr = lr*gamma # step_size = 2, after every 2 epoch, new_lr = lr*gamma # gamma = decaying factor scheduler = StepLR(optimizer, step_size=1, gamma=0.96) ''' STEP 
8: TRAIN THE MODEL ''' iter = 0 # Validation accuracy tracker val_acc = 0 for epoch in range(num_epochs): # Decay Learning Rate scheduler.step() # Print Learning Rate print('Epoch:', epoch,'LR:', scheduler.get_lr()) for i, (images, labels) in enumerate(train_loader): # Load images images = images.view(-1, 28*28).requires_grad_().to(device) labels = labels.to(device) # Clear gradients w.r.t. parameters optimizer.zero_grad() # Forward pass to get output/logits outputs = model(images) # Calculate Loss: softmax --> cross entropy loss loss = criterion(outputs, labels) # Getting gradients w.r.t. parameters loss.backward() # Updating parameters optimizer.step() iter += 1 # Calculate Accuracy at every epoch correct = 0 total = 0 # Iterate through test dataset for images, labels in test_loader: # Load images images = images.view(-1, 28*28).to(device) # Forward pass only to get logits/output outputs = model(images) # Get predictions from the maximum value _, predicted = torch.max(outputs.data, 1) # Total number of labels total += labels.size(0) # Total correct predictions correct += (predicted.type(torch.FloatTensor).cpu() == labels.type(torch.FloatTensor)).sum() accuracy = 100. * correct.item() / total # if epoch 0, best accuracy is this if epoch == 0: val_acc = accuracy elif accuracy > val_acc: val_acc = accuracy # Save your model torch.save(model.state_dict(), './logs/best_model.pt') # Print Loss print('Iteration: {}. Loss: {}. Accuracy: {}. Best Accuracy: {}'.format(iter, loss.item(), accuracy, val_acc)) ''' STEP 9: TEST THE MODEL This model should produce the exact same best test accuracy! 
96.48% ''' # Load the model model.load_state_dict(torch.load('./logs/best_model.pt')) # Evaluate model model.eval() # Calculate Accuracy at every epoch correct = 0 total = 0 # Iterate through test dataset for images, labels in test_loader: # Load images images = images.view(-1, 28*28).to(device) # Forward pass only to get logits/output outputs = model(images) # Get predictions from the maximum value _, predicted = torch.max(outputs.data, 1) # Total number of labels total += labels.size(0) # Total correct predictions correct += (predicted.type(torch.FloatTensor).cpu() == labels.type(torch.FloatTensor)).sum() accuracy = 100. * correct.item() / total # Print Loss print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy)) ``` ## Overfitting Solution 3: Regularization ## Overfitting Solution 3a: Weight Decay (L2 Regularization) ## Overfitting Solution 3b: Dropout ## Overfitting Solution 4: Batch Normalization
github_jupyter
# Soccerstats Predictions v1.2 The changelog from v1.1: * Train on `train` data, and validate using `test` data. ## A. Data Cleaning & Preparation ### 1. Read csv file ``` # load and cache data stat_df = sqlContext.read\ .format("com.databricks.spark.csv")\ .options(header = True)\ .load("data/teamFixtures.csv")\ .cache() # from pyspark.sql.functions import isnan, when, count, col # count hyphen nulls ("-") per column # stat_df.select([count(when(stat_df[c] == "-", c)).alias(c) for c in stat_df.columns]).show() ``` ### 2. Filter-out "gameFtScore" column values ``` from pyspark.sql.functions import udf from pyspark.sql.types import StringType # replace non-"-" values with null: gameFtScore nullify_ft_scores = udf( lambda row_value: None if row_value != "-" else row_value, StringType() ) # replace "-" values with null: HTS_teamAvgOpponentPPG, ATS_teamAvgOpponentPPG nullify_hyphen_cols = udf( lambda row_value: None if row_value == "-" else row_value, StringType() ) stat_df = (stat_df.withColumn("gameFtScore", nullify_ft_scores(stat_df.gameFtScore))) stat_df = (stat_df.withColumn("HTS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.HTS_teamAvgOpponentPPG)) .withColumn("ATS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.ATS_teamAvgOpponentPPG)) ) # drop Null values stat_df = stat_df.dropna() stat_df.select("gameFtScore", "HTS_teamAvgOpponentPPG", "ATS_teamAvgOpponentPPG").show(5) print("Total rows: {}".format(stat_df.count())) ``` ### 3. Write-out new dataframe to Json ``` # optional: save to file # stat_df.coalesce(1).write.format('json').save('sstats_fixtures.json') ``` ### 4. Read fixtures Json to dataframe ``` fx_df = spark.read.json('data/fixtures1.json') fx_df.printSchema() ``` ### 5. 
Encode "fixture_id" on stat_df dataframe ``` import hashlib from pyspark.sql.functions import array def encode_string(value): return hashlib.sha1( value.encode("utf-8") ).hexdigest() # add an encoded col to "stat_df"; fixture_id fxcol_df = udf( lambda row_value: encode_string(u"".join([x for x in row_value])), StringType() ) stat_df = (stat_df.withColumn("fixture_id", fxcol_df(array( "leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName" )))) # display some encoded fixtures stat_df.select("fixture_id").show(5, False) ``` ### 6. Concat the two dataframes: "stat_df" and "fx_df" ``` from pyspark.sql.functions import col # use "left-outer-join" to concat full_df = stat_df.alias("a")\ .join(fx_df, stat_df.fixture_id == fx_df.fixture_id, "left_outer")\ .select(*[col("a."+c) for c in stat_df.columns] + [fx_df.ft_score]) full_df.select("leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName", "ft_score").show(5, False) ``` ### 7. Assess damage on "ft_score " nulls ``` # count nulls per column def count_null(df, col): return df.where(df[col].isNull()).count() print("Total rows: {}".format(full_df.count())) print("Ft_score nulls: {}".format(count_null(full_df, "ft_score"))) # drop null values in ft_Score full_df = full_df.dropna() print("Total rows: {}".format(full_df.count())) print("Ft_score nulls: {}".format(count_null(full_df, "ft_score"))) ``` ## B. Deep Learning ### 1. 
Clean data ``` # drop unnecessary columns ml_df = full_df.drop( "gameID", "gamePlayDate", "gamePlayTime", "gameHomeTeamName", "gameAwayTeamName", "gameHomeTeamID","gameAwayTeamID", "leagueName", "leagueDivisionName", "gameFtScore", "fixture_id" ) # separate col types: double & string # double type features dtype_features = [ "leagueCompletion", "HTS_teamPosition", "HTS_teamGamesPlayed", "HTS_teamGamesWon", "HTS_teamGamesDraw", "HTS_teamGamesLost", "HTS_teamGoalsScored", "HTS_teamGoalsConceded", "HTS_teamPoints", "HTS_teamPointsPerGame", "HTS_teamPPGlast8", "HTS_homeGamesWon", "HTS_homeGamesDraw", "HTS_homeGamesLost", "HTS_homeGamesPlayed", "HTS_awayGamesWon", "HTS_awayGamesDraw", "HTS_awayGamesLost", "HTS_awayGamesPlayed", "HTS_teamPPGHome", "HTS_teamPPGAway", "HTS_teamAvgOpponentPPG", "HTS_homeGoalMargin_by1_wins", "HTS_homeGoalMargin_by1_losses", "HTS_homeGoalMargin_by2_wins", "HTS_homeGoalMargin_by2_losses", "HTS_homeGoalMargin_by3_wins", "HTS_homeGoalMargin_by3_losses", "HTS_homeGoalMargin_by4p_wins", "HTS_homeGoalMargin_by4p_losses", "HTS_awayGoalMargin_by1_wins", "HTS_awayGoalMargin_by1_losses", "HTS_awayGoalMargin_by2_wins", "HTS_awayGoalMargin_by2_losses", "HTS_awayGoalMargin_by3_wins", "HTS_awayGoalMargin_by3_losses", "HTS_awayGoalMargin_by4p_wins", "HTS_awayGoalMargin_by4p_losses", "HTS_totalGoalMargin_by1_wins", "HTS_totalGoalMargin_by1_losses", "HTS_totalGoalMargin_by2_wins", "HTS_totalGoalMargin_by2_losses", "HTS_totalGoalMargin_by3_wins", "HTS_totalGoalMargin_by3_losses", "HTS_totalGoalMargin_by4p_wins", "HTS_totalGoalMargin_by4p_losses", "HTS_homeGoalsScored", "HTS_homeGoalsConceded", "HTS_homeGoalsScoredPerMatch", "HTS_homeGoalsConcededPerMatch", "HTS_homeScored_ConcededPerMatch", "HTS_awayGoalsScored", "HTS_awayGoalsConceded", "HTS_awayGoalsScoredPerMatch", "HTS_awayGoalsConcededPerMatch", "HTS_awayScored_ConcededPerMatch", "ATS_teamPosition", "ATS_teamGamesPlayed", "ATS_teamGamesWon", "ATS_teamGamesDraw", "ATS_teamGamesLost", 
"ATS_teamGoalsScored", "ATS_teamGoalsConceded", "ATS_teamPoints", "ATS_teamPointsPerGame", "ATS_teamPPGlast8", "ATS_homeGamesWon", "ATS_homeGamesDraw", "ATS_homeGamesLost", "ATS_homeGamesPlayed", "ATS_awayGamesWon", "ATS_awayGamesDraw", "ATS_awayGamesLost", "ATS_awayGamesPlayed", "ATS_teamPPGHome", "ATS_teamPPGAway", "ATS_teamAvgOpponentPPG", "ATS_homeGoalMargin_by1_wins", "ATS_homeGoalMargin_by1_losses", "ATS_homeGoalMargin_by2_wins", "ATS_homeGoalMargin_by2_losses", "ATS_homeGoalMargin_by3_wins", "ATS_homeGoalMargin_by3_losses", "ATS_homeGoalMargin_by4p_wins", "ATS_homeGoalMargin_by4p_losses", "ATS_awayGoalMargin_by1_wins", "ATS_awayGoalMargin_by1_losses", "ATS_awayGoalMargin_by2_wins", "ATS_awayGoalMargin_by2_losses", "ATS_awayGoalMargin_by3_wins", "ATS_awayGoalMargin_by3_losses", "ATS_awayGoalMargin_by4p_wins", "ATS_awayGoalMargin_by4p_losses", "ATS_totalGoalMargin_by1_wins", "ATS_totalGoalMargin_by1_losses", "ATS_totalGoalMargin_by2_wins", "ATS_totalGoalMargin_by2_losses", "ATS_totalGoalMargin_by3_wins", "ATS_totalGoalMargin_by3_losses", "ATS_totalGoalMargin_by4p_wins", "ATS_totalGoalMargin_by4p_losses", "ATS_homeGoalsScored", "ATS_homeGoalsConceded", "ATS_homeGoalsScoredPerMatch", "ATS_homeGoalsConcededPerMatch", "ATS_homeScored_ConcededPerMatch", "ATS_awayGoalsScored", "ATS_awayGoalsConceded", "ATS_awayGoalsScoredPerMatch", "ATS_awayGoalsConcededPerMatch", "ATS_awayScored_ConcededPerMatch" ] # string type features stype_features = [ "HTS_teamGoalsDifference", "HTS_teamCleanSheetPercent", "HTS_homeOver1_5GoalsPercent", "HTS_homeOver2_5GoalsPercent", "HTS_homeOver3_5GoalsPercent", "HTS_homeOver4_5GoalsPercent", "HTS_awayOver1_5GoalsPercent", "HTS_awayOver2_5GoalsPercent", "HTS_awayOver3_5GoalsPercent", "HTS_awayOver4_5GoalsPercent", "HTS_homeCleanSheets", "HTS_homeWonToNil", "HTS_homeBothTeamsScored", "HTS_homeFailedToScore", "HTS_homeLostToNil", "HTS_awayCleanSheets", "HTS_awayWonToNil", "HTS_awayBothTeamsScored", "HTS_awayFailedToScore", "HTS_awayLostToNil", 
"HTS_homeScored_ConcededBy_0", "HTS_homeScored_ConcededBy_1", "HTS_homeScored_ConcededBy_2", "HTS_homeScored_ConcededBy_3", "HTS_homeScored_ConcededBy_4", "HTS_homeScored_ConcededBy_5p", "HTS_homeScored_ConcededBy_0_or_1", "HTS_homeScored_ConcededBy_2_or_3", "HTS_homeScored_ConcededBy_4p", "HTS_awayScored_ConcededBy_0", "HTS_awayScored_ConcededBy_1", "HTS_awayScored_ConcededBy_2", "HTS_awayScored_ConcededBy_3", "HTS_awayScored_ConcededBy_4", "HTS_awayScored_ConcededBy_5p", "HTS_awayScored_ConcededBy_0_or_1", "HTS_awayScored_ConcededBy_2_or_3", "HTS_awayScored_ConcededBy_4p", "ATS_teamGoalsDifference", "ATS_teamCleanSheetPercent", "ATS_homeOver1_5GoalsPercent", "ATS_homeOver2_5GoalsPercent", "ATS_homeOver3_5GoalsPercent", "ATS_homeOver4_5GoalsPercent", "ATS_awayOver1_5GoalsPercent", "ATS_awayOver2_5GoalsPercent", "ATS_awayOver3_5GoalsPercent", "ATS_awayOver4_5GoalsPercent", "ATS_homeCleanSheets", "ATS_homeWonToNil", "ATS_homeBothTeamsScored", "ATS_homeFailedToScore", "ATS_homeLostToNil", "ATS_awayCleanSheets", "ATS_awayWonToNil", "ATS_awayBothTeamsScored", "ATS_awayFailedToScore", "ATS_awayLostToNil", "ATS_homeScored_ConcededBy_0", "ATS_homeScored_ConcededBy_1", "ATS_homeScored_ConcededBy_2", "ATS_homeScored_ConcededBy_3", "ATS_homeScored_ConcededBy_4", "ATS_homeScored_ConcededBy_5p", "ATS_homeScored_ConcededBy_0_or_1", "ATS_homeScored_ConcededBy_2_or_3", "ATS_homeScored_ConcededBy_4p", "ATS_awayScored_ConcededBy_0", "ATS_awayScored_ConcededBy_1", "ATS_awayScored_ConcededBy_2", "ATS_awayScored_ConcededBy_3", "ATS_awayScored_ConcededBy_4", "ATS_awayScored_ConcededBy_5p", "ATS_awayScored_ConcededBy_0_or_1", "ATS_awayScored_ConcededBy_2_or_3", "ATS_awayScored_ConcededBy_4p" ] # cast types to columns: doubles ml_df = ml_df.select(*[col(c).cast("double").alias(c) for c in dtype_features] + stype_features + [ml_df.ft_score]) # add extra column; over/under over_under_udf = udf( lambda r: "over" if (int(r.split("-")[0]) + int(r.split("-")[1])) > 2 else "under", StringType() 
def normalize_col(df, cols):
    """Z-score normalize the given columns of a Spark DataFrame.

    For each column ``c`` in *cols* a new column ``c + "_norm"`` is added
    holding ``(value - mean(c)) / stddev(c)``.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
        Input dataframe; a new dataframe with the extra columns is returned.
    cols : list[str]
        Names of numeric columns to normalize.

    Returns
    -------
    (DataFrame, Row, Row)
        The dataframe with the added ``*_norm`` columns, plus the per-column
        means and standard deviations (kept so the same scaling can be
        applied to unseen data later).
    """
    # Build mean/stddev aggregation expressions for every column.
    # NOTE: the loop variable is named `column` (not `col`) so it does not
    # shadow pyspark.sql.functions.col imported at module level.
    agg_means = []
    agg_stds = []
    for column in cols:
        agg_means.append(mean(df[column]).alias(column))
        agg_stds.append(stddev(df[column]).alias(column + "_stddev"))
    averages = df.agg(*agg_means).collect()[0]
    std_devs = df.agg(*agg_stds).collect()[0]
    # Append one standardized column per input column: (x - mean) / stddev.
    for column in cols:
        df = df.withColumn(column + "_norm",
                           ((df[column] - averages[column])
                            / std_devs[column + "_stddev"]))
    return df, averages, std_devs
Vectors.dense(r[:-1]))) # ).toDF() # label_fts.show(5) # label_fts.select("features").take(1) # split train/test values train, test = df_indexed.randomSplit([0.8, 0.2]) # split train/validate values train, validate = train.randomSplit([0.9, 0.1]) print("Train shape: '{}, {}'".format(train.count(), len(train.columns))) print("Test shape: '{}, {}'".format(test.count(), len(test.columns))) print("Validate shape: '{}, {}'".format(validate.count(), len(validate.columns))) ``` ### 3. Compose Neural-network ``` import numpy as np X = np.array(train.select(feature_cols).collect()) y = np.array(train.select("over_under").collect()) print("train features shape: '{}'".format(X.shape)) print("train labels shape: '{}'".format(y.shape)) X_test = np.array(test.select(feature_cols).collect()) y_test = np.array(test.select("over_under").collect()) print("test features shape: '{}'".format(X_test.shape)) print("test labels shape: '{}'".format(y_test.shape)) # get some Keras essentials from keras.models import Sequential from keras.layers import Dense, Dropout # build model model = Sequential() model.add(Dense(60, activation="relu", input_dim = 187)) model.add(Dropout(0.4)) model.add(Dense(50, activation="relu")) # output layer model.add(Dense(1, activation="sigmoid")) # compile & evaluate training model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(X, y, epochs=1000, batch_size=60) # evaluate the model scores = model.evaluate(X_test, y_test) print("{}: {}%".format(model.metrics_names[1], scores[1]*100)) print("Loss: {}".format(scores[0])) ```
github_jupyter
def show_values_on_bars(axs, h_v="v", space=0.4):
    """Annotate every bar of one or more axes with its integer value.

    ``h_v`` selects the bar orientation: "v" writes each bar's height above
    the bar, "h" writes each bar's width just past its right end (offset by
    ``space``).  ``axs`` may be a single Axes or a numpy array of Axes.
    """
    def _annotate(ax):
        for patch in ax.patches:
            if h_v == "v":
                # Center the label horizontally on the bar, at its top edge.
                x_pos = patch.get_x() + patch.get_width() / 2
                y_pos = patch.get_y() + patch.get_height()
                ax.text(x_pos, y_pos, int(patch.get_height()), ha="center")
            elif h_v == "h":
                # Place the label just beyond the bar end, offset by `space`.
                x_pos = patch.get_x() + patch.get_width() + float(space)
                y_pos = patch.get_y() + patch.get_height()
                ax.text(x_pos, y_pos, int(patch.get_width()), ha="left")

    if isinstance(axs, np.ndarray):
        # Annotate every subplot of a (possibly multi-dimensional) grid.
        for _, ax in np.ndenumerate(axs):
            _annotate(ax)
    else:
        _annotate(axs)
def trashcan_to_detectwaste(label):
    """Map a TrashCan category name onto a detect-waste super-category.

    Unrecognized labels are reported to stdout and mapped to "unknown".
    """
    category_map = {
        "trash_plastic": "metals_and_plastics",
        "trash_metal": "metals_and_plastics",
        "trash_fabric": "non-recyclable",
        "trash_rubber": "non-recyclable",
        "trash_paper": "non-recyclable",
        "trash_fishing_gear": "other",
        "trash_wood": "bio",
        "trash_etc": "unknown",
    }
    if label not in category_map:
        print(label, "is non-trashcan label")
        return "unknown"
    return category_map[label]
frames of 3 videos (very similar photos of the same objects) - annotation: bboxes
def trashicra_to_detectwaste(label):
    """Map a Trash-ICRA19 category name onto a detect-waste super-category.

    Unrecognized labels are reported to stdout and mapped to "unknown".
    """
    groups = {
        "metals_and_plastics": ("plastic", "metal", "rubber"),
        "non-recyclable": ("cloth", "paper"),
        "bio": ("wood",),
        "unknown": ("unknown",),
    }
    for detectwaste_label, members in groups.items():
        if label in members:
            return detectwaste_label
    print(label, "is non-trashicra label")
    return "unknown"
plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) ``` ## Drink waste - background: indoor - classes: 4 - comment: very similiar photos of the same objects - annotation: bboxes ``` dataDir='/dih4/dih4_2/wimlds/data/' dataType='drinking-waste/YOLO_imgs' annFile='{}/drinkwaste_coco.json'.format(dataDir) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['Glass']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = 
def drinkingwaste_to_detectwaste(label):
    """Map a drinking-waste class name onto a detect-waste super-category.

    Unrecognized labels are reported to stdout and mapped to "unknown".
    """
    lookup = {
        "PET": "metals_and_plastics",
        "HDPEM": "metals_and_plastics",
        "AluCan": "metals_and_plastics",
        "Glass": "glass",
    }
    try:
        return lookup[label]
    except KeyError:
        print(label, "is non-drinkingwaste label")
        return "unknown"
type_ann) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['Rubbish']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) ``` ## wade-ai - background: 
outside, google maps - classes: 1 - comment: roads and pavements - annotation: instance masks ``` dataDir='/dih4/dih4_2/wimlds/data/wade-ai' dataType='wade-ai_images' type_ann='all' annFile='{}/{}_wade_ai.json'.format(dataDir, type_ann) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['Rubbish']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns)#, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: ann['category_id'] = 1 cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, 
False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) with open('/dih4/dih4_home/smajchrowska/detect-waste/annotations/annotations_binary_train.json', 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id']-1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) len(imgs) ```
github_jupyter
def impute_age(cols):
    """Fill a missing Age with a class-specific fallback age.

    ``cols`` is an (Age, Pclass) pair as produced by
    ``df[['Age', 'Pclass']].apply(..., axis=1)``.  A non-null Age passes
    through unchanged; otherwise the fallback for the passenger class is
    returned (presumably estimated from the Age-by-Pclass boxplot — confirm).
    """
    age, pclass = cols[0], cols[1]
    if not pd.isnull(age):
        return age
    # Fallback ages for 1st and 2nd class; anything else gets 24.
    class_fallback_age = {1: 37, 2: 29}
    return class_fallback_age.get(pclass, 24)
sex = pd.get_dummies(train['Sex'], drop_first=True) sex_test = pd.get_dummies(test['Sex'], drop_first=True) embark = pd.get_dummies(train['Embarked'], drop_first=True) embark_test = pd.get_dummies(test['Embarked'], drop_first=True) train = pd.concat([train, sex, embark], axis=1) test = pd.concat([test, sex_test, embark_test], axis=1) train.head() test.head() names = test['Name'] names train.drop(['Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True) test.drop(['Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True) train.head() test.head() train.drop('PassengerId', axis=1, inplace=True) ##index_test = test.drop('PassengerId', axis=1, inplace=False) index_test = test['PassengerId'] index_test ##test.drop('PassengerId', axis=1, inplace=True) test.drop('PassengerId', axis=1, inplace=True) test ## Data filtered for machine learning algorithms x = train.drop('Survived', axis=1) y = train['Survived'] from sklearn.cross_validation import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1, random_state=300) from sklearn.linear_model import LogisticRegression logmodel = LogisticRegression() logmodel.fit(x_train, y_train) predictions = logmodel.predict(test) from sklearn.metrics import classification_report print(classification_report(index_test,predictions)) from sklearn.metrics import confusion_matrix confusion_matrix(index_test, predictions) predictions predictionsDataFrame = pd.DataFrame( data={'Name':names, 'Survived':predictions}) predictionsDataFrame predictionsDataFrame.to_csv('predictions_TitanicSurvival.csv', header=True, index_label='Index') ```
github_jupyter
## In this notebook we are going to Predict the Growth of Google Stock using LSTM Model and CRISP-DM. ``` #importing the libraries import math import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from keras.models import Sequential from keras.layers import Dense, LSTM import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') """For LSTM model please use Numpy --version = 1.19 or lower Cause latest Tensorflow array don't accept np tensors """ ``` # Data Understanding The data is already processed to price-split values so it is easy to analysis but we are creating new tables to optimize our model ``` #importing Price Split Data data = pd.read_csv('prices-split-adjusted.csv') data #checking data for null values data.isnull().sum() ``` # Data Preprocessing Creating Table for a specific Stock ``` #Initializing the Dataset for the Stock to be Analysized data = data.loc[(data['symbol'] == 'GOOG')] data = data.drop(columns=['symbol']) data = data[['date','open','close','low','volume','high']] data #Number of rows and columns we are working with data.shape ``` Ploting the closing price of the Stock ``` plt.figure(figsize=(16,8)) plt.title('Closing Price of the Stock Historically') plt.plot(data['close']) plt.xlabel('Year', fontsize=20) plt.ylabel('Closing Price Historically ($)', fontsize=20) plt.show() ``` #### Here we can see that there is Long-Term growth in this stock. # Preparing Data for LSTM Here we are going to use LSTM to more accurate prediction of the stock value change. We are checking for accuracy on a particular Stock. First we create a seperate dataframe only with "Close" cloumn ``` #Getting the rows and columns we need data = data.filter(['close']) dataset = data.values #Find out the number of rows that are present in this dataset in order to train our model. 
training_data_len = math.ceil(len(dataset) * .8)  # train on the first 80% of the series
training_data_len
```

Scaling the Data to make better Predictions

```
# Scale every close price into [0, 1] so the LSTM trains on comparable magnitudes
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
scaled_data

# Creating the training dataset: each sample is a 60-day window, the label is day 61
train_data = scaled_data[0:training_data_len, :]
x_train = []
y_train = []
for j in range(60, len(train_data)):
    x_train.append(train_data[j-60:j, 0])
    y_train.append(train_data[j, 0])
    if j <= 60:
        # Print the first window once as a sanity check
        print(x_train)
        print(y_train)
        print()

x_train, y_train = np.array(x_train), np.array(y_train)
# LSTM expects 3-D input: (samples, timesteps, features)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
```

# Building LSTM Model

```
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
```

##### Training the Model

```
model.fit(x_train, y_train, batch_size=1, epochs=1)

# Build the test windows, starting 60 days before the test split so the
# first test sample has a full look-back window
test_data = scaled_data[training_data_len - 60:, :]
x_test = []
y_test = dataset[training_data_len:, :]
for j in range(60, len(test_data)):
    x_test.append(test_data[j-60:j, 0])

x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)  # back to dollar prices

# Finding the Root Mean Squared Error for the Stock
# BUG FIX: each error must be squared *before* averaging;
# np.sqrt(np.mean(predictions - y_test)**2) only measured |mean error|,
# not the RMSE (the corrected formula is also the one used later on).
rmse = np.sqrt(np.mean((predictions - y_test)**2))
rmse
```

# Visualization

### Plotting Actual Close values vs Predicted Values in the LSTM Model

```
# Building close value and prediction value table for comparison.
# .copy() so adding the Predictions column does not raise a
# SettingWithCopyWarning on a view of `data`.
train = data[:training_data_len]
val = data[training_data_len:].copy()
val['Predictions'] = predictions
plt.figure(figsize=(16,8))
plt.title('LSTM Model Data')
plt.xlabel('Date', fontsize=16)
plt.ylabel('Close Price', fontsize=16)
plt.plot(train['close'])
plt.plot(val[['close', 'Predictions']])
plt.legend(['Trained Dataset', 'Actual Value', 'Predictions'])
plt.show()
```

# Evaluation of the model Making
table for Actual price and Predicted Price ``` #actual close values against predictions val new_data = pd.read_csv('prices-split-adjusted.csv') new_data = data.filter(['close']) last_60_days = new_data[-60:].values last_60_scaled = scaler.transform(last_60_days) X_test = [] X_test.append(last_60_scaled) X_test = np.array(X_test) X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) predicted_price = model.predict(X_test) predicted_price = scaler.inverse_transform(predicted_price) print('The predicted price of the final value of the dataset', predicted_price) new_data.tail(1) ``` #### The predicted price is USD 122.0, whereas the actual observed value is USD 115.82 ``` #check predicted values predictions = model.predict(x_test) #Undo scaling predictions = scaler.inverse_transform(predictions) #Calculate RMSE score rmse=np.sqrt(np.mean(((predictions- y_test)**2))) rmse neww_data = pd.read_csv('prices-split-adjusted.csv') val.describe() x = val.close.mean() y = val.Predictions.mean() Accuracy = x/y*100 print("The accuracy of the model is " , Accuracy) ``` The LSTM model Accuracy is 99.39% As we can see the predictions made by LSTM model show a greater accuracy than LR model. So we can finally conclude that the stock is going to grow for long-term.
github_jupyter
# Training a dense neural network The handwritten digit recognition is a classification problem. We will start with the simplest possible approach for image classification - a fully-connected neural network (which is also called a *perceptron*). We use `pytorchcv` helper to load all data we have talked about in the previous unit. ``` !wget https://raw.githubusercontent.com/MicrosoftDocs/pytorchfundamentals/main/computer-vision-pytorch/pytorchcv.py import torch import torch.nn as nn import torchvision import matplotlib.pyplot as plt import pytorchcv pytorchcv.load_mnist() ``` ## Fully-connected dense neural networks A basic **neural network** in PyTorch consists of a number of **layers**. The simplest network would include just one fully-connected layer, which is called **Linear** layer, with 784 inputs (one input for each pixel of the input image) and 10 outputs (one output for each class). ![A graph showing how an image is broken into layers based on the pixels.](./images/dense-onelayer-network.png) As we discussed above, the dimension of our digit images is $1\times28\times28$. Because the input dimension of a fully-connected layer is 784, we need to insert another layer into the network, called **Flatten**, to change tensor shape from $1\times28\times28$ to $784$. We want $n$-th output of the network to return the probability of the input digit being equal to $n$. Because the output of a fully-connected layer is not normalized to be between 0 and 1, it cannot be thought of as probability. To turn it into a probability we need to apply another layer called **Softmax**. In PyTorch, it is easier to use **LogSoftmax** function, which will also compute logarithms of output probabilities. To turn the output vector into the actual probabilities, we need to take **torch.exp** of the output. 
Thus, the architecture of our network can be represented by the following sequence of layers: ![An image showing the architecture of the network broken into a sequence of layers.](./images/onelayer-network-layers.png) It can be defined in PyTorch in the following way, using `Sequential` syntax: ``` net = nn.Sequential( nn.Flatten(), nn.Linear(784,10), # 784 inputs, 10 outputs nn.LogSoftmax()) ``` ## Training the network A network defined this way can take any digit as input and produce a vector of probabilities as an output. Let's see how this network performs by giving it a digit from our dataset: ``` print('Digit to be predicted: ',data_train[0][1]) torch.exp(net(data_train[0][0])) ``` As you can see the network predicts similar probabilities for each digit. This is because it has not been trained on how to recognize the digits. We need to give it our training data to train it on our dataset. To train the model we will need to create **batches** of our datasets of a certain size, let's say 64. PyTorch has an object called **DataLoader** that can create batches of our data for us automatically: ``` train_loader = torch.utils.data.DataLoader(data_train,batch_size=64) test_loader = torch.utils.data.DataLoader(data_test,batch_size=64) # we can use larger batch size for testing ``` The training process steps are as follows: 1. We take a minibatch from the input dataset, which consists of input data (features) and expected result (label). 2. We calculate the predicted result for this minibatch. 3. The difference between this result and expected result is calculated using a special function called the **loss function** 4. We calculate the gradients of this loss function with respect to model weights (parameters), which are then used to adjust the weights to optimize the performance of the network. The amount of adjustment is controlled by a parameter called **learning rate**, and the details of optimization algorithm are defined in the **optimizer** object. 5. 
We repeat those steps until the whole dataset is processed. One complete pass through the dataset is called **an epoch**.

Here is a function that performs one epoch training:

```
def train_epoch(net, dataloader, lr=0.01, optimizer=None, loss_fn=nn.NLLLoss()):
    """Train `net` for one epoch; return (mean loss per sample, accuracy)."""
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)
    net.train()
    total_loss, acc, count = 0, 0, 0
    for features, labels in dataloader:
        optimizer.zero_grad()
        out = net(features)
        loss = loss_fn(out, labels)  # cross_entropy(out,labels)
        loss.backward()
        optimizer.step()
        # BUG FIX: accumulate the Python float via .item() instead of the
        # loss tensor itself -- keeping the tensors alive retains autograd
        # references for every batch and wastes memory over the epoch.
        total_loss += loss.item()
        _, predicted = torch.max(out, 1)
        acc += (predicted == labels).sum()
        count += len(labels)
    return total_loss / count, acc.item() / count

train_epoch(net, train_loader)
```

Since this function is pretty generic we will be able to use it later in our other examples. The function takes the following parameters:
* **Neural network**
* **DataLoader**, which defines the data to train on
* **Loss Function**, which is a function that measures the difference between the expected result and the one produced by the network. In most of the classification tasks `NLLLoss` is used, so we will make it a default.
* **Optimizer**, which defines an *optimization algorithm*. The most traditional algorithm is *stochastic gradient descent*, but we will use a more advanced version called **Adam** by default.
* **Learning rate** defines the speed at which the network learns. During learning, we show the same data multiple times, and each time weights are adjusted. If the learning rate is too high, new values will overwrite the knowledge from the old ones, and the network would perform badly. If the learning rate is too small it results in a very slow learning process.
Here is what we do when training: * Switch the network to training mode (`net.train()`) * Go over all batches in the dataset, and for each batch do the following: - compute predictions made by the network on this batch (`out`) - compute `loss`, which is the discrepancy between predicted and expected values - try to minimize the loss by adjusting weights of the network (`optimizer.step()`) - compute the number of correctly predicted cases (**accuracy**) The function calculates and returns the average loss per data item, and training accuracy (percentage of cases guessed correctly). By observing this loss during training we can see whether the network is improving and learning from the data provided. It is also important to control the accuracy on the test dataset (also called **validation accuracy**). A good neural network with a lot of parameters can predict with decent accuracy on any training dataset, but it may poorly generalize to other data. That's why in most cases we set aside part of our data, and then periodically check how well the model performs on them. Here is the function to evaluate the network on test dataset: ``` def validate(net, dataloader,loss_fn=nn.NLLLoss()): net.eval() count,acc,loss = 0,0,0 with torch.no_grad(): for features,labels in dataloader: out = net(features) loss += loss_fn(out,labels) pred = torch.max(out,1)[1] acc += (pred==labels).sum() count += len(labels) return loss.item()/count, acc.item()/count validate(net,test_loader) ``` We train the model for several epochs observing training and validation accuracy. If training accuracy increases while validation accuracy decreases that would be an indication of **overfitting**. Meaning it will do well on your dataset but not on new data. Below is the training function that can be used to perform both training and validation. It prints the training and validation accuracy for each epoch, and also returns the history that can be used to plot the loss and accuracy on the graph. 
``` def train(net,train_loader,test_loader,optimizer=None,lr=0.01,epochs=10,loss_fn=nn.NLLLoss()): optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr) res = { 'train_loss' : [], 'train_acc': [], 'val_loss': [], 'val_acc': []} for ep in range(epochs): tl,ta = train_epoch(net,train_loader,optimizer=optimizer,lr=lr,loss_fn=loss_fn) vl,va = validate(net,test_loader,loss_fn=loss_fn) print(f"Epoch {ep:2}, Train acc={ta:.3f}, Val acc={va:.3f}, Train loss={tl:.3f}, Val loss={vl:.3f}") res['train_loss'].append(tl) res['train_acc'].append(ta) res['val_loss'].append(vl) res['val_acc'].append(va) return res # Re-initialize the network to start from scratch net = nn.Sequential( nn.Flatten(), nn.Linear(784,10), # 784 inputs, 10 outputs nn.LogSoftmax()) hist = train(net,train_loader,test_loader,epochs=5) ``` This function logs messages with the accuracy on training and validation data from each epoch. It also returns this data as a dictionary (called **history**). We can then visualize this data to better understand our model training. ``` plt.figure(figsize=(15,5)) plt.subplot(121) plt.plot(hist['train_acc'], label='Training acc') plt.plot(hist['val_acc'], label='Validation acc') plt.legend() plt.subplot(122) plt.plot(hist['train_loss'], label='Training loss') plt.plot(hist['val_loss'], label='Validation loss') plt.legend() ``` The diagram on the left shows the `training accuracy` increasing (which corresponds to the network learning to classify our training data better and better), while `validation accuracy` starts to fall. The diagram on the right show the `training loss` and `validation loss`, you can see the `training loss` decreasing (meaning its performing better) and the `validation loss` increasing (meaning its performing worse). These graphs would indicate the model is **overfitted**. ## Visualizing network weights Now lets visualize our weights of our neural network and see what they look like. 
When the network is more complex than just one layer it can be a difficult to visulize the results like this. However, in our case (classification of a digit) it happens by multiplying the initial image by a weight matrix allowing us to visualize the network weights with a bit of added logic. Let's create a `weight_tensor` which will have a dimension of 784x10. This tensor can be obtained by calling the `net.parameters()` method. In this example, if we want to see if our number is 0 or not, we will multiply input digit by `weight_tensor[0]` and pass the result through a softmax normalization to get the answer. This results in the weight tensor elements somewhat resembling the average shape of the digit it classifies: ``` weight_tensor = next(net.parameters()) fig,ax = plt.subplots(1,10,figsize=(15,4)) for i,x in enumerate(weight_tensor): ax[i].imshow(x.view(28,28).detach()) ``` ## Takeaway Training a neural network in PyTorch can be programmed with a training loop. It may seem like a complicated process, but in real life we need to write it once, and we can then re-use this training code later without changing it. We can see that a single-layer dense neural network shows relatively good performance, but we definitely want to get higher than 91% on accuracy! In the next unit, we will try to use multi-level perceptrons.
github_jupyter
# Gather HTML from Newsrooms After confirming all of the newsroom links, the next step is to figure out how to best iterate through the pages/tabs of these links, and collect all of the HTML from each page/tab of the company's newsroom. This HTML will contain the links to the press releases, which can then be used to gather the press release text and then begin to model. In this notebook, I limit the number of companies I am working with to just the top five Fortune 100 companies. However, I have included several different code blocks that future iterations of this project can use and expand upon to more easily include more companies. ## Imports ``` import pandas as pd from tqdm import tqdm import time from selenium import webdriver from selenium.webdriver.common.by import By import warnings warnings.filterwarnings('ignore') ``` ## Read in the data ``` # read in the data cos = pd.read_csv('../data/fortune_100_data_w_links.csv') ``` Because this project is a proof of concept, I am limiting the DataFrame to just the top five Fortune 100 companies. ``` cos = cos[cos['rank'] <=5] cos ``` ## Adding the `loop_url`, `type` and `page_type` columns Part of the overall process not seen in these notebooks is determining how each company website works. For instance, Walmart's newsroom has pages that can be iterated through, while Amazon's press releases are organized in a long list by year, which each year getting its own tab on its newsroom. For some of the companies not included in the reduced list, the structure of their page urls don't follow the same pattern. After examining the newsrooms for the top five Fortune 100 companies, I've saved the additional information needed to get us one step closer to our end goal in `company_loop_info.csv`. Below are the descriptions for what each column contains: * **company**: The name of the company. * **loop_url**: This column is the base of the url that the code can use to iterate through. 
* **type**: The category of iteration used - for the top five companies, the types are `pages` and `years` * **page_type**: This is used in a function created below to return the right ending as the code loops through the values. ``` loops = pd.read_csv('../data/company_loop_info.csv') loops cos = cos.merge(loops, on='company') cos ``` ## Get html for companies with `type` == `pages` I decided to split up this part of the data collection by `type` in order to keep the code blocks shorter and more manageable, rather than try to cram all of the code into one long block. Additionally, I believe this will make the code and these notebooks more adaptable for future iterations of this project that include more companies. ``` pages = cos[cos['type'] == 'pages'].reset_index(drop= True) pages ``` Another way I've made this code more flexible is by creating functions that can be used to get the end of the url for the iteration process. Other `page_type`s aren't necessarily as straightforward as adding on page number as a string at the end of a url. 
``` # create a function that will return the appropriate page ending def get_page_ending(i, page_type): if page_type == 'page': return str(i) # set up the webdriver options = webdriver.ChromeOptions() options.page_load_strategy = 'normal' options.add_argument('headless') options.add_experimental_option("excludeSwitches", ["enable-automation"]) options.add_experimental_option('useAutomationExtension', False) browser = webdriver.Chrome(options=options) browser.execute_cdp_cmd( 'Network.setUserAgentOverride', { "userAgent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \ Chrome/83.0.4103.53 Safari/537.36' }) # loop through each row in the `pages` dataframe for row in range(len(pages)): # create a list that can be appended to htmls = [] # get the page type as a variable page_type = pages.loc[row, 'page_type'] # get the url url = pages.loc[row, 'loop_url'] for i in tqdm(range(50)): try: # create a dictionary that we can add to page_html = {} # create a variable for the end of the page url, # calling on the previously created function ending = get_page_ending(i, page_type) # add the page number to the end of the url page_url = url + ending # open the browser browser.get(page_url) time.sleep(5) browser.execute_script("window.scrollTo(0, document.body.scrollHeight/2);") # add information for each row in case needed later page_html['company'] = pages.loc[row, 'company'] page_html['base_url'] = pages.loc[row, 'final'] page_html['url'] = page_url page_html['page_num'] = i # add the html to the dictionary page_html['html'] = browser.page_source # append the dictionary for this page to the list htmls.append(page_html) time.sleep(3) except: print() print(f"Company: {pages.loc[row, 'company']} | Web page: {i} | Page type: {pages.loc[row,'page_type']} | Status: Error" ) #create a dataframe and save locally html_df = pd.DataFrame(htmls) html_df.to_csv(f'../data/html/{pages.loc[row,"company"].replace(" ","_").lower()}_html.csv',index=False) ``` ## 
Getting html for companies with `type` == `years` ``` years = cos[cos['type'] == 'years'].reset_index(drop= True) years ``` Similar to the function I created above (i.e., `get_page_ending()`), some of the year endings are formatted differently than just the year as a string. Creating the function now makes this code more adaptable for future iterations. ``` def get_year_ending(i, page_type): if page_type == 'year': return str(i) # set up the webdriver options = webdriver.ChromeOptions() options.page_load_strategy = 'normal' options.add_argument('headless') options.add_experimental_option("excludeSwitches", ["enable-automation"]) options.add_experimental_option('useAutomationExtension', False) browser = webdriver.Chrome(options=options) browser.execute_cdp_cmd( 'Network.setUserAgentOverride', { "userAgent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \ Chrome/83.0.4103.53 Safari/537.36' }) # loop through each row in the `pages` dataframe for row in range(len(years)): # create a list that can be appended to htmls = [] # get the page type as a variable page_type = years.loc[row, 'page_type'] # get the url url = years.loc[row, 'loop_url'] for i in tqdm(range(2019,2022)): try: # create a dictionary that we can add to page_html = {} # create a variable for the end of the page url, # calling on the previously created function ending = get_year_ending(i, page_type) # add the page number to the end of the url page_url = url + ending # open the browser browser.get(page_url) time.sleep(5) browser.execute_script("window.scrollTo(0, document.body.scrollHeight/2);") # add information for each row in case needed later page_html['company'] = years.loc[row, 'company'] page_html['base_url'] = years.loc[row, 'final'] page_html['url'] = page_url page_html['page_num'] = i # add the html to the dictionary page_html['html'] = browser.page_source # append the dictionary for this page to the list htmls.append(page_html) time.sleep(3) except: print() 
print(f"Company: {years.loc[row, 'company']} | Web page: {i} | Page type: {years.loc[row,'page_type']} | Status: Error" ) #create a dataframe and save locally html_df = pd.DataFrame(htmls) html_df.to_csv(f'../data/html/{years.loc[row,"company"].replace(" ","_").lower()}_html.csv',index=False) ```
github_jupyter
![data-x](http://oi64.tinypic.com/o858n4.jpg) --- # Pandas Introduction ### with Stock Data and Correlation Examples **Author list:** Alexander Fred-Ojala & Ikhlaq Sidhu **References / Sources:** Includes examples from Wes McKinney and the 10min intro to Pandas **License Agreement:** Feel free to do whatever you want with this code ___ ## What Does Pandas Do? <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p1.jpg"> ## What is a Pandas Table Object? <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p2.jpg"> # Import packages ``` # import packages import pandas as pd # Extra packages import numpy as np import matplotlib.pyplot as plt # for plotting import seaborn as sns # for plotting and styling # jupyter notebook magic to display plots in output %matplotlib inline plt.rcParams['figure.figsize'] = (10,6) # make the plots bigger ``` # Part 1 ### Simple creation and manipulation of Pandas objects **Key Points:** Pandas has two / three main data types: * Series (similar to numpy arrays, but with index) * DataFrames (table or spreadsheet with Series in the columns) * Panels (3D version of DataFrame, not as common) ### It is easy to create a DataFrame ### We use `pd.DataFrame(**inputs**)` and can insert almost any data type as an argument **Function:** `pd.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)` Input data can be a numpy ndarray (structured or homogeneous), dict, or DataFrame. Dict can contain Series, arrays, constants, or list-like objects as the values. 
``` # Try it with an array np.random.seed(0) # set seed for reproducibility a1 = np.array(np.random.randn(3)) a2 = np.array(np.random.randn(3)) a3 = np.array(np.random.randn(3)) print (a1) print (a2) print (a3) # Create our first DataFrame w/ an np.array - it becomes a column df0 = pd.DataFrame(a1) print(type(df0)) df0 # DataFrame from list of np.arrays df0 = pd.DataFrame([a1, a2, a3]) df0 # notice that there is no column label, only integer values, # and the index is set automatically # DataFrame from 2D np.array ax = np.random.randn(9).reshape(3,3) ax df0 = pd.DataFrame(ax,columns=['rand_normal_1','Random Again','Third'], index=[100,200,99]) # we can also assign columns and indices, sizes have to match df0 # DataFrame from a Dictionary dict1 = {'A':a1, 'B':a2} df1 = pd.DataFrame(dict1) df1 # note that we now have columns without assignment # We can easily add another column (just as you add values to a dictionary) df1['C']=a3 df1 # We can add a list with strings and ints as a column df1['L'] = ["Something", 3, "words"] df1 ``` # Pandas Series object ### Like an np.array, but we can combine data types and it has its own index Note: Every column in a DataFrame is a Series ``` print(df1[['L','A']]) print(type(df1['L'])) df1 # We can also rename columns df1 = df1.rename(columns = {'L':'Renamed'}) df1 # We can delete columns del df1['C'] df1 # or drop columns df1.drop('A',axis=1,inplace=True) # does not change df1 if we don't set inplace=True df1 df1 # or drop rows df1.drop(0) # Example: view only one column df1['B'] # Or view several column df1[['B','Renamed']] ``` # Other ways of slicing In the 10 min Pandas Guide, you will see many ways to view, slice a dataframe * view/slice by rows, eg `df[1:3]`, etc. 
* view by index location, see `df.iloc` (iloc) * view by ranges of labels, ie index label 2 to 5, or dates feb 3 to feb 25, see `df.loc` (loc) * view a single row by the index `df.xs` (xs) or `df.ix` (ix) * filtering rows that have certain conditions * add column * add row * How to change the index and more... ``` print (df1[0:2]) # ok df1 df1.iloc[1,1] df1 ``` # Part 2 ## Finance example: Large Data Frames ### Now, lets get some data in CSV format. See https://www.quantshare.com/sa-43-10-ways-to-download-historical-stock-quotes-data-for-free ``` !ls data/ # We can download data from the web by using pd.read_csv # A CSV file is a comma seperated file # We can use this 'pd.read_csv' method with urls that host csv files base_url = 'https://google.com/finance?output=csv&q=' dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) dfg dfg.head() # show first five values dfg.tail(3) # last three dfg.columns # returns columns, can be used to loop over dfg.index # return ``` # Convert the index to pandas datetime object ``` dfg['Date'][0] type(dfg['Date'][0]) dfg.index = pd.to_datetime(dfg['Date']) # set index dfg.drop(['Date'],axis=1,inplace=True) dfg.head() print(type(dfg.index[0])) dfg.index[0] dfg.index dfg['2017-08':'2017-06'] ``` # Attributes & general statitics of a Pandas DataFrame ``` dfg.shape # 251 business days last year dfg.columns dfg.size # Some general statistics dfg.describe() # Boolean indexing dfg['Open'][dfg['Open']>1130] # check what dates the opening # Check where Open, High, Low and Close where greater than 1130 dfg[dfg>1000].drop('Volume',axis=1) # If you want the values in an np array dfg.values ``` ## .loc() ``` # Getting a cross section with .loc - BY VALUES of the index and columns # df.loc[a:b, x:y], by rows and column location # Note: You have to know indices and columns dfg.loc['2017-08-31':'2017-08-21','Open':'Low'] ``` ## .iloc() ``` # .iloc slicing at 
specific location - BY POSITION in the table # Recall: # dfg[a:b] by rows # dfg[[col]] or df[[col1, col2]] by columns # df.loc[a:b, x:y], by index and column values + location # df.iloc[3:5,0:2], numeric position in table dfg.iloc[1:4,3:5] # 2nd to 4th row, 4th to 5th column ``` ### More Basic Statistics ``` # We can change the index sorting dfg.sort_index(axis=0, ascending=True).head() # starts a year ago # sort by value dfg.sort_values(by='Open')[0:10] ``` # Boolean ``` dfg[dfg>1115].head(10) # we can also drop all NaN values dfg[dfg>1115].head(10).dropna() dfg2 = dfg # make a copy and not a view dfg2 is dfg ``` ### Setting Values ``` # Recall dfg.head(4) # All the ways to view # can also be used to set values # good for data normalization dfg['Volume'] = dfg['Volume']/100000.0 dfg.head(4) ``` ### More Statistics and Operations ``` # mean by column, also try var() for variance dfg.mean() dfg[0:5].mean(axis = 1) # row means of first five rows ``` # PlotCorrelation ### Load several stocks ``` # Reload dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfm = pd.read_csv('data/microsoft.csv').drop('Unnamed: 0',axis=1) # Google stock data dfn = pd.read_csv('data/nike.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb = pd.read_csv('data/boeing.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb.head() # Rename columns dfg = dfg.rename(columns = {'Close':'GOOG'}) #print (dfg.head()) dfa = dfa.rename(columns = {'Close':'AAPL'}) #print (dfa.head()) dfm = dfm.rename(columns = {'Close':'MSFT'}) #print (dfm.head()) dfn = dfn.rename(columns = {'Close':'NKE'}) #print (dfn.head()) dfb = dfb.rename(columns = {'Close':'BA'}) dfb.head(2) # Lets merge some tables # They will all merge on the common column Date df = dfg[['Date','GOOG']].merge(dfa[['Date','AAPL']]) df = df.merge(dfm[['Date','MSFT']]) df = df.merge(dfn[['Date','NKE']]) df = 
df.merge(dfb[['Date','BA']]) df.head() df['Date'] = pd.to_datetime(df['Date']) df = df.set_index('Date') df.head() df.plot() df['2017'][['NKE','BA']].plot() # show a correlation matrix (pearson) crl = df.corr() crl crl.sort_values(by='GOOG',ascending=False) s = crl.unstack() so = s.sort_values(ascending=False) so[so<1] df.mean() sim=df-df.mean() sim.tail() sim[['MSFT','BA']].plot() ```
github_jupyter
| Name | Surname | Student No | Department | |---|---|---|---| | Emin | Kartci | S014877 | EE Engineering | ## Emin Kartci #### Student ID: S014877 #### Department : Electrical & Electronics Engineering --- ### Semester Project - Foursquare & Restaurant Report --- #### This module is prepared for GUI --- ``` # To interact with user use ipywidgets library - Generate a simple GUI from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets import matplotlib.pyplot as plt import numpy as np ################################-- Function Description --################################# # Purpose: # This class represents a company. For other modules we will need its values. # Moreover, creating a class makes simple our code. # PROPERTIES: # # From constructor: # name -> Name of the company (String) # longitude -> To represent at the map (String) # latitude -> To represent at the map (String) # servicesList -> To compare with others (List) # averagePrice -> For income statement - Simulation (Float) # averageUnitCost -> For incoma statement - Simulation (Float) # salesVolume -> For incoma statement - Simulation (Float) # fixedCost -> For incoma statement - Simulation (Float) # taxRate -> For incoma statement - Simulation (Float) # # Calculate: # # contributionMargin -> For incoma statement - Simulation (Float) # revenue -> For incoma statement - Simulation (Float) # costOfGoodSold -> For incoma statement - Simulation (Float) # grossMargin -> For incoma statement - Simulation (Float) # taxes -> For incoma statement - Simulation (Float) # netIncome -> For incoma statement - Simulation (Float) # # BEHAVIOUR: # # print_company_description -> prints the company inputs to the console # print_income_statement -> prints the income statemnt to the console #################################-- END Function Description --############################## # Create a Company class class Company(): # Constuctor def 
__init__(self, name,longitude,latitude,servicesList,averagePrice,averageUnitCost,salesVolume,fixedCost,taxRate): self.name = name self.longitude = longitude self.latitude = latitude self.servicesList = servicesList self.averagePrice = averagePrice self.averageUnitCost = averageUnitCost self.salesVolume = salesVolume self.fixedCost = fixedCost self.taxRate = taxRate/100 # calculate remain properties self.contributionMargin = self.calculate_contribution_margin() self.revenue = self.calculate_revenue() self.costOfGoodSold = self.calculate_COGS() self.totalCost = self.calculate_total_cost() self.grossMargin = self.calculate_gross_margin() self.taxes = self.calculate_taxes() self.netIncome = self.calculate_net_income() def calculate_contribution_margin(self): return self.averagePrice - self.averageUnitCost def calculate_revenue(self): return self.averagePrice * self.salesVolume def calculate_COGS(self): return self.salesVolume * self.averageUnitCost def calculate_gross_margin(self): return self.revenue - self.costOfGoodSold def calculate_taxes(self): return self.grossMargin * self.taxRate def calculate_net_income(self): return self.grossMargin - self.taxes def calculate_total_cost(self): return self.costOfGoodSold + self.fixedCost ######################################################################## def print_company_description(self): companyDescription = """ Company Name: {} Location: - Longitude : {}° N - Latitude : {}° E Services: {} Average Price : {} Average Unit Cost : {} Sales Volume : {} Fixed Cost : {} Tax Rate : {} """.format(self.name,self.longitude,self.latitude,self.set_services_string(),self.averagePrice,self.averageUnitCost,self.salesVolume,self.fixedCost,self.taxRate) print(companyDescription) def set_services_string(self): serviesString = "" for index in range(1,len(self.servicesList)+1): serviesString += "{} - {}\n\t\t".format(index,self.servicesList[index-1]) return serviesString def print_income_statement(self): incomeStatementStr = """ 
========== {}'s MONTHLY INCOME STATEMENT ========== +------------------------------------------------------ | Unit Price : {} | Unit Cost : {} +------------------ | Contribution Margin : {} | Sales Volume : {} | Revenue : {} (Monthly) +------------------ | Cost of Goods Sold : {} (Monthly) | Total Fixed Cost : {} (Monthly) | Total Cost : {} +------------------ | Gross Margin : {} | Taxes : {} +------------------ | NET INCOME : {} +------------------------------------------------------ """.format(self.name,self.averagePrice,self.averageUnitCost,self.contributionMargin,self.salesVolume,self.revenue ,self.costOfGoodSold,self.fixedCost,self.totalCost,self.grossMargin,self.taxes,self.netIncome) print(incomeStatementStr) programLabel = widgets.Label('--------------------------> RESTAURANT SIMULATOR PROGRAM <--------------------------', layout=widgets.Layout(width='100%')) companyName = widgets.Text(description="Comp. Name",value="Example LTD",layout=widgets.Layout(width="50%")) longitude = widgets.Text(description="Longitude",value="48.8566",layout=widgets.Layout(width="30%")) latitude = widgets.Text(description="Latitude",value="2.3522",layout=widgets.Layout(width="30%")) br1Label = widgets.Label('-----------------------------------------------------------------------------------------------------', layout=widgets.Layout(width='100%')) servicesLabel = widgets.Label('Select Services:', layout=widgets.Layout(width='100%')) Dessertbox = widgets.Checkbox(False, description='Dessert') Saladbox = widgets.Checkbox(False, description='Salad') Drinkbox = widgets.Checkbox(False, description='Drink') br2Label = widgets.Label('-----------------------------------------------------------------------------------------------------', layout=widgets.Layout(width='100%')) expectedPriceLabel = widgets.Label('Expected Average Price:', layout=widgets.Layout(width='100%')) expectedAveragePrice = widgets.IntSlider(min=0, max=100, step=1, description='(Euro): ',value=0) expectedUnitCostLabel = 
widgets.Label('Expected Average Unit Cost:', layout=widgets.Layout(width='100%')) expectedUnitCost = widgets.IntSlider(min=0, max=100, step=1, description='(Euro): ',value=0) expectedSalesLabel = widgets.Label('Expected Sales Monthly:', layout=widgets.Layout(width='100%')) expectedSales = widgets.IntSlider(min=0, max=10000, step=1, description='(Euro): ',value=0) fixedCostLabel = widgets.Label('Fixed Costs:', layout=widgets.Layout(width='100%')) fixedCost = widgets.FloatText(value=10000, description='(Euro): ',color = 'blue') taxRateLabel = widgets.Label('Tax Rate:', layout=widgets.Layout(width='100%')) taxRate = widgets.FloatSlider(min=0, max=100, step=1, description='%: ',value=0) br3Label = widgets.Label('-----------------------------------------------------------------------------------------------------', layout=widgets.Layout(width='100%')) # create a string list bu considering checkbox widgets def set_service_list(): # create an empty list serviceList = [] # if it is checked if Dessertbox.value: # add to the list serviceList.append('Dessert') # if it is checked if Saladbox.value: # add to the list serviceList.append('Salad') # if it is checked if Drinkbox.value: # add to the list serviceList.append('Drink') # return the list return serviceList # display the widgets display(programLabel) display(companyName) display(longitude) display(latitude) display(br1Label) display(servicesLabel) display(Dessertbox) display(Saladbox) display(Drinkbox) display(br2Label) display(expectedPriceLabel) display(expectedAveragePrice) display(expectedUnitCostLabel) display(expectedUnitCost) display(expectedSalesLabel) display(expectedSales) display(fixedCostLabel) display(fixedCost) display(taxRateLabel) display(taxRate) display(br3Label) # create a company object company = Company(companyName.value,longitude.value,latitude.value,set_service_list(),expectedAveragePrice.value,expectedUnitCost.value,expectedSales.value,fixedCost.value,taxRate.value) # print income statement 
company.print_income_statement() company.plotting_price_cost() plt.plot(self.priceList, "g--") plt.plot(self.costList, "o--") plt.axhline(y=0, color='r', linewidth=0.5, linestyle='-') plt.axvline(x=0, color='r', linewidth=0.5, linestyle='-') plt.xlabel("Price"); plt.ylabel("Cost") plt.legend(["Corresponding Cost","Price"]) plt.title("Price vs. Cost") plt.grid() plt.show() x_labels = ["PROFIT", "Avg Price", "Avg Cost", "Contribution Margin", "Sales Vol"] plt.bar(x_labels, [96, 21.31, 10.53, 10.78, 899], color = "g") plt.legend(["Profit is shown as %, e.g, 96%"]) plt.show() ```
github_jupyter
# CNN for Classification --- In this notebook, we define **and train** an CNN to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist). ### Load the [data](http://pytorch.org/docs/master/torchvision/datasets.html) In this cell, we load in both **training and test** datasets from the FashionMNIST class. ``` # our basic libraries import torch import torchvision # data loading and transforming from torchvision.datasets import FashionMNIST from torch.utils.data import DataLoader from torchvision import transforms # The output of torchvision datasets are PILImage images of range [0, 1]. # We transform them to Tensors for input into a CNN ## Define a transform to read the data in as a tensor data_transform = transforms.ToTensor() # choose the training and test datasets train_data = FashionMNIST(root='./data', train=True, download=True, transform=data_transform) test_data = FashionMNIST(root='./data', train=False, download=True, transform=data_transform) # Print out some stats about the training and test data print('Train data, number of images: ', len(train_data)) print('Test data, number of images: ', len(test_data)) # prepare data loaders, set the batch_size ## TODO: you can try changing the batch_size to be larger or smaller ## when you get to training your network, see how batch_size affects the loss batch_size = 20 train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True) test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True) # specify the image classes classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ``` ### Visualize some training data This cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid. 
``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() print(images.shape) # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(batch_size): ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.squeeze(images[idx]), cmap='gray') ax.set_title(classes[labels[idx]]) ``` ### Define the network architecture The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/master/nn.html). For a convolutional neural network, we'll use a simple series of layers: * Convolutional layers * Maxpooling layers * Fully-connected (linear) layers You are also encouraged to look at adding [dropout layers](http://pytorch.org/docs/stable/nn.html#dropout) to avoid overfitting this data. --- ### TODO: Define the Net Define the layers of your **best, saved model from the classification exercise** in the function `__init__` and define the feedforward behavior of that Net in the function `forward`. Defining the architecture here, will allow you to instantiate and load your best Net. 
``` import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() # 1 input image channel (grayscale), 10 output channels/feature maps # 3x3 square convolution kernel self.conv1 = nn.Conv2d(1, 10, 3) self.pool = nn.MaxPool2d(2,2) self.conv2 = nn.Conv2d(10,20,3) self.fc1 = nn.Linear(20*5*5,30) self.fc1_dropout= nn.Dropout(p=0.4) self.fc2 = nn.Linear(30,10) ## TODO: Define the rest of the layers: # include another conv layer, maxpooling layers, and linear layers # also consider adding a dropout layer to avoid overfitting ## TODO: define the feedforward behavior def forward(self, x): # one activated conv layer x = F.relu(self.conv1(x)) x = self.pool(x) x = F.relu(self.conv2(x)) x = self.pool(x) x = x.view(x.size[0],-1) x = self.fc1_dropout(self.fc1(x)) x = self.fc2(x) # final output return x ``` ### Load a Trained, Saved Model To instantiate a trained model, you'll first instantiate a new `Net()` and then initialize it with a saved dictionary of parameters. This notebook needs to know the network architecture, as defined above, and once it knows what the "Net" class looks like, we can instantiate a model and load in an already trained network. You should have a trained net in `saved_models/`. ``` # instantiate your Net net = Net() # load the net parameters by name, uncomment the line below to load your model # net.load_state_dict(torch.load('saved_models/model_1.pt')) print(net) ``` ## Feature Visualization To see what your network has learned, make a plot of the learned image filter weights and the activation maps (for a given image) at each convolutional layer. ### TODO: Visualize the learned filter weights and activation maps of the convolutional layers in your trained Net Choose a sample input image and apply the filters in every convolutional layer to that image to see the activation map. 
``` # As a reminder, here is how we got the weights in the first conv layer (conv1), before weights = net.conv1.weight.data w = weights.numpy() ``` ### Question: Choose a filter from one of your trained convolutional layers; looking at these activations, what purpose do you think it plays? What kind of feature do you think it detects? ``` import cv2 dataiter = iter(test_loader) image,label = dataiter.next() image = image.numpy() idx = 11 img = np.squeeze(image[idx]) plt.imshow(img,cmap="gray") row = 2 column = 5*2 fig = plt.figure(figsize=(30,10)) for i in range(0,column*row): fig.add_subplot(row,column,i+1) if(i%2)==0: plt.imshow(w[int(i/2)][0],cmap="gray") else: c = cv2.filter2D(img,-1,w[int((i-1)/2)][0]) plt.imshow(c,cmap="gray") plt.show() ```
github_jupyter
``` # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) # Any results you write to the current directory are saved as output. import tensorflow as tf ## Load Data data = pd.read_csv("../input/loan.csv", low_memory=False) #data.info() #data.shape ## Clean data. clean_data = data.dropna(thresh=len(data),axis=1) #clean_data.shape list(clean_data) #clean_data.loan_status.str.contains("Fully Paid").astype(int) #clean_data.loan_status[clean_data.loan_status.str.contains("Fully Paid") == True] = 1 #clean_data.loan_status[clean_data.loan_status.str.contains("Fully Paid") == False] = 0 ## Remove data that does not meet the credit policy. 
clean_data = clean_data[clean_data.loan_status.str.contains("Does not meet the credit policy") == False] #clean_data.loan_status[clean_data.loan_status.str.contains("Fully Paid")].astype(int) clean_data.loan_status[clean_data.loan_status.str.contains("Fully Paid") == True] = 1 clean_data.loan_status[clean_data.loan_status.str.contains("Fully Paid") == False] = 0 clean_data.loan_status.unique() clean_data.shape clean_data_orig = clean_data.copy() list(clean_data) ## Split Data ratio = 0.7 msk = np.random.rand(len(clean_data)) < ratio train_data = clean_data[msk] test_data = clean_data[~msk] ## Use loan status as label for loan defaulters y_label['loan_status'] = clean_data['loan_status'][msk] y_test_label['loan_status'] = clean_data['loan_status'][~msk] train_data = train_data.select_dtypes(exclude=[np.object]) test_data = test_data.select_dtypes(exclude=[np.object]) len(train_data) len(test_data) #train_data['loan_amnt'].hist() ##Vizualization import matplotlib.pyplot as plt #train_data.plot() #plt.figure(); train_data.plot(); plt.legend(loc='best') #y_label[y_label.str.contains("Does not") == True].size list(train_data) #train_data.drop('id', axis=1, inplace=True) #train_data.drop('member_id', axis=1, inplace=True) train_data.drop('funded_amnt_inv', axis=1, inplace=True) #train_data.drop('url', axis=1, inplace=True) #train_data.drop('loan_status', axis=1, inplace=True) #train_data.drop('application_type', axis=1, inplace=True) #test_data.drop('id', axis=1, inplace=True) #test_data.drop('member_id', axis=1, inplace=True) test_data.drop('funded_amnt_inv', axis=1, inplace=True) #test_data.drop('url', axis=1, inplace=True) #test_data.drop('loan_status', axis=1, inplace=True) #test_data.drop('application_type', axis=1, inplace=True) train_data.shape # machine learning from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from 
sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier unique, counts = np.unique(msk, return_counts=True) counts y_label.shape # Logistic Regression logreg = LogisticRegression() logreg.fit(train_data, y_label) Y_pred = logreg.predict(test_data) acc_log = round(logreg.score(train_data, y_label) * 100, 2) acc_log train_data.info() import numpy as np def get_series_ids(x): '''Function returns a pandas series consisting of ids, corresponding to objects in input pandas series x Example: get_series_ids(pd.Series(['a','a','b','b','c'])) returns Series([0,0,1,1,2], dtype=int)''' values = np.unique(x) values2nums = dict(zip(values,range(len(values)))) return x.replace(values2nums) x = tf.placeholder(tf.float32, shape=[len(train_data), None]) y = tf.placeholder(tf.float32, shape=[None, 2]) W = tf.Variable(tf.zeros([len(train_data),2])) b = tf.Variable(tf.zeros([2])) learning_rate = 0.01 training_epochs = 25 batch_size = 100 display_step = 1 pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax # Minimize error using cross entropy cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) # Gradient Descent optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Initializing the variables init = tf.global_variables_initializer() # Launch the graph with tf.Session() as sess: sess.run(init) # Training cycle for epoch in range(training_epochs): avg_cost = 0. 
total_batch = len(train_data) # Loop over all batches for i in range(total_batch): batch_xs = train_data batch_ys = y_label # Run optimization op (backprop) and cost op (to get loss value) _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost += c / total_batch # Display logs per epoch step if (epoch+1) % display_step == 0: print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) print("Optimization Finished!") # Test model correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) # Calculate accuracy accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print("Accuracy:", accuracy.eval({x: test_data, y: y_test_labels})) ```
github_jupyter
### Run in python console import nltk; nltk.download('stopwords') ### Run in terminal or command prompt python3 -m spacy download en ### Import Packages The core packages used in this tutorial are re, gensim, spacy and pyLDAvis. Besides this we will also using matplotlib, numpy and pandas for data handling and visualization. Let’s import them. ``` import re import numpy as np import pandas as pd from pprint import pprint # Gensim import gensim import gensim.corpora as corpora from gensim.utils import simple_preprocess from gensim.models import CoherenceModel # https://radimrehurek.com/gensim/models/coherencemodel.html # Spacy for lemmatization import spacy #Plotting tools # conda install -c memex pyldavis import pyLDAvis import pyLDAvis.gensim # don't skip this import matplotlib.pyplot as plt %matplotlib inline ``` #### import data ``` dataframe = pd.read_csv('voted-kaggle-dataset.csv') #dataframe['Description'] dataframe.head() # !pip install seaborn import matplotlib.pyplot as plt import seaborn as sns from nltk import FreqDist def freq_words(text_data, terms): all_words = ' '.join([str(text) for text in text_data]) all_words = all_words.split() fdist = FreqDist(all_words) words_df = pd.DataFrame({'word':list(fdist.keys()), 'count':list(fdist.values())}) #selecting top 20 most frequent words most_frequent_word = words_df.nlargest(columns='count', n=terms) plt.figure(figsize=(20,5)) ax = sns.barplot(data=most_frequent_word, x='word', y='count') ax.set(ylabel='Count') plt.show() freq_words(dataframe['Description'], terms=30) from nltk.corpus import stopwords stop_words = stopwords.words('english') print(stop_words) stop_words.extend(['from', 'subject', 're', 'edu', 'use']) # Convert to list data = dataframe['Description'].tolist() # Remove Emails data = [re.sub('\S*@\S*\s?', '', str(sent)) for sent in data] # Remove new line characters data = [re.sub('\s+', ' ', sent) for sent in data] # Remove distracting single quotes data = [re.sub("\'", "", sent) for sent in 
data] pprint(data[:1]) freq_words(data, terms=30) def sent_to_words(sentences): for sentence in sentences: yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations data_words = list(sent_to_words(data)) print(data_words[:1]) # Build the bigram and trigram models bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases. trigram = gensim.models.Phrases(bigram[data_words], threshold=100) # Faster way to get a sentence clubbed as a trigram/bigram bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) # See trigram example print(trigram_mod[bigram_mod[data_words[0]]]) ``` ### remove the stopwords ``` def remove_stop_words(texts): nostop_data_words = list() for doc in texts: data = list() for word in simple_preprocess(str(doc)): if word not in stop_words: data.append(word) nostop_data_words.append(data) return nostop_data_words ``` #### make bigram, trigram ``` def make_bigrams(texts): return [bigram_mod[doc] for doc in texts] def make_trigram(texts): return [trigram_mod[bigram_mod[doc]] for doc in texts] ``` ### lemmatization ``` def lemmatization(texts, allowed_postags): texts_output = [] nlp = spacy.load('en', disable=['parser', 'ner']) for sent in texts: doc = nlp(" ".join(sent)) texts_output.append([token.lemma_ for token in doc if token.pos_ in allowed_postags]) return texts_output nostop_data_words = remove_stop_words(data_words) print(nostop_data_words[:1]) data_words_bigram = make_bigrams(nostop_data_words) data_lemma = lemmatization(texts = data_words_bigram, allowed_postags=['NOUN','ADJ','VERB','ADV']) print(data_lemma[:1]) freq_words(data_lemma, terms=30) ``` #### Gensim creates a unique id for each word in the document. The produced corpus shown above is a mapping of (word_id, word_frequency). ``` # Create dictionary # This is used as the input by the LDA model. 
id2word = corpora.Dictionary(data_lemma) texts = data_lemma corpus = [id2word.doc2bow(text) for text in texts] print(corpus[:1]) ``` #### # Human readable format of corpus (term-frequency) ``` [[(id2word[id], freq) for id, freq in corpora] for corpora in corpus[:1]] # Build LDA model LDA = gensim.models.ldamodel.LdaModel # lda_model = LDA(corpus=corpus, # id2word=id2word, # num_topics=20, # random_state=100, # update_every=1, # chunksize=100, # passes=10, # alpha='auto', # per_word_topics=True) from gensim.test.utils import datapath model_name = '/home/akash/Dev/Topic modeling/LDA_model' temp_file = datapath(model_name) # # Save model to disk. # lda_model.save(temp_file) # Load a potentially pretrained model from disk. lda_model = LDA.load(temp_file) # https://rare-technologies.com/what-is-topic-coherence/ # Compute Coherence Score # coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemma, dictionary=id2word, coherence='c_v') # coherence_lda = coherence_model_lda.get_coherence() # print('\nCoherence Score: ', coherence_lda) # a measure of how good the model is. lower the better. # Compute Perplexity #print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # Visualize the topics pyLDAvis.enable_notebook() vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) vis ``` In LDA models, each document is composed of multiple topics. But, typically only one of the topics is dominant. The below code extracts this dominant topic for each sentence and shows the weight of the topic and the keywords in a nicely formatted output. This way, you will know which document belongs predominantly to which topic. 
``` def format_topics_sentences(ldamodel, corpus, texts): # Init output sent_topics_df = pd.DataFrame() # Get main topic in each document for i, row_list in enumerate(ldamodel[corpus]): row = row_list[0] if ldamodel.per_word_topics else row_list #print(row) row = sorted(row, key=lambda x: (x[1]), reverse=True) # Get the Dominant topic, Perc Contribution and Keywords for each document for j, (topic_num, prop_topic) in enumerate(row): if j == 0: # => dominant topic wp = ldamodel.show_topic(topic_num) topic_keywords = ", ".join([word for word, prop in wp]) sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True) else: break sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords'] # Add original text to the end of the output contents = pd.Series(texts) sent_topics_df = pd.concat([sent_topics_df, contents], axis=1) return(sent_topics_df) df_topic_sents_keywords=format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_lemma) # Format df_dominant_topic = df_topic_sents_keywords.reset_index() df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text'] df_dominant_topic.head(10) ``` #### Frequency Distribution of Word Counts in Documents ``` doc_lens = [len(d) for d in df_dominant_topic.Text] # Plot plt.figure(figsize=(16,7), dpi=120) plt.hist(doc_lens, bins = 1000, color='green') plt.text(750, 450, "Mean : " + str(round(np.mean(doc_lens)))) plt.text(750, 400, "Median : " + str(round(np.median(doc_lens)))) plt.text(750, 350, "Stdev : " + str(round(np.std(doc_lens)))) plt.text(750, 300, "1%ile : " + str(round(np.quantile(doc_lens, q=0.01)))) plt.text(750, 250, "99%ile : " + str(round(np.quantile(doc_lens, q=0.99)))) plt.gca().set(xlim=(0, 1100), ylabel='Number of Documents', xlabel='Document Word Count') plt.tick_params(size=16) plt.xticks(np.linspace(0,1100,9)) plt.title('Distribution of Document Word Counts', 
fontdict=dict(size=22)) plt.show() import seaborn as sns import matplotlib.colors as mcolors cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS' fig, axes = plt.subplots(2,2,figsize=(16,14), dpi=160, sharex=True, sharey=True) for i, ax in enumerate(axes.flatten()): df_dominant_topic_sub = df_dominant_topic.loc[df_dominant_topic.Dominant_Topic == i, :] doc_lens = [len(d) for d in df_dominant_topic_sub.Text] ax.hist(doc_lens, bins = 1000, color=cols[i]) ax.tick_params(axis='y', labelcolor=cols[i], color=cols[i]) sns.kdeplot(doc_lens, color="black", shade=False, ax=ax.twinx()) ax.set(xlim=(0, 1000), xlabel='Document Word Count') ax.set_ylabel('Number of Documents', color=cols[i]) ax.set_title('Topic: '+str(i), fontdict=dict(size=16, color=cols[i])) fig.tight_layout() fig.subplots_adjust(top=0.90) plt.xticks(np.linspace(0,1000,9)) fig.suptitle('Distribution of Document Word Counts by Dominant Topic', fontsize=22) plt.show() ``` #### t-SNE Clustering Chart ``` # pip install bokeh # Get topic weights and dominant topics ------------ # from sklearn.manifold import TSNE # from bokeh.plotting import figure, output_file, show # from bokeh.models import Label # from bokeh.io import output_notebook # # Get topic weights # topic_weights = [] # for i, row_list in enumerate(lda_model[corpus]): # topic_weights.append([w for i, w in row_list[0]]) # # Array of topic weights # arr = pd.DataFrame(topic_weights).fillna(0).values # # Keep the well separated points (optional) # arr = arr[np.amax(arr, axis=1) > 0.35] # # Dominant topic number in each doc # topic_num = np.argmax(arr, axis=1) # # tSNE Dimension Reduction # tsne_model = TSNE(n_components=2, verbose=1, random_state=0, angle=.99, init='pca') # tsne_lda = tsne_model.fit_transform(arr) # # Plot the Topic Clusters using Bokeh # output_notebook() # n_topics = 4 # mycolors = np.array([color for name, color in mcolors.TABLEAU_COLORS.items()]) # plot = figure(title="t-SNE Clustering 
of {} LDA Topics".format(n_topics), # plot_width=900, plot_height=700) # plot.scatter(x=tsne_lda[:,0], y=tsne_lda[:,1], color=mycolors[topic_num]) # # show(plot) ```
github_jupyter
<img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/> # MAT281 ### Aplicaciones de la Matemática en la Ingeniería ## Módulo 04 ## Laboratorio Clase 02: Regresión Lineal ### Instrucciones * Completa tus datos personales (nombre y rol USM) en siguiente celda. * La escala es de 0 a 4 considerando solo valores enteros. * Debes _pushear_ tus cambios a tu repositorio personal del curso. * Como respaldo, debes enviar un archivo .zip con el siguiente formato `mXX_cYY_lab_apellido_nombre.zip` a alonso.ogueda@gmail.com, debe contener todo lo necesario para que se ejecute correctamente cada celda, ya sea datos, imágenes, scripts, etc. * Se evaluará: - Soluciones - Código - Que Binder esté bien configurado. - Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error. __Nombre__: Simón Masnú __Rol__: 201503026-K ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import altair as alt from sklearn import datasets, linear_model from sklearn.metrics import mean_squared_error, r2_score alt.themes.enable('opaque') %matplotlib inline ``` ## Ejercicio 1: Diabetes Realizar análisis de regresión a los datos de diabetes disponibles en scikit-learn ``` diabetes = datasets.load_diabetes() print(dir(diabetes)) ## Atributos print(diabetes.DESCR) diabetes_df = ( pd.DataFrame( diabetes.data, columns=diabetes.feature_names ) .assign(prog=diabetes.target) ) diabetes_df.head() ``` #### Pregunta 1 (1 pto): * ¿Por qué la columna de sexo tiene esos valores? * ¿Cuál es la columna a predecir? Respuesta: * La columna sexo tiene esos datos pues seguramente se le asignó un valor cuantitativo a cada sexo y luego, como se menciona en la descripición del dataset, se normalizaron los datos de esa columna de modo que la suma de los cuadrados de la columna sea 1. * La columna a predecir es "prog", una medida cuantitativa de la progresión de la enfermedad a lo largo de un año. 
#### Pregunta 2 (1 pto) Realiza una regresión lineal con todas las _features_ incluyendo intercepto. ``` X = diabetes_df.drop("prog",axis=1).values y = diabetes_df["prog"].values print(X) ``` Ajusta el modelo ``` from sklearn.linear_model import LinearRegression regr = LinearRegression(fit_intercept=True) regr.fit(X,y) regr.coef_ ``` Imprime el intercepto y los coeficientes luego de ajustar el modelo. ``` print(f"Intercept: \n{ regr.intercept_ }\n") print(f"Coefficients: \n{regr.coef_}\n") ``` Haz una predicción del modelo con los datos `X`. ``` y_pred=regr.predict(X) ``` Calcula e imprime el error cuadrático medio y el coeficiente de determinación de este modelo ajustado. ``` # Error cuadrático medio print(f"Mean squared error: {mean_squared_error(y, y_pred):.2f}\n") # Coeficiente de determinación print(f"Coefficient of determination: {r2_score(y,y_pred):.2f}") ``` **Pregunta: ¿Qué tan bueno fue el ajuste del modelo?** Basandonos en el coeficiente de determinación, podemos decir que el 52% de la variabilidad de los datos es explicada por el modelo, lo es bastante pobre. Por regla general, un modelo se puede considerar bueno con un $r^{2}$ en torno al 0.9 o mayor. ### Pregunta 3 (2 ptos). Realizar multiples regresiones lineales utilizando una sola _feature_ a la vez. En cada iteración: - Crea un arreglo `X`con solo una feature filtrando `X`. - Crea un modelo de regresión lineal con intercepto. - Ajusta el modelo anterior. - Genera una predicción con el modelo. - Calcula e imprime las métricas de la pregunta anterior. ``` for i in range(X.shape[1]): X_i = X[:, np.newaxis, i] # Protip! 
Trata de entender este paso por tu cuenta, es muy clever regr_i = LinearRegression(fit_intercept=True) regr_i.fit(X_i,y) y_pred_i = regr_i.predict(X_i) print(f"{diabetes_df.columns[i]}:") print(f"\tCoefficients: {regr.coef_}") print(f"\tIntercept: {regr.intercept_}") print(f"\tMean squared error: {mean_squared_error(y,y_pred_i):.2f}") print(f"\tCoefficient of determination: {r2_score(y,y_pred_i):.2f}\n") ``` **Si tuvieras que escoger una sola _feauture_, ¿Cuál sería? ¿Por qué?** Escogería el bmi pues es el que presenta menor error medio cuadrático y mayor coeficiente de determinación Con la feature escogida haz el siguiente gráfico: - Scatter Plot - Eje X: Valores de la feature escogida. - Eje Y: Valores de la columna a predecir (target). - En color rojo dibuja la recta correspondiente a la regresión lineal (utilizando `intercept_`y `coefs_`). - Coloca un título adecuado, nombre de los ejes, etc. Puedes utilizar `matplotlib` o `altair`, el que prefiera. ``` def ajuste(x): return x*regr.coef_ + regr.intercept_ x = diabetes_df["bmi"].values[:, np.newaxis] regr=LinearRegression(fit_intercept=True) regr.fit(x,y) fig=plt.figure(figsize=(20, 12)) aj=plt.plot(x,ajuste(x),'r') sc=plt.scatter(x,y) plt.title('Ajuste lineal de la Progresión de la enfermedad en función del BMI') plt.ylabel('Progesión') plt.xlabel('BMI') ```
github_jupyter
``` import argparse import glob import io import os import random import numpy from PIL import Image, ImageFont, ImageDraw from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter SCRIPT_PATH = os.path.dirname(os.path.abspath('./hangul-WR')) # Default data paths. DEFAULT_LABEL_FILE = os.path.join(SCRIPT_PATH, './labels/2350-common-hangul.txt') DEFAULT_FONTS_DIR = os.path.join(SCRIPT_PATH, './fonts') DEFAULT_OUTPUT_DIR = os.path.join(SCRIPT_PATH, './image-data') # Number of random distortion images to generate per font and character. DISTORTION_COUNT = 3 # Width and height of the resulting image. IMAGE_WIDTH = 64 IMAGE_HEIGHT = 64 def generate_hangul_images(label_file, fonts_dir, output_dir): """Generate Hangul image files. This will take in the passed in labels file and will generate several images using the font files provided in the font directory. The font directory is expected to be populated with *.ttf (True Type Font) files. The generated images will be stored in the given output directory. Image paths will have their corresponding labels listed in a CSV file. """ with io.open(label_file, 'r', encoding='utf-8') as f: labels = f.read().splitlines() image_dir = os.path.join(output_dir, 'hangul-images') if not os.path.exists(image_dir): os.makedirs(os.path.join(image_dir)) # Get a list of the fonts. fonts = glob.glob(os.path.join(fonts_dir, '*.ttf')) labels_csv = io.open(os.path.join(output_dir, 'labels-map.csv'), 'w', encoding='utf-8') total_count = 0 prev_count = 0 for character in labels: # Print image count roughly every 5000 images. 
if total_count - prev_count > 5000: prev_count = total_count print('{} images generated...'.format(total_count)) for font in fonts: total_count += 1 image = Image.new('L', (IMAGE_WIDTH, IMAGE_HEIGHT), color=0) font = ImageFont.truetype(font, 48) drawing = ImageDraw.Draw(image) w, h = drawing.textsize(character, font=font) drawing.text( ((IMAGE_WIDTH-w)/2, (IMAGE_HEIGHT-h)/2), character, fill=(255), font=font ) file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) for i in range(DISTORTION_COUNT): total_count += 1 file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) arr = numpy.array(image) distorted_array = elastic_distort( arr, alpha=random.randint(30, 36), sigma=random.randint(5, 6) ) distorted_image = Image.fromarray(distorted_array) distorted_image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) print('Finished generating {} images.'.format(total_count)) labels_csv.close() def elastic_distort(image, alpha, sigma): """Perform elastic distortion on an image. Here, alpha refers to the scaling factor that controls the intensity of the deformation. The sigma variable refers to the Gaussian filter standard deviation. 
""" random_state = numpy.random.RandomState(None) shape = image.shape dx = gaussian_filter( (random_state.rand(*shape) * 2 - 1), sigma, mode="constant" ) * alpha dy = gaussian_filter( (random_state.rand(*shape) * 2 - 1), sigma, mode="constant" ) * alpha x, y = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1])) indices = numpy.reshape(y+dy, (-1, 1)), numpy.reshape(x+dx, (-1, 1)) return map_coordinates(image, indices, order=1).reshape(shape) # label_file = './labels/2350-common-hangul.txt' label_file = DEFAULT_LABEL_FILE # fonts_dir = './fonts' fonts_dir = DEFAULT_FONTS_DIR # output_dir = './image-data' output_dir = DEFAULT_OUTPUT_DIR generate_hangul_images(label_file, fonts_dir, output_dir) ```
github_jupyter
# PosTagging and Named Entity Recognition (NER) We consider some texts from QA SQuAD collection to annotate for its characterization with PosTagging and Named Entity Reconigtion (NER) open source frameworks: treetagger, Stanford CoreNLP, spacy, stanza ### Example texts ``` question_example = 'When was the Tower Theatre built?' response_example = '1939' context_example = 'The popular neighborhood known as the Tower District is centered around the historic Tower Theatre, which is included on the National List of Historic Places. The theater was built in 1939 and is at Olive and Wishon Avenues in the heart of the Tower District. (The name of the theater refers to a well-known landmark water tower, which is actually in another nearby area). The Tower District neighborhood is just north of downtown Fresno proper, and one-half mile south of Fresno City College. Although the neighborhood was known as a residential area prior, the early commercial establishments of the Tower District began with small shops and services that flocked to the area shortly after World War II. The character of small local businesses largely remains today. To some extent, the businesses of the Tower District were developed due to the proximity of the original Fresno Normal School, (later renamed California State University at Fresno). In 1916 the college moved to what is now the site of Fresno City College one-half mile north of the Tower District.' amazon_context_example= "The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. 
The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain 'Amazonas' in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species." beyonce_context= 'In August, the couple attended the 2011 MTV Video Music Awards, at which Beyoncé performed "Love on Top" and started the performance saying "Tonight I want you to stand up on your feet, I want you to feel the love that\'s growing inside of me". At the end of the performance, she dropped her microphone, unbuttoned her blazer and rubbed her stomach, confirming her pregnancy she had alluded to earlier in the evening. Her appearance helped that year\'s MTV Video Music Awards become the most-watched broadcast in MTV history, pulling in 12.4 million viewers; the announcement was listed in Guinness World Records for "most tweets per second recorded for a single event" on Twitter, receiving 8,868 tweets per second and "Beyonce pregnant" was the most Googled term the week of August 29, 2011.' 
``` ### PosTagging #### TreeTagger ``` # https://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/ import treetaggerwrapper tagger = treetaggerwrapper.TreeTagger(TAGLANG='en', TAGDIR='C:\\Users\\larao_000\\Documents\\nlp\\tree-tagger-windows-3.2.3\\TreeTagger\\') def pos_tagging(text, max_length=1000): results = [] for i in range(0, len(text), max_length): partial_text = text[i:i+max_length] tags = tagger.tag_text(partial_text) results += treetaggerwrapper.make_tags(tags) return results %%time pos_tagging(question_example) %%time pos_tagging(response_example) print(pos_tagging('Which name is also used to describe the Amazon rainforest in English?')) print(pos_tagging('also known in English as Amazonia or the Amazon Jungle,')) print(pos_tagging('Jay Z and Beyonce attended which event together in August of 2011?')) print(pos_tagging('MTV Video Music Awards')) %%time pos_tagging(context_example) ``` #### Spacy ``` import spacy nlp_spacy = spacy.load("en_core_web_sm") def pos_tagging_spacy(nlp, text): postags = [] doc = nlp(text) for token in doc: postags.append((token.text, token.lemma_, token.pos_, token.tag_, token.dep_, token.shape_, token.is_alpha, token.is_stop)) return postags %%time pos_tagging_spacy(nlp_spacy, question_example) %%time pos_tagging_spacy(nlp_spacy, response_example) %%time pos_tagging_spacy(nlp_spacy, context_example) ``` #### Stanza ``` #!pip install stanza import stanza #stanza.download('en') nlp = stanza.Pipeline('en') def pos_tagging_stanza(nlp, text): postags = [] doc = nlp(text) for sent in doc.sentences: for token in sent.words: postags.append((token.text, token.upos, token.xpos, token.feats)) return postags %%time pos_tagging_stanza(nlp, question_example) %%time pos_tagging_stanza(nlp, response_example) %%time pos_tagging_stanza(nlp, context_example) def ner_stanza(nlp, text): nertags = [] doc = nlp(text) for token in doc.ents: nertags.append((token.text, token.type)) return nertags print(ner_stanza(nlp, 'Which name is also used to 
describe the Amazon rainforest in English?')) print(ner_stanza(nlp, 'also known in English as Amazonia or the Amazon Jungle,')) print(ner_stanza(nlp, 'Jay Z and Beyonce attended which event together in August of 2011?')) print(ner_stanza(nlp, 'MTV Video Music Awards')) print(ner_stanza(nlp, question_example)) print(ner_stanza(nlp, response_example)) print(ner_stanza(nlp, context_example)) print(ner_stanza(nlp, amazon_context_example)) print(ner_stanza(nlp, beyonce_context)) print(ner_stanza(nlp, 'This poster of Madrid costs 3 euros during 3 hours with 5% of discount to first buyers')) ``` ### Stanford Core NLP NER ``` #from stanfordnlp.server import CoreNLPClient from stanfordcorenlp import StanfordCoreNLP import re def preprocess_text(text_str): regular_expr = re.compile('\n|\r|\t|\(|\)|\[|\]|:|\,|\;|"|\?|\-|\%') text_str = re.sub(regular_expr, ' ', text_str) token_list = text_str.split(' ') token_list = [element for element in token_list if element] return ' '.join(token_list) def filter_ner_relevant(tuple_list): ner_dictionary = {} previous_ner = 'O' for element in tuple_list: if element[1] != 'O': if element[1] == previous_ner: ner_dictionary[element[1]][-1] += ' ' + element[0] elif element[1] in ner_dictionary.keys(): ner_dictionary[element[1]].append(element[0]) else: ner_dictionary[element[1]] = [element[0]] previous_ner = element[1] return ner_dictionary ``` Start server with command: java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -annotators "tokenize,ssplit ,pos,lemma,parse,ner,sentiment" -port 9000 -timeout 30000 ``` # https://www.khalidalnajjar.com/setup-use-stanford-corenlp-server-python/ # https://stanfordnlp.github.io/CoreNLP/index.html#download # https://stanfordnlp.github.io/stanfordnlp/corenlp_client.html nlp = StanfordCoreNLP('http://localhost', port=9000, timeout=30000) #filter_ner_relevant(nlp.ner(preprocess_text(question_example))) nlp.ner(preprocess_text(question_example)) 
filter_ner_relevant(nlp.ner(preprocess_text(response_example))) #filter_ner_relevant(nlp.ner(preprocess_text(context_example))) print(filter_ner_relevant(nlp.ner(preprocess_text('Which name is also used to describe the Amazon rainforest in English?')))) print(filter_ner_relevant(nlp.ner(preprocess_text('also known in English as Amazonia or the Amazon Jungle,')))) filter_ner_relevant(nlp.ner(preprocess_text(amazon_context_example))) print(filter_ner_relevant(nlp.ner(preprocess_text('Jay Z and Beyonce attended which event together in August of 2011?')))) print(filter_ner_relevant(nlp.ner(preprocess_text('MTV Video Music Awards')))) filter_ner_relevant(nlp.ner(preprocess_text(beyonce_context))) #example_tosend = preprocess_text(example) #result = nlp.ner(example_tosend) #print(result) #filter_ner_relevant(result) ``` ## Spacy NER ``` import spacy nlp_spacy = spacy.load("en_core_web_sm") spacy.explain('FAC') spacy.displacy.render(nlp_spacy(context_example), style='ent', jupyter=True) spacy.displacy.render(nlp_spacy(amazon_context_example), style='ent', jupyter=True) spacy.displacy.render(nlp_spacy(beyonce_context), style='ent', jupyter=True) # https://spacy.io/api/annotation#named-entities # https://spacy.io/usage/linguistic-features#named-entities # 'PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART', # 'LAW', 'LANGUAGE', 'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL' def detect_entities(nlp, text, ner_tag): entities = [] doc = nlp(text) for ent in doc.ents: if ent.label_ in ner_tag: entities.append(ent.text) return entities result = detect_entities(nlp_spacy, context_example, ['PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART', 'LAW', 'LANGUAGE', 'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL']) print(context_example) print(result) people_entities = detect_entities(nlp_spacy, context_example, 'PERSON') print('PERSON: ' + str(people_entities)) norp_entities = 
detect_entities(nlp_spacy, context_example, 'NORP') print('NORP: ' + str(norp_entities)) fac_entities = detect_entities(nlp_spacy, context_example, 'FAC') print('FAC: ' + str(fac_entities)) org_entities = detect_entities(nlp_spacy, context_example, 'ORG') print('ORG: ' + str(org_entities)) gpe_entities = detect_entities(nlp_spacy, context_example, 'GPE') print('GPE: ' + str(gpe_entities)) loc_entities = detect_entities(nlp_spacy, context_example, 'LOC') print('LOC: ' + str(loc_entities)) product_entities = detect_entities(nlp_spacy, context_example, 'PRODUCT') print('PRODUCT: ' + str(product_entities)) event_entities = detect_entities(nlp_spacy, context_example, 'EVENT') print('EVENT: ' + str(event_entities)) workofart_entities = detect_entities(nlp_spacy, context_example, 'WORK_OF_ART') print('WORK_OF_ART: ' + str(workofart_entities)) lang_entities = detect_entities(nlp_spacy, context_example, 'LANGUAGE') print('LANGUAGE: ' + str(lang_entities)) date_entities = detect_entities(nlp_spacy, context_example, 'DATE') print('DATE: ' + str(date_entities)) time_entities = detect_entities(nlp_spacy, context_example, 'TIME') print('TIME: ' + str(time_entities)) percent_entities = detect_entities(nlp_spacy, context_example, 'PERCENT') print('PERCENT: ' + str(percent_entities)) money_entities = detect_entities(nlp_spacy, context_example, 'MONEY') print('MONEY: ' + str(money_entities)) quantity_entities = detect_entities(nlp_spacy, context_example, 'QUANTITY') print('QUANTITY: ' + str(quantity_entities)) cardinal_entities = detect_entities(nlp_spacy, context_example, 'CARDINAL') print('CARDINAL: ' + str(cardinal_entities)) ordinal_entities = detect_entities(nlp_spacy, context_example, 'ORDINAL') print('ORDINAL: ' + str(ordinal_entities)) ```
github_jupyter
``` import sympy as sp from sympy.parsing.sympy_parser import parse_expr import pandas as pd def get_lines(filename): file = open(filename, 'r+') lines = file.readlines() # lines = map(lambda line : line[:-1],lines) file.close() return lines lines = get_lines('./tests/ejercicio1.txt') syntax = pd.DataFrame(data=lines, columns=['line']) syntax.head() syntax["length"] = syntax["line"].map(lambda line: len(line)) syntax.head() def get_type(line): #If not line.lower().find('x') returns true if x starts at line[0] if not line.lower().find('inicio'): return "inicio" if not line.lower().find('pare'): return "pare" if not line.lower().find('para'): return "para" if not line.lower().find('lea'): return "lea" if not line.lower().find('esc'): return "esc" if not line.lower().find('fpara'): return "fpara" if not line.lower().find('sino'): return "sino" if not line.lower().find('si'): return "si" if not line.lower().find('fsi'): return "fsi" if line.lower().find('='): return "assignment" return "Indefinite so far" syntax["type"] = syntax["line"].map(get_type) syntax.head(len(syntax)) def process_for(line): raw_data = line[line.index('=')+1:] split_data = raw_data.split(',') print(split_data) lower_bound = parse_expr(split_data[0]) upper_bound = parse_expr(split_data[1]) increment = parse_expr(split_data[2]) return { 'lower_b' : lower_bound, 'upper_b' : upper_bound, 'inc' : increment } test_for_line = 'Para i=100,a+b,-10' process_for(test_for_line) def process_if(line): comparisons = [pos for pos, char in enumerate(line) if char == '('] return { 'comparisons' : len(comparisons) } def new_get_statement_runtime(syntax): lines_dict_list = lines = syntax.to_dict('records') order = 0 for index in range(len(lines_dict_list)): line = lines[index] # print(line['type']) line_type = line['type'] if line_type in ('inicio','pare','sino'): #order does not change line['runtime'] = 0 # line['data'] = 'Control statement' line['order'] = order elif line_type in ("assignment",'lea','esc'): 
line['runtime'] = 1 # line['data'] = 'Assignment i/o' line['order'] = order elif line_type in ('fsi','fpara'): order -= 1 line['runtime'] = 0 # line['data'] = 'End of block' line['order'] = order elif line_type is 'para': line['runtime'] = 'Nan' line['data'] = process_for(line['line']) line['order'] = order order += 1 elif line_type is 'si': line['runtime'] = 'Nan' #Number of comparisons + instructions inside line['data'] = process_if(line['line']) line['order'] = order order +=1 # print(f'Line : {line}') return pd.DataFrame.from_dict(lines) new_syntax = new_get_statement_runtime(syntax) new_syntax.head(len(new_syntax)) new_line = 'Si (i==j) y (j==1)' process_if(new_line) def get_if_block_runtime(block_lines): runtime = 0 for line in block_lines: runtime += line['runtime'] return runtime def get_if_blocks_runtime(syntax): lines_dict_list = lines = syntax.to_dict('records') if_indices = [pos for pos, line in enumerate(lines) if line['type'] is 'si'] else_indices = [pos for pos, line in enumerate(lines) if line['type'] is 'sino'] end_if_indices = [pos for pos, line in enumerate(lines) if line['type'] is 'fsi'] # done = False #Let's begin by processing the ifs statements if_statements = [] for x, if_index in enumerate(if_indices): #Find closing endif end_if_index = end_if_indices[x] #Is there an else? 
else_index = False for line_index in range(if_index,end_if_index): if line_index in else_indices: else_index = line_index break # print((if_index,else_index,end_if_index)) comparisons = lines[if_index]['data']['comparisons'] if_runtime = comparisons if else_index: block_a = lines[if_index+1:else_index] block_b = lines[else_index+1: end_if_index] bloc_a_runtime = get_if_block_runtime(block_a) bloc_b_runtime = get_if_block_runtime(block_b) if_runtime += max(bloc_a_runtime,bloc_b_runtime) else: block = lines[if_index+1:end_if_index] bloc_runtime = get_if_block_runtime(block) if_runtime += bloc_runtime print((if_index,else_index,end_if_index,if_runtime)) lines[if_index]['runtime'] = if_runtime return pd.DataFrame.from_dict(lines) # print('para',for_indices) # print('fpara',end_for_indices) # print('si',if_indices) # print('sino',else_indices) # print('fsi',end_if_indices) syntax_with_ifs = get_if_blocks_runtime(new_syntax) syntax_with_ifs.head(len(syntax_with_ifs)) # orders = [] # for line in lines: # orders.append(line['order']) # max_order = max(orders) # if max_order is 3: #There exists at least a combination of two nested loops with an if # for_indices = [pos for pos, line in enumerate(lines) if line['type'] is 'para'] # end_for_indices = [pos for pos, line in enumerate(lines) if line['type'] is 'fpara'] def for_runtime_formula(for_data,content_runtime): lower_bound = for_data['lower_b'] upper_bound = for_data['upper_b'] try: lower_bound = int(lower_bound) except TypeError: lower_bound = lower_bound try: upper_bound = int(upper_bound) except TypeError: upper_bound = upper_bound increment = parse_expr(str(for_data['inc'])) ceil = sp.Function('ceil') iterations = (ceil((upper_bound-lower_bound+1)/increment)*(content_runtime +2)) + 2 return iterations int(parse_expr('8')) lower_bound = parse_expr('n-1') upper_bound = parse_expr('8') try: lb = int(lower_bound) except TypeError: lb = lower_bound lb + 2 def get_for_blocks_runtime(syntax): lines_dict_list = lines = 
syntax.to_dict('records') for_indices = [pos for pos, line in enumerate(lines) if line['type'] is 'para'] print('for_indices',for_indices) endfor_indices = [pos for pos,line in enumerate(lines) if line['type'] is 'fpara'] print('endfor_indices',endfor_indices) #get for blocks and their orders block_orders = [] for x ,for_index in enumerate(for_indices): if x < len(for_indices) -1 : next_end_for = endfor_indices[x] next_for = for_indices[x+1] if next_for < next_end_for: block_orders.append((for_index,0)) else: block_orders.append((for_index,1)) else: block_orders.append((for_index,1)) # print(block_orders) #get inner for runtime for for_index in [bloc_order[0] for bloc_order in block_orders if bloc_order[1] is 1]: # print(for_index) for end_for in endfor_indices: if end_for > for_index: break for_order = lines[for_index]['order'] instruction_order = for_order + 1 inner_instructions = lines[for_index+1:end_for] content_runtime = 0 #placeholder for line in inner_instructions: if(line['order'] is instruction_order): content_runtime+= line['runtime'] for_runtime = for_runtime_formula(lines[for_index]['data'],content_runtime) lines[for_index]['runtime'] = for_runtime #get outer for runtimes for for_index in [bloc_order[0] for bloc_order in block_orders if bloc_order[1] is 0]: for x, end_for in enumerate(endfor_indices): if lines[end_for]['order'] == lines[for_index]['order'] and end_for >for_index: break for_order = lines[for_index]['order'] instruction_order = for_order + 1 inner_instructions = lines[for_index+1:end_for] content_runtime = "" #placeholder for line in inner_instructions: if(line['order'] is instruction_order): content_runtime += '+'+str(line['runtime']) for_runtime =for_runtime_formula(lines[for_index]['data'],parse_expr(str(content_runtime))) lines[for_index]['runtime'] = for_runtime return pd.DataFrame.from_dict(lines) syntax_complete = get_for_blocks_runtime(syntax_with_ifs) syntax_complete.head(len(syntax_complete)) test_for_data = {'lower_b': 
parse_expr('1'), 'upper_b': parse_expr('n'), 'inc': parse_expr(str(1))} for_runtime_formula(test_for_data,2) ceil = sp.Function('ceil') upper_bound = parse_expr('n') lower_bound = parse_expr('3') increment = parse_expr('2') iterations = ceil((upper_bound-lower_bound)/increment) iterations def get_runtime(syntax_complete): lines = syntax_complete.to_dict('records') runtime = parse_expr('0') for line in lines: if line['order'] is 0: runtime += line['runtime'] return runtime str(sp.simplify(get_runtime(syntax_complete))) ```
github_jupyter
``` from os import environ environ['optimizer'] = 'Adam' environ['num_workers']= '2' environ['batch_size']= str(2048) environ['n_epochs']= '1000' environ['batch_norm']= 'True' environ['loss_func']='MAPE' environ['layers'] = '600 350 200 180' environ['dropouts'] = '0.1 '* 4 environ['log'] = 'False' environ['weight_decay'] = '0.01' environ['cuda_device'] ='cuda:3' environ['dataset'] = 'data/speedup_dataset2.pkl' %run utils.ipynb train_dl, val_dl, test_dl = train_dev_split(dataset, batch_size, num_workers, log=log) db = fai.basic_data.DataBunch(train_dl, val_dl, test_dl, device=device) input_size = train_dl.dataset.X.shape[1] output_size = train_dl.dataset.Y.shape[1] model = None if batch_norm: model = Model_BN(input_size, output_size, hidden_sizes=layers_sizes, drops=drops) else: model = Model(input_size, output_size) if loss_func == 'MSE': criterion = nn.MSELoss() else: criterion = smape_criterion # criterion = mape_criterion l = fai.Learner(db, model, loss_func=criterion, metrics=[mape_criterion, rmse_criterion]) if optimizer == 'SGD': l.opt_func = optim.SGD l = l.load(f"r_speedup_{optimizer}_batch_norm_{batch_norm}_{loss_func}_nlayers_{len(layers_sizes)}_log_{log}") l.lr_find() l.recorder.plot() lr = 1e-03 l.fit_one_cycle(450, lr) l.recorder.plot_losses() l.save(f"r_speedup_{optimizer}_batch_norm_{batch_norm}_{loss_func}_nlayers_{len(layers_sizes)}_log_{log}") !ls models val_df = get_results_df(val_dl, l.model) train_df = get_results_df(train_dl, l.model) df = val_df df[:][['prediction','target', 'abs_diff','APE']].describe() df = train_df df[:][['prediction','target', 'abs_diff','APE']].describe() df[:][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 0)][['prediction','target', 
'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange + df.tile + df.unroll != 0)][['prediction','target', 'abs_diff','APE']].describe() df1 = df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange + df.tile + df.unroll != 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df2 = df joint_plot(df2, f"Validation dataset, {loss_func} loss") ```
github_jupyter
# "Intro til Anvendt Matematik og Python opfriskning" > "19 April 2021 - HA-AAUBS" - toc: true - branch: master - badges: true - comments: true - author: Roman Jurowetzki - categories: [intro, forelæsning] # Intro til Anvendt Matematik og Python opfriskning - Matematik bruges i finance, økonomistyring, data science, tech og meget andet - men også helt sikkert senere hvis I skal videre med en kandidat. - Analytiske skills er meget [eftertragtede på arbejdsmarkedet](https://youtu.be/u2oupkbxddc ) > [Ny DI-analyse viser](https://www.danskindustri.dk/tech-der-taller/analysearkiv/analyser/2020/10/kompetencer-til-et-digitalt-arbejdsliv/), at den digitale omstilling i virksomheder ikke kan drives af it-specialisterne alene. Der er i stærkt stigende omfang behov for, at samfundsvidenskabelige profiler også har gode digitale kompetencer. ### Hvad sker her fra i dag til 21 Juni? - overblik over lineær algebra og calculus (ikke meget mere end B niveau) - Brug gerne fx https://www.webmatematik.dk/ - $\LaTeX$ [cheat-sheet](http://tug.ctan.org/info/undergradmath/undergradmath.pdf) - [Markdown cheatsheet](https://www.markdownguide.org/cheat-sheet/) - Lære at **bruge** matematik - ikke være matematiker - lære fra et data/computer science perspektiv, hvor det handler mest at kunne implementere matematik direkte og bruge til fx at bygge en søgemaskine, recommender system, visualisere eller automatisere BI - "computational tilgang" - Python som tool - Danglish ### Pingvin Motivation og Intuition - Fra Data og Statistik til Lineær Algebra Pingvin data: https://github.com/allisonhorst/palmerpenguins ![](https://github.com/allisonhorst/palmerpenguins/raw/master/man/figures/lter_penguins.png) Vi bygger en søgemaskine til pingviner 🤔 Antagelse: - Pingviner kan bedst lide at være sammen med dem, der ligner dem mest ``` import pandas as pd import numpy as np np.set_printoptions(suppress=True) import seaborn as sns sns.set(color_codes=True, rc={'figure.figsize':(10,8)}) pinguins =
pd.read_csv("https://github.com/allisonhorst/palmerpenguins/raw/5b5891f01b52ae26ad8cb9755ec93672f49328a8/data/penguins_size.csv") pinguins.head() pinguins = pinguins.dropna() pinguins.species_short.value_counts() pinguins.index = range(len(pinguins)) # Hvordan ser vores data ud? sns.pairplot(pinguins, hue='species_short', kind="reg", corner=True, markers=["o", "s", "D"], plot_kws={'line_kws':{'color':'white'}}) ``` Vi danner alle variable om til Z-scores (så de er på samme skala) $Z = \frac{x-\mu}{\sigma} $ x = værdi, $\mu$ = gennemsnit, $\sigma$ = stadnardafvigelse ``` # scaling - vi tager kun de 4 nummeriske variable from sklearn.preprocessing import StandardScaler scaled_pinguins = StandardScaler().fit_transform(pinguins.loc[:,'culmen_length_mm':'body_mass_g']) # plot af alle skalerede variable, som nu har gennemsnit ~ 0 og std ~ 1 for i in range(4): sns.kdeplot(scaled_pinguins[:,i]) print(scaled_pinguins.shape) scaled_pinguins # pinguin 1 kan representeres som en 4D række-vektor scaled_pinguins[0,:] ``` Nu bruger vi noget, som vi måske kommer til at se på helt til sidst i Liniær Algebra, næmlig Principal Component Analysis eller PCA. - læs mere om PCA og hvordan man [bygger det fra bunden](https://towardsdatascience.com/principal-component-analysis-pca-from-scratch-in-python-7f3e2a540c51)) - Hvis du er meget interesseret - [læs her](https://jakevdp.github.io/PythonDataScienceHandbook/05.09-principal-component-analysis.html) Vi bruger 2 components (dvs. vores 4D vektorer bliver skrumpet til 2D hvor PCA forsøger at beholde så meget information som muligt ``` # import PCA from sklearn.decomposition import PCA pca = PCA(n_components=2) # Transform penguin matrix med PCA pca_pinguins = pca.fit_transform(scaled_pinguins) print(pca_pinguins.shape) pca_pinguins ``` Nu bruger vi denne 2D matrix og plotter, hvor 1.kollonne = x; 2. 
kolonne = y; vi bruger farver fra pingvin-arter i vores start-data ``` sns.scatterplot(x = pca_pinguins[:,0], y = pca_pinguins[:,1], hue = pinguins['species_short'] ) ``` Hvordan finder vi så en buddy for en given pingvin? - det er den, der er tættest på 🤖 **Eucledian Distance** ![](https://upload.wikimedia.org/wikipedia/commons/5/55/Euclidean_distance_2d.svg) **Vi kan også gå fra 2D til n-D** $d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{(u_1 - v_1)^2 + (u_2 - v_2)^2 ... (u_n - v_n)^2}$ fx Vi kan regne ED mellem $\vec{u} = (2, 3, 4, 2)$ og $\vec{v} = (1, -2, 1, 3)$ $\begin{align} d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{(2-1)^2 + (3+2)^2 + (4-1)^2 + (2-3)^2} \\ d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{1 + 25 + 9 + 1} \\ d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{36} \\ d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = 6 \end{align}$ ``` # hvor tæt er de første 2 print(scaled_pinguins[0,:]) print(scaled_pinguins[1,:]) # kvardarod er ikke standard og skal importeres from math import sqrt # manuelt sqrt((-0.89765322--0.82429023)**2 + (0.78348666-0.12189602)**2 + (-1.42952144--1.07240838)**2 + (-0.57122888--0.50901123)**2) # med numpy np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[1,:]) np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[2,:]) pinguins.iloc[:5,:] pinguins.iloc[-5:,:] np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[333,:]) np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[331,:]) import matplotlib.pyplot as plt # This code draws the x and y axis as lines. 
points = [0,1,2,333,331] fig, ax = plt.subplots() ax.scatter(pca_pinguins[[points],0], pca_pinguins[[points],1]) plt.axhline(0, c='black', lw=0.5) plt.axvline(0, c='black', lw=0.5) plt.xlim(-2,3) plt.ylim(-1,1) plt.quiver(0, 0, pca_pinguins[0,0], pca_pinguins[0,1], angles='xy', scale_units='xy', scale=1, color='blue') plt.quiver(0, 0, pca_pinguins[1,0], pca_pinguins[1,1], angles='xy', scale_units='xy', scale=1, color='green') plt.quiver(0, 0, pca_pinguins[2,0], pca_pinguins[2,1], angles='xy', scale_units='xy', scale=1, color='yellow') plt.quiver(0, 0, pca_pinguins[333,0], pca_pinguins[333,1], angles='xy', scale_units='xy', scale=1, color='violet') plt.quiver(0, 0, pca_pinguins[331,0], pca_pinguins[331,1], angles='xy', scale_units='xy', scale=1, color='black') for i in points: ax.annotate(str(i), (pca_pinguins[i,0], pca_pinguins[i,1])) ``` Man kunne nu enten skrive noget, som gentager denne beregning for alle kombinationer...eller ``` from sklearn.metrics.pairwise import euclidean_distances euclidean_matrix = euclidean_distances(scaled_pinguins) print(euclidean_matrix.shape) euclidean_matrix np.argmin(euclidean_matrix[0,:]) np.argsort(euclidean_matrix[0,:])[:3] scaled_pinguins[[0,139,16],:] euclidean_distances(scaled_pinguins[[0,139,16],:]) ``` ### Python fresh-up - Simple datatyper - Grundlæggende matematiske operationer - Lister - Funktioner - Control Flow #### Simple datatyper - Integers - hele tal **6** - Floating-Point Numbers - decimaltal **3.2** - Boolean - digital data type / bit **True / False** - String - text **Roman* ``` i = 6 print(i, type(i)) x = 3.2 print(x, type(x)) t = i == 6 print(t, type(t)) s = 'Hello' print(s, type(s)) ``` #### Grundlæggende matematiske operationer ``` a = 2.0 b = 3.0 print(a+b, a*b, a-b, a/b, a**2, a+b**2, (a+b)**2) c = a + b print(c) a + b == c a + b < c ``` #### Lister man kan pakke alt i en liste :-) ``` l = ['Eskil', 1.0, sqrt] type(l) l[2] l[0] l.append('Roman') l l.extend(['Marie',37]) l l.pop(2) l ``` #### Funktioner 
Funktioner har (normalt) in og outputs. $a$ og $b$ er vores input her og funktionen producerer $\sqrt{a^2 + b^2}$ som output. Vi prøver lige ... $\begin{align} a^2 + b^2 = c^2 \rightarrow c = \sqrt{a^2 + b^2} \end{align}$ ``` def pythagoras(a, b): return sqrt(a**2 + b**2) pythagoras(1,2) # Hvis man gør det rigtigt, så er det en god ide at kommentere hvad der sker. # Her er det en no-brainer men funktioner kan blive indviklede og # det er good-practice at skrive "docstrings" til en anden eller en selv (i) def pythagoras(a, b): """ Computes the length of the hypotenuse of a right triangle Arguments a, b: the two lengths of the right triangle """ return sqrt(a**2 + b**2) ``` ##### Mini-assignment * Lav en funktion, som tager to punkter $(x_1, y_1), (x_2, y_2)$ på en linje og beregner hældning $a$ $$ y = ax + b$$ $$ a = \frac{y_2- y_1}{x_2 - x_1}$$ ``` plt.plot((1,2), (2,3), 'ro-') plt.plot((1,2), (2,2), 'bo-') plt.plot((2,2), (2,3), 'bo-') # slope(1,2,2,3) ``` #### Control flow ``` def isNegative(n): if n < 0: return True else: return False ``` ##### Mini-assignment * Lav en funktion `KtoC` som regner Kelvin om til Celcius $$ C = K - 273.15 \quad \text{ved} \quad C\geq - 273.15$$ Funktionen udgiver `None` hvis $C < -273.15$ ``` list(range(10)) # for-loop even = [] # tom liste for i in range(10): even.append(i*2) print(even) # list-comprehension even = [2*i for i in range(10)] print(even) ``` ##### Mini-assignment 1. Beregn summen af integers 1 ... 100 ved at bruge `sum`, list-comprehension, for-loop 2. Beregn summen af integers 1 ... 100 ved at bruge partial-sum formula $$ \sum_{k=1}^n k = 1 + 2 + \cdots + (n-1) + n = \frac{n(n+1)}{2}$$ ### Matematik fresh-up alle øvelser taget fra https://tutorial.math.lamar.edu/Problems/Alg/Preliminaries.aspx Erfaringen viser, at det er en god idé at få sig en god routine med at løse matematiske problemer. 
- Integer Exponents - Rational Exponents - Radicals - Polynomials Vi arbejder old-school med papir men bruger også `SymPy` for at tjekke vores løsninger #### Integer Exponents $- {6^2} + 4 \cdot {3^2}$ ${\left( {2{w^4}{v^{ - 5}}} \right)^{ - 2}}$ (løsning med kun positive eksponenter!) ``` from sympy import * simplify(-6**2+4*3**2) w, v = symbols('w v') simplify((2*w**4*v**-5)**-2) ``` #### Rational Exponents ${\left( { - 125} \right)^{\frac{1}{3}}}$ ${\left( {{a^3}\,{b^{ - \,\,\frac{1}{4}}}} \right)^{\frac{2}{3}}}$ ``` simplify(-125**(1/3), rational=True) a, b = symbols('a b') simplify((a**3*b**(-1/4))**(2/3), rational=True) ``` #### Radicals $$\begin{array}{c} \sqrt[7]{y}\\ \sqrt[3]{{{x^2}}} \\ \sqrt[3]{{ - 512}} \\ \sqrt x \left( {4 - 3\sqrt x } \right)\end{array}$$ ``` x, y, z = symbols('x, y , z') simplify((x**2)**(1/3), rational=True) simplify(-512**(1/3), rational=True) simplify(sqrt(x)*(4 - 3*sqrt(x)), rational = True) ``` #### Polynomials $$(4{x^3} - 2{x^2} + 1) + (7{x^2} + 12x)$$ ``` simplify((4*x**3-2*x**2+1)+(7*x**2+12*x)) ```
github_jupyter
# Hyperparameter Optimization (HPO) of Machine Learning Models

L. Yang and A. Shami, “On hyperparameter optimization of machine learning algorithms: Theory and practice,” Neurocomputing, vol. 415, pp. 295–316, 2020, doi: https://doi.org/10.1016/j.neucom.2020.07.061.

### **Sample code for regression problems**

**Dataset used:** &nbsp; Boston Housing dataset from sklearn

**Machine learning algorithms used:** &nbsp; Random forest (RF), support vector machine (SVM), k-nearest neighbor (KNN), artificial neural network (ANN)

**HPO algorithms used:** &nbsp; Grid search, random search, hyperband, Bayesian Optimization with Gaussian Processes (BO-GP), Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE), particle swarm optimization (PSO), genetic algorithm (GA).

**Performance metric:** &nbsp; Mean square error (MSE)

```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.neighbors import KNeighborsClassifier,KNeighborsRegressor
from sklearn.svm import SVC,SVR
from sklearn import datasets
import scipy.stats as stats
```

## Load Boston Housing dataset

We will take the Housing dataset which contains information about different houses in Boston. There are 506 samples and 13 feature variables in this Boston dataset. The main goal is to predict the value of prices of the house using the given features. You can read more about the data and the variables [[1]](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) [[2]](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html).
``` X, y = datasets.load_boston(return_X_y=True) datasets.load_boston() ``` ## Baseline Machine Learning models: Regressors with Default Hyperparameters ``` #Random Forest clf = RandomForestRegressor() scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error') # 3-fold cross-validation print("MSE:"+ str(-scores.mean())) #SVM clf = SVR(gamma='scale') scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error') print("MSE:"+ str(-scores.mean())) #KNN clf = KNeighborsRegressor() scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error') print("MSE:"+ str(-scores.mean())) #ANN from keras.models import Sequential, Model from keras.layers import Dense, Input from sklearn.model_selection import GridSearchCV from keras.wrappers.scikit_learn import KerasRegressor from keras.callbacks import EarlyStopping def ANN(optimizer = 'adam',neurons=32,batch_size=32,epochs=50,activation='relu',patience=5,loss='mse'): model = Sequential() model.add(Dense(neurons, input_shape=(X.shape[1],), activation=activation)) model.add(Dense(neurons, activation=activation)) model.add(Dense(1)) model.compile(optimizer = optimizer, loss=loss) early_stopping = EarlyStopping(monitor="loss", patience = patience)# early stop patience history = model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks = [early_stopping], verbose=0) #verbose set to 1 will show the training process return model clf = KerasRegressor(build_fn=ANN, verbose=0) scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error') print("MSE:"+ str(-scores.mean())) ``` ## HPO Algorithm 1: Grid Search Search all the given hyper-parameter configurations **Advantages:** * Simple implementation. **Disadvantages:** * Time-consuming, * Only efficient with categorical HPs. 
``` #Random Forest from sklearn.model_selection import GridSearchCV # Define the hyperparameter configuration space rf_params = { 'n_estimators': [10, 20, 30], #'max_features': ['sqrt',0.5], 'max_depth': [15,20,30,50], #'min_samples_leaf': [1,2,4,8], #"bootstrap":[True,False], #"criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) grid = GridSearchCV(clf, rf_params, cv=3, scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) #SVM from sklearn.model_selection import GridSearchCV rf_params = { 'C': [1,10, 100], "kernel":['poly','rbf','sigmoid'], "epsilon":[0.01,0.1,1] } clf = SVR(gamma='scale') grid = GridSearchCV(clf, rf_params, cv=3, scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) #KNN from sklearn.model_selection import GridSearchCV rf_params = { 'n_neighbors': [2, 3, 5,7,10] } clf = KNeighborsRegressor() grid = GridSearchCV(clf, rf_params, cv=3, scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) #ANN from sklearn.model_selection import GridSearchCV rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32], 'neurons':[16,32], 'epochs':[20,50], 'patience':[2,5] } clf = KerasRegressor(build_fn=ANN, verbose=0) grid = GridSearchCV(clf, rf_params, cv=3,scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) ``` ## HPO Algorithm 2: Random Search Randomly search hyper-parameter combinations in the search space **Advantages:** * More efficient than GS. * Enable parallelization. **Disadvantages:** * Not consider previous results. * Not efficient with conditional HPs. 
``` #Random Forest from scipy.stats import randint as sp_randint from sklearn.model_selection import RandomizedSearchCV # Define the hyperparameter configuration space rf_params = { 'n_estimators': sp_randint(10,100), "max_features":sp_randint(1,13), 'max_depth': sp_randint(5,50), "min_samples_split":sp_randint(2,11), "min_samples_leaf":sp_randint(1,11), "criterion":['mse','mae'] } n_iter_search=20 #number of iterations is set to 20, you can increase this number if time permits clf = RandomForestRegressor(random_state=0) Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) #SVM from scipy.stats import randint as sp_randint from sklearn.model_selection import RandomizedSearchCV rf_params = { 'C': stats.uniform(0,50), "kernel":['poly','rbf','sigmoid'], "epsilon":stats.uniform(0,1) } n_iter_search=20 clf = SVR(gamma='scale') Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) #KNN from scipy.stats import randint as sp_randint from sklearn.model_selection import RandomizedSearchCV rf_params = { 'n_neighbors': sp_randint(1,20), } n_iter_search=10 clf = KNeighborsRegressor() Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) #ANN from scipy.stats import randint as sp_randint from random import randrange as sp_randrange from sklearn.model_selection import RandomizedSearchCV rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':sp_randint(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':sp_randint(3,20) } 
n_iter_search=10 clf = KerasRegressor(build_fn=ANN, verbose=0) Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) ``` ## HPO Algorithm 3: Hyperband Generate small-sized subsets and allocate budgets to each hyper-parameter combination based on its performance **Advantages:** * Enable parallelization. **Disadvantages:** * Not efficient with conditional HPs. * Require subsets with small budgets to be representative. ``` #Random Forest from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint # Define the hyperparameter configuration space rf_params = { 'n_estimators': sp_randint(10,100), "max_features":sp_randint(1,13), 'max_depth': sp_randint(5,50), "min_samples_split":sp_randint(2,11), "min_samples_leaf":sp_randint(1,11), "criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=10,max_iter=100,scoring='neg_mean_squared_error') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) #SVM from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint rf_params = { 'C': stats.uniform(0,50), "kernel":['poly','rbf','sigmoid'], "epsilon":stats.uniform(0,1) } clf = SVR(gamma='scale') hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=1,max_iter=10,scoring='neg_mean_squared_error',resource_param='C') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) #KNN from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint rf_params = { 'n_neighbors': range(1,20), } clf = KNeighborsRegressor() hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=1,max_iter=20,scoring='neg_mean_squared_error',resource_param='n_neighbors') hyper.fit(X, y) print(hyper.best_params_) 
print("MSE:"+ str(-hyper.best_score_)) #ANN from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':sp_randint(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':sp_randint(3,20) } clf = KerasRegressor(build_fn=ANN, epochs=20, verbose=0) hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=1,max_iter=10,scoring='neg_mean_squared_error',resource_param='epochs') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) ``` ## HPO Algorithm 4: BO-GP Bayesian Optimization with Gaussian Process (BO-GP) **Advantages:** * Fast convergence speed for continuous HPs. **Disadvantages:** * Poor capacity for parallelization. * Not efficient with conditional HPs. ### Using skopt.BayesSearchCV ``` #Random Forest from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer # Define the hyperparameter configuration space rf_params = { 'n_estimators': Integer(10,100), "max_features":Integer(1,13), 'max_depth': Integer(5,50), "min_samples_split":Integer(2,11), "min_samples_leaf":Integer(1,11), "criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=20, scoring='neg_mean_squared_error') #number of iterations is set to 20, you can increase this number if time permits Bayes.fit(X, y) print(Bayes.best_params_) bclf = Bayes.best_estimator_ print("MSE:"+ str(-Bayes.best_score_)) #SVM from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer rf_params = { 'C': Real(0,50), "kernel":['poly','rbf','sigmoid'], 'epsilon': Real(0,1) } clf = SVR(gamma='scale') Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=20, scoring='neg_mean_squared_error') Bayes.fit(X, y) print(Bayes.best_params_) print("MSE:"+ 
str(-Bayes.best_score_)) #KNN from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer rf_params = { 'n_neighbors': Integer(1,20), } clf = KNeighborsRegressor() Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=10, scoring='neg_mean_squared_error') Bayes.fit(X, y) print(Bayes.best_params_) print("MSE:"+ str(-Bayes.best_score_)) #ANN from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':Integer(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':Integer(3,20) } clf = KerasRegressor(build_fn=ANN, verbose=0) Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=10, scoring='neg_mean_squared_error') Bayes.fit(X, y) print(Bayes.best_params_) print("MSE:"+ str(-Bayes.best_score_)) ``` ### Using skopt.gp_minimize ``` #Random Forest from skopt.space import Real, Integer from skopt.utils import use_named_args reg = RandomForestRegressor() # Define the hyperparameter configuration space space = [Integer(10, 100, name='n_estimators'), Integer(5, 50, name='max_depth'), Integer(1, 13, name='max_features'), Integer(2, 11, name='min_samples_split'), Integer(1, 11, name='min_samples_leaf'), Categorical(['mse', 'mae'], name='criterion') ] # Define the objective function @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=20, random_state=0) #number of iterations is set to 20, you can increase this number if time permits print("MSE:%.4f" % res_gp.fun) print(res_gp.x) #SVM from skopt.space import Real, Integer from skopt.utils import use_named_args reg = SVR(gamma='scale') space = [Real(0, 50, name='C'), Categorical(['poly','rbf','sigmoid'], 
name='kernel'), Real(0, 1, name='epsilon'), ] @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=20, random_state=0) print("MSE:%.4f" % res_gp.fun) print(res_gp.x) #KNN from skopt.space import Real, Integer from skopt.utils import use_named_args reg = KNeighborsRegressor() space = [Integer(1, 20, name='n_neighbors')] @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=10, random_state=0) print("MSE:%.4f" % res_gp.fun) print(res_gp.x) ``` ## HPO Algorithm 5: BO-TPE Bayesian Optimization with Tree-structured Parzen Estimator (TPE) **Advantages:** * Efficient with all types of HPs. * Keep conditional dependencies. **Disadvantages:** * Poor capacity for parallelization. 
``` #Random Forest from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold # Define the objective function def objective(params): params = { 'n_estimators': int(params['n_estimators']), 'max_depth': int(params['max_depth']), 'max_features': int(params['max_features']), "min_samples_split":int(params['min_samples_split']), "min_samples_leaf":int(params['min_samples_leaf']), "criterion":str(params['criterion']) } clf = RandomForestRegressor( **params) score = -np.mean(cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } # Define the hyperparameter configuration space space = { 'n_estimators': hp.quniform('n_estimators', 10, 100, 1), 'max_depth': hp.quniform('max_depth', 5, 50, 1), "max_features":hp.quniform('max_features', 1, 13, 1), "min_samples_split":hp.quniform('min_samples_split',2,11,1), "min_samples_leaf":hp.quniform('min_samples_leaf',1,11,1), "criterion":hp.choice('criterion',['mse','mae']) } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=20) print("Random Forest: Hyperopt estimated optimum {}".format(best)) #SVM from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold def objective(params): params = { 'C': abs(float(params['C'])), "kernel":str(params['kernel']), 'epsilon': abs(float(params['epsilon'])), } clf = SVR(gamma='scale', **params) score = -np.mean(cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } space = { 'C': hp.normal('C', 0, 50), "kernel":hp.choice('kernel',['poly','rbf','sigmoid']), 'epsilon': hp.normal('epsilon', 0, 1), } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=20) print("SVM: Hyperopt estimated optimum {}".format(best)) #KNN from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import 
cross_val_score, StratifiedKFold def objective(params): params = { 'n_neighbors': abs(int(params['n_neighbors'])) } clf = KNeighborsRegressor( **params) score = -np.mean(cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } space = { 'n_neighbors': hp.quniform('n_neighbors', 1, 20, 1), } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10) print("KNN: Hyperopt estimated optimum {}".format(best)) #ANN from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold def objective(params): params = { "optimizer":str(params['optimizer']), "activation":str(params['activation']), "loss":str(params['loss']), 'batch_size': abs(int(params['batch_size'])), 'neurons': abs(int(params['neurons'])), 'epochs': abs(int(params['epochs'])), 'patience': abs(int(params['patience'])) } clf = KerasRegressor(build_fn=ANN,**params, verbose=0) score = -np.mean(cross_val_score(clf, X, y, cv=3, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } space = { "optimizer":hp.choice('optimizer',['adam','rmsprop']), "activation":hp.choice('activation',['relu','tanh']), "loss":hp.choice('loss',['mse','mae']), 'batch_size': hp.quniform('batch_size', 16, 64, 16), 'neurons': hp.quniform('neurons', 10, 100, 10), 'epochs': hp.quniform('epochs', 20, 50, 10), 'patience': hp.quniform('patience', 3, 20, 3), } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10) print("ANN: Hyperopt estimated optimum {}".format(best)) ``` ## HPO Algorithm 6: PSO Partical swarm optimization (PSO): Each particle in a swarm communicates with other particles to detect and update the current global optimum in each iteration until the final optimum is detected. **Advantages:** * Efficient with all types of HPs. * Enable parallelization. **Disadvantages:** * Require proper initialization. 
``` #Random Forest import optunity import optunity.metrics # Define the hyperparameter configuration space search = { 'n_estimators': [10, 100], 'max_features': [1, 13], 'max_depth': [5,50], "min_samples_split":[2,11], "min_samples_leaf":[1,11], } # Define the objective function @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,n_estimators=None, max_features=None,max_depth=None,min_samples_split=None,min_samples_leaf=None): # fit the model model = RandomForestRegressor(n_estimators=int(n_estimators), max_features=int(max_features), max_depth=int(max_depth), min_samples_split=int(min_samples_split), min_samples_leaf=int(min_samples_leaf), ) scores=-np.mean(cross_val_score(model, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=20, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) #SVM import optunity import optunity.metrics search = { 'C': (0,50), 'kernel':[0,3], 'epsilon': (0, 1) } @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,C=None,kernel=None,epsilon=None): # fit the model if kernel<1: ke='poly' elif kernel<2: ke='rbf' else: ke='sigmoid' model = SVR(C=float(C), kernel=ke, gamma='scale', epsilon=float(epsilon) ) scores=-np.mean(cross_val_score(model, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=20, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) #KNN import optunity import optunity.metrics search = { 'n_neighbors': [1, 20], } @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,n_neighbors=None): # fit the model model = KNeighborsRegressor(n_neighbors=int(n_neighbors), ) scores=-np.mean(cross_val_score(model, X, y, 
cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=10, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) #ANN import optunity import optunity.metrics search = { 'optimizer':[0,2], 'activation':[0,2], 'loss':[0,2], 'batch_size': [0, 2], 'neurons': [10, 100], 'epochs': [20, 50], 'patience': [3, 20], } @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,optimizer=None,activation=None,loss=None,batch_size=None,neurons=None,epochs=None,patience=None): # fit the model if optimizer<1: op='adam' else: op='rmsprop' if activation<1: ac='relu' else: ac='tanh' if loss<1: lo='mse' else: lo='mae' if batch_size<1: ba=16 else: ba=32 model = ANN(optimizer=op, activation=ac, loss=lo, batch_size=ba, neurons=int(neurons), epochs=int(epochs), patience=int(patience) ) clf = KerasRegressor(build_fn=ANN, verbose=0) scores=-np.mean(cross_val_score(clf, X, y, cv=3, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=20, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) ``` ## HPO Algorithm 7: Genetic Algorithm Genetic algorithms detect well-performing hyper-parameter combinations in each generation, and pass them to the next generation until the best-performing combination is identified. **Advantages:** * Efficient with all types of HPs. * Not require good initialization. **Disadvantages:** * Poor capacity for parallelization. 
### Using DEAP ``` #Random Forest from evolutionary_search import EvolutionaryAlgorithmSearchCV from scipy.stats import randint as sp_randint # Define the hyperparameter configuration space rf_params = { 'n_estimators': range(10,100), "max_features":range(1,13), 'max_depth': range(5,50), "min_samples_split":range(2,11), "min_samples_leaf":range(1,11), "criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) # Set the hyperparameters of GA ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) #SVM from evolutionary_search import EvolutionaryAlgorithmSearchCV rf_params = { 'C': np.random.uniform(0,50,1000), "kernel":['poly','rbf','sigmoid'], 'epsilon': np.random.uniform(0,1,100), } clf = SVR(gamma='scale') ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) #KNN from evolutionary_search import EvolutionaryAlgorithmSearchCV rf_params = { 'n_neighbors': range(1,20), } clf = KNeighborsRegressor() ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) #ANN from evolutionary_search import EvolutionaryAlgorithmSearchCV # Define the hyperparameter configuration space rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': 
[16,32,64], 'neurons':range(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':range(3,20) } clf = KerasRegressor(build_fn=ANN, verbose=0) # Set the hyperparameters of GA ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) ``` ### Using TPOT ``` #Random Forest from tpot import TPOTRegressor # Define the hyperparameter configuration space parameters = { 'n_estimators': range(20,200), "max_features":range(1,13), 'max_depth': range(10,100), "min_samples_split":range(2,11), "min_samples_leaf":range(1,11), #"criterion":['mse','mae'] } # Set the hyperparameters of GA ga2 = TPOTRegressor(generations= 3, population_size= 10, offspring_size= 5, verbosity= 3, early_stop= 5, config_dict= {'sklearn.ensemble.RandomForestRegressor': parameters}, cv = 3, scoring = 'neg_mean_squared_error') ga2.fit(X, y) #SVM from tpot import TPOTRegressor parameters = { 'C': np.random.uniform(0,50,1000), "kernel":['poly','rbf','sigmoid'], 'epsilon': np.random.uniform(0,1,100), 'gamma': ['scale'] } ga2 = TPOTRegressor(generations= 3, population_size= 10, offspring_size= 5, verbosity= 3, early_stop= 5, config_dict= {'sklearn.svm.SVR': parameters}, cv = 3, scoring = 'neg_mean_squared_error') ga2.fit(X, y) #KNN from tpot import TPOTRegressor parameters = { 'n_neighbors': range(1,20), } ga2 = TPOTRegressor(generations= 3, population_size= 10, offspring_size= 5, verbosity= 3, early_stop= 5, config_dict= {'sklearn.neighbors.KNeighborsRegressor': parameters}, cv = 3, scoring = 'neg_mean_squared_error') ga2.fit(X, y) ```
github_jupyter
# Нейросети и вероятностные модели **Разработчик: Алексей Умнов** # Авторегрессионные модели Мы поработаем с авторегрессионными моделями на примере архитектуры PixelCNN. Мы обучим модель для задачи генерации изображений и для задачи дорисовывания недостающих частей изображения. ### LCD digits dataset В качестве примера мы возьмем датасет из простых LCD-цифр. Ниже приведен код, который его загружает и рисует примеры сэмплов. Источник датасета: https://gist.github.com/benjaminwilson/b25a321f292f98d74269b83d4ed2b9a8#file-lcd-digits-dataset-nmf-ipynb ``` %matplotlib inline import pickle import numpy as np from matplotlib import pyplot as plt import torch from torch.utils.data import DataLoader import torch.nn as nn from torchvision import datasets, utils from utils import LcdDigits, IMAGE_WIDTH, IMAGE_HEIGHT BATCH_SIZE = 100 train_dataset = LcdDigits(BATCH_SIZE * 50) train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE) def show_as_image(image, figsize=(10, 5)): plt.figure(figsize=figsize) plt.imshow(image, cmap='gray') plt.xticks([]); plt.yticks([]) def batch_images_to_one(batches_images): n_square_elements = int(np.sqrt(batches_images.shape[0])) rows_images = np.split(np.squeeze(batches_images), n_square_elements) return np.vstack([np.hstack(row_images) for row_images in rows_images]) for batch, _ in train_loader: show_as_image(batch_images_to_one(batch[:25]), figsize=(10, 10)) break ``` Здесь специально выбран простой датасет, так как вероятностные модели обычно требуют больших ресурсов. Также обратите внимание, что хотя данные очень простые (фактически всего 10 разных сэмплов), они находятся в пространстве значительно большей размерности ($2^{8 \times 13}$). Мы будем подавать модели сырые пиксели на вход, и будем хотеть, чтобы она нашла в них правильные зависимости и научилась строить только валидные изображения. ### PixelCNN Коротко вспомним, что такое PixelCNN. 
Авторегрессионные модели в общем виде моделируют распределения на векторах $x = (x_1, \ldots, x_N)$ в виде: $$ p(x) = \prod_{i=1}^{N} p(x_i \mid x_1, \ldots, x_{i-1}). $$ Распределения $p(x_i \mid x_1, \ldots, x_{i-1})$ можно моделировать при помощи нейронных сетей, которые получают на вход значения $x_1, \ldots, x_{i-1}$ и выдают распределение вероятностей для значений $x_i$. Так как входов здесь переменное число, можно использовать рекуррентные сети (например, PixelRNN), но неплохо работает и более простая модель &mdash; PixelCNN, &mdash; которая подает на вход не все значения $x_1, \ldots, x_{i-1}$, а только соседние на некотором расстоянии с помощью сверточных слоев. ![pixelcnn](pixelcnn.png) Для того, чтобы для данного пикселя подавать на вход только значения идущие ранее, вместо обычных сверток нужно использовать маскированные свертки. Напишите недостающий код, чтобы создать соответствующие маски и потом сделайте из них слой для pytorch. Такие слои можно добавлять последовательно, сохраняя корректные зависимости, при этом во всех слоях кроме первого можно использовать центральный пиксель. У вас должны получаться вот такие маски (с `include_center=False` и с `include_center=True` соответственно): ![](mask_with_center.png) ![](mask_no_center.png) Hint: можно умножить на маску не входы, а веса. 
``` def causal_mask(width, height, starting_point): mask = torch.cat(( torch.ones(starting_point[0], height), torch.cat(( torch.ones(1, starting_point[1] + 1), torch.zeros(1, height - starting_point[1] - 1) ), 1), torch.zeros(width - starting_point[0] - 1, height) ), 0) return mask.numpy() def conv_mask(height, width, include_center=False): cm = causal_mask( width, height, starting_point=(height//2, width//2 + include_center - 1)) return 1.0 * torch.Tensor(cm) conv_mask(5, 5, True) class MaskedConv2d(nn.Conv2d): def __init__(self, include_center, *args, **kwargs): super(MaskedConv2d, self).__init__(*args, **kwargs) self.include_center = include_center cm = conv_mask( *self.weight.data.shape[2:], include_center ) self.weight.data = torch.matmul(cm, self.weight) # YOUR CODE ``` Теперь соберите сеть с несколькими слоями маскированных сверток и обучите ее. Hint 1: в задаче хорошо помогает сверточный слой 1x1 в конце. Hint 2: если ошибиться и нарушить казуальность (т.е. сделать зависимости вперед), то обучаться будет хорошо, а генерировать плохо. 
``` class PixelCNN(nn.Module): N_PIXELS_OUT = 2 # binary 0/1 pixels def __init__(self, n_channels, kernel_size, padding): super(PixelCNN, self).__init__() self.layers = nn.Sequential( MaskedConv2d( False, in_channels=1, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), 
MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), MaskedConv2d( True, in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding ), nn.BatchNorm2d(n_channels), nn.ReLU(), nn.Conv2d( in_channels=n_channels, out_channels=self.N_PIXELS_OUT, kernel_size=(1, 1) ) ) # YOUR CODE def forward(self, x): pixel_logits = self.layers(x) return pixel_logits N_EPOCHS = 25 LR = 0.005 cnn = PixelCNN(n_channels=4, kernel_size=7, padding=3) optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) ``` Обратите внимание, что полученной сети достаточно подать на вход изображение, и на выходе получится распределение для значений каждого пикселя. Осталось только минимизировать кросс-энтропию этих значений и пикселей примеров в выборке. В случае успеха итоговая кросс-энтропия будет около 0.02. ``` import torch.nn.functional as F for epoch in range(N_EPOCHS): for i, (images, _) in enumerate(train_loader): optimizer.zero_grad() # TRAIN output = cnn(images) target = images[:,0].long() loss = F.cross_entropy(output, target) pickle.dump(output, open("./output.pkl", "wb")) # YOUR CODE loss.backward() optimizer.step() if i % 100 == 0: print ('Epoch [%d/%d], Loss: %.4f' %(epoch+1, N_EPOCHS, loss.data.item())) ``` При генерации изображений можно начинать с пустого изображения, а можно подавать какие-то начальные пиксели. Допишите функцию генерации и проверьте ее для задачи генерации (на вход пустое изображения) и для задачи дорисовывания (на вход - верхняя часть изображения). У вас должны получиться разумные изображения цифр, допускается небольшая доля "плохих" изображений. *Упражнение:* почему при одинаковых пустых входных изображениях получаются разные изображения на выходе? 
``` data = pickle.load(open("./out.pkl", "rb")) data_1 = data[:, 0, :, :].view(-1, 1, 13, 8) data_2 = data[:, 1, :, :].view(-1, 1, 13, 8) print(f""" data1: {data_1.shape} data2: {data_2.shape} """) show_as_image(batch_images_to_one(data_1.detach().numpy()), figsize=(10, 20)) show_as_image(batch_images_to_one(data_2.detach().numpy()), figsize=(10, 20)) # def generate_samples(n_samples, starting_point=(0, 0), starting_image=None): # samples = torch.from_numpy( # starting_image if starting_image is not None else # np.zeros((n_samples * n_samples, 1, IMAGE_HEIGHT, IMAGE_WIDTH))).float() # cnn.train(False) # optimizer.zero_grad() # out = cnn(samples) # sm_layer = nn.Softmax() # samples = sm_layer(out) # dist = torch.distributions.Multinomial(1000, samples) # samples = dist.sample().detach() # samples = samples[:,0].view(-1, 1, 13, 8) # return samples.detach().numpy() # def generate_samples(n_samples, starting_point=(0, 0), starting_image=None): # samples = torch.from_numpy( # starting_image if starting_image is not None else # np.zeros((n_samples * n_samples, 1, IMAGE_HEIGHT, IMAGE_WIDTH))).float() # cnn.train(False) # optimizer.zero_grad() # out = cnn(samples) # samples = F.softmax(out) # samples = -1 * (samples - 1) # samples = torch.bernoulli(samples) # samples = samples.detach()[:,0].view(-1, 1, 13, 8) # return samples.numpy() def generate_samples(n_samples, starting_point=(0, 0), starting_image=None): samples = torch.from_numpy( starting_image if starting_image is not None else np.zeros((n_samples * n_samples, 1, IMAGE_HEIGHT, IMAGE_WIDTH))).float() cnn.train(False) optimizer.zero_grad() out = cnn(samples) _, samples = torch.max(out, 1) # samples = samples[0].view(-1, 1, 13, 8) return samples.numpy() # def binarize(images): # return (np.random.uniform(size=images.shape) < images).astype('float32') # def generate_samples(n_samples, starting_point=(0, 0), starting_image=None): # samples = torch.from_numpy( # starting_image if starting_image is not None else # 
np.zeros((n_samples * n_samples, 1, IMAGE_HEIGHT, IMAGE_WIDTH))).float() # cnn.train(False) # optimizer.zero_grad() # out = cnn(samples) # pickle.dump(out, open("./out.pkl", "wb")) # prob = F.softmax(out, dim=1) # samples = torch.zeros((100, 1, 13, 8)) # for k in range(100): # for i in range(13): # for j in range(8): # n_samples = torch.multinomial(prob[:, :, i, j].view(-1), 1) # samples[k, 0, i, j] = n_samples # return samples.detach().numpy() # def generate_samples(n_samples, starting_point=(0, 0), starting_image=None): # samples = torch.from_numpy( # starting_image if starting_image is not None else # np.zeros((n_samples * n_samples, 1, IMAGE_HEIGHT, IMAGE_WIDTH))).float() # cnn.train(False) # optimizer.zero_grad() # out = cnn(samples) # samples_tensors = [] # for k in range(out.shape[0]): # t_pair = out[k].view(2, 13, 8) # prob = F.softm(t_pair) # sample = torch.multinomial(prob.view(13*8, 2), 1) # samples_tensors.append(sample) # samples = torch.cat(samples_tensors).view(-1, 1, 13, 8) # samples = samples.detach() # return samples show_as_image(batch_images_to_one(generate_samples(n_samples=10)), figsize=(10, 20)) from utils import random_digits n_images = 10 starting_point = (4, 3) mask = causal_mask(IMAGE_HEIGHT, IMAGE_WIDTH, starting_point) starting_images = digits_list = [random_digits(fixed_label=d)[0] for d in range(10)] batch_starting_images = np.expand_dims(np.stack([i * mask for i in starting_images] * n_images), axis=1) samples = generate_samples(n_images, starting_image=batch_starting_images, starting_point=starting_point) show_as_image(np.hstack([(1 + mask) * i for i in starting_images]), figsize=(10, 10)) show_as_image( batch_images_to_one((samples * (1 + mask))), figsize=(10, 20)) ```
github_jupyter
``` %reload_ext autoreload %autoreload 2 %matplotlib inline # enable outputs from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras from google.colab import files # upload data_generator file files.upload() ``` # Number classification For our first task, we will build a **CNN** that will learn to identify which number appears in a image ## Data at hand Our dataset was created by applying random transformations to digit images in MNIST database , concatenating them and afterwards padding the final image. The results consist of images of maximum three digit numbers, ranging from **0 to 255**. Each image is **28x84** and has 1 channel of colour, grayscale. Our dataset is created on demand as explained above by two generator functions, *training_generator* and *test_generator*. These generator functions return a pair of images as described above, a pair of labels with the numbers in the images, and a label with the sum of these numbers. ## Input preprocessing We create a wrapper for the given generator and unfold the pair of images and the pair of corresponding labels into an array of images and an array of labels. We will also normalize pixels by *dividing each of the pixel value by 255*. ## To generate or not to generate Generators enable us to **train** and **evaluate** our model by not loading all data into memory. But this will increase fitting and testing times. This notebook ran in a *Google Colab* environment, that managed our workload well. **60000 training samples** and **6000 testing samples** were used. ``` # define generator wrapper and function to generate data based on a generator from data_generator import training_generator, test_generator # unpack array of pairs of images into an array def seq_gen(generator, batch_size = 32): # a sample in generator will create pairs of two numbers. 
# So we halve the test size for batch in generator(batch_size // 2): x_new_batch = [] y_new_batch = [] x_batch , y_batch, sum_batch = batch for x_sample in x_batch: x_new_batch.append(x_sample[0]) x_new_batch.append(x_sample[1]) for y_sample in y_batch: y_new_batch.append(y_sample[0]) y_new_batch.append(y_sample[1]) yield (np.array(x_new_batch), np.array(y_new_batch), sum_batch) #function to extract a batch from generator def generate_data(generator): x_samples = [] y_samples = [] x_samples, y_samples, sum_samples = next(generator) x_samples = x_samples / 255.0 x_samples = np.expand_dims(x_samples, -1) return x_samples, y_samples, sum_samples %%time train_size = 60000 test_size = 6000 seq_train_gen = seq_gen(training_generator, train_size) X_train, Y_train, _ = generate_data(seq_train_gen) seq_test_gen = seq_gen(test_generator, test_size) X_test, Y_test, _ = generate_data(seq_train_gen) ``` ## Data Exploration Take a look at how some of the images look like. ``` plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X_train.squeeze()[i], cmap=plt.cm.binary) plt.xlabel(Y_train[i]) ``` For our model to work at its best, data should be **balanced**. So we check if all present labels have, with some approximation, the same number of occurences in our data. ``` labels, counts = np.unique(Y_train, return_counts=True) fig, ax = plt.subplots() fig.suptitle('Count distribution of samples by classes', fontsize=16); print('Media aparitiilor : %.2f' % (counts.mean())) print('Cea mai rar intalnita clasa apare de %d ori' % (counts.min())) print('Cea mai des intalnita clasa apare de %d ori.' % (counts.max())) # ax.bar(labels, counts ) counts_p = counts / sum(counts) ax.set_xlabel('Number of occurences'); ax.set_ylabel('Number of classes'); ax.hist(counts); ``` ## Model Arhitecture We use **3 Convolutional layers** , followed by **MaxPooling** to train filters to extract relevant information from the image. 
Number of filters in the first layer is usually chosen smaller than at following layers because simpler features are usually extracted in the first layer : things like digit contours and shapes. We follow each of these with a **Dropout** layer of *0.4* dropout rate. We tried values ranging from 0.2 to 0.5 and model scores didn't vary much. We then **flatten** the output and add an additional **Dense** layer of *128* neurons. Finally, we get the network result by adding a **Dense** layer with **softmax**. The rest of the layers are activated using **relu**. We tried **sgd**, **adam** and **rmsprop** as the optimizers of the problem. Even though *rmsprop* is usually used at rnn networks, it worked best here. The *sgd* trained slower than the others and *adam* gave comparable but smaller results than *rmsprop*. ``` model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(32, (3, 3), activation = 'relu', input_shape = (28, 84, 1))) model.add(tf.keras.layers.MaxPooling2D(2, 2)) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu')) model.add(tf.keras.layers.MaxPooling2D(2, 2)) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu')) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation = 'relu')) # model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(256, activation = 'softmax')) model.summary() # rmsprop pare cel mai bun, dar e recomandat pentru rnn-uri. # Mai încearca si cu adam si cu asta # pentru input = 10000 (.3 split), cu 25 de epoci se ajunge la 75-80% # pentru input = 60000 (.3 split), cu 5-10 epoci se ajunge la 70-80% #rmsprop - 25 (27 val) #sgd - 0.7 #adam - 26 model.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy']) ``` ## Data preprocessing Our labels are numbers ranging from 0 to 255. 
We will **one-hot encode** these labels to be compliant with the network arhitecture. ``` from keras.utils import to_categorical Y_train_OH = to_categorical(Y_train, num_classes=256) Y_test_OH = to_categorical(Y_test, num_classes = 256) ``` For proper evaluation of the model and to avoid overfitting, we split train data into training and validation samples. We will fit our model on the train samples, but evaluate its performance after an epoch of training on the validation samples. ``` from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(X_train, Y_train_OH, test_size = 0.3, random_state = 48) x_test = X_test y_test = Y_test_OH ``` ## Fitting We save the weights of the model with the best results on the validation set. ``` no_epochs = 30 # save best model in hdf5 file checkpointer = keras.callbacks.ModelCheckpoint(filepath="numbers_2.hdf5", verbose=1, save_best_only=True) history = model.fit(x_train, y_train, epochs=no_epochs, validation_data= (x_val, y_val) , callbacks=[checkpointer]) ``` ### Plotting results Define functions to plot training loss and validation loss of the training process. Do the same for training accuracy and validation accuracy. 
``` def get_loss(history = history): loss = history.history['loss'] val_loss = history.history['val_loss'] return loss, val_loss def get_acc(history = history): acc = history.history['acc'] val_acc = history.history['val_acc'] return acc, val_acc def get_epochs(no_epochs = no_epochs): epochs = range(1, no_epochs + 1) return epochs def plot_loss(history = history, no_epochs = no_epochs): epochs = get_epochs(no_epochs) loss, val_loss = get_loss(history) plt.plot(epochs, loss, 'ko', label = 'Training Loss') plt.plot(epochs, val_loss, 'k', label = 'Validation Loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.title('Training and Validation Loss') plt.legend() def plot_acc(history = history, no_epochs = no_epochs): epochs = get_epochs(no_epochs) acc, val_acc = get_acc(history) plt.plot(epochs, acc, 'bo', label = 'Training Accuracy') plt.plot(epochs, val_acc, 'b', label = 'Validation Accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.title('Training and Validation Accuracy') plt.legend() plot_loss() plot_acc() ``` ### Score on test data ``` model = keras.models.load_model("numbers_2.hdf5") model.evaluate(x_test,y_test) ``` ### Plotting activations We plot activation of the some filters of some layers of the CNN. ``` from tensorflow.keras.models import Model def get_activations(model, sample): layer_outputs = [layer.output for layer in model.layers] activation_model = Model(inputs=model.input, outputs=layer_outputs) activations = activation_model.predict(sample) #change the index for x_train[index] to use another image return activations #when you call the function, col_size*row_size must be equal to the number of filters in that conv layer def display_activation(activations, col_size, row_size, act_index): activation = activations[act_index] #act_index is the index of the layer eg. 
0 is the first layer activation_index=0 fig, ax = plt.subplots(row_size, col_size, figsize=(row_size*5,col_size*5)) for row in range(0,row_size): for col in range(0,col_size): ax[row][col].imshow(activation[0, :, :, activation_index], cmap='gray') activation_index += 1 layer_to_inspect = 1 x = x_train[48].reshape(1,28, 84, 1) activations = get_activations(model, x) display_activation(activations, 4, 8, layer_to_inspect) ``` # Addition Given a pair of numbers, we will create an **RNN** that learns to output their sum. We will use the original generator function to generate data for this problem, but will also *normalize* the pixels. We will use **30 000 training samples** and **3 000 test samples**. ``` %%time train_size = 30000 test_size = 3000 _, X_train, Y_train = generate_data(training_generator(train_size)) _, X_test, Y_test = generate_data(test_generator(test_size)) ``` Obviously, we can define a simple MLP that does addition of two numbers. With a *Dense* layer of a **single neuron** with **weights set to 1** and **bias set to 0**, the network will perform addition of two input numbers. But the problem at hand will be solved with an RNN, to illustrate training of reccurent neural networks. ``` # ADDITION MLP adder = keras.models.Sequential() adder.add(keras.layers.Flatten( input_shape = (2,))) adder.add(keras.layers.Dense(1, weights = [np.array([[1], [1]]), np.array([0])])) adder.compile(loss='mean_squared_error', optimizer='adam', metrics = ['accuracy']) predictions = adder.predict(X_test) correct_labels = np.sum(predictions == Y_test) print('Accuracy of simple MLP : {}'.format( correct_labels / len(predictions) )) ``` ## Input / Output Preprocessing To treat the problem as a **sequence-to-sequence** one, we will encode input and output as follow : Each of the numbers will be converted to a padded string of three characters and the numbers will be concatenated afterwards. Output will be a three character string representing the addition of the numbers. 
``` Input : (23, 101) -> '023101' Output : '124' ``` To feed this data into our model, we will **one-hot encode** each of the digits of the sequence and thus will obtain following input and output sizes : ``` input.shape == (6,10) output.shape == (3,10) ``` from keras.utils import to_categorical def stringify_X(X): X_S = [''.join([str(number).zfill(3) for number in pair]) for pair in X.squeeze()] return X_S def stringify_Y(Y): Y_S = [str(number).zfill(3) for number in Y.squeeze()] return Y_S def onehot(arr): arr_split = np.array( [list(string) for string in arr]) arr_OH = to_categorical(arr_split, num_classes = 10) return arr_OH def encode_X(X): X_S = stringify_X(X) X_OH = onehot(X_S) return X_OH def encode_Y(Y): Y_S = stringify_Y(Y) Y_OH = onehot(Y_S) return Y_OH X_train_OH, Y_train_OH = encode_X(X_train), encode_Y(Y_train) x_test, y_test = encode_X(X_test), encode_Y(Y_test) from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(X_train_OH, Y_train_OH, test_size = 0.3, random_state = 48) ``` ## Model arhitecture We will use a **encoder-decoder** arhitecture for the task at hand. First, we **encode** the input sequence using a **LSTM** of *128* units and will get an output of *128*. The **decoder** will be another **LSTM** layer of *128* units. Output of the encoder will be repeatedly served to the decoder **3 times** using a **Repeat Vector** layer, because the maximum length of our addition output is 3 digits : ``` 255 + 255 = 510 ``` We then apply a **Dense** layer of *10 neurons* to every temporal slice of the input. This layer will decide which digit we keep for each of the step of the output sequence. To apply the above layer to every temporal slice, we wrap it in a **TimeDimensional** layer. Because it expects the first dimension of the input to be the timesteps, we must set **return_sequences** to *True* on the decoder layer. 
This makes the decoder output the whole output steps so far in the following form : ``` output_so_far.shape == (num_samples, timesteps, output_dim) ``` We choose **rmsprop** as the optimizer of this problem because of its inclined advantage in *rnn arhitectures*. ``` model = keras.models.Sequential() model.add(keras.layers.LSTM(128, input_shape=(6, 10))) model.add(keras.layers.RepeatVector(3)) model.add(keras.layers.LSTM(128, return_sequences=True)) model.add(keras.layers.TimeDistributed(keras.layers.Dense(10, activation='softmax'))) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.summary() ``` ## Fitting ``` no_epochs = 30 checkpointer = keras.callbacks.ModelCheckpoint(filepath="add.hdf5", verbose=1, save_best_only=True) history = model.fit(x_train, y_train, epochs = no_epochs, validation_data=(x_val, y_val), callbacks=[checkpointer]) model.evaluate(x_test, y_test) files.download("add.hdf5") ``` ## Plotting Plot accuracy and loss throughout training process, both on training and validation data ``` plot_loss() plot_acc() ``` ## Evaluate the model Because of the way our **LSTM** arhitecture works, it counts as its output each of the digits of the sequence. ``` input = '023101' output = '128' real_output = '124' accuracy = 2 / 3 (66%) real_accuracy = 0 ``` Thus, if it correctly predicts two of three digits of a number, as above, it has *66%* accuracy, but its true accuracy on our problem is *0*, because it gave the wrong number. It esentially counts how many digits it got correctly, but we actually care how many numbers it got correctly. To measure this, we will make predictions with our network and compare the predictions with the test data. 
``` model = tf.keras.models.load_model("add.hdf5") model.evaluate(x_test, y_test) def true_accuracy(model, x_test, y_test): predicted = model.predict(x_test) predicted_OH = to_categorical(np.argmax(predicted, axis = 2), num_classes = 10) bool_test = np.all(predicted_OH == y_test, axis = (1,2)) true_acc = np.sum(bool_test) / len(y_test) return true_acc model = keras.models.load_model("add.hdf5") true_acc = true_accuracy(model, x_test, y_test) print('True accuracy : {}'.format(true_acc)) ``` # End-to-End network We train a network that receives a pair of images with numbers and outputs their sum. We attempt to concatenate the above networks (referred below as **CNN** and **RNN**) in the following way : 1. Concatenate two images and add them as input to the *CNN*. ``` input.shape == (28,168,1) ``` 2. Remove the layer with **softmax** from cnn and replace it with a **reshape** layer. We reshape the output of the cnn and halve it to obtain pairs of parameters representing each image. ``` cnn_output.shape == (128) reshaped_output.shape == (2,64) ``` 3. Run the obtained output through the **RNN**. 
``` %%time train_size = 30000 test_size = 3000 train_gen = training_generator(train_size) test_gen = test_generator(test_size) X_train, _ , Y_train = generate_data(train_gen) X_test, _ , Y_test = generate_data(test_gen) #concatenate images X_train_CAT = np.array([np.hstack( (pair[0], pair[1])) for pair in X_train]) x_test = np.array([np.hstack( (pair[0], pair[1])) for pair in X_test]) Y_train_OH = encode_Y(Y_train) # Y_train_OH = to_categorical(Y_train, num_classes=510) y_test = encode_Y(Y_test) # y_test = to_categorical(Y_test, num_classes=510) from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(X_train_CAT, Y_train_OH, test_size = 0.3, random_state = 48) cnn = keras.models.Sequential() cnn.add(tf.keras.layers.Conv2D(32, (3, 3), activation = 'relu', input_shape = (28, 168, 1))) cnn.add(tf.keras.layers.MaxPooling2D(2, 2)) cnn.add(tf.keras.layers.Dropout(0.6)) cnn.add(tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu')) cnn.add(tf.keras.layers.MaxPooling2D(2, 2)) cnn.add(tf.keras.layers.Dropout(0.6)) cnn.add(tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu')) cnn.add(tf.keras.layers.Flatten()) cnn.add(tf.keras.layers.Dense(128, activation = 'relu')) # cnn.add(tf.keras.layers.Dropout(0.5)) cnn.compile(optimizer = 'rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy']) cnn.summary() model = keras.models.Sequential() model.add(cnn) # lstm model.add(tf.keras.layers.Reshape((2, 64))) model.add(tf.keras.layers.LSTM(128, kernel_initializer = 'random_normal', recurrent_initializer = 'random_normal')) model.add(tf.keras.layers.RepeatVector(3)) model.add(tf.keras.layers.LSTM(128, return_sequences=True)) model.add(tf.keras.layers.TimeDistributed(keras.layers.Dense(10, activation='softmax'))) #convlstm # model = keras.models.Sequential() # model.add(tf.keras.layers.ConvLSTM2D(filters = 32, kernel_size = (3,3), activation = 'relu', # padding = 'same', dropout = .2, input_shape = (28,168,1))) # 
model.add(tf.keras.layers.RepeatVector(3)) # model.add(tf.keras.layers.ConvLSTM2D(filters = 32, kernel_size = (3,3), activation = 'relu', # padding = 'same', dropout = .2 )) # model.add(tf.keras.layers.TimeDistributed(keras.layers.Dense(10, activation='softmax'))) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.summary() ``` ## Fitting ``` no_epochs = 30 checkpointer = keras.callbacks.ModelCheckpoint(filepath="end_to_end_2.hdf5", verbose=1, save_best_only=True) history = model.fit(x_train, y_train, epochs = no_epochs, validation_data= (x_val, y_val), callbacks=[checkpointer]) ``` ## Plotting We plot the loss and accuracy during training. We observed that our validation set is actually helping us. The accuracy on training grows quickly, but the one on validation stays the same. It means that we are **overfitting**. ``` plot_loss() plot_acc() ``` ## Evaluate the model We evaluate the model below, both with the network accuracy and the true accuracy. ``` model = keras.models.load_model("end_to_end_2.hdf5") model.evaluate(x_test, y_test) model = keras.models.load_model("end_to_end_2.hdf5") true_acc =true_accuracy(model,x_test,y_test) print("True accuracy : {}".format(true_acc)) ``` ## Further improvements We can add more dropout to prevent overfitting. Overfitting may also be caused by our model being too complex. Also tried using **Convolutional LSTM** layers to apply the *RNN* directly on the image, but was stopped by some keras error.
github_jupyter
<a href="https://colab.research.google.com/github/geansm2/PI2B/blob/master/Analise_Exploratoria_DIO.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #Importando as bibliotecas import pandas as pd import matplotlib.pyplot as plt plt.style.use("seaborn") #Upload do arquivo from google.colab import files arq = files.upload() #Criando nosso DataFrame df = pd.read_excel("AdventureWorks.xlsx") #Visualizando as 5 primeiras linhas df.head() #Quantidade de linhas e colunas df.shape #Verificando os tipos de dados df.dtypes #Qual a Receita total? df["Valor Venda"].sum() #Qual o custo Total? df["custo"] = df["Custo Unitário"].mul(df["Quantidade"]) #Criando a coluna de custo df.head(1) #Qual o custo Total? round(df["custo"].sum(), 2) #Agora que temos a receita e custo e o total, podemos achar o Lucro total #Vamos criar uma coluna de Lucro que será Receita - Custo df["lucro"] = df["Valor Venda"] - df["custo"] df.head(1) #Total Lucro round(df["lucro"].sum(),2) #Criando uma coluna com total de dias para enviar o produto df["Tempo_envio"] = df["Data Envio"] - df["Data Venda"] df.head(1) ``` **Agora, queremos saber a média do tempo de envio para cada Marca, e para isso precisamos transformar a coluna Tempo_envio em númerica** ``` #Extraindo apenas os dias df["Tempo_envio"] = (df["Data Envio"] - df["Data Venda"]).dt.days df.head(1) #Verificando o tipo da coluna Tempo_envio df["Tempo_envio"].dtype #Média do tempo de envio por Marca df.groupby("Marca")["Tempo_envio"].mean() ``` **Missing Values** ``` #Verificando se temos dados faltantes df.isnull().sum() ``` **E, se a gente quiser saber o Lucro por Ano e Por Marca?** ``` #Vamos Agrupar por ano e marca df.groupby([df["Data Venda"].dt.year, "Marca"])["lucro"].sum() pd.options.display.float_format = '{:20,.2f}'.format #Resetando o index lucro_ano = df.groupby([df["Data Venda"].dt.year, "Marca"])["lucro"].sum().reset_index() lucro_ano #Qual o total de produtos 
vendidos? df.groupby("Produto")["Quantidade"].sum().sort_values(ascending=False) #Gráfico Total de produtos vendidos df.groupby("Produto")["Quantidade"].sum().sort_values(ascending=True).plot.barh(title="Total Produtos Vendidos") plt.xlabel("Total") plt.ylabel("Produto"); df.groupby(df["Data Venda"].dt.year)["lucro"].sum().plot.bar(title="Lucro x Ano") plt.xlabel("Ano") plt.ylabel("Receita"); df.groupby(df["Data Venda"].dt.year)["lucro"].sum() #Selecionando apenas as vendas de 2009 df_2009 = df[df["Data Venda"].dt.year == 2009] df_2009.head() df_2009.groupby(df_2009["Data Venda"].dt.month)["lucro"].sum().plot(title="Lucro x Mês") plt.xlabel("Mês") plt.ylabel("Lucro"); df_2009.groupby("Marca")["lucro"].sum().plot.bar(title="Lucro x Marca") plt.xlabel("Marca") plt.ylabel("Lucro") plt.xticks(rotation='horizontal'); df_2009.groupby("Classe")["lucro"].sum().plot.bar(title="Lucro x Classe") plt.xlabel("Classe") plt.ylabel("Lucro") plt.xticks(rotation='horizontal'); df["Tempo_envio"].describe() #Gráfico de Boxplot plt.boxplot(df["Tempo_envio"]); #Histograma plt.hist(df["Tempo_envio"]); #Tempo mínimo de envio df["Tempo_envio"].min() #Tempo máximo de envio df['Tempo_envio'].max() #Identificando o Outlier df[df["Tempo_envio"] == 20] df.to_csv("df_vendas_novo.csv", index=False) ```
github_jupyter
## 1. Winter is Coming. Let's load the dataset ASAP! <p>If you haven't heard of <em>Game of Thrones</em>, then you must be really good at hiding. Game of Thrones is the hugely popular television series by HBO based on the (also) hugely popular book series <em>A Song of Ice and Fire</em> by George R.R. Martin. In this notebook, we will analyze the co-occurrence network of the characters in the Game of Thrones books. Here, two characters are considered to co-occur if their names appear in the vicinity of 15 words from one another in the books. </p> <p><img src="https://assets.datacamp.com/production/project_76/img/got_network.jpeg" style="width: 550px"></p> <p>This dataset constitutes a network and is given as a text file describing the <em>edges</em> between characters, with some attributes attached to each edge. Let's start by loading in the data for the first book <em>A Game of Thrones</em> and inspect it.</p> ``` # Importing modules # ... YOUR CODE FOR TASK 1 ... import pandas as pd # Reading in datasets/book1.csv book1 = pd.read_csv('datasets/book1.csv') book1.head() # Printing out the head of the dataset # ... YOUR CODE FOR TASK 1 ... ``` ## 2. Time for some Network of Thrones <p>The resulting DataFrame <code>book1</code> has 5 columns: <code>Source</code>, <code>Target</code>, <code>Type</code>, <code>weight</code>, and <code>book</code>. Source and target are the two nodes that are linked by an edge. A network can have directed or undirected edges and in this network all the edges are undirected. The weight attribute of every edge tells us the number of interactions that the characters have had over the book, and the book column tells us the book number.</p> <p>Once we have the data loaded as a pandas DataFrame, it's time to create a network. We will use <code>networkx</code>, a network analysis library, and create a graph object for the first book.</p> ``` # Importing modules # ... YOUR CODE FOR TASK 2 ... 
import networkx as nx # Creating an empty graph object G_book1 = nx.Graph() ``` ## 3. Populate the network with the DataFrame <p>Currently, the graph object <code>G_book1</code> is empty. Let's now populate it with the edges from <code>book1</code>. And while we're at it, let's load in the rest of the books too!</p> ``` # Iterating through the DataFrame to add edges # ... YOUR CODE FOR TASK 3 ... for _, edge in book1.iterrows(): G_book1.add_edge(edge['Source'], edge['Target'], weight=edge['weight']) # Creating a list of networks for all the books books = [G_book1] book_fnames = ['datasets/book2.csv', 'datasets/book3.csv', 'datasets/book4.csv', 'datasets/book5.csv'] for book_fname in book_fnames: book = pd.read_csv(book_fname) G_book = nx.Graph() for _, edge in book.iterrows(): G_book.add_edge(edge['Source'], edge['Target'], weight=edge['weight']) books.append(G_book) ``` ## 4. The most important character in Game of Thrones <p>Is it Jon Snow, Tyrion, Daenerys, or someone else? Let's see! Network science offers us many different metrics to measure the importance of a node in a network. Note that there is no "correct" way of calculating the most important node in a network, every metric has a different meaning.</p> <p>First, let's measure the importance of a node in a network by looking at the number of neighbors it has, that is, the number of nodes it is connected to. For example, an influential account on Twitter, where the follower-followee relationship forms the network, is an account which has a high number of followers. 
This measure of importance is called <em>degree centrality</em>.</p> <p>Using this measure, let's extract the top ten important characters from the first book (<code>book[0]</code>) and the fifth book (<code>book[4]</code>).</p> ``` # Calculating the degree centrality of book 1 deg_cen_book1 = nx.degree_centrality(books[0]) # Calculating the degree centrality of book 5 deg_cen_book5 = nx.degree_centrality(books[4]) # Sorting the dictionaries according to their degree centrality and storing the top 10 sorted_deg_cen_book1 = sorted(deg_cen_book1.items(), key=lambda x: x[1], reverse=True)[0:10] # Sorting the dictionaries according to their degree centrality and storing the top 10 sorted_deg_cen_book5 = sorted(deg_cen_book5.items(), key=lambda x: x[1], reverse=True)[0:10] print(sorted_deg_cen_book1) print(sorted_deg_cen_book5) # Printing out the top 10 of book1 and book5 # ... YOUR CODE FOR TASK 4 ... ``` ## 5. The evolution of character importance <p>According to degree centrality, the most important character in the first book is Eddard Stark but he is not even in the top 10 of the fifth book. The importance of characters changes over the course of five books because, you know, stuff happens... ;)</p> <p>Let's look at the evolution of degree centrality of a couple of characters like Eddard Stark, Jon Snow, and Tyrion, which showed up in the top 10 of degree centrality in the first book.</p> ``` %matplotlib inline # Creating a list of degree centrality of all the books evol = [nx.degree_centrality(book) for book in books] # Creating a DataFrame from the list of degree centralities in all the books degree_evol_df = pd.DataFrame.from_records(evol) degree_evol_df[['Eddard-Stark','Tyrion-Lannister','Jon-Snow']].plot() # Plotting the degree centrality evolution of Eddard-Stark, Tyrion-Lannister and Jon-Snow # ... YOUR CODE FOR TASK 5 ... ``` ## 6. What's up with Stannis Baratheon? <p>We can see that the importance of Eddard Stark dies off as the book series progresses. 
With Jon Snow, there is a drop in the fourth book but a sudden rise in the fifth book.</p> <p>Now let's look at various other measures like <em>betweenness centrality</em> and <em>PageRank</em> to find important characters in our Game of Thrones character co-occurrence network and see if we can uncover some more interesting facts about this network. Let's plot the evolution of betweenness centrality of this network over the five books. We will take the evolution of the top four characters of every book and plot it.</p> ``` # Creating a list of betweenness centrality of all the books just like we did for degree centrality evol = [nx.betweenness_centrality(book,weight='weight') for book in books] # Making a DataFrame from the list betweenness_evol_df = pd.DataFrame.from_records(evol) # Finding the top 4 characters in every book set_of_char = set() for i in range(5): set_of_char |= set(list(betweenness_evol_df.T[i].sort_values(ascending=False)[0:4].index)) list_of_char = list(set_of_char) betweenness_evol_df[list_of_char].plot() # Plotting the evolution of the top characters # ... YOUR CODE FOR TASK 6 ... ``` ## 7. What does Google PageRank tell us about GoT? <p>We see a peculiar rise in the importance of Stannis Baratheon over the books. In the fifth book, he is significantly more important than other characters in the network, even though he is the third most important character according to degree centrality.</p> <p>PageRank was the initial way Google ranked web pages. It evaluates the inlinks and outlinks of webpages in the world wide web, which is, essentially, a directed network. Let's look at the importance of characters in the Game of Thrones network according to PageRank. 
</p> ``` # Creating a list of pagerank of all the characters in all the books evol = [nx.pagerank(book) for book in books] # Making a DataFrame from the list pagerank_evol_df = pd.DataFrame.from_records(evol) # Finding the top 4 characters in every book set_of_char = set() for i in range(5): set_of_char |= set(list(pagerank_evol_df.T[i].sort_values(ascending=False)[0:4].index)) list_of_char = list(set_of_char) pagerank_evol_df[list_of_char].plot(figsize=(13, 7)) # Plotting the top characters # ... YOUR CODE FOR TASK 7 ... ``` ## 8. Correlation between different measures <p>Stannis, Jon Snow, and Daenerys are the most important characters in the fifth book according to PageRank. Eddard Stark follows a similar curve but for degree centrality and betweenness centrality: He is important in the first book but dies into oblivion over the book series.</p> <p>We have seen three different measures to calculate the importance of a node in a network, and all of them tells us something about the characters and their importance in the co-occurrence network. We see some names pop up in all three measures so maybe there is a strong correlation between them?</p> <p>Let's look at the correlation between PageRank, betweenness centrality and degree centrality for the fifth book using Pearson correlation.</p> ``` # Creating a list of pagerank, betweenness centrality, degree centrality # of all the characters in the fifth book. measures = [nx.pagerank(books[4]), nx.betweenness_centrality(books[4], weight='weight'), nx.degree_centrality(books[4])] # Creating the correlation DataFrame cor = pd.DataFrame.from_records(measures) cor.corr() # Calculating the correlation # ... YOUR CODE FOR TASK 8 ... ``` ## 9. Conclusion <p>We see a high correlation between these three measures for our character co-occurrence network.</p> <p>So we've been looking at different ways to find the important characters in the Game of Thrones co-occurrence network. 
According to degree centrality, Eddard Stark is the most important character initially in the books. But who is/are the most important character(s) in the fifth book according to these three measures? </p> ``` # Finding the most important character in the fifth book, # according to degree centrality, betweenness centrality and pagerank. p_rank, b_cent, d_cent = cor.idxmax(axis=1) # Printing out the top character according to the three measures # ... YOUR CODE FOR TASK 9 ... ```
github_jupyter
## CNN on MNIST digits classification This example is the same as the MLP for MNIST classification. The difference is we are going to use `Conv2D` layers instead of `Dense` layers. The model that will be constructed below is made of: - First 2 layers - `Conv2D-ReLU-MaxPool` - 3rd layer - `Conv2D-ReLU` - 4th layer - `Dense(10)` - Output Activation - `softmax` - Optimizer - `SGD` Let us first load the packages and perform the initial pre-processing such as loading the dataset, performing normalization and conversion of labels to one-hot. Recall that in our `3-Dense` MLP example, we achieved ~95.3% accuracy at 269k parameters. Here, we can achieve ~98.5% using 105k parameters. CNN is more parameter efficient. ``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Dense, Dropout from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten from tensorflow.keras.utils import to_categorical, plot_model from tensorflow.keras.datasets import mnist # load mnist dataset (x_train, y_train), (x_test, y_test) = mnist.load_data() # compute the number of labels num_labels = len(np.unique(y_train)) # convert to one-hot vector y_train = to_categorical(y_train) y_test = to_categorical(y_test) # input image dimensions image_size = x_train.shape[1] # resize and normalize x_train = np.reshape(x_train,[-1, image_size, image_size, 1]) x_test = np.reshape(x_test,[-1, image_size, image_size, 1]) x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 ``` ### Hyper-parameters These hyper-parameters are similar to our MLP example. The differences are `kernel_size = 3` which is a typical kernel size in most CNNs and `filters = 64`. 
``` # network parameters # image is processed as is (square grayscale) input_shape = (image_size, image_size, 1) batch_size = 128 kernel_size = 3 filters = 64 ``` ### Sequential Model Building The model is similar to our previous example in MLP. The difference is we use `Conv2D` instead of `Dense`. Note that due to mismatch in dimensions, the output of the last `Conv2D` is flattened via `Flatten()` layer to suit the input vector dimensions of the `Dense`. Note that though we use `Activation(softmax)` as the last layer, this can also be integrated within the `Dense` layer in the parameter `activation='softmax'`. Both are the same. ``` # model is a stack of CNN-ReLU-MaxPooling model = Sequential() model.add(Conv2D(filters=filters, kernel_size=kernel_size, activation='relu', padding='same', input_shape=input_shape)) model.add(MaxPooling2D()) model.add(Conv2D(filters=filters, kernel_size=kernel_size, padding='same', activation='relu')) model.add(MaxPooling2D()) model.add(Conv2D(filters=filters, kernel_size=kernel_size, padding='same', activation='relu')) model.add(Flatten()) # dropout added as regularizer # model.add(Dropout(dropout)) # output layer is 10-dim one-hot vector model.add(Dense(num_labels)) model.add(Activation('softmax')) model.summary() ``` ## Model Training and Evaluation After building the model, it is time to train and evaluate. This part is similar to MLP training and evaluation. ``` #plot_model(model, to_file='cnn-mnist.png', show_shapes=True) # loss function for one-hot vector # use of sgd optimizer # accuracy is good metric for classification tasks model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # train the network model.fit(x_train, y_train, epochs=20, batch_size=batch_size) loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size) print("\nTest accuracy: %.1f%%" % (100.0 * acc)) ```
github_jupyter
<a href="https://colab.research.google.com/github/BNN-UPC/ignnition/blob/ignnition-nightly/notebooks/shortest_path.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # IGNNITION: Quick start tutorial ### **Problem**: Find the shortest path in graphs with a Graph Neural Network Find more details on this quick-start tutorial at: https://ignnition.net/doc/quick_tutorial/ --- # Prepare the environment #### **Note**: Follow the instructions below to finish the installation ``` #@title Installing libraries and load resources #@markdown ####Hit **"enter"** to complete the installation of libraries !add-apt-repository ppa:deadsnakes/ppa !apt-get update !apt-get install python3.7 !python -m pip install --upgrade pip !pip install jupyter-client==6.1.5 !pip install ignnition==1.2.2 !pip install ipython-autotime #@title Import libraries { form-width: "30%" } import networkx as nx import random import json from networkx.readwrite import json_graph import os import ignnition %load_ext tensorboard %load_ext autotime #@markdown #### Download three YAML files we will need after (train_options.yaml, model_description.yaml, global_variables.yaml) # Download YAML files for this tutorial !curl -O https://raw.githubusercontent.com/BNN-UPC/ignnition/ignnition-nightly/examples/Shortest_Path/train_options.yaml !curl -O https://raw.githubusercontent.com/BNN-UPC/ignnition/ignnition-nightly/examples/Shortest_Path/global_variables.yaml !curl -O https://raw.githubusercontent.com/BNN-UPC/ignnition/ignnition-nightly/examples/Shortest_Path/model_description.yaml #@title Generate the datasets (training and validation) import os def generate_random_graph(min_nodes, max_nodes, min_edge_weight, max_edge_weight, p): while True: # Create a random Erdos Renyi graph G = nx.erdos_renyi_graph(random.randint(min_nodes, max_nodes), p) complement = list(nx.k_edge_augmentation(G, k=1, partial=True)) G.add_edges_from(complement) 
nx.set_node_attributes(G, 0, 'src-tgt') nx.set_node_attributes(G, 0, 'sp') nx.set_node_attributes(G, 'node', 'entity') # Assign randomly weights to graph edges for (u, v, w) in G.edges(data=True): w['weight'] = random.randint(min_edge_weight, max_edge_weight) # Select a source and target nodes to compute the shortest path src, tgt = random.sample(list(G.nodes), 2) G.nodes[src]['src-tgt'] = 1 G.nodes[tgt]['src-tgt'] = 1 # Compute all the shortest paths between source and target nodes try: shortest_paths = list(nx.all_shortest_paths(G, source=src, target=tgt,weight='weight')) except: shortest_paths = [] # Check if there exists only one shortest path if len(shortest_paths) == 1: for node in shortest_paths[0]: G.nodes[node]['sp'] = 1 return nx.DiGraph(G) def generate_dataset(file_name, num_samples, min_nodes=5, max_nodes=15, min_edge_weight=1, max_edge_weight=10, p=0.3): samples = [] for _ in range(num_samples): G = generate_random_graph(min_nodes, max_nodes, min_edge_weight, max_edge_weight, p) G.remove_nodes_from([node for node, degree in dict(G.degree()).items() if degree == 0]) samples.append(json_graph.node_link_data(G)) with open(file_name, "w") as f: json.dump(samples, f) root_dir="./data" if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(root_dir+"/train"): os.makedirs(root_dir+"/train") if not os.path.exists(root_dir + "/validation"): os.makedirs(root_dir + "/validation") generate_dataset("./data/train/data.json", 20000) generate_dataset("./data/validation/data.json", 1000) ``` --- # GNN model training ``` #@title Remove all the models previously trained (CheckPoints) #@markdown (It is not needed to execute this the first time) ! rm -r ./CheckPoint ! rm -r ./computational_graphs #@title Load TensorBoard to visualize the evolution of learning metrics along training #@markdown **IMPORTANT NOTE**: Click on "settings" in the TensorBoard GUI and check the option "Reload data" to see the evolution in real time. 
Note you can set the reload time interval (in seconds). from tensorboard import notebook notebook.list() # View open TensorBoard instances dir="./CheckPoint" if not os.path.exists(dir): os.makedirs(dir) %tensorboard --logdir $dir # Para finalizar instancias anteriores de TensorBoard # !kill 2953 # !ps aux #@title Run the training of your GNN model #@markdown </u>**Note**</u>: You can stop the training whenever you want to continue making predictions below import ignnition model = ignnition.create_model(model_dir= './') model.computational_graph() model.train_and_validate() ``` --- # Make predictions ## (This can be only excuted once the training is finished or stopped) ``` #@title Load functions to generate random graphs and print them import os import networkx as nx import matplotlib.pyplot as plt import json from networkx.readwrite import json_graph import ignnition import numpy as np import random %load_ext autotime def generate_random_graph(min_nodes, max_nodes, min_edge_weight, max_edge_weight, p): while True: # Create a random Erdos Renyi graph G = nx.erdos_renyi_graph(random.randint(min_nodes, max_nodes), p) complement = list(nx.k_edge_augmentation(G, k=1, partial=True)) G.add_edges_from(complement) nx.set_node_attributes(G, 0, 'src-tgt') nx.set_node_attributes(G, 0, 'sp') nx.set_node_attributes(G, 'node', 'entity') # Assign randomly weights to graph edges for (u, v, w) in G.edges(data=True): w['weight'] = random.randint(min_edge_weight, max_edge_weight) # Select the source and target nodes to compute the shortest path src, tgt = random.sample(list(G.nodes), 2) G.nodes[src]['src-tgt'] = 1 G.nodes[tgt]['src-tgt'] = 1 # Compute all the shortest paths between source and target nodes try: shortest_paths = list(nx.all_shortest_paths(G, source=src, target=tgt,weight='weight')) except: shortest_paths = [] # Check if there exists only one shortest path if len(shortest_paths) == 1: if len(shortest_paths[0])>=3 and len(shortest_paths[0])<=5: for node in 
shortest_paths[0]: G.nodes[node]['sp'] = 1 return shortest_paths[0], nx.DiGraph(G) def print_graph_predictions(G, path, predictions,ax): predictions = np.array(predictions) node_border_colors = [] links = [] for i in range(len(path)-1): links.append([path[i], path[i+1]]) links.append([path[i+1], path[i]]) # Add colors to node borders for source and target nodes for node in G.nodes(data=True): if node[1]['src-tgt'] == 1: node_border_colors.append('red') else: node_border_colors.append('white') # Add colors for predictions [0,1] node_colors = predictions # Add colors for edges edge_colors = [] for edge in G.edges(data=True): e=[edge[0],edge[1]] if e in links: edge_colors.append('red') else: edge_colors.append('black') pos= nx.shell_layout(G) vmin = node_colors.min() vmax = node_colors.max() vmin = 0 vmax = 1 cmap = plt.cm.coolwarm nx.draw_networkx_nodes(G, pos=pos, node_color=node_colors, cmap=cmap, vmin=vmin, vmax=vmax, edgecolors=node_border_colors, linewidths=4, ax=ax) nx.draw_networkx_edges(G, pos=pos, edge_color=edge_colors, arrows=False, ax=ax, width=2) nx.draw_networkx_edge_labels(G, pos=pos, label_pos=0.5, edge_labels=nx.get_edge_attributes(G, 'weight'), ax=ax) sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax)) sm.set_array([]) plt.colorbar(sm, ax=ax) def print_graph_solution(G, path, predictions,ax, pred_th): predictions = np.array(predictions) node_colors = [] node_border_colors = [] links = [] for i in range(len(path)-1): links.append([path[i], path[i+1]]) links.append([path[i+1], path[i]]) # Add colors on node borders for source and target nodes for node in G.nodes(data=True): if node[1]['src-tgt'] == 1: node_border_colors.append('red') else: node_border_colors.append('white') # Add colors for predictions Blue or Red cmap = plt.cm.get_cmap('coolwarm') dark_red = cmap(1.0) for p in predictions: if p >= pred_th: node_colors.append(dark_red) else: node_colors.append('blue') # Add colors for edges edge_colors = [] for edge in 
G.edges(data=True): e=[edge[0],edge[1]] if e in links: edge_colors.append('red') else: edge_colors.append('black') pos= nx.shell_layout(G) nx.draw_networkx_nodes(G, pos=pos, node_color=node_colors, edgecolors=node_border_colors, linewidths=4, ax=ax) nx.draw_networkx_edges(G, pos=pos, edge_color=edge_colors, arrows=False, ax=ax, width=2) nx.draw_networkx_edge_labels(G, pos=pos, label_pos=0.5, edge_labels=nx.get_edge_attributes(G, 'weight'), ax=ax) def print_input_graph(G, ax): node_colors = [] node_border_colors = [] # Add colors to node borders for source and target nodes for node in G.nodes(data=True): if node[1]['src-tgt'] == 1: node_border_colors.append('red') else: node_border_colors.append('white') pos= nx.shell_layout(G) nx.draw_networkx_nodes(G, pos=pos, edgecolors=node_border_colors, linewidths=4, ax=ax) nx.draw_networkx_edges(G, pos=pos, arrows=False, ax=ax, width=2) nx.draw_networkx_edge_labels(G, pos=pos, label_pos=0.5, edge_labels=nx.get_edge_attributes(G, 'weight'), ax=ax) #@title Make predictions on random graphs #@markdown **NOTE**: IGNNITION will automatically load the latest trained model (CheckPoint) to make the predictions dataset_samples = [] sh_path, G = generate_random_graph(min_nodes=8, max_nodes=12, min_edge_weight=1, max_edge_weight=10, p=0.3) graph = G.to_undirected() dataset_samples.append(json_graph.node_link_data(G)) # write prediction dataset root_dir="./data" if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(root_dir+"/test"): os.makedirs(root_dir+"/test") with open(root_dir+"/test/data.json", "w") as f: json.dump(dataset_samples, f) # Make predictions predictions = model.predict() # Print the results fig, axes = plt.subplots(nrows=1, ncols=3) ax = axes.flatten() # Print input graph ax1 = ax[0] ax1.set_title("Input graph") print_input_graph(graph, ax1) # Print graph with predictions (soft values) ax1 = ax[1] ax1.set_title("GNN predictions (soft values)") print_graph_predictions(graph, sh_path, 
predictions[0], ax1) # Print solution of the GNN pred_th = 0.5 ax1 = ax[2] ax1.set_title("GNN solution (p >= "+str(pred_th)+")") print_graph_solution(graph, sh_path, predictions[0], ax1, pred_th) # Show plot in full screen plt.rcParams['figure.figsize'] = [10, 4] plt.rcParams['figure.dpi'] = 100 plt.tight_layout() plt.show() ``` --- # Try to improve your GNN model **Optional exercise**: The previous training was executed with some parameters set by default, so the accuracy of the GNN model is far from optimal. Here, we propose an alternative configuration that defines better training parameters for the GNN model. For this, you can check and modify the following YAML files to configure your GNN model: * /content/model_description.yaml -> GNN model description * /content/train_options.yaml -> Configuration of training parameters Try to define an optimizer with learning rate decay and set the number of samples and epochs adding the following lines in the train_options.yaml file: ``` optimizer: type: Adam learning_rate: # define a schedule type: ExponentialDecay initial_learning_rate: 0.001 decay_steps: 10000 decay_rate: 0.5 ... batch_size: 1 epochs: 150 epoch_size: 200 ``` Then, you can train a new model from scratch by executing al the code snippets from section "GNN model training" Please note that the training process may take quite a long time depending on the machine where it is executed. In this example, there are a total of 30,000 training samples: 1 sample/step * 200 steps/epoch * 150 epochs = 30.000 samples
github_jupyter
# Do IPAs Really Taste Better?? ## Introduction: The craft beer industry in the US has grown tremendously over the past decade. Of the types of beer that are new to the market, India Pale Ales (IPAs) seem to be the most popular. IPAs are known for their bold, bitter and hoppy taste, and while many fanatics can't get enough of this unique taste, the IPA critics are driven away by those exact features. This project aims to analyze the popularity of IPA's taste using a beer review dataset. ### Dataset: The dataset utilized for this analysis is the "Beer Profile and Ratings Data Set" (beer_profile_and_ratings.csv). The following information is posted on the Kaggle page where the dataset is located. (The link to full description and information: https://www.kaggle.com/ruthgn/beer-profile-and-ratings-data-set) Below is an overview/summary description of the dataset. This data set contains tasting profiles and consumer reviews for 3197 unique beers from 934 different breweries. It was created by integrating information from two existing data sets on Kaggle: **Beer Tasting Profiles Dataset** **1.5 Million Beer Reviews** ``` # Load necessary packages import pandas as pd import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') ``` Read dataset and inspect first few rows: ``` data = pd.read_csv('beer_profile_and_ratings.csv') data.head() ``` Look at the columns/features of this dataset: ``` data.describe() print(data.columns) ``` Verify that there are 3197 beer profiles in this dataset as stated in the description from the source. ``` print(data.shape) ``` ### Drilling down Since we want to analyze the taste of IPA compared to other styles of beer, we first need to see all varieties of beer styles included in this dataset. ``` data['Style'].unique() ``` As we can see above, the styles of beer are listed with their broad category, followed by a sub-style. For the purpose of this analysis, we are considering IPA at its broad category level. 
All sub-styles of IPA (American, English, Imperial) are considered IPAs. Here we extract the broad category by getting the first element (before '-'). ``` data['Style Category'] = data['Style'].str.rsplit(pat=' - ', expand=True)[0] ``` Then we look again at all values of broad category. ``` data['Style Category'].unique() ``` Check if there are any NaN values in the review_taste column, as we will focus on the analysis of this value. ``` data['review_taste'].isnull().values.any() ``` No NaN values in the review_taste column. Since we are particularly interested in IPAs, we can look at a distribution of taste reviews for all IPAs. ``` data[data['Style Category'] == 'IPA']['review_taste'].hist() ``` Most taste reviews for IPAs are at around 4, with some low tail between 2.5 and 3.5. Find average taste review of each style category. ``` style_taste_review_avg = data.groupby(by='Style Category')['review_taste'].mean() \ .rename('review_taste_avg') \ .reset_index() \ .sort_values(by='review_taste_avg', ascending=False) \ .reset_index(drop = True) ``` Display the average taste review by style category in descending order ``` style_taste_review_avg ``` From the above list, we can see the rank of **average taste review of IPA is 8th with an average taste review of 3.98**, after many other style categories. Wild Ale appears to be "the tastiest" style category with an average taste review of 4.30. Get top 10 best tasting beer style categories ``` top10_style = style_taste_review_avg.iloc[:10] top10_style ``` Next, we can create a visualization of the above information and **highlight IPA's spot (in orange)** in the top 10 beer style categories. 
``` x = top10_style['Style Category'] review_taste_avg = top10_style['review_taste_avg'] x_pos = [i for i, _ in enumerate(x)] color_list = ['royalblue']*7 + ['orange'] + ['royalblue']*3 plt.bar(x_pos, review_taste_avg, color=color_list) plt.xlabel("Beer Style Category") plt.ylabel("Average Taste Review") plt.title("Average Taste Review of Top 10 Beer Style Categories") plt.xticks(x_pos, x, rotation = 90) plt.show() ``` IPA certainly ranks high (at 8th place) among all beer styles in terms of taste. However, it appears that IPA's taste still doesn't beat that of some of the more traditional beer styles. Interestingly, the beers that rank higher in terms of taste than IPA **seem to (by definition) have higher alcohol content**. To see if this is true, we can see if the average alcohol by volume (ABV) of these style categories are higher than IPA. ``` style_abv_avg = data.groupby(by='Style Category')['ABV'].mean() \ .rename('abv_avg') \ .reset_index() \ .sort_values(by='abv_avg', ascending=False) \ .reset_index(drop = True) style_abv_avg ``` In this list, we see many overlapping style categories in the top ranking spots with the taste ranking. Specifically, **Barleywine, Quadrupel, Bière de Champagne / Bière Brut, Old Ale, Wild Ale** all have higher contents than IPA. These also ranked higher in taste than IPA. 
``` abv_ipa_comp = style_abv_avg[:13] abv_ipa_comp x = abv_ipa_comp['Style Category'] abv_avg = abv_ipa_comp['abv_avg'] x_pos = [i for i, _ in enumerate(x)] color_list = ['red']*4 + ['cyan'] + ['royalblue'] + ['red'] + ['royalblue']+['cyan']+['royalblue']*3 + ['orange'] plt.bar(x_pos, abv_avg, color=color_list) plt.xlabel("Beer Style Category") plt.ylabel("Average Alcohol By Volume (ABV)") plt.title("Average Alcohol By Volume (ABV) by Beer Style Categories") plt.xticks(x_pos, x, rotation = 90) plt.show() ``` As we can see above, 5 beer style categories (red) that ranked higher in taste than IPA (Barleywine, Quadrupel, Bière de Champagne / Bière Brut, Old Ale, and Wild Ale) also have higher average ABV than IPA. On the other hand, there are 2 beer styles (cyan) with higher average ABV that ranked lower in taste than IPA (Tripel, Scotch Ale / Wee Heavy). The fact that there are 8 out of the top 10 beer categories in terms of taste among the highest ABV beer categories suggests that taste could be correlated with ABV. Perhaps beer drinkers collectively tend to prefer the taste of strong (high ABV) beers. ## Conclusion: **IPA seems to be a very popular beer category lately, and this analysis looked into the tastiness of IPA compared to other beer categories. In doing so, a potential feature that seems to be associated with tastiness was identified: alcohol content.** More analysis is needed to identify if there are other features/metrics associated with a beer being rated highly in tastiness. Some possible future analysis includes correlation analysis between each feature of all beer categories.
github_jupyter
# Lecture 3: Optimize, print and plot [Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2019) [<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2019/master?urlpath=lab/tree/03/Optimize_print_and_plot.ipynb) 1. [The consumer problem](#The-consumer-problem) 2. [Numerical python (numpy)](#Numerical-python-(numpy)) 3. [Utility function](#Utility-function) 4. [Algorithm 1: Simple loops](#Algorithm-1:-Simple-loops) 5. [Algorithm 2: Use monotonicity](#Algorithm-2:-Use-monotonicity) 6. [Algorithm 3: Call a solver](#Algorithm-3:-Call-a-solver) 7. [Indifference curves](#Indifference-curves) 8. [A classy solution](#A-classy-solution) 9. [Summary](#Summary) You will learn how to work with numerical data (**numpy**) and solve simple numerical optimization problems (**scipy.optimize**) and report the results both in text (**print**) and in figures (**matplotlib**). **Links:**: - **print**: [examples](https://www.python-course.eu/python3_formatted_output.php) (very detailed) - **numpy**: [detailed tutorial](https://www.python-course.eu/numpy.php) - **matplotlib**: [examples](https://matplotlib.org/tutorials/introductory/sample_plots.html#sphx-glr-tutorials-introductory-sample-plots-py), [documentation](https://matplotlib.org/users/index.html), [styles](https://matplotlib.org/3.1.0/gallery/style_sheets/style_sheets_reference.html) - **scipy-optimize**: [documentation](https://docs.scipy.org/doc/scipy/reference/optimize.html) <a id="The-consumer-problem"></a> # 1. 
The consumer problem Consider the following 2-good consumer problem with * utility function $u(x_1,x_2):\mathbb{R}^2_{+}\rightarrow\mathbb{R}$, * exogenous income $I$, and * price-vector $(p_1,p_2)$, given by $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1},x_{2}}u(x_{1},x_{2})\\ \text{s.t.}\\ p_{1}x_{1}+p_{2}x_{2} & \leq I,\,\,\,p_{1},p_{2},I>0\\ x_{1},x_{2} & \geq 0 \end{aligned} $$ **Specific example:** Let the utility function be Cobb-Douglas, $$ u(x_1,x_2) = x_1^{\alpha}x_2^{1-\alpha} $$ We then know the solution is given by $$ \begin{aligned} x_1^{\ast} &= \alpha \frac{I}{p_1} \\ x_2^{\ast} &= (1-\alpha) \frac{I}{p_2} \end{aligned} $$ which implies that $\alpha$ is the budget share of the first good and $1-\alpha$ is the budget share of the second good. <a id="Numerical-python-(numpy)"></a> # 2. Numerical python (numpy) ``` import numpy as np # import the numpy module ``` A **numpy array** is like a list, but with two important differences: 1. Elements must be of **one homogenous type** 2. 
A **slice returns a view** rather than extract content ## 2.1 Basics Numpy arrays can be **created from lists** and can be **multi-dimensional**: ``` A = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) # one dimension B = np.array([[3.4, 8.7, 9.9], [1.1, -7.8, -0.7], [4.1, 12.3, 4.8]]) # two dimensions print(type(A),type(B)) # type print(A.dtype,B.dtype) # data type print(A.ndim,B.ndim) # dimensions print(A.shape,B.shape) # shape (1d: (columns,), 2d: (row,columns)) print(A.size,B.size) # size ``` **Slicing** a numpy array returns a **view**: ``` A = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) B = A.copy() # a copy of A C = A[2:6] # a view into A C[0] = 0 C[1] = 0 print(A) # changed print(B) # not changed ``` Numpy array can also be created using numpy functions: ``` print(np.ones((2,3))) print(np.zeros((4,2))) print(np.linspace(0,1,6)) # linear spacing ``` **Tip 1:** Try pressing <kbd>Shift</kbd>+<kbd>Tab</kbd> inside a function.<br> **Tip 2:** Try to write `?np.linspace` in a cell ``` ?np.linspace ``` ## 2.2 Math Standard **mathematical operations** can be applied: ``` A = np.array([[1,0],[0,1]]) B = np.array([[2,2],[2,2]]) print(A+B) print(A-B) print(A*B) # element-by-element product print(A/B) # element-by-element division print(A@B) # matrix product ``` If arrays does not fit together **broadcasting** is applied. Here is an example with multiplication: ``` A = np.array([ [10, 20, 30], [40, 50, 60] ]) # shape = (2,3) B = np.array([1, 2, 3]) # shape = (3,) = (1,3) C = np.array([[1],[2]]) # shape = (2,1) print(A) print(A*B) # every row is multiplied by B print(A*C) # every column is multiplied by C ``` **General rule:** Numpy arrays can be added/substracted/multiplied/divided if they in all dimensions have the same size or one of them has a size of one. If the numpy arrays differ in number of dimensions, this only has to be true for the (inner) dimensions they share. 
**More on broadcasting:** [Documentation](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html). A lot of **mathematical procedures** can easily be performed on numpy arrays. ``` A = np.array([3.1, 2.3, 9.1, -2.5, 12.1]) print(np.min(A)) # find minimum print(np.argmin(A)) # find index for minimum print(np.mean(A)) # calculate mean print(np.sort(A)) # sort (ascending) ``` **Note:** Sometimes a method can be used instead of a function, e.g. ``A.mean()``. Personally, I typically stick to functions because that always works. ## 2.3 Indexing **Multi-dimensional** indexing is done as: ``` X = np.array([ [11, 12, 13], [21, 22, 23] ]) print(X) print(X[0,0]) # first row, first column print(X[0,1]) # first row, second column print(X[1,2]) # second row, third column X[0] ``` Indexes can be **logical**. Logical 'and' is `&` and logical 'or' is `|`. ``` A = np.array([1,2,3,4,1,2,3,4]) B = np.array([3,3,3,3,2,2,2,2]) I = (A < 3) & (B == 3) # note & instead of 'and' print(type(I),I.dtype) print(I) print(A[I]) I = (A < 3) | (B == 3) # note | instead of 'or' print(A[I]) ``` ## 2.4 List of good things to know **Attributes and methods** to know: - size / ndim / shape - ravel / reshape / sort - copy **Functions** to know: - array / empty / zeros / ones / linspace - mean / median / std / var / sum / percentile - min/max, argmin/argmax / fmin / fmax / sort / clip - meshgrid / hstack / vstack / concatenate / tile / insert - allclose / isnan / isinf / isfinite / any / all **Concepts** to know: - view vs. copy - broadcasting - logical indexing **Question:** Consider the following code: ``` A = np.array([1,2,3,4,5]) B = A[3:] B[:] = 0 ``` What is `np.sum(A)` equal to? - **A:** 15 - **B:** 10 - **C:** 6 - **D:** 0 - **E:** Don't know ## 2.5 Extra: Memory Memory is structured in **rows**: ``` A = np.array([[3.1,4.2],[5.7,9.3]]) B = A.ravel() # one-dimensional view of A print(A.shape,A[0,:]) print(B.shape,B) ``` <a id="Utility-function"></a> # 3. 
Utility function Define the utility function: ``` def u_func(x1,x2,alpha=0.50): return x1**alpha*x2**(1-alpha) # x1,x2 are positional arguments # alpha is a keyword argument with default value 0.50 ``` ## 3.1 Print to screen Print a **single evaluation** of the utility function. ``` x1 = 1 x2 = 3 u = u_func(x1,x2) # f'text' is called a "formatted string" # {x1:.3f} prints variable x1 as floating point number with 3 decimals print(f'x1 = {x1:.3f}, x2 = {x2:.3f} -> u = {u:.3f}') print(u) ``` Print **multiple evaluations** of the utility function. ``` x1_list = [2,4,6,8,10,12] x2 = 3 for x1 in x1_list: # loop through each element in x1_list u = u_func(x1,x2,alpha=0.25) print(f'x1 = {x1:.3f}, x2 = {x2:.3f} -> u = {u:.3f}') ``` And a little nicer... ``` for i,x1 in enumerate(x1_list): # i is a counter u = u_func(x1,x2,alpha=0.25) print(f'{i:2d}: x1 = {x1:<6.3f} x2 = {x2:<6.3f} -> u = {u:<6.3f}') # {i:2d}: integer a width of 2 (right-aligned) # {x1:<6.3f}: float width of 6 and 3 decimals (<, left-aligned) ``` **Task**: Write a loop printing the results shown in the answer below. ``` # write your code here ``` **Answer:** ``` for i,x1 in enumerate(x1_list): # i is a counter u = u_func(x1,x2,alpha=0.25) print(f'{i:2d}: u({x1:.2f},{x1:.2f}) = {u:.4f}') ``` **More formatting options?** See these [examples](https://www.python-course.eu/python3_formatted_output.php). 
## 3.2 Print to file Open a text-file and write lines in it: ``` with open('somefile.txt', 'w') as the_file: # 'w' is for 'write' for i, x1 in enumerate(x1_list): u = u_func(x1,x2,alpha=0.25) text = f'{i+10:2d}: x1 = {x1:<6.3f} x2 = {x2:<6.3f} -> u = {u:<6.3f}' the_file.write(text + '\n') # \n gives a lineshift # note: the with clause ensures that the file is properly closed afterwards ``` Open a text-file and read the lines in it and then print them: ``` with open('somefile.txt', 'r') as the_file: # 'r' is for 'read' lines = the_file.readlines() for line in lines: print(line,end='') # end='' removes the extra lineshift print creates ``` > **Note:** You could also write tables in LaTeX format and the import them in your LaTeX document. ## 3.3 Calculate the utility function on a grid **Calculate the utility function** on a 2-dimensional grid with $N$ elements in each dimension: ``` # a. settings N = 100 # number of elements x_max = 10 # maximum value # b. allocate numpy arrays shape_tuple = (N,N) x1_values = np.empty(shape_tuple) # allocate 2d numpy array with shape=(N,N) x2_values = np.empty(shape_tuple) u_values = np.empty(shape_tuple) # c. fill numpy arrays for i in range(N): # 0,1,...,N-1 for j in range(N): # 0,1,...,N-1 x1_values[i,j] = (i/(N-1))*x_max # in [0,x_max] x2_values[i,j] = (j/(N-1))*x_max # in [0,x_max] u_values[i,j] = u_func(x1_values[i,j],x2_values[i,j],alpha=0.25) ``` **Alternatively:** Use internal numpy functions: ``` x_vec = np.linspace(0,x_max,N) x1_values_alt,x2_values_alt = np.meshgrid(x_vec,x_vec,indexing='ij') u_values_alt = u_func(x1_values_alt,x2_values_alt,alpha=0.25) ``` Test whether the results are the same: ``` # a. maximum absolute difference max_abs_diff = np.max(np.abs(u_values-u_values_alt)) print(max_abs_diff) # very close to zero # b. test if all values are "close" print(np.allclose(u_values,u_values_alt)) ``` **Note:** The results are not exactly the same due to floating point arithmetics. 
## 3.4 Plot the utility function Import modules and state that the figures should be inlined: ``` %matplotlib inline import matplotlib.pyplot as plt # baseline modul from mpl_toolkits.mplot3d import Axes3D # for 3d figures plt.style.use('seaborn-whitegrid') # whitegrid nice with 3d ``` Construct the actual plot: ``` fig = plt.figure() # create the figure ax = fig.add_subplot(1,1,1,projection='3d') # create a 3d axis in the figure ax.plot_surface(x1_values,x2_values,u_values); # create surface plot in the axis # note: fig.add_subplot(a,b,c) creates the c'th subplot in a grid of a times b plots ``` Make the figure **zoomable** and **panable** using a widget: ``` %matplotlib widget fig = plt.figure() # create the figure ax = fig.add_subplot(1,1,1,projection='3d') # create a 3d axis in the figure ax.plot_surface(x1_values,x2_values,u_values); # create surface plot in the axis ``` Turn back to normal inlining: ``` %matplotlib inline ``` **Extensions**: Use a colormap, make it pretier, and save to disc. ``` from matplotlib import cm # for colormaps # a. actual plot fig = plt.figure() ax = fig.add_subplot(1,1,1,projection='3d') ax.plot_surface(x1_values,x2_values,u_values,cmap=cm.jet) # b. add labels ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$u$') # c. invert xaxis ax.invert_xaxis() # d. save fig.tight_layout() fig.savefig('someplot.pdf') # or e.g. .png ``` **More formatting options?** See these [examples](https://matplotlib.org/tutorials/introductory/sample_plots.html#sphx-glr-tutorials-introductory-sample-plots-py). **Task**: Construct the following plot: ![wireframeplot](https://github.com/NumEconCopenhagen/lectures-2019/raw/master/03/someplot_wireframe.png) **Answer:** ``` # write your code here # a. actual plot fig = plt.figure() ax = fig.add_subplot(1,1,1,projection='3d') ax.plot_wireframe(x1_values,x2_values,u_values,edgecolor='black') # b. add labels ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$u$') # c. 
invert xaxis ax.invert_xaxis() # e. save fig.tight_layout() fig.savefig('someplot_wireframe.png') fig.savefig('someplot_wireframe.pdf') ``` ## 3.5 Summary We have talked about: 1. Print (to screen and file) 2. Figures (matplotlib) **Other plotting libraries:** [seaborn](https://seaborn.pydata.org/) and [bokeh](https://bokeh.pydata.org/en/latest/). <a id="Algorithm-1:-Simple-loops"></a> # 4. Algorithm 1: Simple loops Remember the problem we wanted to solve: $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1},x_{2}}u(x_{1},x_{2})\\ & \text{s.t.}\\ p_{1}x_{1}+p_{2}x_{2} & \leq I,\,\,\,p_{1},p_{2},I>0\\ x_{1},x_{2} & \geq 0 \end{aligned} $$ **Idea:** Loop through a grid of $N_1 \times N_2$ possible solutions. This is the same as solving: $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1}\in X_1,x_{2} \in X_2} x_1^{\alpha}x_2^{1-\alpha}\\ & \text{s.t.}\\ X_1 & = \left\{0,\frac{1}{N_1-1}\frac{I}{p_1},\frac{2}{N_1-1}\frac{I}{p_1},\dots,\frac{I}{p_1}\right\} \\ X_2 & = \left\{0,\frac{1}{N_2-1}\frac{I}{p_2},\frac{2}{N_2-1}\frac{ I}{p_2},\dots,\frac{ I}{p_2}\right\} \\ p_{1}x_{1}+p_{2}x_{2} & \leq I\\ \end{aligned} $$ Function doing just this: ``` def find_best_choice(alpha,I,p1,p2,N1,N2,do_print=True): # a. allocate numpy arrays shape_tuple = (N1,N2) x1_values = np.empty(shape_tuple) x2_values = np.empty(shape_tuple) u_values = np.empty(shape_tuple) # b. start from guess of x1=x2=0 x1_best = 0 x2_best = 0 u_best = u_func(0,0,alpha=alpha) # c. loop through all possibilities for i in range(N1): for j in range(N2): # i. x1 and x2 (chained assignment) x1_values[i,j] = x1 = (i/(N1-1))*I/p1 x2_values[i,j] = x2 = (j/(N2-1))*I/p2 # ii. utility if p1*x1+p2*x2 <= I: # u(x1,x2) if expenditures <= income u_values[i,j] = u_func(x1,x2,alpha=alpha) else: # u(0,0) if expenditures > income u_values[i,j] = u_func(0,0,alpha=alpha) # iii. check if best sofar if u_values[i,j] > u_best: x1_best = x1_values[i,j] x2_best = x2_values[i,j] u_best = u_values[i,j] # d. 
print if do_print: print_solution(x1_best,x2_best,u_best,I,p1,p2) return x1_best,x2_best,u_best,x1_values,x2_values,u_values # function for printing the solution def print_solution(x1,x2,u,I,p1,p2): print(f'x1 = {x1:.8f}') print(f'x2 = {x2:.8f}') print(f'u = {u:.8f}') print(f'I-p1*x1-p2*x2 = {I-p1*x1-p2*x2:.8f}') ``` Call the function: ``` sol = find_best_choice(alpha=0.25,I=20,p1=1,p2=2,N1=500,N2=400) ``` Plot the solution: ``` %matplotlib widget # a. unpack solution x1_best,x2_best,u_best,x1_values,x2_values,u_values = sol # b. setup figure fig = plt.figure(dpi=100,num='') ax = fig.add_subplot(1,1,1,projection='3d') # c. plot 3d surface of utility values for different choices ax.plot_surface(x1_values,x2_values,u_values,cmap=cm.jet) ax.invert_xaxis() # d. plot optimal choice ax.scatter(x1_best,x2_best,u_best,s=50,color='black'); %matplotlib inline ``` **Task**: Can you find a better solution with higher utility and lower left-over income, $I-p_1 x_1-p_2 x_2$? ``` # write your code here # sol = find_best_choice() ``` **Answer:** ``` sol = find_best_choice(alpha=0.25,I=10,p1=1,p2=2,N1=1000,N2=1000) ``` <a id="Algorithm-2:-Use-monotonicity"></a> # 5. Algorithm 2: Use monotonicity **Idea:** Loop through a grid of $N$ possible solutions for $x_1$ and assume the remainder is spent on $x_2$. This is the same as solving: $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1}\in X_1} x_1^{\alpha}x_2^{1-\alpha}\\ \text{s.t.}\\ X_1 & = \left\{0,\frac{1}{N-1}\frac{}{p_1},\frac{2}{N-1}\frac{I}{p_1},\dots,\frac{I}{p_1}\right\} \\ x_{2} & = \frac{I-p_{1}x_{1}}{p_2}\\ \end{aligned} $$ Function doing just this: ``` def find_best_choice_monotone(alpha,I,p1,p2,N,do_print=True): # a. allocate numpy arrays shape_tuple = (N) x1_values = np.empty(shape_tuple) x2_values = np.empty(shape_tuple) u_values = np.empty(shape_tuple) # b. start from guess of x1=x2=0 x1_best = 0 x2_best = 0 u_best = u_func(0,0,alpha) # c. loop through all possibilities for i in range(N): # i. 
x1 x1_values[i] = x1 = i/(N-1)*I/p1 # ii. implied x2 x2_values[i] = x2 = (I-p1*x1)/p2 # iii. utility u_values[i] = u_func(x1,x2,alpha) if u_values[i] >= u_best: x1_best = x1_values[i] x2_best = x2_values[i] u_best = u_values[i] # d. print if do_print: print_solution(x1_best,x2_best,u_best,I,p1,p2) return x1_best,x2_best,u_best,x1_values,x2_values,u_values sol_monotone = find_best_choice_monotone(alpha=0.25,I=10,p1=1,p2=2,N=1000) ``` Plot the solution: ``` plt.style.use("seaborn") # a. create the figure fig = plt.figure(figsize=(10,4))# figsize is in inches... # b. unpack solution x1_best,x2_best,u_best,x1_values,x2_values,u_values = sol_monotone # c. left plot ax_left = fig.add_subplot(1,2,1) ax_left.plot(x1_values,u_values) ax_left.scatter(x1_best,u_best) ax_left.set_title('value of choice, $u(x_1,x_2)$') ax_left.set_xlabel('$x_1$') ax_left.set_ylabel('$u(x_1,(I-p_1 x_1)/p_2)$') ax_left.grid(True) # c. right plot ax_right = fig.add_subplot(1,2,2) ax_right.plot(x1_values,x2_values) ax_right.scatter(x1_best,x2_best) ax_right.set_title('implied $x_2$') ax_right.set_xlabel('$x_1$') ax_right.set_ylabel('$x_2$') ax_right.grid(True) ``` <a id="Algorithm-3:-Call-a-solver"></a> # 6. Algorithm 3: Call a solver ``` from scipy import optimize ``` Choose paramters: ``` alpha = 0.25 # preference parameter I = 10 # income p1 = 1 # price 1 p2 = 2 # price 2 ``` **Case 1**: Scalar solver using monotonicity. ``` # a. objective funciton (to minimize) def value_of_choice(x1,alpha,I,p1,p2): x2 = (I-p1*x1)/p2 return -u_func(x1,x2,alpha) # b. call solver sol_case1 = optimize.minimize_scalar( value_of_choice,method='bounded', bounds=(0,I/p1),args=(alpha,I,p1,p2)) # c. unpack solution x1 = sol_case1.x x2 = (I-p1*x1)/p2 u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` **Case 2**: Multi-dimensional constrained solver. ``` # a. objective function (to minimize) def value_of_choice(x,alpha,I,p1,p2): # note: x is a vector x1 = x[0] x2 = x[1] return -u_func(x1,x2,alpha) # b. 
constraints (violated if negative) and bounds constraints = ({'type': 'ineq', 'fun': lambda x: I-p1*x[0]-p2*x[1]}) bounds = ((0,I/p1),(0,I/p2)) # c. call solver initial_guess = [I/p1/2,I/p2/2] sol_case2 = optimize.minimize( value_of_choice,initial_guess,args=(alpha,I,p1,p2), method='SLSQP',bounds=bounds,constraints=constraints) # d. unpack solution x1 = sol_case2.x[0] x2 = sol_case2.x[1] u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` **Case 3**: Multi-dimensional unconstrained solver with constrains implemented via penalties. ``` # a. objective function (to minimize) def value_of_choice(x,alpha,I,p1,p2): # i. unpack x1 = x[0] x2 = x[1] # ii. penalty penalty = 0 E = p1*x1+p2*x2 # total expenses if E > I: # expenses > income -> not allowed fac = I/E penalty += 1000*(E-I) # calculate penalty x1 *= fac # force E = I x2 *= fac # force E = I return -u_func(x1,x2,alpha) # b. call solver initial_guess = [I/p1/2,I/p2/2] sol_case3 = optimize.minimize( value_of_choice,initial_guess,method='Nelder-Mead', args=(alpha,I,p1,p2)) # c. unpack solution x1 = sol_case3.x[0] x2 = sol_case3.x[1] u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` **Task:** Find the error in the code in the previous cell. ``` # write your code here ``` **Answer:** ``` # a. objective function (to minimize) def value_of_choice(x,alpha,I,p1,p2): # i. unpack x1 = x[0] x2 = x[1] # ii. penalty penalty = 0 E = p1*x1+p2*x2 # total expenses if E > I: # expenses > income -> not allowed fac = I/E penalty += 1000*(E-I) # calculate penalty x1 *= fac # force E = I x2 *= fac # force E = I return -u_func(x1,x2,alpha) + penalty # the error # b. call solver initial_guess = [I/p1/2,I/p2/2] sol_case3 = optimize.minimize( value_of_choice,initial_guess,method='Nelder-Mead', args=(alpha,I,p1,p2)) # c. unpack solution x1 = sol_case3.x[0] x2 = sol_case3.x[1] u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` <a id="Indifference-curves"></a> # 7. 
Indifference curves Remember that the indifference curve through the point $(y_1,y_2)$ is given by $$ \big\{(x_1,x_2) \in \mathbb{R}^2_+ \,|\, u(x_1,x_2) = u(y_1,y_2)\big\} $$ To find the indifference curve, we can fix a grid for $x_2$, and then find the corresponding $x_1$ which solves $u(x_1,x_2) = u(y_1,y_2)$ for each value of $x_2$. ``` def objective(x1,x2,alpha,u): return u_func(x1,x2,alpha)-u # = 0 then on indifference curve with utility = u def find_indifference_curve(y1,y2,alpha,N,x2_max): # a. utiltty in (y1,y2) u_y1y2 = u_func(y1,y2,alpha) # b. allocate numpy arrays x1_vec = np.empty(N) x2_vec = np.linspace(1e-8,x2_max,N) # c. loop through x2 for i,x2 in enumerate(x2_vec): x1_guess = 0 # initial guess sol = optimize.root(objective, x1_guess, args=(x2,alpha,u_y1y2)) # optimize.root -> solve objective = 0 starting from x1 = x1_guess x1_vec[i] = sol.x[0] return x1_vec,x2_vec ``` Find and plot an inddifference curve: ``` # a. find indifference curve through (2,2) for x2 in [0,10] x2_max = 10 x1_vec,x2_vec = find_indifference_curve(y1=2,y2=2,alpha=0.25,N=100,x2_max=x2_max) # b. plot inddifference curve fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) ax.plot(x1_vec,x2_vec) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_xlim([0,x2_max]) ax.set_ylim([0,x2_max]) ax.grid(True) ``` **Task:** Find the indifference curve through $x_1 = 15$ and $x_2 = 3$ with $\alpha = 0.5$. ``` # write your code here x2_max = 20 x1_vec,x2_vec = find_indifference_curve(y1=15,y2=3,alpha=0.5,N=100,x2_max=x2_max) fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) ax.plot(x1_vec,x2_vec) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_xlim([0,x2_max]) ax.set_ylim([0,x2_max]) ax.grid(True) ``` <a id="A-classy-solution"></a> # 8. A classy solution > **Note:** This section is advanced due to the use of a module with a class. It is, however, a good example of how to structure code for solving and illustrating a model. 
**Load module** I have written (consumer_module.py in the same folder as this notebook). ``` from consumer_module import consumer ``` ## 8.1 Jeppe Give birth to a consumer called **jeppe**: ``` jeppe = consumer() # create an instance of the consumer class called jeppe print(jeppe) ``` Solve **jeppe**'s problem. ``` jeppe.solve() print(jeppe) ``` ## 8.2 Mette Create a new consumer, called Mette, and solve her problem. ``` mette = consumer(alpha=0.25) mette.solve() mette.find_indifference_curves() print(mette) ``` Make an illustration of Mette's problem and it's solution: ``` fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) mette.plot_indifference_curves(ax) mette.plot_budgetset(ax) mette.plot_solution(ax) mette.plot_details(ax) ``` <a id="Summary"></a> # 9. Summary **This lecture:** We have talked about: 1. Numpy (view vs. copy, indexing, broadcasting, functions, methods) 2. Print (to screen and file) 3. Figures (matplotlib) 4. Optimization (using loops or scipy.optimize) 5. Advanced: Consumer class Most economic models contain optimizing agents solving a constrained optimization problem. The tools applied in this lecture is not specific to the consumer problem in anyway. **Your work:** Before solving Problem Set 1 read through this notebook and play around with the code. To solve the problem set, you only need to modify the code used here slightly. **Next lecture:** Random numbers and simulation.
github_jupyter
## An example Python data analysis notebook This page illustrates how to use Python to perform a simple but complete analysis: retrieve data, do some computations based on it, and visualise the results. **Don't worry if you don't understand everything on this page!** Its purpose is to give you an example of things you can do and how to go about doing them - you are not expected to be able to reproduce an analysis like this in Python at this stage! We will be looking at the concepts and practices introduced on this page as we go along the course. As we show the code for different parts of the work, we will be touching on various aspects you may want to keep in mind, either related to Python specifically, or to research programming more generally. ### Why write software to manage your data and plots? We can use programs for our entire research pipeline. Not just big scientific simulation codes, but also the small scripts which we use to tidy up data and produce plots. This should be code, so that the whole research pipeline is recorded for reproducibility. Data manipulation in spreadsheets is much harder to share or check. You can see another similar demonstration on the [software carpentry site](https://swcarpentry.github.io/python-novice-inflammation/01-numpy/index.html). We'll try to give links to other sources of Python training along the way. Part of our approach is that we assume you know how to use the internet! If you find something confusing out there, please bring it along to the next session. In this course, we'll always try to draw your attention to other sources of information about what we're learning. Paying attention to as many of these as you need to, is just as important as these core notes. ### Importing Libraries Research programming is all about using libraries: tools other people have provided programs that do many cool things. By combining them we can feel really powerful but doing minimum work ourselves. 
The python syntax to import someone else's library is "import". ``` import geopy # A python library for investigating geographic information. # https://pypi.org/project/geopy/ ``` Now, if you try to follow along on this example in an Jupyter notebook, you'll probably find that you just got an error message. You'll need to wait until we've covered installation of additional python libraries later in the course, then come back to this and try again. For now, just follow along and try get the feel for how programming for data-focused research works. ``` geocoder = geopy.geocoders.Nominatim(user_agent="my-application") geocoder.geocode('Cambridge', exactly_one=False) ``` The results come out as a **list** inside a list: `[Name, [Latitude, Longitude]]`. Programs represent data in a variety of different containers like this. ### Comments Code after a `#` symbol doesn't get run. ``` print("This runs") # print "This doesn't" # print This doesn't either ``` ### Functions We can wrap code up in a **function**, so that we can repeatedly get just the information we want. ``` def geolocate(place): return geocoder.geocode(place, exactly_one = False)[0][1] ``` Defining **functions** which put together code to make a more complex task seem simple from the outside is the most important thing in programming. The output of the function is stated by "return"; the input comes in in brackets after the function name: ``` geolocate('Cambridge') ``` ### Variables We can store a result in a variable: ``` london_location = geolocate("London") print(london_location) ``` ### More complex functions The Yandex API allows us to fetch a map of a place, given a longitude and latitude. The URLs look like: https://static-maps.yandex.ru/1.x/?size=400,400&ll=-0.1275,51.51&z=10&l=sat&lang=en_US We'll probably end up working out these URLs quite a bit. So we'll make ourselves another function to build up a URL given our parameters. 
``` import requests def request_map_at(lat, long, satellite=True, zoom=12, size=(400, 400)): base = "https://static-maps.yandex.ru/1.x/?" params = dict( z = zoom, size = str(size[0]) + "," + str(size[1]), ll = str(long) + "," + str(lat), l = "sat" if satellite else "map", lang = "en_US" ) return requests.get(base,params=params) map_response = request_map_at(51.5072, -0.1275) ``` ### Checking our work Let's see what URL we ended up with: ``` url = map_response.url print(url[0:50]) print(url[50:100]) print(url[100:]) ``` We can write **automated tests** so that if we change our code later, we can check the results are still valid. ``` from nose.tools import assert_in assert_in("https://static-maps.yandex.ru/1.x/?", url) assert_in("ll=-0.1275%2C51.5072", url) assert_in("z=12", url) assert_in("size=400%2C400", url) ``` Our previous function comes back with an Object representing the web request. In object oriented programming, we use the . operator to get access to a particular **property** of the object, in this case, the actual image at that URL is in the `content` property. It's a big file, so I'll just get the first few chars: ``` map_response.content[0:20] ``` ### Displaying results I'll need to do this a lot, so I'll wrap up our previous function in another function, to save on typing. ``` def map_at(*args, **kwargs): return request_map_at(*args, **kwargs).content ``` I can use a library that comes with Jupyter notebook to display the image. Being able to work with variables which contain images, or documents, or any other weird kind of data, just as easily as we can with numbers or letters, is one of the really powerful things about modern programming languages like Python. 
``` import IPython map_png = map_at(*london_location) print("The type of our map result is actually a: ", type(map_png)) IPython.core.display.Image(map_png) IPython.core.display.Image(map_at(*geolocate("New Delhi"))) ``` ### Manipulating Numbers Now we get to our research project: we want to find out how urbanised the world is, based on satellite imagery, along a line between two cites. We expect the satellite image to be greener in the countryside. We'll use lots more libraries to count how much green there is in an image. ``` from io import BytesIO # A library to convert between files and strings import numpy as np # A library to deal with matrices import imageio # A library to deal with images ``` Let's define what we count as green: ``` def is_green(pixels): threshold = 1.1 greener_than_red = pixels[:,:,1] > threshold * pixels[:,:,0] greener_than_blue = pixels[:,:,1] > threshold * pixels[:,:,2] green = np.logical_and(greener_than_red, greener_than_blue) return green ``` This code has assumed we have our pixel data for the image as a $400 \times 400 \times 3$ 3-d matrix, with each of the three layers being red, green, and blue pixels. 
We find out which pixels are green by comparing, element-by-element, the middle (green, number 1) layer to the top (red, zero) and bottom (blue, 2) Now we just need to parse in our data, which is a PNG image, and turn it into our matrix format: ``` def count_green_in_png(data): f = BytesIO(data) pixels = imageio.imread(f) # Get our PNG image as a numpy array return np.sum(is_green(pixels)) print(count_green_in_png( map_at(*london_location) )) ``` We'll also need a function to get an evenly spaced set of places between two endpoints: ``` def location_sequence(start, end, steps): lats = np.linspace(start[0], end[0], steps) # "Linearly spaced" data longs = np.linspace(start[1], end[1], steps) return np.vstack([lats, longs]).transpose() location_sequence(geolocate("London"), geolocate("Cambridge"), 5) ``` ### Creating Images We should display the green content to check our work: ``` def show_green_in_png(data): pixels = imageio.imread(BytesIO(data)) # Get our PNG image as rows of pixels green = is_green(pixels) out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :] buffer = BytesIO() result = imageio.imwrite(buffer, out, format='png') return buffer.getvalue() IPython.core.display.Image( map_at(*london_location, satellite=True) ) IPython.core.display.Image( show_green_in_png( map_at( *london_location, satellite=True))) ``` ### Looping We can loop over each element in out list of coordinates, and get a map for that place: ``` for location in location_sequence(geolocate("London"), geolocate("Birmingham"), 4): IPython.core.display.display( IPython.core.display.Image(map_at(*location))) ``` So now we can count the green from London to Birmingham! ``` [count_green_in_png(map_at(*location)) for location in location_sequence(geolocate("London"), geolocate("Birmingham"), 10)] ``` ### Plotting graphs Let's plot a graph. 
``` import matplotlib.pyplot as plt %matplotlib inline plt.plot([count_green_in_png(map_at(*location)) for location in location_sequence(geolocate("London"), geolocate("Birmingham"), 10)]) ``` From a research perspective, of course, this code needs a lot of work. But I hope the power of using programming is clear. ### Composing Program Elements We built little pieces of useful code, to: * Find latitude and longitude of a place * Get a map at a given latitude and longitude * Decide whether a (red,green,blue) triple is mainly green * Decide whether each pixel is mainly green * Plot a new image showing the green places * Find evenly spaced points between two places By putting these together, we can make a function which can plot this graph automatically for any two places: ``` def green_between(start, end,steps): return [count_green_in_png( map_at(*location) ) for location in location_sequence( geolocate(start), geolocate(end), steps)] plt.plot(green_between('New York', 'Chicago', 20)) ``` And that's it! We've covered, very very quickly, the majority of the python language, and much of the theory of software engineering. Now we'll go back, carefully, through all the concepts we touched on, and learn how to use them properly ourselves.
github_jupyter
# Advanced Logistic Regression in TensorFlow 2.0 ## Learning Objectives 1. Load a CSV file using Pandas 2. Create train, validation, and test sets 3. Define and train a model using Keras (including setting class weights) 4. Evaluate the model using various metrics (including precision and recall) 5. Try common techniques for dealing with imbalanced data like: Class weighting and Oversampling ## Introduction This lab how to classify a highly imbalanced dataset in which the number of examples in one class greatly outnumbers the examples in another. You will work with the [Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud) dataset hosted on Kaggle. The aim is to detect a mere 492 fraudulent transactions from 284,807 transactions in total. You will use [Keras](../../guide/keras/overview.ipynb) to define the model and [class weights](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model) to help the model learn from the imbalanced data. PENDING LINK UPDATE: Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://training-data-analyst/courses/machine_learning/deepdive2/image_classification/labs/5_fashion_mnist_class.ipynb) -- try to complete that notebook first before reviewing this solution notebook. Start by importing the necessary libraries for this lab. ``` import tensorflow as tf from tensorflow import keras import os import tempfile import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler print("TensorFlow version: ",tf.version.VERSION) ``` In the next cell, we're going to customize our Matplot lib visualization figure size and colors. 
Note that each time Matplotlib loads, it defines a runtime configuration (rc) containing the default styles for every plot element we create. This configuration can be adjusted at any time using the plt.rc convenience routine. ``` mpl.rcParams['figure.figsize'] = (12, 10) colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] ``` ## Data processing and exploration ### Download the Kaggle Credit Card Fraud data set Pandas is a Python library with many helpful utilities for loading and working with structured data and can be used to download CSVs into a dataframe. Note: This dataset has been collected and analysed during a research collaboration of Worldline and the [Machine Learning Group](http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available [here](https://www.researchgate.net/project/Fraud-detection-5) and the page of the [DefeatFraud](https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/) project ``` file = tf.keras.utils raw_df = pd.read_csv('https://storage.googleapis.com/download.tensorflow.org/data/creditcard.csv') raw_df.head() ``` Now, let's view the statistics of the raw dataframe. ``` raw_df[['Time', 'V1', 'V2', 'V3', 'V4', 'V5', 'V26', 'V27', 'V28', 'Amount', 'Class']].describe() ``` ### Examine the class label imbalance Let's look at the dataset imbalance: ``` neg, pos = np.bincount(raw_df['Class']) total = neg + pos print('Examples:\n Total: {}\n Positive: {} ({:.2f}% of total)\n'.format( total, pos, 100 * pos / total)) ``` This shows the small fraction of positive samples. ### Clean, split and normalize the data The raw data has a few issues. First the `Time` and `Amount` columns are too variable to use directly. 
Drop the `Time` column (since it's not clear what it means) and take the log of the `Amount` column to reduce its range. ``` cleaned_df = raw_df.copy() # You don't want the `Time` column. cleaned_df.pop('Time') # The `Amount` column covers a huge range. Convert to log-space. eps=0.001 # 0 => 0.1¢ cleaned_df['Log Ammount'] = np.log(cleaned_df.pop('Amount')+eps) ``` Split the dataset into train, validation, and test sets. The validation set is used during the model fitting to evaluate the loss and any metrics, however the model is not fit with this data. The test set is completely unused during the training phase and is only used at the end to evaluate how well the model generalizes to new data. This is especially important with imbalanced datasets where [overfitting](https://developers.google.com/machine-learning/crash-course/generalization/peril-of-overfitting) is a significant concern from the lack of training data. ``` # TODO 1 # Use a utility from sklearn to split and shuffle our dataset. train_df, test_df = #TODO: Your code goes here. train_df, val_df = #TODO: Your code goes here. # Form np arrays of labels and features. train_labels = #TODO: Your code goes here. bool_train_labels = #TODO: Your code goes here. val_labels = #TODO: Your code goes here. test_labels = #TODO: Your code goes here. train_features = np.array(train_df) val_features = np.array(val_df) test_features = np.array(test_df) ``` Normalize the input features using the sklearn StandardScaler. This will set the mean to 0 and standard deviation to 1. Note: The `StandardScaler` is only fit using the `train_features` to be sure the model is not peeking at the validation or test sets. 
``` scaler = StandardScaler() train_features = scaler.fit_transform(train_features) val_features = scaler.transform(val_features) test_features = scaler.transform(test_features) train_features = np.clip(train_features, -5, 5) val_features = np.clip(val_features, -5, 5) test_features = np.clip(test_features, -5, 5) print('Training labels shape:', train_labels.shape) print('Validation labels shape:', val_labels.shape) print('Test labels shape:', test_labels.shape) print('Training features shape:', train_features.shape) print('Validation features shape:', val_features.shape) print('Test features shape:', test_features.shape) ``` Caution: If you want to deploy a model, it's critical that you preserve the preprocessing calculations. The easiest way to implement them as layers, and attach them to your model before export. ### Look at the data distribution Next compare the distributions of the positive and negative examples over a few features. Good questions to ask yourself at this point are: * Do these distributions make sense? * Yes. You've normalized the input and these are mostly concentrated in the `+/- 2` range. * Can you see the difference between the ditributions? * Yes the positive examples contain a much higher rate of extreme values. 
``` pos_df = pd.DataFrame(train_features[ bool_train_labels], columns = train_df.columns) neg_df = pd.DataFrame(train_features[~bool_train_labels], columns = train_df.columns) sns.jointplot(pos_df['V5'], pos_df['V6'], kind='hex', xlim = (-5,5), ylim = (-5,5)) plt.suptitle("Positive distribution") sns.jointplot(neg_df['V5'], neg_df['V6'], kind='hex', xlim = (-5,5), ylim = (-5,5)) _ = plt.suptitle("Negative distribution") ``` ## Define the model and metrics Define a function that creates a simple neural network with a densly connected hidden layer, a [dropout](https://developers.google.com/machine-learning/glossary/#dropout_regularization) layer to reduce overfitting, and an output sigmoid layer that returns the probability of a transaction being fraudulent: ``` METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='auc'), ] def make_model(metrics = METRICS, output_bias=None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) # TODO 1 model = keras.Sequential( #TODO: Your code goes here. #TODO: Your code goes here. #TODO: Your code goes here. #TODO: Your code goes here. ) model.compile( optimizer=keras.optimizers.Adam(lr=1e-3), loss=keras.losses.BinaryCrossentropy(), metrics=metrics) return model ``` ### Understanding useful metrics Notice that there are a few metrics defined above that can be computed by the model that will be helpful when evaluating the performance. 
* **False** negatives and **false** positives are samples that were **incorrectly** classified * **True** negatives and **true** positives are samples that were **correctly** classified * **Accuracy** is the percentage of examples correctly classified > $\frac{\text{true samples}}{\text{total samples}}$ * **Precision** is the percentage of **predicted** positives that were correctly classified > $\frac{\text{true positives}}{\text{true positives + false positives}}$ * **Recall** is the percentage of **actual** positives that were correctly classified > $\frac{\text{true positives}}{\text{true positives + false negatives}}$ * **AUC** refers to the Area Under the Curve of a Receiver Operating Characteristic curve (ROC-AUC). This metric is equal to the probability that a classifier will rank a random positive sample higher than than a random negative sample. Note: Accuracy is not a helpful metric for this task. You can 99.8%+ accuracy on this task by predicting False all the time. Read more: * [True vs. False and Positive vs. Negative](https://developers.google.com/machine-learning/crash-course/classification/true-false-positive-negative) * [Accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy) * [Precision and Recall](https://developers.google.com/machine-learning/crash-course/classification/precision-and-recall) * [ROC-AUC](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc) ## Baseline model ### Build the model Now create and train your model using the function that was defined earlier. Notice that the model is fit using a larger than default batch size of 2048, this is important to ensure that each batch has a decent chance of containing a few positive samples. If the batch size was too small, they would likely have no fraudulent transactions to learn from. Note: this model will not handle the class imbalance well. You will improve it later in this tutorial. 
``` EPOCHS = 100 BATCH_SIZE = 2048 early_stopping = tf.keras.callbacks.EarlyStopping( monitor='val_auc', verbose=1, patience=10, mode='max', restore_best_weights=True) model = make_model() model.summary() ``` Test run the model: ``` model.predict(train_features[:10]) ``` ### Optional: Set the correct initial bias. These are initial guesses are not great. You know the dataset is imbalanced. Set the output layer's bias to reflect that (See: [A Recipe for Training Neural Networks: "init well"](http://karpathy.github.io/2019/04/25/recipe/#2-set-up-the-end-to-end-trainingevaluation-skeleton--get-dumb-baselines)). This can help with initial convergence. With the default bias initialization the loss should be about `math.log(2) = 0.69314` ``` results = model.evaluate(train_features, train_labels, batch_size=BATCH_SIZE, verbose=0) print("Loss: {:0.4f}".format(results[0])) ``` The correct bias to set can be derived from: $$ p_0 = pos/(pos + neg) = 1/(1+e^{-b_0}) $$ $$ b_0 = -log_e(1/p_0 - 1) $$ $$ b_0 = log_e(pos/neg)$$ ``` initial_bias = np.log([pos/neg]) initial_bias ``` Set that as the initial bias, and the model will give much more reasonable initial guesses. It should be near: `pos/total = 0.0018` ``` model = make_model(output_bias = initial_bias) model.predict(train_features[:10]) ``` With this initialization the initial loss should be approximately: $$-p_0log(p_0)-(1-p_0)log(1-p_0) = 0.01317$$ ``` results = model.evaluate(train_features, train_labels, batch_size=BATCH_SIZE, verbose=0) print("Loss: {:0.4f}".format(results[0])) ``` This initial loss is about 50 times less than if would have been with naive initilization. This way the model doesn't need to spend the first few epochs just learning that positive examples are unlikely. This also makes it easier to read plots of the loss during training. 
def plot_loss(history, label, n):
    """Plot train and validation loss curves from a Keras History.

    Uses a log-scale y axis because the losses span a wide range.
    `n` selects the colour from the module-level `colors` cycle.
    """
    epochs = history.epoch
    plt.semilogy(epochs, history.history['loss'],
                 color=colors[n], label='Train ' + label)
    plt.semilogy(epochs, history.history['val_loss'],
                 color=colors[n], label='Val ' + label, linestyle="--")
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
def plot_metrics(history):
    """Plot train/val curves for loss, AUC, precision and recall in a 2x2 grid."""
    for idx, metric in enumerate(['loss', 'auc', 'precision', 'recall']):
        name = metric.replace("_", " ").capitalize()
        plt.subplot(2, 2, idx + 1)
        plt.plot(history.epoch, history.history[metric],
                 color=colors[0], label='Train')
        plt.plot(history.epoch, history.history['val_' + metric],
                 color=colors[0], linestyle="--", label='Val')
        plt.xlabel('Epoch')
        plt.ylabel(name)
        # Per-metric y limits so each panel is readable.
        if metric == 'loss':
            plt.ylim([0, plt.ylim()[1]])
        elif metric == 'auc':
            plt.ylim([0.8, 1])
        else:
            plt.ylim([0, 1])
        plt.legend()
def plot_cm(labels, predictions, p=0.5):
    """Draw a confusion-matrix heatmap at threshold `p` and print a summary.

    Predictions above `p` count as positive (fraudulent); the matrix rows
    are actual labels and the columns are predicted labels.
    """
    matrix = confusion_matrix(labels, predictions > p)
    plt.figure(figsize=(5, 5))
    sns.heatmap(matrix, annot=True, fmt="d")
    plt.title('Confusion matrix @{:.2f}'.format(p))
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')

    true_neg, false_pos = matrix[0][0], matrix[0][1]
    false_neg, true_pos = matrix[1][0], matrix[1][1]
    print('Legitimate Transactions Detected (True Negatives): ', true_neg)
    print('Legitimate Transactions Incorrectly Detected (False Positives): ', false_pos)
    print('Fraudulent Transactions Missed (False Negatives): ', false_neg)
    print('Fraudulent Transactions Detected (True Positives): ', true_pos)
    print('Total Fraudulent Transactions: ', np.sum(matrix[1]))
def plot_roc(name, labels, predictions, **kwargs):
    """Plot an ROC curve (axes in percent), zoomed to the upper-left corner.

    Extra keyword arguments are forwarded to `plt.plot` (colour, linestyle).
    """
    fpr, tpr, _ = sklearn.metrics.roc_curve(labels, predictions)
    plt.plot(100 * fpr, 100 * tpr, label=name, linewidth=2, **kwargs)
    plt.xlabel('False positives [%]')
    plt.ylabel('True positives [%]')
    # Zoom in on the region of interest for a highly imbalanced problem.
    plt.xlim([-0.5, 20])
    plt.ylim([80, 100.5])
    plt.grid(True)
    plt.gca().set_aspect('equal')
print('Weight for class 0: {:.2f}'.format(weight_for_0)) print('Weight for class 1: {:.2f}'.format(weight_for_1)) ``` ### Train a model with class weights Now try re-training and evaluating the model with class weights to see how that affects the predictions. Note: Using `class_weights` changes the range of the loss. This may affect the stability of the training depending on the optimizer. Optimizers whose step size is dependent on the magnitude of the gradient, like `optimizers.SGD`, may fail. The optimizer used here, `optimizers.Adam`, is unaffected by the scaling change. Also note that because of the weighting, the total losses are not comparable between the two models. ``` weighted_model = make_model() weighted_model.load_weights(initial_weights) weighted_history = weighted_model.fit( train_features, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks = [early_stopping], validation_data=(val_features, val_labels), # The class weights go here class_weight=class_weight) ``` ### Check training history ``` plot_metrics(weighted_history) ``` ### Evaluate metrics ``` # TODO 1 train_predictions_weighted = #TODO: Your code goes here. test_predictions_weighted = #TODO: Your code goes here. weighted_results = weighted_model.evaluate(test_features, test_labels, batch_size=BATCH_SIZE, verbose=0) for name, value in zip(weighted_model.metrics_names, weighted_results): print(name, ': ', value) print() plot_cm(test_labels, test_predictions_weighted) ``` Here you can see that with class weights the accuracy and precision are lower because there are more false positives, but conversely the recall and AUC are higher because the model also found more true positives. Despite having lower accuracy, this model has higher recall (and identifies more fraudulent transactions). Of course, there is a cost to both types of error (you wouldn't want to bug users by flagging too many legitimate transactions as fraudulent, either). 
Carefully consider the trade offs between these different types of errors for your application. ### Plot the ROC ``` plot_roc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0]) plot_roc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--') plot_roc("Train Weighted", train_labels, train_predictions_weighted, color=colors[1]) plot_roc("Test Weighted", test_labels, test_predictions_weighted, color=colors[1], linestyle='--') plt.legend(loc='lower right') ``` ## Oversampling ### Oversample the minority class A related approach would be to resample the dataset by oversampling the minority class. ``` # TODO 1 pos_features = #TODO: Your code goes here. neg_features = train_features[~bool_train_labels] pos_labels = #TODO: Your code goes here. neg_labels = #TODO: Your code goes here. ``` #### Using NumPy You can balance the dataset manually by choosing the right number of random indices from the positive examples: ``` ids = np.arange(len(pos_features)) choices = np.random.choice(ids, len(neg_features)) res_pos_features = pos_features[choices] res_pos_labels = pos_labels[choices] res_pos_features.shape resampled_features = np.concatenate([res_pos_features, neg_features], axis=0) resampled_labels = np.concatenate([res_pos_labels, neg_labels], axis=0) order = np.arange(len(resampled_labels)) np.random.shuffle(order) resampled_features = resampled_features[order] resampled_labels = resampled_labels[order] resampled_features.shape ``` #### Using `tf.data` If you're using `tf.data` the easiest way to produce balanced examples is to start with a `positive` and a `negative` dataset, and merge them. See [the tf.data guide](../../guide/data.ipynb) for more examples. 
def make_ds(features, labels):
    """Build an endlessly repeating, shuffled tf.data.Dataset of (features, labels) pairs."""
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))  # .cache()
    return dataset.shuffle(BUFFER_SIZE).repeat()
But when training the model batch-wise, as you did here, the oversampled data provides a smoother gradient signal: Instead of each positive example being shown in one batch with a large weight, they're shown in many different batches each time with a small weight. This smoother gradient signal makes it easier to train the model. ### Check training history Note that the distributions of metrics will be different here, because the training data has a totally different distribution from the validation and test data. ``` plot_metrics(resampled_history ) ``` ### Re-train Because training is easier on the balanced data, the above training procedure may overfit quickly. So break up the epochs to give the `callbacks.EarlyStopping` finer control over when to stop training. ``` resampled_model = make_model() resampled_model.load_weights(initial_weights) # Reset the bias to zero, since this dataset is balanced. output_layer = resampled_model.layers[-1] output_layer.bias.assign([0]) resampled_history = resampled_model.fit( resampled_ds, # These are not real epochs steps_per_epoch = 20, epochs=10*EPOCHS, callbacks = [early_stopping], validation_data=(val_ds)) ``` ### Re-check training history ``` plot_metrics(resampled_history) ``` ### Evaluate metrics ``` # TODO 1 train_predictions_resampled = #TODO: Your code goes here. test_predictions_resampled = #TODO: Your code goes here. 
resampled_results = resampled_model.evaluate(test_features, test_labels, batch_size=BATCH_SIZE, verbose=0) for name, value in zip(resampled_model.metrics_names, resampled_results): print(name, ': ', value) print() plot_cm(test_labels, test_predictions_resampled) ``` ### Plot the ROC ``` plot_roc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0]) plot_roc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--') plot_roc("Train Weighted", train_labels, train_predictions_weighted, color=colors[1]) plot_roc("Test Weighted", test_labels, test_predictions_weighted, color=colors[1], linestyle='--') plot_roc("Train Resampled", train_labels, train_predictions_resampled, color=colors[2]) plot_roc("Test Resampled", test_labels, test_predictions_resampled, color=colors[2], linestyle='--') plt.legend(loc='lower right') ``` ## Applying this tutorial to your problem Imbalanced data classification is an inherantly difficult task since there are so few samples to learn from. You should always start with the data first and do your best to collect as many samples as possible and give substantial thought to what features may be relevant so the model can get the most out of your minority class. At some point your model may struggle to improve and yield the results you want, so it is important to keep in mind the context of your problem and the trade offs between different types of errors.
github_jupyter
# W207 Final Project Erika, Jen Jen, Geoff, Leslie (In Python 3) As of 3/35 Outline: * Data Pre-Processing * Simple Feature Selection * Basline Models * Possible Approaches # Section 1 Loading and Processing Data ``` ## Import Libraries ## import json from pprint import pprint from pandas import * from pandas.io.json import json_normalize # General libraries. import re import numpy as np import matplotlib.pyplot as plt # SK-learn libraries for learning. from sklearn.pipeline import Pipeline from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import BernoulliNB from sklearn.naive_bayes import MultinomialNB from sklearn.grid_search import GridSearchCV from sklearn.metrics import recall_score # SK-learn libraries for evaluation. from sklearn.metrics import confusion_matrix from sklearn import metrics from sklearn.metrics import classification_report # SK-learn library for importing the newsgroup data. from sklearn.datasets import fetch_20newsgroups # SK-learn libraries for feature extraction from text. 
from sklearn.feature_extraction.text import * ## Get Data ## #reference on data: https://www.kaggle.com/c/random-acts-of-pizza/data # pull in the training and test data with open('/Users/erikaananda/Documents/MIDS/W207/Final Project/data/train.json', encoding='utf-8') as data_file: #with open('/home/levi/Documents/W207_Proj/data/train.json', encoding='utf-8') as data_file: trainData = json.loads(data_file.read()) with open('/Users/erikaananda/Documents/MIDS/W207/Final Project/data/test.json', encoding='utf-8') as data_file: #with open('/home/levi/Documents/W207_Proj/data/train.json', encoding='utf-8') as data_file: testData = json.loads(data_file.read()) # create a dev data set devData = trainData[0:1000] trainData = trainData[1000:] # show how the data looks in its original format #pprint("data in json format:") #pprint(trainData[1]) # create a normalized view allTData = json_normalize(trainData) print("\nSize of the normalized Data:", allTData.shape) print("\nnormalized data columns:", list(allTData)) allDData = json_normalize(devData) ## Create subsets of data for analysis ### # create a flat dataset without the subreddits list flatData = allTData.drop('requester_subreddits_at_request', 1) # create a separate dataset with just subreddits, indexed on request id # we can creata a count vector on the words, run Naive Bayes against it, # and add the probabilities to our flat dataset subredTData = allTData[['request_id','requester_subreddits_at_request']] subredTData.set_index('request_id', inplace=True) subredDData= allDData[['request_id','requester_subreddits_at_request']] subredDData.set_index('request_id', inplace=True) # our training labels trainLabel = allTData['requester_received_pizza'] devLabel = allDData['requester_received_pizza'] # what do these look like? 
#print(list(flatData)) print(subredTData.shape) #print(subredTData['requester_subreddits_at_request'][1]) # create a corpus of subreddits to vectorize trainCorpus = [] for index in range(len(subredTData)): trainCorpus.append(' '.join(subredTData['requester_subreddits_at_request'][index])) devCorpus = [] for index in range(len(subredDData)): devCorpus.append(' '.join(subredDData['requester_subreddits_at_request'][index])) # combine all text sources into a single corpus fldTText = allTData[['request_id','request_text', 'request_text_edit_aware', 'request_title']] fldDText = allDData[['request_id','request_text', 'request_text_edit_aware', 'request_title']] trainCorpus = [] for index in range(len(subredTData)): a = ' '.join(subredTData['requester_subreddits_at_request'][index]) b = (a, fldTText['request_text'][index], fldTText['request_text_edit_aware'][index], fldTText['request_title'][index]) trainCorpus.append(' '.join(b)) devCorpus = [] for index in range(len(subredDData)): a = ' '.join(subredDData['requester_subreddits_at_request'][index]) b = (a, fldDText['request_text'][index], fldDText['request_text_edit_aware'][index], fldDText['request_title'][index]) devCorpus.append(' '.join(b)) # Print 3 examples print (trainCorpus[:3]) labels = trainLabel.astype(int) labels = list(labels) print(labels[:3]) print('-'*75) print ('\n' , devCorpus[:3]) labels_dev = devLabel.astype(int) labels_dev = list(labels_dev) print(labels_dev[:3]) ``` # Section 2. 
Simple Feature Selection and Pre-Processing ``` # Simple Pre-Processing def data_preprocessor(s): """ Note: this function pre-processors data: (1) removes non-alpha characters (2) converts digits to 'number' (3) regularizes spaces (although CountVectorizer ignores this unless they are part of words) (4) reduces word size to n """ s = [re.sub(r'[?|$|.|!|@|\n|(|)|<|>|_|-|,|\']',r' ',s) for s in s] # strip out non-alpha numeric char, replace with space s = [re.sub(r'\d+',r'number ',s) for s in s] # convert digits to number s = [re.sub(r' +',r' ',s) for s in s] # convert multiple spaces to single space # This sets word size to n=5 num = 5 def set_word(s): temp = [] for s in s: x = s.split() z = [elem[:num] for elem in x] z = ' '.join(z) temp.append(z) return temp s = set_word(s) return s # Set up the data with CountVectorizer #vectorizer = CountVectorizer(lowercase=True, strip_accents='unicode',stop_words='english') vectorizer = CountVectorizer(min_df=1,lowercase=True) tVector = vectorizer.fit_transform(trainCorpus) dVector = vectorizer.transform(devCorpus) print ('\nRaw data:') print ("The size of the vocabulary for the training text data is", tVector.shape[1]) print ("First 5 feature Names:", vectorizer.get_feature_names()[1:6], "\n") tVector_p = vectorizer.fit_transform(data_preprocessor(trainCorpus)) dVector_p = vectorizer.transform(data_preprocessor(devCorpus)) print ('\nPre-Processed data:') print ("The size of the vocabulary for the training text data is", tVector_p.shape[1]) print ("First 5 feature Names:", vectorizer.get_feature_names()[1:6], "\n") ``` # Section 3. 
Baseline Models ## Logistic Regression ```
# Logistic Regression baseline: fit once on raw counts, once on pre-processed counts
C = 0.01 #(For now) inverse regularization strength; tuned further in the next section
modelLogit = LogisticRegression(penalty='l2', C=C)
modelLogit.fit(tVector,trainLabel)
logitScore = round(modelLogit.score(dVector, devLabel), 4)
print("For C = ", C, "Logistic regression accuracy:", logitScore)
modelLogit.fit(tVector_p,trainLabel)
logitScore = round(modelLogit.score(dVector_p, devLabel), 4)
print("For C = ", C, "Logistic regression (processed data) accuracy:", logitScore)
``` ## Naive Bayes ```
# Bernoulli NB
# NOTE(review): the original comment said "Multinomial NB", but the estimator
# constructed below is BernoulliNB (boolean/binary features), as the print
# statements correctly state.
alpha = 0.01
clf = BernoulliNB(alpha=alpha)
clf.fit(tVector, trainLabel)
test_predicted_labels = clf.predict(dVector)
print ('Bernoulli NB using raw data with alpha = %1.3f:' %alpha,
metrics.accuracy_score(devLabel,test_predicted_labels) )
clf.fit(tVector_p, trainLabel)
test_predicted_labels = clf.predict(dVector_p)
print ('Bernoulli NB using processed data with alpha = %1.3f:' %alpha,
metrics.accuracy_score(devLabel,test_predicted_labels) )
``` ## Logistic Regression More Feature Selection ```
# get the best regularization (sweep over inverse regularization strength C)
regStrength = [0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 6.0, 10.0]
for c in regStrength:
    modelLogit = LogisticRegression(penalty='l1', C=c)
    modelLogit.fit(tVector_p, trainLabel)
    logitScore = round(modelLogit.score(dVector_p, devLabel), 4)
    print("For C = ", c, "Logistic regression accuracy:", logitScore)

# although the best score comes from c=.001, the best F1-score
# comes from c=.5, and this gives better weight options
modelLogit = LogisticRegression(penalty='l1', C=.5, tol = .1)
modelLogit.fit(tVector_p, trainLabel)
print(max(modelLogit.coef_[0]))

# Show the top-weighted vocabulary features (largest positive L1 coefficients).
numWeights = 5
sortIndex = np.argsort(modelLogit.coef_)
iLen = len(sortIndex[0])
print("\nTop", numWeights, "Weighted Features:")
for index in range((iLen - numWeights) , iLen):
    lookup = sortIndex[0][index]
    print(lookup)
    weight = modelLogit.coef_[0][lookup]
    print(vectorizer.get_feature_names()[sortIndex[0][index]], weight)
``` # Future Steps * More data pre-processing (looking for newer
features too) * Explore PCA/LSA * Ideas on features - Combination of words - Pruning - Timing (of requests) - Location
github_jupyter
``` # Base Data Science snippet import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import time from tqdm import tqdm_notebook %matplotlib inline %load_ext autoreload %autoreload 2 import sys sys.path.append("../") import westworld from westworld.assets import * from westworld.colors import * from westworld.objects import * from westworld.agents import * from westworld.environment import * from westworld.simulation import * from westworld.logger import Logger ``` # Playground ## Beta law for fight evaluations - https://fr.wikipedia.org/wiki/Loi_b%C3%AAta - https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html ``` from scipy.stats import beta r = beta.rvs(10, 1, size=1000) from ipywidgets import interact,IntSlider @interact(a = IntSlider(min = 1,max = 10,value = 1,step = 1),b = IntSlider(min = 1,max = 10,value = 1,step = 1)) def explore(a,b): x = np.linspace(0,1,100) rv = beta(a, b) plt.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') plt.show() def win(a,b): return beta.rvs(a, b) > 0.5 ``` ## Env development ### Dev decorators ``` env.data import functools class Test: @staticmethod def decorator(func): @functools.wraps(func) def wrapper_decorator(*args, **kwargs): print("Something is happening before the function is called.") # Do something before value = func(*args, **kwargs) print("Something is happening after the function is called.") # Do something after return value return wrapper_decorator @self.decorator def __init__(self): pass @self.decorator def render(self): pass class CTest(Test): pass import functools def decorator(func): @functools.wraps(func) def wrapper_decorator(self,*args, **kwargs): print("Something is happening before the function is called.") # Do something before print(func) value = func(self,*args, **kwargs) self.post_init() print("Something is happening after the function is called.") # Do something after return value return wrapper_decorator class Test: @decorator def __init__(self): pass 
@decorator def render(self): pass def post_init(self): print("postinit1") class CTest(Test): @decorator def __init__(self): super().__init__() def post_init(self): print("postinit2") env.quit() t = CTest() ``` ### Dev env ``` class Player(BaseAgent): attrs = ["color","stacked"] def post_bind(self): self.stacked = 1 self.other_color = RED if self.color == BLUE else BLUE @property def blocking(self): return False def step(self): self.wander() def render(self,screen): super().render(screen = screen) self.render_text(self.stacked,size = 25) # def prerender(self): # player1 = self.env.make_group({"color":RED}) # player2 = self.env.make_group({"color":BLUE}) # collision1 = self.collides_group(player1,method = "rect") # collision2 = self.collides_group(player2,method = "rect") # self.pop = 1 + (len(collision1) if self.color == RED else len(collision2)) class Environment(GridEnvironment): def count_stacked(self): count = self.data.groupby(["color","pos"])["stacked"].transform(lambda x : len(x)) for obj,stacked in count.to_dict().items(): self[obj].stacked = stacked def prerender(self): self.count_stacked() spawner1 = lambda x,y : Player(x,y,color = RED) spawner2 = lambda x,y : Player(x,y,color = BLUE) env = Environment(width = 20,height = 10,cell_size=30,show_grid = True) env.spawn(spawner1,20) env.spawn(spawner2,20) env.render() env.get_img() sim = Simulation(env,fps = 10) sim.run_episode(n_steps = 20,replay = True,save = True) class Player(BaseAgent): def step(self): self.wander() spawner1 = lambda x,y : Player(x,y,color = BLUE,img_asset = "blob") env = GridEnvironment(width = 20,height = 10,cell_size=30,show_grid = True) env.spawn(spawner1,50) env.render() env.get_img() sim = Simulation(env,fps = 3) sim.run_episode(n_steps = 500,replay = True,save = False) ```
github_jupyter
# Step 1) Data Preparation ``` %run data_prep.py INTC import pandas as pd df = pd.read_csv("../1_Data/INTC.csv",infer_datetime_format=True, parse_dates=['dt'], index_col=['dt']) trainCount=int(len(df)*0.4) dfTrain = df.iloc[:trainCount] dfTest = df.iloc[trainCount:] dfTest.to_csv('local_test/test_dir/input/data/training/data.csv') dfTest.head() %matplotlib notebook dfTest["close"].plot() ``` # Step 2) Modify Strategy Configuration In the following cell, you can adjust the parameters for the strategy. * `user` = Name for Leaderboard (optional) * `go_long` = Go Long for Breakout (true or false) * `go_short` = Go Short for Breakout (true or false) * `period` = Length of window for previous high and low * `size` = The number of shares for a transaction `Tip`: A good starting point for improving the strategy is to lengthen the period of the previous high and low. Equity Markets tend to have a long bias and if you only consider long trades this might improve the performance. ``` %%writefile model/algo_config { "user" : "user", "go_long" : true, "go_short" : true, "period" : 9, "size" : 1000 } %run update_config.py daily_breakout ``` # Step 3) Modify Strategy Code `Tip`: A good starting point for improving the strategy is to add additional indicators like ATR (Average True Range) before placing a trade. You want to avoid false signals if there is not enough volatility. 
Here are some helpful links: * Backtrader Documentation: https://www.backtrader.com/docu/strategy/ * TA-Lib Indicator Reference: https://www.backtrader.com/docu/talibindautoref/ * Backtrader Indicator Reference: https://www.backtrader.com/docu/indautoref/ ``` %%writefile model/algo_daily_breakout.py import backtrader as bt from algo_base import * import pytz from pytz import timezone class MyStrategy(StrategyTemplate): def __init__(self): # Initiation super(MyStrategy, self).__init__() self.highest = bt.ind.Highest(period=self.config["period"]) self.lowest = bt.ind.Lowest(period=self.config["period"]) self.size = self.config["size"] def next(self): # Processing super(MyStrategy, self).next() dt=self.datas[0].datetime.datetime(0) if not self.position: if self.config["go_long"] and self.datas[0] > self.highest[-1]: self.buy(size=self.size) # Go long elif self.config["go_short"] and self.datas[0] < self.lowest[-1]: self.sell(size=self.size) # Go short elif self.position.size>0 and self.datas[0] < self.highest[-1]: self.close() elif self.position.size<0 and self.datas[0] > self.lowest[-1]: self.close() ``` # Step 4) Backtest Locally (historical data) **Please note that the initial docker image build may take up to 5 min. Subsequent runs are fast.** ``` #Build Local Algo Image !docker build -t algo_$(cat model/algo_name) . !docker run -v $(pwd)/local_test/test_dir:/opt/ml --rm algo_$(cat model/algo_name) train from IPython.display import Image Image(filename='local_test/test_dir/model/chart.png') ``` ## Refine your trading strategy (step 2 to 4). Once you are ready to test the performance of your strategy in a forwardtest, move on to the next step. # Step 5) Forwardtest on SageMaker (simulated data) and submit performance **Please note that the forwardtest in SageMaker runs each time with a new simulated dataset to validate the performance of the strategy. 
Feel free to run it multiple times to compare performance.** ``` #Deploy Algo Image to ECS !./build_and_push.sh #Run Remote Forwardtest via SageMaker import sagemaker as sage from sagemaker import get_execution_role from sagemaker.estimator import Estimator role = get_execution_role() sess = sage.Session() WORK_DIRECTORY = 'local_test/test_dir/input/data/training' data_location = sess.upload_data(WORK_DIRECTORY, key_prefix='data') print(data_location) with open('model/algo_config', 'r') as f: config = json.load(f) algo_name=config['algo_name'] config['sim_data']=True prefix='algo_'+algo_name job_name=prefix.replace('_','-') account = sess.boto_session.client('sts').get_caller_identity()['Account'] region = sess.boto_session.region_name image = f'{account}.dkr.ecr.{region}.amazonaws.com/{prefix}:latest' algo = sage.estimator.Estimator( image_name=image, role=role, train_instance_count=1, train_instance_type='ml.m4.xlarge', output_path="s3://{}/output".format(sess.default_bucket()), sagemaker_session=sess, base_job_name=job_name, hyperparameters=config, metric_definitions=[ { "Name": "algo:pnl", "Regex": "Total PnL:(.*?)]" }, { "Name": "algo:sharpe_ratio", "Regex": "Sharpe Ratio:(.*?)," } ]) algo.fit(data_location) #Get Algo Metrics from sagemaker.analytics import TrainingJobAnalytics latest_job_name = algo.latest_training_job.job_name metrics_dataframe = TrainingJobAnalytics(training_job_name=latest_job_name).dataframe() metrics_dataframe #Get Algo Chart from S3 model_name=algo.model_data.replace('s3://'+sess.default_bucket()+'/','') import boto3 s3 = boto3.resource('s3') my_bucket = s3.Bucket(sess.default_bucket()) my_bucket.download_file(model_name,'model.tar.gz') !tar -xzf model.tar.gz !rm model.tar.gz from IPython.display import Image Image(filename='chart.png') ``` ### Congratulations! You've completed this strategy. Verify your submission on the leaderboard. ``` %run leaderboard.py ```
github_jupyter
This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. # Installation Note that some packages may be out of date. You can always get the newest `nbsphinx` release from [PyPI](https://pypi.org/project/nbsphinx) (using `pip`). If you want to try the latest development version, have a look at the file [CONTRIBUTING.rst](https://github.com/spatialaudio/nbsphinx/blob/master/CONTRIBUTING.rst). ## nbsphinx Packages [![Anaconda Badge](https://anaconda.org/conda-forge/nbsphinx/badges/version.svg)](https://anaconda.org/conda-forge/nbsphinx) If you are using the `conda` package manager (e.g. with [Anaconda](https://www.anaconda.com/distribution/) for Linux/macOS/Windows), you can install `nbsphinx` from the [conda-forge](https://conda-forge.org/) channel: conda install -c conda-forge nbsphinx If you are using Linux, there are packages available for many distributions. [![Packaging status](https://repology.org/badge/vertical-allrepos/python:nbsphinx.svg)](https://repology.org/project/python:nbsphinx/versions) [![PyPI version](https://badge.fury.io/py/nbsphinx.svg)](https://pypi.org/project/nbsphinx) On any platform, you can also install `nbsphinx` with `pip`, Python's own package manager: python3 -m pip install nbsphinx --user If you want to install it system-wide for all users (assuming you have the necessary rights), just drop the `--user` flag. To upgrade an existing `nbsphinx` installation to the newest release, use the `--upgrade` flag: python3 -m pip install nbsphinx --upgrade --user If you suddenly change your mind, you can un-install it with: python3 -m pip uninstall nbsphinx Depending on your Python installation, you may have to use `python` instead of `python3`. ## nbsphinx Prerequisites Some of the aforementioned packages will install some of these prerequisites automatically, some of the things may be already installed on your computer anyway. 
### Python Of course you'll need Python, because both Sphinx and `nbsphinx` are implemented in Python. There are many ways to get Python. If you don't know which one is best for you, you can try [Anaconda](https://www.anaconda.com/distribution/). ### Sphinx You'll need [Sphinx](https://www.sphinx-doc.org/) as well, because `nbsphinx` is just a Sphinx extension and doesn't do anything on its own. If you use `conda`, you can get [Sphinx from the conda-forge channel](https://anaconda.org/conda-forge/sphinx): conda install -c conda-forge sphinx Alternatively, you can install it with `pip` (see below): python3 -m pip install Sphinx --user ### pip Recent versions of Python already come with `pip` pre-installed. If you don't have it, you can [install it manually](https://pip.pypa.io/en/latest/installing/). ### pandoc The stand-alone program [pandoc](https://pandoc.org/) is used to convert Markdown content to something Sphinx can understand. You have to install this program separately, ideally with your package manager. If you are using `conda`, you can install [pandoc from the conda-forge channel](https://anaconda.org/conda-forge/pandoc): conda install -c conda-forge pandoc If that doesn't work out for you, have a look at `pandoc`'s [installation instructions](https://pandoc.org/installing.html). <div class="alert alert-info"> **Note:** The use of `pandoc` in `nbsphinx` is temporary, but will likely stay that way for a long time, see [issue #36](https://github.com/spatialaudio/nbsphinx/issues/36). </div> ### Pygments Lexer for Syntax Highlighting To get proper syntax highlighting in code cells, you'll need an appropriate *Pygments lexer*. This of course depends on the programming language of your Jupyter notebooks (more specifically, the `pygments_lexer` metadata of your notebooks). For example, if you use Python in your notebooks, you'll have to have the `IPython` package installed, e.g. 
with conda install -c conda-forge ipython or python3 -m pip install IPython --user <div class="alert alert-info"> **Note:** If you are using Anaconda with the default channel and syntax highlighting in code cells doesn't seem to work, you can try to install IPython from the `conda-forge` channel or directly with `pip`, or as a work-around, add `'IPython.sphinxext.ipython_console_highlighting'` to `extensions` in your `conf.py`. For details, see [Anaconda issue #1430](https://github.com/ContinuumIO/anaconda-issues/issues/1430) and [nbsphinx issue #24](https://github.com/spatialaudio/nbsphinx/issues/24). </div> ### Jupyter Kernel If you want to execute your notebooks during the Sphinx build process (see [Controlling Notebook Execution](executing-notebooks.ipynb)), you need an appropriate [Jupyter kernel](https://jupyter.readthedocs.io/en/latest/projects/kernels.html) installed. For example, if you use Python, you should install the `ipykernel` package, e.g. with conda install -c conda-forge ipykernel or python3 -m pip install ipykernel --user If you created your notebooks yourself with Jupyter, it's very likely that you have the right kernel installed already.
github_jupyter
# Building your Deep Neural Network: Step by Step Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want! - In this notebook, you will implement all the functions required to build a deep neural network. - In the next assignment, you will use these functions to build a deep neural network for image classification. **After this assignment you will be able to:** - Use non-linear units like ReLU to improve your model - Build a deeper neural network (with more than 1 hidden layer) - Implement an easy-to-use neural network class **Notation**: - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters. - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations). Let's get started! ## 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python. - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python. - dnn_utils provides some necessary functions for this notebook. - testCases provides some test cases to assess the correctness of your functions - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. 
``` import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v3 import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ``` ## 2 - Outline of the Assignment To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will: - Initialize the parameters for a two-layer network and for an $L$-layer neural network. - Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function. - Compute the loss. - Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function - Finally update the parameters. 
<img src="images/final outline.png" style="width:800px;height:500px;"> <caption><center> **Figure 1**</center></caption><br> **Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. ## 3 - Initialization You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. ### 3.1 - 2-layer Neural Network **Exercise**: Create and initialize the parameters of the 2-layer neural network. **Instructions**: - The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape. - Use zero initialization for the biases. Use `np.zeros(shape)`. 
``` # GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """
    Arguments:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- python dictionary containing your parameters:
                    W1 -- weight matrix of shape (n_h, n_x)
                    b1 -- bias vector of shape (n_h, 1)
                    W2 -- weight matrix of shape (n_y, n_h)
                    b2 -- bias vector of shape (n_y, 1)
    """

    np.random.seed(1)  # fixed seed so the graded output below is reproducible

    ### START CODE HERE ### (≈ 4 lines of code)
    # Small random weights (scaled by 0.01) break symmetry between hidden
    # units while keeping pre-activations near zero; biases start at zero.
    W1 = np.random.randn(n_h,n_x) * 0.01
    b1 = np.zeros((n_h,1))
    W2 = np.random.randn(n_y,n_h) * 0.01
    b2 = np.zeros((n_y,1))
    ### END CODE HERE ###

    assert(W1.shape == (n_h, n_x))
    assert(b1.shape == (n_h, 1))
    assert(W2.shape == (n_y, n_h))
    assert(b2.shape == (n_y, 1))

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return parameters

parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
``` **Expected output**: <table style="width:80%"> <tr> <td> **W1** </td> <td> [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] </td> </tr> <tr> <td> **b1**</td> <td>[[ 0.] [ 0.]]</td> </tr> <tr> <td>**W2**</td> <td> [[ 0.01744812 -0.00761207]]</td> </tr> <tr> <td> **b2** </td> <td> [[ 0.]] </td> </tr> </table> ### 3.2 - L-layer Neural Network The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$.
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: <table style="width:100%"> <tr> <td> </td> <td> **Shape of W** </td> <td> **Shape of b** </td> <td> **Activation** </td> <td> **Shape of Activation** </td> <tr> <tr> <td> **Layer 1** </td> <td> $(n^{[1]},12288)$ </td> <td> $(n^{[1]},1)$ </td> <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> <td> $(n^{[1]},209)$ </td> <tr> <tr> <td> **Layer 2** </td> <td> $(n^{[2]}, n^{[1]})$ </td> <td> $(n^{[2]},1)$ </td> <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> <td> $(n^{[2]}, 209)$ </td> <tr> <tr> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$</td> <td> $\vdots$ </td> <tr> <tr> <td> **Layer L-1** </td> <td> $(n^{[L-1]}, n^{[L-2]})$ </td> <td> $(n^{[L-1]}, 1)$ </td> <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> <td> $(n^{[L-1]}, 209)$ </td> <tr> <tr> <td> **Layer L** </td> <td> $(n^{[L]}, n^{[L-1]})$ </td> <td> $(n^{[L]}, 1)$ </td> <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td> <td> $(n^{[L]}, 209)$ </td> <tr> </table> Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u \end{bmatrix}\tag{2}$$ Then $WX + b$ will be: $$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u \end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**: - The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function. - Use random initialization for the weight matrices. 
Use `np.random.rand(shape) * 0.01`. - Use zeros initialization for the biases. Use `np.zeros(shape)`. - We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network). ```python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1)) ``` ``` # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l], 1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ``` **Expected output**: <table style="width:80%"> <tr> <td> **W1** 
</td> <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> </tr> <tr> <td>**b1** </td> <td>[[ 0.] [ 0.] [ 0.] [ 0.]]</td> </tr> <tr> <td>**W2** </td> <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> </tr> <tr> <td>**b2** </td> <td>[[ 0.] [ 0.] [ 0.]]</td> </tr> </table> ## 4 - Forward propagation module ### 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order: - LINEAR - LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model) The linear forward module (vectorized over all the examples) computes the following equations: $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$ where $A^{[0]} = X$. **Exercise**: Build the linear part of forward propagation. **Reminder**: The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help. ``` # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. 
Arguments:
    A -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)

    Returns:
    Z -- the input of the activation function, also called pre-activation parameter
    cache -- a python tuple containing "A", "W" and "b" ; stored for computing the backward pass efficiently
    """

    ### START CODE HERE ### (≈ 1 line of code)
    # Affine transform Z = W·A + b; b has shape (n_l, 1) and broadcasts across examples.
    Z = np.dot(W,A) + b
    ### END CODE HERE ###

    assert(Z.shape == (W.shape[0], A.shape[1]))
    cache = (A, W, b)

    return Z, cache

A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
``` **Expected output**: <table style="width:35%"> <tr> <td> **Z** </td> <td> [[ 3.26295337 -1.23429987]] </td> </tr> </table> ### 4.2 - Linear-Activation Forward In this notebook, you will use two activation functions: - **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` python A, activation_cache = sigmoid(Z) ``` - **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` python A, activation_cache = relu(Z) ``` For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.
**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.

```
# GRADED FUNCTION: linear_activation_forward

def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python tuple containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently
    """

    if activation == "sigmoid":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        ### START CODE HERE ### (≈ 2 lines of code)
        # Linear step Z = W·A_prev + b, then squash with the sigmoid.
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
        ### END CODE HERE ###

    elif activation == "relu":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        ### START CODE HERE ### (≈ 2 lines of code)
        # Same linear step, then ReLU (element-wise max(0, Z)).
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
        ### END CODE HERE ###

    # NOTE(review): any other `activation` string leaves A unbound and the assert
    # below raises NameError — callers must pass exactly "sigmoid" or "relu".
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    # Bundle both caches; linear_activation_backward unpacks them in this order.
    cache = (linear_cache, activation_cache)

    return A, cache

A_prev, W, b = linear_activation_forward_test_case()

A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))

A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
```

**Expected output**:

<table style="width:35%"> <tr> <td> **With sigmoid: A ** </td> <td > [[ 0.96890023 0.11013289]]</td> </tr> <tr> <td> **With ReLU: A ** </td> <td > [[ 3.43896131 0. ]]</td> </tr> </table>

**Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers.

### d) L-Layer Model

For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.

<img src="images/model_architecture_kiank.png" style="width:600px;height:300px;">
<caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>

**Exercise**: Implement the forward propagation of the above model.

**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.)

**Tips**:
- Use the functions you had previously written
- Use a for loop to replicate [LINEAR->RELU] (L-1) times
- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`.
```
# GRADED FUNCTION: L_model_forward

def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value
    caches -- list of caches containing:
                every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
                the cache of linear_sigmoid_forward() (there is one, indexed L-1)
    """

    caches = []
    A = X
    # parameters holds one W and one b per layer, hence len // 2.
    L = len(parameters) // 2                  # number of layers in the neural network

    # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
    for l in range(1, L):
        A_prev = A
        ### START CODE HERE ### (≈ 2 lines of code)
        # Hidden layers 1..L-1 use ReLU.
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = "relu")
        caches.append(cache)
        ### END CODE HERE ###

    # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
    ### START CODE HERE ### (≈ 2 lines of code)
    # Output layer L uses sigmoid, producing per-example probabilities.
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = "sigmoid")
    caches.append(cache)
    ### END CODE HERE ###

    # Binary-classification output: one row, one column per example.
    assert(AL.shape == (1,X.shape[1]))

    return AL, caches

X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
```

<table style="width:50%"> <tr> <td> **AL** </td> <td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td> </tr> <tr> <td> **Length of caches list ** </td> <td > 3 </td> </tr> </table>

Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions.

## 5 - Cost function

Now you will implement forward and backward propagation.
You need to compute the cost, because you want to check if your model is actually learning.

**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$

```
# GRADED FUNCTION: compute_cost

def compute_cost(AL, Y):
    """
    Implement the cost function defined by equation (7).

    Arguments:
    AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
    Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost
    """

    m = Y.shape[1]

    # Compute loss from aL and y.
    ### START CODE HERE ### (≈ 1 lines of code)
    # Element-wise cross-entropy terms: y*log(a) + (1-y)*log(1-a).
    # NOTE(review): assumes 0 < AL < 1 everywhere; log() of an exact 0/1 yields -inf/nan.
    logprobs = np.multiply(np.log(AL),Y) + np.multiply(np.log(1-AL),1-Y)
    # Negative mean log-likelihood over the m examples.
    cost = - np.sum(logprobs) / m
    ### END CODE HERE ###

    cost = np.squeeze(cost)      # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
    assert(cost.shape == ())

    return cost

Y, AL = compute_cost_test_case()

print("cost = " + str(compute_cost(AL, Y)))
```

**Expected Output**:

<table> <tr> <td>**cost** </td> <td> 0.41493159961539694</td> </tr> </table>

## 6 - Backward propagation module

Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters.
**Reminder**:
<img src="images/backprop_kiank.png" style="width:650px;height:250px;">
<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>

<!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows: $$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$ In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted. Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$. This is why we talk about **backpropagation**. -->

Now, similar to forward propagation, you are going to build the backward propagation in three steps:
- LINEAR backward
- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)

### 6.1 - Linear backward

For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).

Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.
<img src="images/linearback_kiank.png" style="width:250px;height:300px;">
<caption><center> **Figure 4** </center></caption>

The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:
$$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$
$$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$
$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$

**Exercise**: Use the 3 formulas above to implement linear_backward().

```
# GRADED FUNCTION: linear_backward

def linear_backward(dZ, cache):
    """
    Implement the linear portion of backward propagation for a single layer (layer l)

    Arguments:
    dZ -- Gradient of the cost with respect to the linear output (of current layer l)
    cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer

    Returns:
    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
    A_prev, W, b = cache
    # m = number of examples; the 1/m factor averages the gradients over the batch.
    m = A_prev.shape[1]

    ### START CODE HERE ### (≈ 3 lines of code)
    # Formula (8): dW = (1/m) dZ · A_prev^T
    dW = np.dot(dZ, A_prev.T) * 1. / m
    # Formula (9): sum dZ over the example axis; keepdims preserves the (n_l, 1) column shape.
    db = 1. / m * np.sum(dZ, axis=1, keepdims=True)
    # Formula (10): dA_prev = W^T · dZ
    dA_prev = np.dot(W.T, dZ)
    ### END CODE HERE ###

    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)

    return dA_prev, dW, db

# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()

dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```

**Expected Output**:

<table style="width:90%"> <tr> <td> **dA_prev** </td> <td > [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] </td> </tr> <tr> <td> **dW** </td> <td > [[-0.10076895 1.40685096 1.64992505]] </td> </tr> <tr> <td> **db** </td> <td> [[ 0.50629448]] </td> </tr> </table>

### 6.2 - Linear-Activation backward

Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**.

To help you implement `linear_activation_backward`, we provided two backward functions:
- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:

```python
dZ = sigmoid_backward(dA, activation_cache)
```

- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:

```python
dZ = relu_backward(dA, activation_cache)
```

If $g(.)$ is the activation function, `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$.

**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.

```
# GRADED FUNCTION: linear_activation_backward

def linear_activation_backward(dA, cache, activation):
    """
    Implement the backward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for current layer l
    cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
    linear_cache, activation_cache = cache

    if activation == "relu":
        ### START CODE HERE ### (≈ 2 lines of code)
        # Undo the activation first (dZ = dA * g'(Z), formula (11)), then the linear step.
        dZ = relu_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
        ### END CODE HERE ###

    elif activation == "sigmoid":
        ### START CODE HERE ### (≈ 2 lines of code)
        dZ = sigmoid_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
        ### END CODE HERE ###

    # NOTE(review): any other `activation` string leaves the outputs unbound and the
    # return raises NameError — callers must pass exactly "sigmoid" or "relu".
    return dA_prev, dW, db

AL, linear_activation_cache = linear_activation_backward_test_case()

dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")

dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```

**Expected output with sigmoid:**

<table style="width:100%"> <tr> <td > dA_prev </td> <td >[[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.10266786 0.09778551 -0.01968084]] </td> </tr> <tr> <td > db </td> <td > [[-0.05729622]] </td> </tr> </table>

**Expected output with relu:**

<table style="width:100%"> <tr> <td > dA_prev </td> <td > [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0.
]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> </tr> <tr> <td > db </td> <td > [[-0.20837892]] </td> </tr> </table>

### 6.3 - L-Model Backward

Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass.

<img src="images/mn_backward.png" style="width:450px;height:300px;">
<caption><center> **Figure 5** : Backward pass </center></caption>

**Initializing backpropagation**: To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$. To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):

```python
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
```

You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula :

$$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$

For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.

**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model.
```
# GRADED FUNCTION: L_model_backward

def L_model_backward(AL, Y, caches):
    """
    Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group

    Arguments:
    AL -- probability vector, output of the forward propagation (L_model_forward())
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
    caches -- list of caches containing:
                every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
                the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])

    Returns:
    grads -- A dictionary with the gradients
             grads["dA" + str(l)] = ...
             grads["dW" + str(l)] = ...
             grads["db" + str(l)] = ...
    """
    grads = {}
    L = len(caches) # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL

    # Initializing the backpropagation
    ### START CODE HERE ### (1 line of code)
    # Analytic derivative of the cross-entropy cost (7) with respect to AL.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
    ### END CODE HERE ###

    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
    ### START CODE HERE ### (approx. 2 lines)
    current_cache = caches[-1]
    # Note this notebook's key convention: linear_activation_backward returns dA_prev,
    # so grads["dA" + str(L)] actually holds the gradient w.r.t. layer L-1's activation.
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
    ### END CODE HERE ###

    # Walk the hidden layers in reverse: l = L-2, ..., 0.
    for l in reversed(range(L-1)):
        # lth layer: (RELU -> LINEAR) gradients.
        # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
        ### START CODE HERE ### (approx. 5 lines)
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation = "relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
        ### END CODE HERE ###

    return grads

AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
```

**Expected Output**

<table style="width:60%"> <tr> <td > dW1 </td> <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td> </tr> <tr> <td > db1 </td> <td > [[-0.22007063] [ 0. ] [-0.02835349]] </td> </tr> <tr> <td > dA1 </td> <td > [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] </td> </tr> </table>

### 6.4 - Update Parameters

In this section you will update the parameters of the model, using gradient descent:

$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$

where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary.

**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.

**Instructions**: Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.

```
# GRADED FUNCTION: update_parameters

def update_parameters(parameters, grads, learning_rate):
    """
    Update parameters using gradient descent

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients, output of L_model_backward
    learning_rate -- step size alpha used in the gradient descent updates (16)-(17)

    Returns:
    parameters -- python dictionary containing your updated parameters
                  parameters["W" + str(l)] = ...
                  parameters["b" + str(l)] = ...
    """

    L = len(parameters) // 2 # number of layers in the neural network

    # Update rule for each parameter. Use a for loop.
    ### START CODE HERE ### (≈ 3 lines of code)
    # In-place update W := W - alpha*dW, b := b - alpha*db for layers 1..L
    # (-= mutates the arrays held in the `parameters` dict).
    for l in range(1,L+1):
        parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
    ### END CODE HERE ###

    return parameters

parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)

print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
```

**Expected Output**:

<table style="width:100%"> <tr> <td > W1 </td> <td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008] [-1.76569676 -0.80627147 0.51115557 -1.18258802] [-1.0535704 -0.86128581 0.68284052 2.20374577]] </td> </tr> <tr> <td > b1 </td> <td > [[-0.04659241] [-1.28888275] [ 0.53405496]] </td> </tr> <tr> <td > W2 </td> <td > [[-0.55569196 0.0354055 1.32964895]]</td> </tr> <tr> <td > b2 </td> <td > [[-0.84610769]] </td> </tr> </table>

## 7 - Conclusion

Congrats on implementing all the functions required for building a deep neural network!

We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier.

In the next assignment you will put all these together to build two models:
- A two-layer neural network
- An L-layer neural network

You will in fact use these models to classify cat vs non-cat images!
github_jupyter