code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Predicting Closing Price

import yfinance as yf

# +
# Download historical data from Yahoo Finance
df = yf.download("PFE", start="2008-04-05", end="2019-04-05")
# BUG FIX: dropna() returns a new frame; the original discarded the result
df = df.dropna()

# Calculate the simple moving average of the closing price for several windows
SMAs = [7, 14, 21]
for window in SMAs:
    df["SMA " + str(window)] = df["Close"].rolling(window=window).mean()

# BUG FIX: the 7-day standard deviation was computed inside the loop above
# with the loop variable as the window, so it actually used window=21 while
# being labeled "STD 7".  Compute it once with an explicit 7-day window.
df["STD 7"] = df["Close"].rolling(window=7).std()

# look at the data
df.tail()

# +
# Create new variables: daily High-Low range and Open-Close spread
df["H-L"] = df.iloc[:, 1] - df.iloc[:, 2]
df["O-C"] = df.iloc[:, 0] - df.iloc[:, 3]

# Feature set: the engineered columns (SMAs, STD 7, H-L, O-C)
X = df.iloc[:, 6:]
# Prediction target: the closing price
Y = df["Close"]
# -

# Divide into training and test data set

# +
Xtrain = X.loc["2009-04-06":"2017-04-03", :]
Ytrain = Y.loc["2009-04-07":"2017-04-04"]  # target is 1 day ahead of features
Xtest = X.loc["2017-04-04":"2019-04-03", :]
Ytest = Y.loc["2017-04-05":"2019-04-04"]
# -

# Look at the training data
Xtrain.head()

# +
from sklearn.ensemble import RandomForestRegressor

rf = RandomForestRegressor()
rf.fit(Xtrain, Ytrain)

# +
from sklearn.metrics import mean_absolute_percentage_error

mean_absolute_percentage_error(Ytest, rf.predict(Xtest)) * 100  # Evaluation score

# +
from sklearn.neural_network import MLPRegressor

ann = MLPRegressor(hidden_layer_sizes=(3,),
                   max_iter=int(2e4),
                   activation='relu',           # default='relu'
                   solver='lbfgs',              # default='adam'
                   alpha=0.0001,                # default=0.0001
                   max_fun=35000,               # default=15000
                   learning_rate='adaptive',    # default='constant'
                   learning_rate_init=0.001,    # default=0.001
                   shuffle=True,
                   tol=1e-4)                    # default=1e-4
ann.fit(Xtrain, Ytrain)

MAPE = mean_absolute_percentage_error(Ytrain, ann.predict(Xtrain)) * 100
print("Training MAPE = %f%%" % MAPE)
# -

# Evaluation score on the held-out test window
MAPE = mean_absolute_percentage_error(Ytest, ann.predict(Xtest)) * 100
print("Test MAPE = %f%%" % MAPE)

# Create dataframe with dates so that it is easier to plot

# +
import pandas as pd

dates = list(Ytest.index.values)  # get dates
Yguess = pd.DataFrame(ann.predict(Xtest), columns=['Close'], index=dates)

# +
import matplotlib.pyplot as plt
# %config InlineBackend.figure_format = 'svg'
plt.style.use('seaborn')
# %matplotlib inline

plt.xticks(rotation=40)
plt.plot(Ytest)
plt.plot(Yguess, 'orange')
plt.legend(['original', 'predicted'])
# -

# # Predict if closing price is positive or negative

# +
import numpy as np

# Difference in closing price between consecutive trading days
df["Diff Close"] = df["Close"].diff()
df["Sign"] = np.where(df['Diff Close'] > 0, 1, -1)

# New prediction target: direction of the daily move
Y = df["Sign"]
# -

# Divide into training and test data set

# +
Xtrain = X.loc["2009-04-06":"2017-04-03", :]
Ytrain = Y.loc["2009-04-07":"2017-04-04"]  # target is 1 day ahead of features
Xtest = X.loc["2017-04-04":"2019-04-03", :]
Ytest = Y.loc["2017-04-05":"2019-04-04"]

# +
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score

ann = MLPClassifier(hidden_layer_sizes=(3,),
                    max_iter=int(2e4),
                    activation='relu',          # default='relu'
                    solver='lbfgs',             # default='adam'
                    alpha=0.0001,               # default=0.0001
                    max_fun=1000,               # default=15000
                    learning_rate='adaptive',   # default='constant'
                    learning_rate_init=0.01,    # default=0.001
                    shuffle=False,
                    tol=1e-3)                   # default=1e-4
ann.fit(Xtrain, Ytrain)
Yguess = ann.predict(Xtest)
accuracy_score(Ytest, Yguess)
Predict Closing Price.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Homework Plotting

# ## Problem 1
# Create a plot of $\ e^{-x}sin(4x)$ and $\ e^{-x}cos(4x)$ from 0 to 5.
#
# Change the line color for $\ e^{-x}sin(4x)$ to red, and $\ e^{-x}cos(4x)$ to blue.
# Adjust the line width to 2.0. Add legend, xlabel, ylabel, and plot title.

# ## Problem 2
#
# Create 4 subplots of $y_1=x$, $y_2=x^2$, $y_3=x^3$, and $y_4=x^{0.5}$.
# Use 10 points between $x=0$ to $x=10$. For $y_1 = x$ use red circles,
# $y_2 = x^2$ use green dashes, $y_3 = x^3$ use blue triangles, and
# $y_4 = x^{0.5}$ use black squares. Also give each subplot a title.
# Include a command to save the figure as a PNG file so that it can be
# imported into another program such as Microsoft Powerpoint.

# ## Problem 3
# Plot x and y, and x and z. Make the x,y points red circles and the x,z
# points blue squares. Change the x axis limits to 0 to 120, change the
# y axis limits to -300 to 300, and change the marker size of the x,y
# points to 10.

# +
import numpy as np
import random

# Synthetic data: a noisy increasing trend (y) and a noisy decreasing one (z)
x = np.arange(0, 100)
y = np.zeros(len(x))
z = np.zeros(len(x))
for idx, xv in enumerate(x):
    # one uniform draw for y, then one for z, at each point
    y[idx] = 2.0 * xv + 50 * random.random()
    z[idx] = 250 * random.random() - 2.0 * xv
# -

# ## Problem 4
#
# The following Gross Domestic Product (GDP) data is reported for the top 8
# countries by economy size for 2013-2015 in billions of dollars.
#
# * **Country, 2014, 2015**
# * United States, 17,348.1, 17,947.0
# * China, 10,430.7, 10,982.8
# * Japan, 4,596.2, 4,123.3
# * Germany, 3,874.4, 3,357.6
# * United Kingdom, 2,991.7, 2,849.3
# * France, 2,833.7, 2,421.6
# * India, 2,042.6, 2,090.7
# * Italy, 2,141.9, 1,815.8
#
# a) Create a bar chart to display the percentage change in economic output
# for 2015 from the prior year.

import numpy as np

years = np.array([2014, 2015])
# One row per country, columns are the 2014 and 2015 GDP figures
data = np.array([
    [17348.1, 17947.0],
    [10430.7, 10982.8],
    [4596.2, 4123.3],
    [3874.4, 3357.6],
    [2991.7, 2849.3],
    [2833.7, 2421.6],
    [2042.6, 2090.7],
    [2141.9, 1815.8],
])
countries = ['US', 'China', 'Japan', 'Germany', 'UK', 'France', 'India', 'Italy']

# b) Create a pie chart to display the 2014 data for the top 8 world economies.
# With a Gross World Product (GWP) of about $77,960 billion, display a 9th
# category that includes the combined GDP for all other countries.
python/HW09.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Discretization is an essential preprocessing method used in many data science processes.
#
# Its main goal is to transform a set of continuous attributes into discrete ones,
# by associating categorical values to intervals and thus transforming quantitative
# data into qualitative data.
#
# Many existing data science algorithms are designed only to learn in categorical
# data, using nominal attributes, while real-world applications usually involve
# continuous features.
#
# 1. Use the KBinsDiscretizer class to transform the provided data into discretized attributes with 3 bins per attribute
#
# 2. Use the same discretizer to transform into a different number of bins per attribute: att1=3 bins, att2=2 bins, att3=3 bins
#
# 3. Print the results of your transformation process

# +
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

x = np.array([[-3, 5, 15],
              [0, 6, 14],
              [6, 3, 11],
              [1, 5, 14],
              [7, 6, 17]])

# Transform the provided data into discretized attributes, 3 bins per attribute
est = KBinsDiscretizer(n_bins=3, encode='ordinal').fit(x)
x_tran = est.transform(x)
print("3 bins per attribute: \n", x_tran)

# Different number of bins per attribute: att1=3 bins, att2=2 bins, att3=3 bins
est = KBinsDiscretizer(n_bins=[3, 2, 3], encode='ordinal').fit(x)
x_tran = est.transform(x)
print("\nDifferent number of bins per attribute: \n", x_tran)
# -

# With an additional parameter, different discretization strategies can be
# implemented with KBinsDiscretizer:
#
# - 'uniform': The discretization is uniform in each feature, which means that
#   the bin widths are constant in each dimension (equal width binning)
# - 'quantile': The discretization is done on the quantiled values, which means
#   that each bin has approximately the same number of samples (equal frequency binning)
# - 'kmeans': The discretization is based on the centroids of a KMeans clustering procedure

# +
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

num_samples = 200
num_bins = 4
centers_0 = np.array([15, 40])
centers_1 = np.array([25])

# BUG FIX: `rm_state` was used below but never defined, which raised a
# NameError at runtime.  Define the random seed explicitly.
rm_state = 1

# distributions: x0=uniform, x1=double_hump
x0 = np.random.RandomState(rm_state).uniform(-3, 3, size=num_samples)  # random, uniform data set
x1, _ = make_blobs(n_samples=num_samples, centers=2, n_features=1, random_state=1)  # Gaussian, double hump

# Set dist (and x) to one of the two distributions
dist = 'random'
x = x0

# define discretization strategy; other options: 'quantile', 'kmeans'
strat = 'uniform'

# Initialize a data frame with distribution x=x0 as raw data.
# Add an additional column indicating the discretization strategy
# (in this case 'none' as it is raw data).
df = pd.DataFrame(x, columns=['x'])
df['dist'] = dist
df['strat'] = 'none'

# Define discretizer with 4 bins and apply it using the defined strategy
enc = KBinsDiscretizer(n_bins=num_bins, encode='ordinal', strategy=strat)
enc.fit(x.reshape(-1, 1))
x_tran = enc.transform(x.reshape(-1, 1))

# Build new data frame with discretized values (size=num_samples)
df_app = pd.DataFrame(x_tran, columns=['x'])
df_app['dist'] = dist
df_app['strat'] = strat

# Append new data frame to data frame with distribution 0.
# BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
df = pd.concat([df, df_app])

# Print bin widths using the bin_edges_ attribute of KBinsDiscretizer.
# BUG FIX: width is the upper edge minus the lower edge; the original
# subtracted in the wrong order and printed negative widths.
for i in range(0, num_bins):
    print("\nWidth bin ", i, ": ", enc.bin_edges_[0][i + 1] - enc.bin_edges_[0][i])

# Visualize data in a Seaborn histogram grid using the FacetGrid class
cmap = sns.cubehelix_palette(n_colors=10, as_cmap=True)
g = sns.FacetGrid(df, col="strat", row="dist")
g.map(plt.hist, "x")
# -

# Encapsulate the discretization steps of the last task into a function called
# "discretize", passing the raw data array, the distribution name and the
# discretization strategy as parameters.
#
# Call the function with different distributions (dist = 'uniform' or
# 'double_hump'; x=x0 or x1) and with different strategies
# (strat='uniform', 'quantile', 'kmeans')

# +
def discretize(x, dist, strat, num_bins=4):
    """Discretize `x` into `num_bins` bins with strategy `strat` and plot.

    Builds a DataFrame stacking the raw data ('strat'='none') on top of the
    discretized values, prints the bin widths, and renders a FacetGrid of
    histograms.  `dist` is only a label used for the plot facets.

    `num_bins` was previously read from a global; it is now an explicit,
    backward-compatible parameter (default 4).
    """
    # Raw data, labelled with strategy 'none'
    df = pd.DataFrame(x, columns=['x'])
    df['dist'] = dist
    df['strat'] = 'none'

    # Define discretizer and apply it using the requested strategy
    enc = KBinsDiscretizer(n_bins=num_bins, encode='ordinal', strategy=strat)
    enc.fit(x.reshape(-1, 1))
    x_tran = enc.transform(x.reshape(-1, 1))

    # Discretized values, labelled with the strategy, stacked under the raw data
    df_app = pd.DataFrame(x_tran, columns=['x'])
    df_app['dist'] = dist
    df_app['strat'] = strat
    df = pd.concat([df, df_app])  # BUG FIX: DataFrame.append removed in pandas 2.0

    # BUG FIX: bin width is upper edge minus lower edge (was reversed)
    for i in range(0, num_bins):
        print("\nWidth bin ", i, ": ", enc.bin_edges_[0][i + 1] - enc.bin_edges_[0][i])

    # Visualize data in a Seaborn histogram grid using the FacetGrid class
    cmap = sns.cubehelix_palette(n_colors=10, as_cmap=True)
    g = sns.FacetGrid(df, col="strat", row="dist")
    g.map(plt.hist, "x")


# Call the function with different distributions and strategies
discretize(x0, 'uniform', 'kmeans')
notebooks/Solutions/DATAPREP_06_Discretization_Lab_Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import category_encoders as ce
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_squared_log_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from xgboost import XGBRegressor
from glob import glob
import matplotlib.pyplot as plt
import seaborn as sns


# +
# evaluation functions
def rmse(y_true, y_pred):
    """Root mean squared error between true and predicted values."""
    return np.sqrt(mean_squared_error(y_true, y_pred))


def rmsle(y_true, y_pred):
    """Root mean squared log error (the competition metric)."""
    return np.sqrt(mean_squared_log_error(y_true, y_pred))


# +
def wrangle(X):
    """Engineer features for one split of the Caterpillar tube-pricing data.

    Merges tube geometry, bill-of-materials tallies, component-type counts and
    spec counts onto ``X`` (read from the ``competition_data`` CSVs), then
    drops identifier columns.  Returns a new DataFrame; ``X`` is not mutated.
    """
    X = X.copy()

    # Engineer date features
    X['quote_date'] = pd.to_datetime(X['quote_date'], infer_datetime_format=True)
    X['quote_date_year'] = X['quote_date'].dt.year
    X['quote_date_month'] = X['quote_date'].dt.month
    X = X.drop(columns='quote_date')

    # Merge tube data
    tube = pd.read_csv('competition_data/tube.csv')
    X = X.merge(tube, how='left')

    # Engineer features from bill_of_materials: total quantity across the
    # 8 component slots, and the number of distinct slots in use
    materials = pd.read_csv('competition_data/bill_of_materials.csv')
    quantity_cols = ['quantity_' + str(n) for n in range(1, 9)]
    id_cols = ['component_id_' + str(n) for n in range(1, 9)]
    materials['components_total'] = materials[quantity_cols].fillna(0).sum(axis=1)
    materials['components_distinct'] = materials[id_cols].notnull().astype(int).sum(axis=1)

    # Engineer features from components and bill_of_materials
    components = pd.read_csv('competition_data/components.csv')

    # dictionary to map component_id to component_type_id
    component_dict = (components[['component_id', 'component_type_id']]
                      .set_index('component_id')
                      .to_dict(orient='dict')['component_type_id'])

    # replace component_id in bill_of_materials with component_type_id
    materials_type = materials.replace(component_dict)

    def count(row, item):
        """Tally the quantity of components of type `item` in one row.

        BUG FIX: scan all 8 component slots; the original used range(1, 6)
        and under-counted assemblies with components in slots 6-8.  `item`
        is now an explicit parameter instead of a closed-over loop variable.
        """
        tally = 0
        for num in range(1, 9):
            if row['component_id_' + str(num)] == item:
                tally += row['quantity_' + str(num)]
        return tally

    # one tally column per distinct component type
    component_type_list = components.component_type_id.unique().tolist()
    for item in component_type_list:
        materials_type[item] = materials_type.apply(count, axis=1, item=item)

    # Merge selected features from bill_of_materials
    # Just use the first component_id, ignore the others for now!
    features = ['tube_assembly_id', 'component_id_1', 'components_total',
                'components_distinct', 'OTHER', 'CP-024', 'CP-026', 'CP-028',
                'CP-014', 'CP-018', 'CP-001', 'CP-008', 'CP-009', 'CP-002',
                'CP-010', 'CP-021', 'CP-011', 'CP-015', 'CP-027', 'CP-003',
                'CP-004', 'CP-005', 'CP-019', 'CP-025', 'CP-006', 'CP-016',
                'CP-020', 'CP-012', 'CP-022', 'CP-007', 'CP-017', 'CP-023',
                'CP-029']
    X = X.merge(materials_type[features], how='left')

    # Get component_type_id (has lower cardinality than component_id)
    components = pd.read_csv('competition_data/components.csv')
    components = components.rename(columns={'component_id': 'component_id_1'})
    features = ['component_id_1', 'component_type_id']
    X = X.merge(components[features], how='left')

    # Count the number of specs for the tube assembly
    specs = pd.read_csv('competition_data/specs.csv')
    specs['specs_total'] = specs.drop(columns=['tube_assembly_id']).count(axis=1)
    features = ['tube_assembly_id', 'specs_total', 'spec1']
    X = X.merge(specs[features], how='left')

    # Drop tube_assembly_id because our goal is to predict unknown assemblies
    X = X.drop(columns='tube_assembly_id')
    return X


# +
# read data
trainval = pd.read_csv('competition_data/train_set.csv')
test = pd.read_csv('competition_data/test_set.csv')
trainval.shape, test.shape
# -

# Split into train & validation sets
# All rows for a given tube_assembly_id should go in either train or validation
trainval_tube_assemblies = trainval['tube_assembly_id'].unique()
train_tube_assemblies, val_tube_assemblies = train_test_split(
    trainval_tube_assemblies, random_state=42)
train = trainval[trainval.tube_assembly_id.isin(train_tube_assemblies)]
val = trainval[trainval.tube_assembly_id.isin(val_tube_assemblies)]

# Wrangle train, validation, and test sets
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)

# Arrange X matrix and y vector (log-transformed target)
target = 'cost'
X_train = train.drop(columns=target)
X_val = val.drop(columns=target)
X_test = test.drop(columns='id')
y_train = train[target]
y_val = val[target]
y_train_log = np.log1p(y_train)
y_val_log = np.log1p(y_val)


# prediction function
def generate_submission(estimator, X_test_param, filename):
    """Predict on `X_test_param` and write a Kaggle submission CSV to `filename`."""
    y_pred_log = estimator.predict(X_test_param)
    y_pred = np.expm1(y_pred_log)  # Convert from log-dollars to dollars
    submission = pd.read_csv('sample_submission.csv')
    submission['cost'] = y_pred
    submission.to_csv(filename, index=False)


# ## Now lets do a gradient boosting model

# BUG FIX: encoders must be fit on the training split only, then applied to
# validation/test with .transform(); the original re-ran .fit_transform() on
# every split, which leaks information and produces inconsistent category
# codes across splits.  Also, `cols` is a constructor argument of
# category_encoders encoders, not a fit_transform keyword.
encoder = ce.OneHotEncoder(cols=['material_id', 'supplier'])
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)
X_test_encoded = encoder.transform(X_test)

# Ordinal encoding of all categoricals (overwrites the one-hot frames above;
# kept to preserve the notebook's final choice of encoding)
encoder = ce.OrdinalEncoder()
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)
X_test_encoded = encoder.transform(X_test)

# +
eval_set = [(X_train_encoded, y_train_log), (X_val_encoded, y_val_log)]

model = XGBRegressor(n_estimators=3000, n_jobs=-1, eta=0.085, max_depth=7)
model.fit(X_train_encoded, y_train_log, eval_set=eval_set,
          eval_metric='rmse', early_stopping_rounds=300)
# -

# generate submission
generate_submission(model, X_test_encoded, 'submission-22.csv')

# ## Feature Importances

# +
# Get feature importances
importances = pd.Series(model.feature_importances_, X_train.columns)
sns.set_style('darkgrid')

# Plot feature importances
n = 25
plt.figure(figsize=(10, n / 2))
plt.title(f'Top {n} features', fontsize=26)
importances.sort_values()[-n:].plot.barh(color='springgreen');
# -

# ## Permutation importances

# +
import eli5
from eli5.sklearn import PermutationImportance

permuter = PermutationImportance(model, scoring='neg_mean_squared_error',
                                 cv='prefit', n_iter=2, random_state=42)
permuter.fit(X_val_encoded, y_val_log)
feature_names = X_val_encoded.columns.tolist()
eli5.show_weights(permuter, top=None, feature_names=feature_names)
# -

# ## Partial dependence plot

# +
from pdpbox.pdp import pdp_isolate, pdp_plot

feature = 'annual_usage'
isolated = pdp_isolate(model=model, dataset=X_val_encoded,
                       model_features=X_val_encoded.columns, feature=feature)
pdp_plot(isolated, feature_name=feature);
# -
Sinclair_permutation_importances_partial_dependence_plots_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Exploratory-Data-Analysis" data-toc-modified-id="Exploratory-Data-Analysis-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Exploratory Data Analysis</a></span></li><li><span><a href="#Reading-in-Data" data-toc-modified-id="Reading-in-Data-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Reading in Data</a></span></li><li><span><a href="#Visualizing-Channel-Characteristics" data-toc-modified-id="Visualizing-Channel-Characteristics-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Visualizing Channel Characteristics</a></span><ul class="toc-item"><li><span><a href="#Overall-Feature-Distributions" data-toc-modified-id="Overall-Feature-Distributions-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Overall Feature Distributions</a></span><ul class="toc-item"><li><span><a href="#Feature-Histograms" data-toc-modified-id="Feature-Histograms-3.1.1"><span class="toc-item-num">3.1.1&nbsp;&nbsp;</span>Feature Histograms</a></span></li><li><span><a href="#Feature-Ridgeline-Plots" data-toc-modified-id="Feature-Ridgeline-Plots-3.1.2"><span class="toc-item-num">3.1.2&nbsp;&nbsp;</span>Feature Ridgeline Plots</a></span></li></ul></li><li><span><a href="#Thoughts-on-Overall-Distributions" data-toc-modified-id="Thoughts-on-Overall-Distributions-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Thoughts on Overall Distributions</a></span></li><li><span><a href="#Visualizing-Channels-and-Metrics" data-toc-modified-id="Visualizing-Channels-and-Metrics-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Visualizing Channels and Metrics</a></span><ul class="toc-item"><li><span><a href="#Metrics-Across-Time-by-channel" 
data-toc-modified-id="Metrics-Across-Time-by-channel-3.3.1"><span class="toc-item-num">3.3.1&nbsp;&nbsp;</span>Metrics Across Time by channel</a></span></li><li><span><a href="#Proportion-of-Monthly-Traffic-by-Channel-Over-Time" data-toc-modified-id="Proportion-of-Monthly-Traffic-by-Channel-Over-Time-3.3.2"><span class="toc-item-num">3.3.2&nbsp;&nbsp;</span>Proportion of Monthly Traffic by Channel Over Time</a></span></li><li><span><a href="#Visualizing-Metrics-in-Terms-of-Sessions" data-toc-modified-id="Visualizing-Metrics-in-Terms-of-Sessions-3.3.3"><span class="toc-item-num">3.3.3&nbsp;&nbsp;</span>Visualizing Metrics in Terms of Sessions</a></span></li><li><span><a href="#Percent-Change-in-Metrics-By-Channel" data-toc-modified-id="Percent-Change-in-Metrics-By-Channel-3.3.4"><span class="toc-item-num">3.3.4&nbsp;&nbsp;</span>Percent Change in Metrics By Channel</a></span></li></ul></li><li><span><a href="#Takeaways:-Channel-Trends-over-Time" data-toc-modified-id="Takeaways:-Channel-Trends-over-Time-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Takeaways: Channel Trends over Time</a></span></li></ul></li><li><span><a href="#Visualizing-Geographies-and-Metrics" data-toc-modified-id="Visualizing-Geographies-and-Metrics-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Visualizing Geographies and Metrics</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Sessions-by-Country" data-toc-modified-id="Sessions-by-Country-4.0.1"><span class="toc-item-num">4.0.1&nbsp;&nbsp;</span>Sessions by Country</a></span></li><li><span><a href="#Session-Duration-Characteristics-Value" data-toc-modified-id="Session-Duration-Characteristics-Value-4.0.2"><span class="toc-item-num">4.0.2&nbsp;&nbsp;</span>Session Duration Characteristics Value</a></span></li></ul></li></ul></li><li><span><a href="#Visualizing-Device-Characteristics" data-toc-modified-id="Visualizing-Device-Characteristics-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Visualizing Device 
Characteristics</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Metrics-by-Device-Over-Time" data-toc-modified-id="Metrics-by-Device-Over-Time-5.0.1"><span class="toc-item-num">5.0.1&nbsp;&nbsp;</span>Metrics by Device Over Time</a></span></li><li><span><a href="#Metrics-by-Device-In-Terms-of-Sessions" data-toc-modified-id="Metrics-by-Device-In-Terms-of-Sessions-5.0.2"><span class="toc-item-num">5.0.2&nbsp;&nbsp;</span>Metrics by Device In Terms of Sessions</a></span></li></ul></li><li><span><a href="#Takeaways:-Metrics-by-Device" data-toc-modified-id="Takeaways:-Metrics-by-Device-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Takeaways: Metrics by Device</a></span></li></ul></li><li><span><a href="#Visualizing-Demographic-Characteristics" data-toc-modified-id="Visualizing-Demographic-Characteristics-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Visualizing Demographic Characteristics</a></span><ul class="toc-item"><li><span><a href="#Percentage-of-Users-by-Age-and-Gender" data-toc-modified-id="Percentage-of-Users-by-Age-and-Gender-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>Percentage of Users by Age and Gender</a></span></li></ul></li><li><span><a href="#Relationships-Between-Metrics" data-toc-modified-id="Relationships-Between-Metrics-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Relationships Between Metrics</a></span><ul class="toc-item"><li><span><a href="#Channel" data-toc-modified-id="Channel-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>Channel</a></span></li><li><span><a href="#Geographies" data-toc-modified-id="Geographies-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>Geographies</a></span></li><li><span><a href="#Devices" data-toc-modified-id="Devices-7.3"><span class="toc-item-num">7.3&nbsp;&nbsp;</span>Devices</a></span></li></ul></li></ul></div> # - # # Exploratory Data Analysis # # Reading in Data # + import pandas as pd from matplotlib import pyplot as plt import matplotlib.cm as 
cm # colormaps for visualization import joypy # ridgeline chart import numpy as np # interactive visualization import plotly as py import plotly.graph_objs as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) import plotly.express as px # + # set filepath to processed data filepath = '../data/processed/ga_export_processed.xlsx' #create separate dataframes for each table traffic_source = pd.read_excel(filepath, sheet_name='traffic_source') geo_location = pd.read_excel(filepath, sheet_name='geo_location') device = pd.read_excel(filepath, sheet_name='device') demos = pd.read_excel(filepath, sheet_name='demos') #set index for dataframes with datetime field traffic_source.set_index(traffic_source['month_year_dt'], inplace=True) device.set_index(device['month_year_dt'], inplace=True) #sort the dataframes that have datetime values in ascending order by datetime traffic_source = traffic_source.sort_index() device = device.sort_index() # - # # Visualizing Channel Characteristics # # I want to explore what the distribution of sessions, bounce rate, and the email capture conversion rate overall. I also want to see what these look like over time in aggregate, and when separated by channel. 
# ## Overall Feature Distributions # ### Feature Histograms for col in ['Sessions', 'Bounce Rate', 'Email Capture Conversion Rate']: traffic_source.plot(y=col, kind='hist', bins=25, legend=False) plt.title('Distribution of {c}'.format(c=col)) plt.xlabel(col) # ### Feature Ridgeline Plots fig, axes = joypy.joyplot(traffic_source, by='Traffic Channel', column='bounced_sessions', overlap=1.8, colormap=cm.coolwarm, linecolor='w', linewidth=.5, figsize=(10, 6)) plt.title('Distribution of Bounced Sessions by Channel'); plt.xlabel('Bounced Sessions'); fig, axes = joypy.joyplot(traffic_source, by='Traffic Channel', column='Sessions', overlap=3, colormap=cm.coolwarm, linecolor='w', linewidth=.5, figsize=(10, 6)) plt.title('Distribution of Sessions by Channel'); plt.xlabel('Sessions'); fig, axes = joypy.joyplot(traffic_source, by='Traffic Channel', column='email_conv_sessions', overlap=2, colormap=cm.coolwarm, linecolor='w', linewidth=.5, figsize=(10, 6)) plt.title('Distribution of Email Capture Conversions (count) by Channel'); plt.xlabel('Email Capture Conversions'); # + print( 'Mean monthly sessions across all channels: ', np.round( traffic_source.pivot(index='month_year_dt', columns='Traffic Channel', values='Sessions').sum(axis=1).mean(), 0)) print( 'Mean monthly bounce rate across all channels: ', np.round( traffic_source.pivot(index='month_year_dt', columns='Traffic Channel', values='Bounce Rate').mean(axis=1).mean() * 100, 0),"%") # - # ## Thoughts on Overall Distributions # **Bounced Sessions** # * The count of bouncing sessions is quite low for direct traffic, implying that those who were seeking the content out directly were engaging really well with the site once they got there. # * Organic search is high at 50-85% across all months, centering around 75% overall.. If session duration is also low, this may mean there is an issue with the content. It would imply users are arriving, not really engaging, and then leaving quickly. 
# # * Other referral bounced sessions are somewhat bimodal. The two modes center at ~35% and ~52%. Perhaps plotting this over time will yield more insight. Question: What does the bounce rate look like broken out by referral source? Is the bounce rate consistent across sources or is this plot hinting that some sources are resulting in high bounce rates vs others? # # * Pinterest is bouncing most often and accounts for a majority of bounced sessions. # # **Sessions** # * Direct and other referral channels are accounting for about 1/4 of pinterest channel sessions # * Organic search seems highly dispersed, displaying really high variability. Viewing this across a time axis will likely reveal that this value has changed over time (or is highly variable in general) # # **Email Capture Conversions** # * The distribution of conversions across months in the data are quite narrow for other referral and direct channels, indicating relatively stable conversion scale (count of sessions with a conversion) over time # ## Visualizing Channels and Metrics # ### Metrics Across Time by channel # + # sessions color_sequence = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3'] fig = px.line(traffic_source, y='Sessions', color='Traffic Channel', color_discrete_sequence=color_sequence) fig.update_layout(title="Sessions Over Time by Channel", xaxis_title="Month-Year", yaxis_title="Total Sessions") # exploring adding an n-period rolling mean view but it gets busy quickly # a = traffic_source[traffic_source['Traffic Channel'] == 'pinterest']['Sessions'].rolling(4, min_periods=1).mean() # fig.add_trace(go.Scatter(x=a.index, y=a, name='Moving Average of Sessions (3 month)', # line=dict(color=color_sequence[3], width=1, dash='dot'))) #plot, save to file py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/sessions_by_channel.html") # bounce rate fig = px.line(traffic_source, y='Bounce Rate', color='Traffic Channel', color_discrete_sequence=color_sequence) fig.update_layout(title="Bounce 
Rate Over Time by Channel", xaxis_title="Month-Year", yaxis_title="Bounce Rate") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/bounce_rate_by_channel.html") fig = px.line(traffic_source, y='Email Capture Conversion Rate', color='Traffic Channel', color_discrete_sequence=color_sequence) fig.update_layout(title="Email Capture Conversion Rate Over Time by Channel", xaxis_title="Month-Year", yaxis_title="Email Capture Conversion Rate") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/conversions_by_channel.html") # - # ### Proportion of Monthly Traffic by Channel Over Time # + # # copy table for transformation t_prop = traffic_source.copy() for row_num, row in enumerate(traffic_source.iterrows()): # the sessions total should be the sessions by that channel (on that row, i) over all sessions per month # t prop takes the sessions per channel for a given month (sessions at a particular row) and divides it by the sum of sessions per that month # the loop then writes that proportion to the position where the original value was drawn from in the copy of the df t_prop.loc[row[0], 'pct_sessions'] = np.round( traffic_source.loc[row[0], 'Sessions'] / traffic_source[ traffic_source['month_year_dt'] == row[1][5]]['Sessions'].sum(), 2) * 100 # - fig = px.line(t_prop, y='pct_sessions', color='Traffic Channel') fig.update_layout( title="Sessions as a Proportion of their Monthly Sum by Channel", xaxis_title="Month-Year", yaxis_title="% of This Month's Total Sessions") py.offline.iplot(fig) # ### Visualizing Metrics in Terms of Sessions # + color_sequence = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3'] # bounce rate fig = px.line(traffic_source, y='bounced_sessions', color='Traffic Channel', color_discrete_sequence=color_sequence) fig.update_layout(title="Count of Bounced Sessions Over Time by Channel", xaxis_title="Month-Year", yaxis_title="Count of Bounced Sessions") py.offline.iplot(fig) 
fig.write_html("../reports/figures/plotly/count_bounced_sessions_by_channel.html") fig = px.line(traffic_source, y='email_conv_sessions', color='Traffic Channel', color_discrete_sequence=color_sequence) fig.update_layout(title="Email Capture Conversions Over Time by Channel", xaxis_title="Month-Year", yaxis_title="Count of Email Capture Conversions") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/count_email_conversions_by_channel.html") # - # ### Percent Change in Metrics By Channel # # Converting to percent change allows comparing values against their prior raw value. It gives a more clear sense of the scale of the change throughout time, since metrics are transformed into one unit (pct change) instead of the different units present in the raw data. # + # plot pct change # looped implementation due to the mechanics of the .pct_change method and the datetime array index for metric in ['Sessions', 'Bounce Rate', 'Email Capture Conversion Rate']: # create a figure for each available of metric fig = go.Figure() # for each channel type, create a dataframe with the pct change in that one metric, filtered by the channel type for position, channel in enumerate( ['direct', 'organic search', 'other referral', 'pinterest']): df = traffic_source[traffic_source['Traffic Channel'] == channel].reset_index(drop=True) df2 = df[[metric]].pct_change() * 100 #append the month_year_dt for x axis in plots df2['month_year_dt'] = df['month_year_dt'] #add as a trace to the figure for that metric fig.add_trace( go.Scatter(x=df2['month_year_dt'], y=df2.loc[:, metric], mode='lines', name=channel, line=dict(color=color_sequence[position], width=2), legendgroup=channel, showlegend=True)) fig.update_layout(title="Percent Change in {m} Over Time".format(m=metric), xaxis_title="Month-Year", yaxis_title="Percent Change (Month over Month %) ") fig.add_trace( go.Scatter(x=df2['month_year_dt'], y=[0] * 17, mode='lines', name='Zero Pct Change', line=dict(color='black', width=1.5, 
dash='dot'))) fig.show() # - # ## Takeaways: Channel Trends over Time # **Sessions** # # * Pinterest traffic was responsible for the most sessions early on, but organic search has grown to compete with pinterest traffic. Pinterest sessions and organic search sessions seem to be negatively correlated, where when one goes up, the other goes down as of Nov 2019 onward. # # Question : What mechanism would be driving this trend or is it just spurious? # # * Direct and Other referral sessions tend to be more consistent over time, but direct traffic seems to be on a negative trend over the past few months. # # **Bounce Rate** # # * Organic search and pinterest bounce rates are very high across time, in the 80-90% range # # **Email Capture Conversion Rate** # # * Email capture is highest in the other referral channel, followed by the pinterest and direct channels which have similar values generally. # * Organic search has comparatively low email capture conversion and trends only between 1% and 1.5% # # # Visualizing Geographies and Metrics # ### Sessions by Country # + code_folding=[] # raw sessions among top 10 countries fig = px.bar(geo_location.sort_values(by='Sessions', ascending=False).iloc[0:10, :], x="Country", y=["Sessions"], title='10 Countries with Highest Sessions by Count') fig.show() # as pct of total fig2 = px.bar( geo_location.sort_values(by='pct_overall_sessions', ascending=False).iloc[0:10, :], x="Country", y="pct_overall_sessions", title='10 Countries with Highest Sessions by Percentage Composition') fig2.update_layout(yaxis=dict( tickmode='array', tickvals=[0, 0.20, .40, .6, .8, 1], ticktext=['0', '20%', '40%', '60%', '80%', '100%'])) fig2.show() fig2.write_html("../reports/figures/plotly/pct_sessions_by_country.html") # - # ### Session Duration Characteristics Value print( 'Weighted Avg. 
of all session times: ', np.round( geo_location['total_duration'].sum() / geo_location['Sessions'].sum(), 2)) # + # which countries make up 95% of sessions for c in [5]: print('{c} countries account for {p}% of sessions: \n'.format( c=c, p=np.round( geo_location.sort_values( by='Sessions', ascending=False)['pct_overall_sessions'][0:c].sum(), 2) * 100)) # create dataframe with just these countries geo_small = geo_location.iloc[0:5, :] # print countries [print(i) for i in geo_small['Country']]; # - # as pct of total fig2 = px.bar(geo_small, x="Country", y="Avg. Session Duration", title='Average Session Duration by Country (95% of Sessions)') fig2.show() fig2.write_html("../reports/figures/plotly/avg_session_duration_95_pct_countries.html") # # Visualizing Device Characteristics # Proportion of sessions by device: ##.# % print('Proportion of total sessions by device:\n---------------') np.round( device.pivot(columns='Device', values='Sessions').sum(axis=0) / device.pivot(columns='Device', values='Sessions').sum().sum() * 100, 2) # ### Metrics by Device Over Time # + # sessions fig = px.line(device, y='Sessions', color='Device', color_discrete_sequence=color_sequence[0:3]) fig.update_layout(title="Sessions Over Time by Device", xaxis_title="Month-Year", yaxis_title="Total Sessions") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/sessions_by_device.html") # bounce rate fig = px.line(device, y='Bounce Rate', color='Device', color_discrete_sequence=color_sequence[0:3]) fig.update_layout(title="Bounce Rate Over Time by Device", xaxis_title="Month-Year", yaxis_title="Bounce Rate") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/bounce_rate_by_device.html") fig = px.line(device, y='Email Capture Conversion Rate', color='Device', color_discrete_sequence=color_sequence[0:3]) fig.update_layout(title="Email Capture Conversion Rate Over Time by Device", xaxis_title="Month-Year", yaxis_title="Email Capture Conversion Rate") py.offline.iplot(fig) 
fig.write_html("../reports/figures/plotly/email_conversion_by_device.html") # - # ### Metrics by Device In Terms of Sessions # + # bounce rate fig = px.line(device, y='bounced_sessions', color='Device', color_discrete_sequence=color_sequence[0:3]) fig.update_layout(title="Count of Bounced Sessions Over Time by Device", xaxis_title="Month-Year", yaxis_title="Bounced Sessions") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/bounced_session_count_by_device.html") fig = px.line(device, y='email_conv_sessions', color='Device', color_discrete_sequence=color_sequence[0:3]) fig.update_layout(title="Sessions with Email Capture Conversion Over Time by Device", xaxis_title="Month-Year", yaxis_title="Count of Sessions with Email Capture Conversion") py.offline.iplot(fig) fig.write_html("../reports/figures/plotly/email_conversions_by_device.html") # - # ## Takeaways: Metrics by Device # **Sessions** # # * Mobile devices account for the large majority of sessions (73% of all sessions across all months) # # **Bounce Rate** # * Mobile device access has a very high bounce rate, implying content is either poorly formatted for mobile and/or that there isnt a lot of movement through the site. # * Mobile bounce rate is less variable than other devices. # * Question: Why is tablet bounce rate so variable? # * Desktop bounce rate was nice and low, but is creeping back up past its average across 2019. # * Question: Did anything change with the content and design of the site in the new calendar year? # # **Email Capture Conversions** # * Mobile email capture conversion rate is just under 2% on average, but is on the rise more recently. # * Question: Tablet email capture conversion is non-existant right now. Is that broken in analytics, for users, or both? # # **Overall** # # * Mobile device access is most common, but the conversion rate is lower than desktop which has a higher email capture conversion rate. Question: Is the content poorly formatted for mobile consumption? 
# * Bounce rates for tablets peaked in December and then email conversion rate completely dropped to zero in January and onward. Question: Is the email conversion process broken on tablets? # * Email Capture conversion is relatively consistent over time for mobile and desktop. With that, it may be easier to focus on streamlining the path for the conversion for mobile users # * Question: What happened in January when bounce rate when way down for desktop users? # * Question: Why might bounce rate be climbing among desktop users? # # Visualizing Demographic Characteristics # + #create aggregation across gender indexed by age d_pivot = demos.pivot(index='Age', columns='Gender', values='Users') #convert each value to a proportion of total users d_pct = d_pivot / d_pivot.sum().sum() # flip sign of males, as they are much lower than females among users d_pct['male'] = d_pct['male'] * -1 #make more human readable with percentage values d_pct = np.round(d_pct * 100, 1) # - print('Ratio of female to male users across all age groups:', np.round(d_pivot['female'].sum() / d_pivot['male'].sum(), 0), ": 1") # ## Percentage of Users by Age and Gender # + fig = go.Figure() category_order = ['female', 'male'] colors = ['#5ab4ac', '#d8b365'] for pos, column in enumerate(d_pct.columns): fig.add_trace( go.Bar( x=d_pct[column], y=d_pct.index, name=column, orientation='h', marker_color=colors[pos], )) fig.update_layout(barmode='relative', title='Composition of Total Users By Age and Gender', xaxis_title='Percent of Total Users', yaxis_title='Age Group', xaxis=dict( tickmode='array', tickvals=[-2.5, 0, 5, 10, 15, 20, 25], ticktext=['2.5', '0', '5', '10', '15', '20', '25'])) fig.show() # - # # Relationships Between Metrics # ## Channel # + fig = px.scatter( traffic_source, x="Bounce Rate", y="Email Capture Conversion Rate", color="Traffic Channel", size='Sessions', # hover_data=['Month of Year', 'Traffic Channel'] ) fig.update_layout( title= 'Email Capture Conversion Rate x Bounce Rate 
(Point Size based on Sessions)', xaxis_title='Bounce Rate', yaxis_title='Email Capture Conversion Rate') fig.show() fig.write_html("../reports/figures/plotly/scatter_email_capture_bounce_rate_channel.html") # - # ## Geographies # + fig = px.scatter(geo_location[geo_location.Sessions > 100], x="Bounce Rate", y="Email Capture Conversion Rate", size='Sessions', color='Sessions', hover_data=['Sessions'], color_continuous_scale=px.colors.sequential.Magenta) fig.update_layout( title= 'Email Capture Conversion Rate x Bounce Rate (Among Countries with 100+ Sessions)', xaxis_title='Bounce Rate', yaxis_title='Email Capture Conversion Rate') fig.show() # - # ## Devices # + fig = px.scatter( device, x="Bounce Rate", y="Email Capture Conversion Rate", size='Sessions', color='Device', # hover_data=['Sessions'], # color_continuous_scale=px.colors.sequential.Magenta ) fig.update_layout( title= 'Email Capture Conversion Rate x Bounce Rate Across Devices(sized by sessions)', xaxis_title='Bounce Rate', yaxis_title='Email Capture Conversion Rate') fig.show() fig.write_html("../reports/figures/plotly/scatter_email_capture_bounce_rate_device.html") # -
notebooks/EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd
import seaborn as sns

# Load the benchmark results and use their first column as the row index.
file_name = 'lstm_kernel_reg'
results = pd.read_csv(f'result/{file_name}.csv')
results = results.set_index(results.columns[0])
results

import matplotlib.pyplot as plt

# Box plot of every result column on a log scale, saved next to the CSV.
fig, ax = plt.subplots(figsize=(20, 10))
results.boxplot(ax=ax)
ax.set_yscale('log')
fig.savefig(f'result/{file_name}.png')
plt.show()
baseline and initial data/BoxPlot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="skBHazUjfEwt" # # Quantum State Tomography with Iterative Maximum Likelihood Estimation. # # Author: <NAME> # Email: <EMAIL> # GitHub: quantshah # # In this notebook, we use QuTiP to perform Quantum State Tomography by counting photon number statistics of a resonator as discussed in [1]. The iterative Maximum Likelihood Estimation method is used to start from a random guess of the density matrix and repeatedly apply an operator to obtain the true density matrix of the state [2]. # # ![SegmentLocal](images/reconstruction.gif "segment") # # # We measure the probability of observing a certain number of photons $\langle n \rangle$ after displacing the state by various angles. This is done by applying the displacement operator to the density matrix of the state $D(\beta) \rho D^{\dagger}(\beta)$. Then, using the photon number statistics for various measurement settings, i.e., values of $\beta_i$, we can recreate the density matrix. # # ![photonstats](images/photon-stats.png "photon-stats") # # # This is done by an iterative Maximum Likelihood method by repeatedly applying an operator $R$ which is a function of the measured value of the observable $f_i$, current estimate of the probability from the density matrix, $p_i$ and measurement setting (the displaced basis in this case $|y_i \rangle \langle y_i| D(\beta) |n_i \rangle \langle n_i| D^{\dagger}(\beta))$, where $n_i$ denotes the fock basis operator for measuring $i$ photons. # # ![rop](images/rop.png "rop") # # # ## References # [1] <NAME>, et al. "Optimized tomography of continuous variable systems using excitation counting." Physical Review A 94.5 (2016): 052327. # Link: https://arxiv.org/abs/1606.07554 # # [2] <NAME>., <NAME>, and <NAME>. 
# "Iterative algorithm for reconstruction of entangled states." Physical Review A 63.4 (2001): 040303.

# + colab={} colab_type="code" id="wmzuSKdYfEwu"
import numpy as np

from qutip import Qobj, rand_dm, fidelity, displace, qdiags, qeye, expect
from qutip.states import coherent, coherent_dm, thermal_dm, fock_dm
from qutip.visualization import plot_wigner, hinton
from qutip.wigner import qfunc
import qutip

import matplotlib.pyplot as plt
from matplotlib import animation

# some pretty printing and animation stuff
from IPython.display import clear_output

# + [markdown] colab_type="text" id="PVkkO71JfEwx"
# # Define the operator measured, how to obtain it from a density matrix and the iterative operator for MaxLikelihood.

# + colab={} colab_type="code" id="NxpFCAT1fEwy"
"""
Iterative Maximum Likelihood estimation based on photon number counting.
"""


def measure_population(beta, rho):
    """
    Measures the photon number statistics for state rho when displaced
    by angle beta.

    Parameters
    ----------
    beta: complex
        A complex displacement.

    rho:
        The density matrix as a QuTiP Qobj (`qutip.Qobj`)

    Returns
    -------
    population: ndarray
        A 1D array for the probabilities for populations.
    """
    hilbertsize = rho.shape[0]
    # Apply a displacement to the state and then measure the diagonals.
    D = displace(hilbertsize, beta)
    rho_disp = D*rho*D.dag()
    # The diagonal of D rho D^dag in the Fock basis is the photon-number
    # distribution of the displaced state; any imaginary residue is
    # numerical noise, so keep the real part only.
    populations = np.real(np.diagonal(rho_disp.full()))
    return populations


def roperator(beta, rho, measured):
    """
    Calculates the iterative ratio operator for measured probability for
    photons (j) to the analytical prediction for some rho.

    Parameters
    ----------
    beta: complex
        The single displacement that was applied to the state before this
        set of measurements.

    rho: `qutip.Qobj`
        The current estimate of the density matrix.

    measured: list_like
        The measurement statistics (diagonal terms) observed for this beta,
        one probability per Fock level.

    Returns
    -------
    R: `qutip.Qobj`
        The iterative operator which we are going to apply for state
        reconstruction.
    """
    hilbert_size = rho.shape[0]

    # initialize an empty operator and build it
    R = 0*qeye(hilbert_size)
    calculated_measurements = measure_population(beta, rho)

    # The displacement operator does not depend on n, so construct it once
    # instead of once per Fock level (it was rebuilt inside the loop before,
    # which is an expensive matrix-exponential construction).
    D = displace(hilbert_size, beta)

    for n in range(hilbert_size):
        op = fock_dm(hilbert_size, n)
        displaced_D = D.dag()*op*D
        # Ratio of the observed to the predicted probability for level n;
        # the small epsilon guards against division by zero for levels the
        # current estimate predicts to be (almost) unpopulated.
        ratio = measured[n]/(calculated_measurements[n] + 1e-6)
        displaced_D = ratio*displaced_D
        R += displaced_D

    return R


# + [markdown] colab_type="text" id="vdmB3kPjfEw0"
# # Take an example density matrix and reconstruct it

# + colab={} colab_type="code" id="om2M8513fEw0"
hilbert_size = 32

alpha_range = 1.9
alphas = np.array([alpha_range, -alpha_range - 1j*alpha_range, -alpha_range + 1j*alpha_range])

rho_true = sum([coherent_dm(hilbert_size, a) for a in alphas])/3
# -

# # Displace and measure populations

# + colab={} colab_type="code" id="A53Dj0pZfEw2"
betas = [1.7, -2, 2.2j, -2.1 - 2.4j, -2 + 2j]
measured_populations = [measure_population(b, rho_true) for b in betas]
width = 1

# + [markdown] colab_type="text" id="AxfEf06TfEw4"
# # Random initial state

# + colab={"base_uri": "https://localhost:8080/", "height": 386} colab_type="code" id="R14uQK8dfEw5" outputId="e1a92574-ae47-4780-e393-c161c48622db"
random_rho = rand_dm(hilbert_size)
hinton(random_rho)
plt.show()
# -

# # Wigner function plot and measurement statistics
#
# The x marks the displacement angles.
# + colab={"base_uri": "https://localhost:8080/", "height": 355} colab_type="code" id="pHBNQF5ufEw9" outputId="63782bce-92a3-4cef-8300-e0e0be5b1da3" fig, ax = plt.subplots(1, 3, figsize=(15, 5)) indices = np.arange(hilbert_size) plot_wigner(random_rho, fig, ax[0]) ax[0].scatter(np.real(betas), np.imag(betas), marker="x") ax[0].set_title("Random inital state wigner function") for i in range(len(betas)): ax[1].bar(indices, measured_populations[i], label = r"$beta = {}$".format(i), width=(i+1)/12) ax[1].set_title("Population measurement statistics") ax[1].set_xlabel("n") ax[1].set_ylabel("Photon number probability") plot_wigner(rho_true, fig, ax[2]) ax[2].scatter(np.real(betas), np.imag(betas), marker="x") ax[2].set_title("Target state wigner function") plt.show() # + [markdown] colab_type="text" id="1owyNcLkfEw_" # # The actual MLE iterations for various measurement settings # + colab={} colab_type="code" id="H2AUl97ufExA" rho_t = [] # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="F_j0yM9ffExC" outputId="782fc473-f84b-458c-bdac-15d2c01abee5" max_iter = 100 for iterations in range(max_iter): for i in range(len(betas)): rho_t.append(random_rho) rop = roperator(betas[i], random_rho, measured_populations[i]) random_rho = rop*random_rho*rop # Trace renorm random_rho = random_rho/random_rho.tr() # Compute fidelity fidel = fidelity(random_rho, rho_true) if iterations % 5 == 0: print(r"Fidelity: {}".format(fidel)) clear_output(wait=0.2) if fidel > 0.99: break # + [markdown] colab_type="text" id="S2c9PyJMfExE" # # Reconstructed states # + colab={"base_uri": "https://localhost:8080/", "height": 372} colab_type="code" id="Bi_m5x1ifExF" outputId="0e6ad0d5-fef7-4d16-f082-ac13899ffbf6" fig, ax = plt.subplots(1, 2, figsize=(9, 5)) plot_wigner(random_rho, fig=fig, ax=ax[1]) plot_wigner(rho_true, fig=fig, ax=ax[0], cmap="RdBu") ax[0].set_title("Target state") ax[1].set_title("Reconstructed state") plt.show # + [markdown] colab_type="text" 
id="7FuwKQysfExG" # # Population measurement from reconstructed states # + colab={"base_uri": "https://localhost:8080/", "height": 1277} colab_type="code" id="mKl3yaNjfExH" outputId="4dd2bb25-99f7-4853-cec7-78ca4d71af38" examples = 5 for i in range(examples): idx = np.random.choice(range(len(betas))) beta = betas[idx] measured = measured_populations[idx] plt.bar(indices, measure_population(beta, random_rho), label = "Reconstructed statistics", width=(i+1)/5) plt.bar(indices, measured, label = r"Simulated true measurement values, $\beta$ = {}".format( np.round(beta, 2)), width=(i+1)/8) plt.xlabel(r"n") plt.ylabel(r"$\langle n \rangle$") plt.legend() plt.show() # - # # QuTiP details qutip.about() # + [markdown] colab_type="text" id="hKsYYuo6fExJ" # # Plot and save the wigner function for making animation of MLE # # **make sure you have the images/wigner folder created** # + colab={} colab_type="code" id="C_jwTysTfExK" # for i in range(len(rho_t)): # fig, ax = plt.subplots(1, 2, figsize=(15, 7)) # indices = np.arange(hilbert_size) # plot_wigner(rho_t[i], fig, ax[0]) # ax[0].scatter(np.real(betas), np.imag(betas), marker="x") # hinton(rho_t[i], ax=ax[1]) # ax[1].set_title("Reconstructed Density matrix at iteration {}".format(str(i))) # plt.savefig("images/wigner/"+str(i)+".png", bbox_inches='tight') # plt.close() # + [markdown] colab_type="text" id="bvnoRBRVfExM" # # Make a gif with the Wigner plots # # Install imageio for this to work # + colab={} colab_type="code" id="_AQfYRpvfExN" # import imageio # png_dir = 'images/wigner/' # images = [] # interval = 20 # intervals to pick to plot # for i in range(0, len(rho_t), interval): # file_name = str(i)+".png" # file_path = os.path.join(png_dir, file_name) # images.append(imageio.imread(file_path)) # imageio.mimsave('reconstruction3.gif', images, loop=1) #make loop=0 to keep looping # + colab={} colab_type="code" id="LJQj2RCgif3H"
qutip-notebooks-master/examples/tomography-resonator-MLE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="yUFqTJut2hqm" colab_type="text" # #data make # + id="gsUZcG4s2gmS" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt def make_data(dimention=2): #正常データの作成(2次元) x1 = np.random.normal(1, 0.3, (1, 100)) y1 = np.random.normal(1, 0.3, (1, 100)) x2 = np.random.normal(1.5, 0.3, (1, 100)) y2 = np.random.normal(1.5, 0.3, (1, 100)) #テストデータの作成(2次元) test2 = np.array([0.5,2]) test2 = test2.reshape((1,2)) #データの形を整える data = [] data.append(x1) data.append(x2) data.append(y1) data.append(y2) data = np.array(data) data = data.reshape(2,200) data = data.transpose() #X次元に拡張 data_new = np.random.normal(0, 0.1, (200, dimention)) anomaly_new = np.random.normal(0, 0.1,(1,dimention)) data = np.hstack((data,data_new)) test2 = np.hstack((test2, anomaly_new)) print(data.shape) #可視化 plt.figure(figsize=(5,5)) plt.scatter(data[:,0], data[:,1], c="green", s=50) plt.scatter(test2[:,0],test2[:,1],c="red", s=50, label="Test_anomaly") plt.xlabel("x_1") plt.ylabel("x_2") plt.legend() plt.show() plt.figure(figsize=(10,5)) plt.subplot(1,2,1) plt.scatter(data[:,0], data[:,2], c="green", s=50) plt.scatter(test2[:,0],test2[:,2],c="red", s=50, label="Test_anomaly") plt.xlabel("x_1") plt.ylabel("x_3") plt.legend() plt.subplot(1,2,2) plt.scatter(data[:,0], data[:,3], c="green", s=50) plt.scatter(test2[:,0],test2[:,3],c="red", s=50, label="Test_anomaly") plt.xlabel("x_1") plt.ylabel("x_4") plt.legend() plt.show() return data, test2 # + id="vNjd0YBE5fw4" colab_type="code" outputId="71a2332e-b589-41b5-a7a1-facd79c2deb0" executionInfo={"status": "ok", "timestamp": 1581465672724, "user_tz": -540, "elapsed": 3344, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDVNvIeOf7at5hGMYGVZ57WAAZy2cUySUGtMgQvHg=s64", "userId": 
# "02985854947085672136"}} colab={"base_uri": "https://localhost:8080/", "height": 670}
x_train, x_anomaly = make_data()

# + [markdown] id="lyxvzY6e5mB-" colab_type="text"
# #Isolation Forest

# + id="s9pcD1ff5mLu" colab_type="code" colab={}
from sklearn.ensemble import IsolationForest

def IF_make(x):
    """Fit an IsolationForest anomaly detector on the training matrix x."""
    # training
    IF = IsolationForest(n_estimators=100, max_samples=50)
    IF.fit(x)
    return IF


# + id="WiIhI_MZ6Xqf" colab_type="code" colab={}
model = IF_make(x_train)

# + [markdown] id="BwkuqfO-32IX" colab_type="text"
# #permutation importance

# + id="UDMprebR3_w3" colab_type="code" colab={}
def permutation_importance(train, test, model, repeats=100):
    """Per-feature anomaly attribution for the single sample `test`.

    Each feature of `test` is repeatedly replaced with a value drawn from the
    training data, and the change in the model's anomaly score is accumulated.
    Features whose replacement lowers the anomaly score the most receive the
    largest importance.

    Parameters
    ----------
    train : ndarray of shape (n_samples, n_features)
        Training data to draw replacement values from.
    test : ndarray of shape (1, n_features)
        The (anomalous) sample to explain.
    model : fitted estimator exposing `decision_function`.
    repeats : int, default=100
        Number of random replacements per feature.

    Returns
    -------
    ndarray of shape (n_features,)
        Importance per feature; larger value = higher contribution to the
        anomaly.
    """
    # Sign is flipped so that a larger score means a higher degree of anomaly.
    criterion = -model.decision_function(test)[0]
    result = np.zeros((repeats, test.shape[1]))
    for feature in range(test.shape[1]):
        for n in range(repeats):
            # decision_function returns a length-1 array; take the scalar
            # explicitly (assigning a 1-element array into a scalar slot is
            # deprecated in recent NumPy versions).
            result[n, feature] = -model.decision_function(
                permuted(train, test, feature))[0]
    result -= criterion
    # Larger value = higher contribution to the anomaly.
    result = -np.sum(result, axis=0)
    return result

def permuted(train, test, column):
    """Return a copy of `test` with feature `column` replaced by a random
    training value of that same feature."""
    result = np.copy(test)
    # BUG FIX: the original used train[column], which selects a training *row*
    # (one sample's feature vector), so the replacement value was drawn from
    # the wrong distribution.  Select the feature column across samples.
    result[0, column] = np.random.permutation(train[:, column])[0]
    return result


# + id="xHfjCwGL8QCZ" colab_type="code" outputId="6a961e90-2e85-493a-d00e-60b71ad42b84" executionInfo={"status": "ok", "timestamp": 1581465697375, "user_tz": -540, "elapsed": 20154, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDVNvIeOf7at5hGMYGVZ57WAAZy2cUySUGtMgQvHg=s64", "userId": "02985854947085672136"}} colab={"base_uri": "https://localhost:8080/", "height": 270}
result = permutation_importance(x_train, x_anomaly, model)
label = ["x_1","x_2","x_3","x_4"]
plt.figure()
plt.bar(label, result, align="center")
plt.grid(True)
plt.ylabel("Feature Importance")
plt.grid(True)
plt.show()

# + [markdown] id="oQfbimeIpjox" colab_type="text"
# # extra sensor = 8

# + id="6FHi6892prqe" colab_type="code" outputId="c4b9aceb-4ca9-4b0a-8c14-21fcf2d37485" executionInfo={"status": "ok", "timestamp": 1581465760112, "user_tz": -540, "elapsed":
47726, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDVNvIeOf7at5hGMYGVZ57WAAZy2cUySUGtMgQvHg=s64", "userId": "02985854947085672136"}} colab={"base_uri": "https://localhost:8080/", "height": 919} x_train, x_anomaly = make_data(8) model = IF_make(x_train) result = permutation_importance(x_train, x_anomaly, model) label = ["x_1","x_2","x_3","x_4","x_5","x_6","x_7","x_8","x_9","x_10"] plt.figure() plt.bar(label, result, align="center") plt.grid(True) plt.ylabel("Feature Importance") plt.grid(True) plt.show() # + id="DxDVl-b-uZ1u" colab_type="code" outputId="ccc556cc-2ad5-4337-9fcd-e2599616b68c" executionInfo={"status": "ok", "timestamp": 1581466147240, "user_tz": -540, "elapsed": 46938, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDVNvIeOf7at5hGMYGVZ57WAAZy2cUySUGtMgQvHg=s64", "userId": "02985854947085672136"}} colab={"base_uri": "https://localhost:8080/", "height": 919} x_train, x_anomaly = make_data(8) model = IF_make(x_train) result = permutation_importance(x_train, x_anomaly, model) label = ["x_1","x_2","x_3","x_4","x_5","x_6","x_7","x_8","x_9","x_10"] plt.figure() plt.bar(label, result, align="center") plt.grid(True) plt.ylabel("Feature Importance") plt.grid(True) plt.show()
Decrease_sensors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <div class="alert alert-success">
# <b>Author</b>:
#
# <NAME>
# <EMAIL>
#
# </div>
#
#
# [Click here to see class lecture](https://drive.google.com/file/d/1dA3dEOyuVnWN4K-EY6tjdDrQdRHE_u40/view)
#
# ### [Book ref](http://mathforcollege.com/nm/topics/textbook_index.html)

# The secant method can be derived from Newton's method, but we'll follow the geometric derivation for the sake of simplicity. In Newton's method we used only one point, $x_i$, in each update. In the secant method we use two points, $x_i$ and $x_{i-1}$, to calculate the next point from $x_i$. For example, if we have the value of $x_1$ and its previous point $x_0$, then we can calculate the next point $x_2$. But why should we use this? This method finds the root faster. Because we take a second point, if we draw a tangent line at the 2nd point, the extended line intercepts the x-axis closer to the root than in Newton's method. If you understand Newton's method, then deriving the secant method from geometry won't be that tough; if you still can't follow it, just remember the equation. The bisection method is a bracketed method because it brackets the root, while Newton's method and the secant method are open methods because there is no bracketing involved. So finding a root using an open method isn't guaranteed: the iteration can be convergent (it will find the root) or divergent (it may not find the root). Though Newton's method and the secant method are both open methods, the secant method is faster.
#
# In the `first iteration` we'll find $x_{i+1}$.
# So in the `second iteration` $x_{i+1}$ (calculated in the 1st iteration) will become $x_i$, and $x_i$ (its value from the first iteration) will become $x_{i-1}$.
#
# ![image.png](figures/fig162.PNG)
#
# ![image.png](figures/fig163.PNG)
# ![image.png](figures/fig164.PNG)
#
# ![image.png](figures/fig165.PNG)
# ![image.png](figures/fig166.PNG)
#
# ### Let's see an example
#
# ![image.png](figures/fig167.PNG)
#
# ![image.png](figures/fig168.PNG)
#
# ![image.png](figures/fig169.PNG)
#
# ![image.png](figures/fig170.PNG)
#
# ![image.png](figures/fig171.PNG)
#
# ![image.png](figures/fig172.PNG)
#
# ![image.png](figures/fig173.PNG)
#
# ![image.png](figures/fig174.PNG)
#
# ![image.png](figures/fig175.PNG)
#
# ![image.png](figures/fig176.PNG)
#
# #### Let's clear up some concepts about significant digits.
#
# - If the absolute relative error percent is less than 5%, then there exists at least 1 significant digit
#
# - If the absolute relative error percent is less than 0.5%, then there exist at least 2 significant digits
#
# - If the absolute relative error percent is less than 0.05%, then there exist at least 3 significant digits
#
# The absolute relative error below is 0.0595%, which is greater than 0.05% and less than 0.5%; that's why there must exist at least 2 significant digits. We don't need to prove this in the exam.
#
#
# ![image.png](figures/fig177.PNG)
#
# ![image.png](figures/fig178.PNG)
#
# ![image.png](figures/fig179.PNG)
#
# These drawbacks are the same as Newton's method's, since the secant method follows from Newton's method.
#
# ![image.png](figures/fig180.PNG)
#
# ![image.png](figures/fig181.PNG)
#
# That's all for this lecture!
CSE_313_Numerical Methods/Lecture_11_19.08.2020.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Convert Jpeg to png

# Re-encode every .jpeg image under ./fonts as .png, then delete the
# originals.  Only files that were successfully re-encoded are removed, so a
# corrupt or unreadable image is never lost.

from glob import glob
import os
import cv2

jpegs = glob('./fonts/*.jpeg')
folder_path = r'./fonts/'

converted = []  # originals that were successfully written out as .png
for jpeg_path in jpegs:
    img = cv2.imread(jpeg_path)
    if img is None:
        # cv2.imread returns None (it does not raise) for unreadable files.
        print('skipping unreadable file:', jpeg_path)
        continue
    # splitext handles any extension length robustly, unlike the previous
    # hard-coded slice j[:-4] which only worked for 4-letter extensions.
    png_path = os.path.splitext(jpeg_path)[0] + '.png'
    if cv2.imwrite(png_path, img):
        converted.append(jpeg_path)

for jpeg_path in converted:
    os.remove(jpeg_path)
script/2_convert_jpeg2png.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 03 : LeNet5 architecture - exercise # For Google Colaboratory import sys, os if 'google.colab' in sys.modules: from google.colab import drive drive.mount('/content/gdrive') file_name = 'lenet5_exercise.ipynb' import subprocess path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8") print(path_to_file) path_to_file = path_to_file.replace(file_name,"").replace('\n',"") os.chdir(path_to_file) # !pwd import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from random import randint import utils import time # ### With or without GPU? # # It is recommended to run this code on GPU:<br> # * Time for 1 epoch on CPU : 96 sec (1.62 min)<br> # * Time for 1 epoch on GPU : 2 sec w/ GeForce GTX 1080 Ti <br> # device= torch.device("cuda") device= torch.device("cpu") print(device) # ### Download the MNIST dataset # + from utils import check_mnist_dataset_exists data_path=check_mnist_dataset_exists() train_data=torch.load(data_path+'mnist/train_data.pt') train_label=torch.load(data_path+'mnist/train_label.pt') test_data=torch.load(data_path+'mnist/test_data.pt') test_label=torch.load(data_path+'mnist/test_label.pt') print(train_data.size()) print(test_data.size()) # - # ### Compute average pixel intensity over all training set and all channels # + mean= train_data.mean() print(mean) # - # ### Compute standard deviation # + std= train_data.std() print(std) # - # ### Make a LeNet5 convnet class. 
class LeNet5_convnet(nn.Module):
    """LeNet5-style convnet for 28x28 MNIST digits.

    Two conv+ReLU+maxpool stages followed by two fully connected layers
    producing 10 raw class scores (no softmax; applied by the loss/caller).
    """

    def __init__(self):
        super(LeNet5_convnet, self).__init__()

        # CL1: 28 x 28 --> 50 x 28 x 28  (padding=1 keeps spatial size)
        self.conv1 = nn.Conv2d(1, 50, kernel_size=3, padding=1 )

        # MP1: 50 x 28 x 28 --> 50 x 14 x 14
        self.pool1 = nn.MaxPool2d(2,2)

        # CL2: 50 x 14 x 14 --> 100 x 14 x 14
        self.conv2 = nn.Conv2d(50, 100, kernel_size=3, padding=1 )

        # MP2: 100 x 14 x 14 --> 100 x 7 x 7
        self.pool2 = nn.MaxPool2d(2,2)

        # LL1: 100 x 7 x 7 = 4900 --> 100
        self.linear1 = nn.Linear(4900, 100, bias = True)

        # LL2: 100 --> 10 (one score per digit class)
        self.linear2 = nn.Linear(100, 10, bias = True)

    def forward(self, x):
        # CL1: 28 x 28 --> 50 x 28 x 28
        x = self.conv1(x)
        x = F.relu(x)

        # MP1: 50 x 28 x 28 --> 50 x 14 x 14
        x = self.pool1(x)

        # CL2: 50 x 14 x 14 --> 100 x 14 x 14
        x = self.conv2(x)
        x = F.relu(x)

        # MP2: 100 x 14 x 14 --> 100 x 7 x 7
        x = self.pool2(x)

        # LL1: 100 x 7 x 7 = 4900 --> 100
        # Flatten feature maps to a vector per sample.
        x = x.view(-1, 4900)   # python will automatically fix the value -1, otherwise we can also put the value of bs for the first parameter
        x = self.linear1(x)
        x = F.relu(x)

        # LL2: 4900 --> 10
        x = self.linear2(x)

        return x

# ### Build the net. How many parameters in total?

net=LeNet5_convnet()
print(net)
utils.display_num_param(net)

# ### Send the weights of the networks to the GPU (as well as the mean and std)

# +
net = net.to(device)

mean=mean.to(device)

std=std.to(device)
# -

# ### Choose the criterion, batch size, and initial learning rate.
Select the following: # * batch size =128 # * initial learning rate =0.25 # + criterion = nn.CrossEntropyLoss() my_lr=0.25 bs=128 # - # ### Function to evaluate the network on the test set def eval_on_test_set(): running_error=0 num_batches=0 for i in range(0,10000,bs): minibatch_data = test_data[i:i+bs].unsqueeze(dim=1) minibatch_label= test_label[i:i+bs] minibatch_data=minibatch_data.to(device) minibatch_label=minibatch_label.to(device) inputs = (minibatch_data - mean)/std scores=net( inputs ) error = utils.get_error( scores , minibatch_label) running_error += error.item() num_batches+=1 total_error = running_error/num_batches print( 'error rate on test set =', total_error*100 ,'percent') # ### Do 30 passes through the training set. Divide the learning rate by 2 every 5 epochs. # + start=time.time() for epoch in range(1,30): if not epoch%5: my_lr = my_lr / 2 optimizer=torch.optim.SGD( net.parameters() , lr=my_lr ) running_loss=0 running_error=0 num_batches=0 shuffled_indices=torch.randperm(60000) for count in range(0,60000,bs): # FORWARD AND BACKWARD PASS optimizer.zero_grad() indices=shuffled_indices[count:count+bs] minibatch_data = train_data[indices].unsqueeze(dim=1) minibatch_label= train_label[indices] minibatch_data=minibatch_data.to(device) minibatch_label=minibatch_label.to(device) inputs = (minibatch_data - mean)/std inputs.requires_grad_() scores=net( inputs ) loss = criterion( scores , minibatch_label) loss.backward() optimizer.step() # COMPUTE STATS running_loss += loss.detach().item() error = utils.get_error( scores.detach() , minibatch_label) running_error += error.item() num_batches+=1 # AVERAGE STATS THEN DISPLAY total_loss = running_loss/num_batches total_error = running_error/num_batches elapsed = (time.time()-start)/60 print('epoch=',epoch, '\t time=', elapsed,'min', '\t lr=', my_lr ,'\t loss=', total_loss , '\t error=', total_error*100 ,'percent') eval_on_test_set() print(' ') # - # ### Choose image at random from the test set and see how 
good/bad are the predictions # + # choose a picture at random idx=randint(0, 10000-1) im=test_data[idx] # diplay the picture utils.show(im) # send to device, rescale, and view as a batch of 1 im = im.to(device) im= (im-mean) / std im=im.view(1,28,28).unsqueeze(dim=1) # feed it to the net and display the confidence scores scores = net(im) probs= F.softmax(scores, dim=1) utils.show_prob_mnist(probs.cpu()) # -
codes/labs_lecture08/lab03_lenet5/lenet5_exercise.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Spark // language: '' // name: sparkkernel // --- // ## Clustering // In this exercise, you will use K-Means clustering to segment customer data into five clusters. // // ### Import the Libraries // You will use the **KMeans** class to create your model. This will require a vector of features, so you will also use the **VectorAssembler** class. // import org.apache.spark.ml.clustering.KMeans import org.apache.spark.ml.feature.VectorAssembler // ### Load Source Data // The source data for your clusters is in a comma-separated values (CSV) file, and incldues the following features: // - CustomerName: The custome's name // - Age: The customer's age in years // - MaritalStatus: The custtomer's marital status (1=Married, 0 = Unmarried) // - IncomeRange: The top-level for the customer's income range (for example, a value of 25,000 means the customer earns up to 25,000) // - Gender: A numeric value indicating gender (1 = female, 2 = male) // - TotalChildren: The total number of children the customer has // - ChildrenAtHome: The number of children the customer has living at home. // - Education: A numeric value indicating the highest level of education the customer has attained (1=Started High School to 5=Post-Graduate Degree // - Occupation: A numeric value indicating the type of occupation of the customer (0=Unskilled manual work to 5=Professional) // - HomeOwner: A numeric code to indicate home-ownership (1 - home owner, 0 = not a home owner) // - Cars: The number of cars owned by the customer. val customers = spark.read.option("inferSchema","true").option("header", "true").csv("wasb:///data/customers.csv") customers.show() // ### Create the K-Means Model // You will use the feaures in the customer data to create a Kn-Means model with a k value of 5. 
This will be used to generate 5 clusters. // + val assembler = new VectorAssembler().setInputCols(Array("Age", "MaritalStatus", "IncomeRange", "Gender", "TotalChildren", "ChildrenAtHome", "Education", "Occupation", "HomeOwner", "Cars")).setOutputCol("features") val train = assembler.transform(customers) val kmeans = new KMeans().setFeaturesCol(assembler.getOutputCol).setPredictionCol("cluster").setK(5).setSeed(0) val model = kmeans.fit(train) println("Model Created!") // - // ### Get the Cluster Centers // The cluster centers are indicated as vector coordinates. println("Cluster Centers: ") model.clusterCenters.foreach(println) // ### Predict Clusters // Now that you have trained the model, you can use it to segemnt the customer data into 5 clusters and show each customer with their allocated cluster. val prediction = model.transform(train) prediction.groupBy("cluster").count().orderBy("cluster").show() prediction.select("CustomerName", "cluster").show(50)
DataAnalyticsWithSpark/Unsupervised/Scala Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # More Resources # # "Wait, I feel like the prior pages didn't cover enough!" # # A: You're right! I'm speeding past Numpy because # 1. We're going to use `pandas` mostly, and most of our numpy usage will be _implicit_ numpy usage within pandas. # 2. There is such good tutorials online. Instead of copy-pasting the definitive materials, let me point you to the place: # # - [NumPy quickstart](https://numpy.org/doc/stable/user/quickstart.html) # - [NumPy: the absolute basics for beginners](https://numpy.org/doc/stable/user/absolute_beginners.html) # - [NumPy basics](https://numpy.org/doc/stable/user/basics.html) # # --- # # "But, but... couldn't we do lots of _finance_ stuff in numpy?" # # Absolutely. Anytime you're using matrices, numpy is probably what you should be using. It happens that this finance class will not emphasize simulations or derivatives, which are just two finance settings where numpy shines. # #
content/03/01d_NumpyResources.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Day 13 # # First day I had to cheat and look for help (for part two). import numpy as np from aocd.models import Puzzle from scipy.sparse import lil_matrix # ## Data puzzle = Puzzle(year=2020, day=13) data = puzzle.input_data.split() # ## Part One # # Could be done in a two-liner: # # t, b = int(data[0]), np.array(data[1].replace('x,', '').split(','), dtype=int) # pullze.answer_a = min(b-t%b)*b[np.argmin(b - t%b)] time = int(data[0]) bus = np.array(data[1].replace('x,', '').split(','), dtype=int) time, bus # + tt = min(bus-time%bus) bid = bus[np.argmin(bus - time%bus)] answer_a = bid*tt answer_a # - puzzle.answer_a = answer_a # ## Part Two bcomp = np.array(data[1].replace('x', '0').split(','), dtype=int) bid = np.arange(bcomp.size)[bcomp>0] bus = bcomp[bcomp>0] bus, bid # **Ensure buses are prime numbers:** for bnr in bus: for i in range(2, bnr): if bnr % i == 0: print(f"Bus {bnr} is not prime") break # **For this part I had to cheat. The function here is an adaption of the [solution of Ryan May (@dopplershift)](https://github.com/dopplershift/advent-of-code/blob/main/2020/day13.py); see also [Wikipedia: Chinese Remainder Theorem](https://en.wikipedia.org/wiki/Chinese_remainder_theorem).** # # Note the bus/bid could be sorted to start with the largest step first followed by the second-largest and so on, for further speed-up. But then I never paid attention to speed in this AoC... # + inc = time = bus[0] for repeat, wait in zip(bus[1:], bid[1:]): while (time + wait) % repeat: time += inc inc *= repeat answer_b = time answer_b # - puzzle.answer_b = answer_b import scooby scooby.Report('aocd')
2020/Day-13.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Style transfer using VGG16 network # # * `A Neural Algorithm of Artistic Style`, [arXiv:1508.06576](https://arxiv.org/abs/1508.06576) # * <NAME>, <NAME>, and, <NAME> # # # * `models/research/slim/nets`을 이용하여 만듦 # * content_input_image 최대 길이 max_L=1024 로 고정 # * style_input_image 최대 길이 max_L=1024 로 고정 # * 원래 논문처럼 `average_pooling` 사용 # * loss는 논문에 나온 그대로 사용하지 않음 # * Johnson et., al. Perceptual Losses for Real-Time Style Transfer and Super-Resolution 에 나온 loss 참고 # * content loss, style loss 둘다 feature map 차원에 맞게 normalize 함 # * hyperparameter들은 [cs20](http://web.stanford.edu/class/cs20si/) 코드를 참조 # * input_image는 우리집 고양이 # * style_image # * [Starry Night](https://en.wikipedia.org/wiki/The_Starry_Night) [Gogh 작품] # * [The Scream](https://en.wikipedia.org/wiki/The_Scream) [Munch 작품] # # * reference code [ilguyi's code](https://github.com/ilguyi/style-transfer.tensorflow) # + #import sys #sys.path.append("$HOME/models/research/slim/") import os import time import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from PIL import Image import tensorflow as tf slim = tf.contrib.slim sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) os.environ["CUDA_VISIBLE_DEVICES"]="0" # - # ### Hyperparameters setting input_data_dir = '../input_data/' content_image_name = 'my_cat2.jpg' style_image_name = 'Gogh_The_Starry_Night.jpg' noise_ratio = 0.6 content_image_max_L = 200 # upper bound of content image size style_image_max_L = 200 # upper bound of style image size style_loss_weight = np.array([0.5, 1.0, 1.5, 3.0, 4.0]) style_loss_weight /= np.sum(style_loss_weight) content_weight = 1.0 style_weight = 1.0 learning_rate = 2.0 max_steps = 300 print_steps = 1 # ### Load a VGG16 graph def vgg_16(inputs, reuse=False, 
scope='vgg_16'): """Oxford Net VGG 16-Layers version D Example My Note: This code is modified version of vgg_16 which is loacted on `models/research/slim/nets/vgg.py` Note: All the fully_connected layers have been transformed to conv2d layers. To use in classification mode, resize input to 224x224. Args: inputs: a tensor of size [batch_size, height, width, channels]. reuse: whether or not the model is being reused. scope: Optional scope for the variables. Returns: net: the output of the logits layer (if num_classes is a non-zero integer), or the input to the logits layer (if num_classes is 0 or None). end_points: a dict of tensors with intermediate activations. """ with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse) as sc: end_points_collection = sc.original_name_scope + '_end_points' with slim.arg_scope([slim.conv2d, slim.avg_pool2d], outputs_collections=end_points_collection): # 여기를 직접 채워 넣으시면 됩니다. net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1') net = slim.avg_pool2d(net, [2, 2], scope='pool1') net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2') net = slim.avg_pool2d(net, [2, 2], scope='pool2') net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3') net = slim.avg_pool2d(net, [2, 2], scope='pool3') net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4') net = slim.avg_pool2d(net, [2, 2], scope='pool4') net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5') net = slim.avg_pool2d(net, [2, 2], scope='pool5') end_points = slim.utils.convert_collection_to_dict(end_points_collection) return net, end_points # ### Read content image and style image content_image_ = Image.open(os.path.join(input_data_dir, content_image_name)) style_image_ = Image.open(os.path.join(input_data_dir, style_image_name)) def image_resize_with_upper_bound(image, max_L): """Resize images Args: image: PIL image format max_L: upper bound of the image size Returns: image: resized image with PIL format h: resized height 
w: resized width """ w, h = image.size if np.max(np.array([h, w])) > max_L: if h < w: h = int(max_L * h / w) w = max_L else: w = int(max_L * w / h) h = max_L image = image.resize((w, h)) return image, h, w content_image_, content_image_h, content_image_w = image_resize_with_upper_bound(content_image_, content_image_max_L) style_image_, style_image_h, style_image_w = image_resize_with_upper_bound(style_image_, style_image_max_L) print('content_image size: height: {} width: {}'.format(content_image_h, content_image_w)) print('style_image size: height: {} width: {}'.format(style_image_h, style_image_w)) # 여기를 직접 채워 넣으시면 됩니다. content_image_p = tf.placeholder(tf.float32, [1, content_image_h, content_image_w, 3]) style_image_p = tf.placeholder(tf.float32, [1, style_image_h, style_image_w, 3]) # 여기를 직접 채워 넣으시면 됩니다. # content_image, style_image를 tf.Variable로 바꾸기 위해 tf.placeholder와 같은 shape의 zero Tensor를 만듦 content_image = tf.get_variable(name='content_image', shape=[1, content_image_h, content_image_w, 3], initializer=tf.zeros_initializer()) style_image = tf.get_variable(name='style_image', shape=[1, style_image_h, style_image_w, 3], initializer=tf.zeros_initializer()) generated_image = tf.get_variable(name='generated_image', shape=[1, content_image_h, content_image_w, 3], initializer=tf.random_uniform_initializer(minval=-20, maxval=20)) # tf.placeholder를 tf.Variable로 바꿈 content_image_op = content_image.assign(content_image_p) style_image_op = style_image.assign(style_image_p) # 초기 이미지는 content_image에 random noise를 섞음 generated_image_op = generated_image.assign(generated_image * noise_ratio + \ content_image_p * (1.0 - noise_ratio)) # 여기를 직접 채워 넣으시면 됩니다. # generated_image는 매 update 후에 아래의 값 사이로 clipping norm_means = np.array([123.68, 116.779, 103.939]) min_vals = -norm_means max_vals = 255. - norm_means generated_image_clipping = generated_image.assign(tf.clip_by_value(generated_image, clip_value_min=min_vals, clip_value_max=max_vals)) # 여기를 직접 채워 넣으시면 됩니다. 
_, feature_maps_c = vgg_16(content_image) # input: content_image _, feature_maps_s = vgg_16(style_image, reuse=True) # input: style_image _, feature_maps_g = vgg_16(generated_image, reuse=True) # input: generated_image with tf.Session() as sess: writer = tf.summary.FileWriter("./graphs/02_style_transfer", sess.graph) writer.close() # ## Build a model # ### collecte feature maps # # * content layers # * `conv4_2`: key name -> 'vgg16/vgg_16/conv4/conv4_2' # * style layers # * `conv1_1`: key name -> '<KEY>' # * `conv2_1`: key name -> 'vgg16/vgg_16/conv2/conv2_1' # * `conv3_1`: key name -> '<KEY>' # * `conv4_1`: key name -> '<KEY>' # * `conv5_1`: key name -> 'vgg16/vgg_16/conv5/conv5_1' content_layers = feature_maps_c['vgg_16/conv4/conv4_2'] style_layers = [feature_maps_s['vgg_16/conv1/conv1_1'], feature_maps_s['vgg_16/conv2/conv2_1'], feature_maps_s['vgg_16/conv3/conv3_1'], feature_maps_s['vgg_16/conv4/conv4_1'], feature_maps_s['vgg_16/conv5/conv5_1']] generated_layers = [feature_maps_g['vgg_16/conv4/conv4_2'], feature_maps_g['vgg_16/conv1/conv1_1'], feature_maps_g['vgg_16/conv2/conv2_1'], feature_maps_g['vgg_16/conv3/conv3_1'], feature_maps_g['vgg_16/conv4/conv4_1'], feature_maps_g['vgg_16/conv5/conv5_1']] # ### content loss def content_loss(P, F, scope): """Calculate the content loss function between the feature maps of content image and generated image. Args: P: the feature maps of the content image F: the feature maps of the generated image scope: scope Returns: loss: content loss (mean squared loss) """ # 여기를 직접 채워 넣으시면 됩니다. with tf.variable_scope(scope): assert F.shape == P.shape loss = tf.losses.mean_squared_error(F, P) #loss = 0.5 * tf.reduce_sum(tf.square(F - P)) # original loss on paper return loss # ### style loss def style_loss(style_layers, generated_layers, scope): """Calculate the style loss function between the gram matrix of feature maps of style image and generated image. 
Args: style_layers: list of the feature maps of the style image generated_layers: list of the feature maps of the generated image scope: scope Returns: loss: style loss (mean squared loss) """ def _style_loss_one_layer(feature_map_s, feature_map_g): """Calculate the style loss for one layer. Args: feature_map_s: the feature map of the style image - G: the gram matrix of the feature_map_s feature_map_g: the feature map of the generated image - A: the gram matrix of the feature_map_g Returns: loss: style loss for one layer (mean squared loss) """ #_, h, w, c = feature_map_s.get_shape().as_list() G = _gram_matrix(feature_map_s) A = _gram_matrix(feature_map_g) # 여기를 직접 채워 넣으시면 됩니다. loss = tf.losses.mean_squared_error(G, A) return loss def _gram_matrix(feature_map): """Calculate the gram matrix for the feature map Args: feature_map: 4-rank Tensor [1, height, width, channels] - F = 2-rank Tensor [h * w, channels] Returns: gram_matrix: 2-rank Tensor [c, c] (F.transpose x F) """ # 여기를 직접 채워 넣으시면 됩니다. F = tf.squeeze(feature_map, axis=0) h, w, c = F.get_shape().as_list() F = tf.reshape(F, [h * w, c]) # normalize for calculating squared Frobenius norm gram_matrix = tf.matmul(tf.transpose(F), F) / (h * w) return gram_matrix with tf.variable_scope(scope): assert len(style_layers) == len(generated_layers) loss = 0.0 for i in range(len(style_layers)): loss_one = _style_loss_one_layer(style_layers[i], generated_layers[i]) loss += loss_one * style_loss_weight[i] return loss # ### Total loss # + loss_c = content_loss(content_layers, generated_layers[0], scope='content_loss') loss_s = style_loss(style_layers, generated_layers[1:], scope='style_loss') with tf.variable_scope('total_loss'): total_loss = content_weight * loss_c + style_weight * loss_s # - # ### Define a optimizer # 여기를 직접 채워 넣으시면 됩니다. 
opt = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = opt.minimize(total_loss, var_list=generated_image) # ### Restore VGG16 weights using `tf.saver.restore` # ### Download the VGG16 checkpoint: # # ``` # $ CHECKPOINT_DIR='./checkpoints' # $ mkdir ${CHECKPOINT_DIR} # $ cd ${CHECKPOINT_DIR} # $ wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz # $ tar -xvf vgg_16_2016_08_28.tar.gz # $ rm vgg_16_2016_08_28.tar.gz # ``` # ### Preprocessing a image def vgg_preprocessing(image): """vgg image preprocessing output image is applied by mean_image_subtraction _R_MEAN = 123.68 _G_MEAN = 116.779 _B_MEAN = 103.939 Args: image (PIL image): image with shape [height, width, channels] Returns: image (np.int32): np.array with shape [1, 224, 224, 3] applied by mean_image_subtraction """ image = np.asarray(image) image = image.astype(np.float32) image[:,:,0] -= 123.68 # for _R_MEAN image[:,:,1] -= 116.779 # for _G_MEAN image[:,:,2] -= 103.939 # for _B_MEAN image = np.expand_dims(image, axis=0) return image def print_image(image): """print image Args: image: 4-rank np.array [1, h, w, 3] """ print_image = np.squeeze(image, axis=0) print_image[:, :, 0] += 123.68 print_image[:, :, 1] += 116.779 print_image[:, :, 2] += 103.939 print_image = np.clip(print_image, 0, 255).astype('uint8') plt.axis('off') plt.imshow(print_image) plt.show() content_image_ = vgg_preprocessing(content_image_) style_image_ = vgg_preprocessing(style_image_) v = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg_16') # + saver = tf.train.Saver(var_list=v) with tf.Session(config=sess_config) as sess: sess.run(tf.global_variables_initializer()) # content_image_와 style_image_를 tf.placeholder에 넣고 tf.Variable로 assign sess.run([content_image_op, style_image_op, generated_image_op], feed_dict={content_image_p: content_image_, style_image_p: style_image_}) _, generated_image_ = sess.run([generated_image_clipping, generated_image]) print_image(content_image_) 
print_image(style_image_) print_image(generated_image_) # initial_image = content_image + small noise # use saver object to load variables from the saved model saver.restore(sess, "../checkpoints/vgg_16.ckpt") start_time = time.time() for step in range(max_steps+1): _, loss_, loss_c_, loss_s_, _, generated_image_ = \ sess.run([train_op, total_loss, loss_c, loss_s, generated_image_clipping, generated_image]) if (step+1) % print_steps == 0: duration = time.time() - start_time start_time = time.time() print("step: {} total_loss: {} loss_c: {} loss_s: {} duration: {}".format((step+1), loss_, loss_c_, loss_s_, duration)) print_image(generated_image_) print('training done!') # - def save_image(image, content_image_name, style_image_name): """print image Args: image: 4-rank np.array [1, h, w, 3] content_image_name: (string) filename of content image style_image_name: (string) filename of style image """ save_image = np.squeeze(image, axis=0) save_image[:, :, 0] += 123.68 save_image[:, :, 1] += 116.779 save_image[:, :, 2] += 103.939 save_image = np.clip(save_image, 0, 255) save_image = Image.fromarray(np.uint8(save_image)) filename = os.path.splitext(os.path.basename(content_image_name))[0] + '_' filename += os.path.splitext(os.path.basename(style_image_name))[0] + '.jpg' save_image.save(filename) save_image(generated_image_, content_image_name, style_image_name)
week05/02_style_transfer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [실습] Multilayer Perceptron(MLP)로 숫자 분류기 구현하기 # - MLP는 Fully Connect layer, Dense layer 등의 이름으로 불림 # - 기존 cnn에 많이 활용되는 구조 # - hidden layer가 추가 + activation # - linear transformation, affine transform은 아무리 여러번 반복해도 affine이라 중간 중간에 non-lineard를 줘야 훨씬 더 복잡한 함수로 모델링할 수 있습니다 # - 각 레이어의 output이 나오면 activation function을 통과함 (sigmoid, tanh, relu) # - 마지막 output이 나오면 softmax를 취해서 cross entropy에 넣으면 logistic regression # - 우리의 코드에선 softmax를 취하지 않을겁니다! # - softmax를 취하기 전의 값을 logit이라 부르고, 이것을 입력으로 받는 텐서플로우 함수가 있는데 그것을 활용해보겠습니다 # # ### Logistic regression과 Multi layer perceptron의 차이 # - Depth!!! # # # - 아래 코드는 모델, Cost 정의 -> 초기화 -> 실행 순으로 진행 import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data # %matplotlib inline print ("PACKAGES LOADED") mnist = input_data.read_data_sets('data/', one_hot=True) # + n_input = 784 n_hidden_1 = 256 n_hidden_2 = 128 n_hidden_3 = 64 n_classes = 10 x = tf.placeholder("float", [None, n_input]) y = tf.placeholder("float", [None, n_classes]) # weight와 bias가 여러개 있고, dict으로 들어가있음 stddev = 0.1 weights = { 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)), 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)), 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], stddev=stddev)), 'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes], stddev=stddev)) } biases = { 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 'b3': tf.Variable(tf.random_normal([n_hidden_3])), 'out': tf.Variable(tf.random_normal([n_classes])) } print ("NETWORK READY") # + # model def multilayer_preceptron(_X, _weights, _biases): layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, 
_weights['h1']), _biases['b1'])) layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, _weights['h3']), _biases['b3'])) return (tf.matmul(layer_3, _weights['out']) + _biases['out']) # 위 함수는 softmax가 없습니다! # softmax_cross_entropy_with_logits을 활용할 예정이라 그렇습니다! # PREDICTION pred = multilayer_preceptron(x, weights, biases) # LOSS AND OPTIMIZER cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) # 들어가는 것은 logits이지 확률이 아님! optm = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(cost) corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) accr= tf.reduce_mean(tf.cast(corr,"float")) # INITIALIZER init = tf.global_variables_initializer() print ("FUNCTIONS READY") # - # PARAMETERS training_epochs = 20 batch_size = 100 display_step = 4 # LAUNCH THE GRAPH sess = tf.Session() sess.run(init) # OPTIMIZE for epoch in range(training_epochs): avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size) # ITERATION for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) feeds = {x: batch_xs, y: batch_ys} sess.run(optm, feed_dict=feeds) avg_cost += sess.run(cost, feed_dict=feeds) avg_cost = avg_cost / total_batch # DISPLAY if (epoch+1) % display_step == 0: print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost)) feeds = {x: batch_xs, y: batch_ys} train_acc = sess.run(accr, feed_dict=feeds) print ("TRAIN ACCURACY: %.3f" % (train_acc)) feeds = {x: mnist.test.images, y: mnist.test.labels} test_acc = sess.run(accr, feed_dict=feeds) print ("TEST ACCURACY: %.3f" % (test_acc)) print ("OPTIMIZATION FINISHED")
Lecture_Note/03. CNN Application/01. Numeric classifier using Multilayer Perceptron(MLP) .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     language: python
#     name: python3
# ---

# # Assignment
#
# 1. Create a random array a, of shape (6x6) using randint and seed 2.
# 2. Convert the array a, to an identity matrix I.
#
# $$
# I =
# \begin{bmatrix}
# 1 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 1 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 1 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 1 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 1 \\
# \end{bmatrix}
# $$

# +
# Question 01
import numpy as np

# Fixed seed so the "random" matrix is reproducible.
np.random.seed(2)
# 6x6 integers drawn from [0, 255) (randint's high bound is exclusive).
A = np.random.randint(0, 255, (6, 6))
print("A :\n ", A)

# Question 02
# A square invertible matrix times its inverse is the identity: A @ A^-1 = I.
inverse_A = np.linalg.inv(A)
print("\nInverse of matrix A : \n ", inverse_A)

# Convert A to I by A * A-1; round to clean up floating-point error.
A = A @ inverse_A
A = np.round(A).astype(int)
print("\nI :\n ", A)
# -
assignments/essential/assignment_09.ipynb
# +
"""
Generate Traffic Light
"""

# import python random package
import random

# each possible draw mapped to the colour it should display
colors = {1: 'red', 2: 'green', 3: 'yellow'}

# draw a random number from 1 to 3 (inclusive) and print its colour:
# 1 -> 'red', 2 -> 'green', 3 -> 'yellow'
randn = random.randint(1, 3)
print(colors[randn])
pset_conditionals/random_nums/solution/nb/p1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # OpenVINO example with Squeezenet Model # # This notebook illustrates how you can serve [OpenVINO](https://software.intel.com/en-us/openvino-toolkit) optimized models for Imagenet with Seldon Core. # # <img src="dog.jpeg"/> # ## Dependencies # # * Seldon-core (```pip install seldon-core```) # * Numpy # * Keras # * Matplotlib # * Tensorflow # ## Download Squeezenet Model # # We will download a pre-trained and optimized model for OpenVINO CPU into a local folder. # !mkdir -p models/squeezenet/1 && \ # wget -O models/squeezenet/1/squeezenet1.1.xml https://s3-eu-west-1.amazonaws.com/seldon-public/openvino-squeeznet-model/squeezenet1.1.xml && \ # wget -O models/squeezenet/1/squeezenet1.1.mapping https://s3-eu-west-1.amazonaws.com/seldon-public/openvino-squeeznet-model/squeezenet1.1.mapping && \ # wget -O models/squeezenet/1/squeezenet1.1.bin https://s3-eu-west-1.amazonaws.com/seldon-public/openvino-squeeznet-model/squeezenet1.1.bin # ## Run Seldon Core on Minikube # # **The example below assumes Minikube 0.30.0 installed** # !minikube start --memory 4096 --disk-size 20g # !kubectl create namespace seldon # !kubectl config set-context $(kubectl config current-context) --namespace=seldon # !kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default # !helm init # !kubectl rollout status deploy/tiller-deploy -n kube-system # !helm install ../../../helm-charts/seldon-core-crd --name seldon-core-crd --set usage_metrics.enabled=true # !helm install ../../../helm-charts/seldon-core --name seldon-core --set ambassador.enabled=true # ## (Optional) Install Jaeger # # We will use the Jaeger All-in-1 resource found at the [Jaeger Kubernetes 
repo](https://github.com/jaegertracing/jaeger-kubernetes). # !kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml -n seldon # ### Start Jaeger UI # # ``` # minikube service jaeger-query -n seldon # ``` # ## Mount local folder onto minikube for HostPath # Run in the current folder: # ``` # minikube mount ./models:/opt/ml # ``` # # This will allow the model folder containing the Squeezenet model to be accessed. For production deployments you would use a NFS volume. # ## Build Combiner and Transformer Images # !eval $(minikube docker-env) && cd resources/combiner && s2i build -E environment_grpc . seldonio/seldon-core-s2i-python36:0.5-SNAPSHOT seldonio/imagenet_combiner:0.1 # !eval $(minikube docker-env) && cd resources/transformer && s2i build -E environment_grpc . seldonio/seldon-core-s2i-python36:0.5-SNAPSHOT seldonio/imagenet_transformer:0.1 # ## Deploy Seldon Intel OpenVINO Graph import sys sys.path.append("../../../notebooks") from visualizer import * get_graph("seldon_openvino_ensemble.json") # !pygmentize seldon_openvino_ensemble.json # !kubectl apply -f pvc.json # !kubectl apply -f seldon_openvino_ensemble.json # # Serve Requests # # **Ensure you port forward ambassador:** # # ``` # kubectl port-forward $(kubectl get pods -n seldon -l service=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080 # ``` # + import tensorflow as tf from seldon_core.proto import prediction_pb2 from seldon_core.proto import prediction_pb2_grpc import grpc def grpc_request_ambassador_tensor(deploymentName,namespace,endpoint="localhost:8004",data=None): datadef = prediction_pb2.DefaultData( names = 'x', tftensor = tf.make_tensor_proto(data) ) request = prediction_pb2.SeldonMessage(data = datadef) channel = grpc.insecure_channel(endpoint) stub = prediction_pb2_grpc.SeldonStub(channel) if namespace is None: metadata = [('seldon',deploymentName)] else: metadata = 
[('seldon',deploymentName),('namespace',namespace)] response = stub.Predict(request=request,metadata=metadata) return response def grpc_request_ambassador_bindata(deploymentName,namespace,endpoint="localhost:8004",data=None): request = prediction_pb2.SeldonMessage(binData = data) channel = grpc.insecure_channel(endpoint) stub = prediction_pb2_grpc.SeldonStub(channel) if namespace is None: metadata = [('seldon',deploymentName)] else: metadata = [('seldon',deploymentName),('namespace',namespace)] response = stub.Predict(request=request,metadata=metadata) return response # + # %matplotlib inline import numpy as np from keras.applications.imagenet_utils import preprocess_input, decode_predictions from keras.preprocessing import image import sys import json import matplotlib.pyplot as plt import datetime API_AMBASSADOR="localhost:8003" def getImage(path): img = image.load_img(path, target_size=(227, 227)) x = image.img_to_array(img) plt.imshow(x/255.) x = np.expand_dims(x, axis=0) x = preprocess_input(x) return x def getImageRaw(path): img = image.load_img(path, target_size=(227, 227)) x = image.img_to_array(img) plt.imshow(x/255.) return x def getImageBytes(path): with open(path, mode='rb') as file: fileContent = file.read() return fileContent #X = getImage("car.png") #X = X.transpose((0,3,1,2)) #X = getImageRaw("car.png") #X = getImageRaw("dog.jpeg") #print(X.shape) X = getImageBytes("dog.jpeg") start_time = datetime.datetime.now() response = grpc_request_ambassador_bindata("openvino-model","seldon",API_AMBASSADOR,data=X) end_time = datetime.datetime.now() duration = (end_time - start_time).total_seconds() * 1000 print(duration) print(response.strData) # - # Send multiple requests to get average response time. 
durations = [] for i in range(100): X = getImageBytes("dog.jpeg") start_time = datetime.datetime.now() response = grpc_request_ambassador_bindata("openvino-model","seldon",API_AMBASSADOR,data=X) end_time = datetime.datetime.now() duration = (end_time - start_time).total_seconds() * 1000 durations.append(duration) print(sum(durations)/float(len(durations)))
examples/models/openvino_imagenet_ensemble/openvino_imagenet_ensemble.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Prediction # + import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.utils import shuffle from sklearn.metrics import accuracy_score import ER_multiclass as ER from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt # %matplotlib inline # - np.random.seed(1) # load data df = pd.read_csv('drug_data2.txt') df.head() df2 = df[['Sex','Treatment','Concentration','Day','wakeact']].copy() df3 = np.array(df2) df3.shape # select only day = 5 df = df3[df3[:,3] == 5] df.shape # features: X = df[:,0:3] X.shape y = df[:,4].astype(float) # + t1 = y < 1.5 X1 = X[t1] y1 = y[t1] len(y1) # + t21 = y > 2. t22 = y < 2.5 t2 = t21*t22 X2 = X[t2] y2 = y[t2] t2 = np.random.choice(len(y2),len(y1),replace=False) X2 = X2[t2] y2 = y2[t2] len(y2) # + t3 = y > 3. X3 = X[t3] y3 = y[t3] len(y3) # - X = np.vstack([X1,X2,X3]) y = np.hstack([y1,y2,y3]) X.shape l,n = X.shape x0 = np.ones(l) x0[X[:,0] == 'Female'] = -1. np.unique(x0,return_counts=True) from sklearn.preprocessing import OneHotEncoder onehot_encoder = OneHotEncoder(sparse=False,categories='auto') x1 = onehot_encoder.fit_transform(X[:,1].reshape(-1,1)) x1.shape x2 = X[:,2].astype(float) X = np.hstack([x0[:,np.newaxis],x1,x2[:,np.newaxis]]) X.shape l = len(y) ynew = np.zeros(l) for t in range(l): if y[t] < 1.5: ynew[t] = 0. elif y[t] > 3.: ynew[t] = 2. else: ynew[t] = 1. 
y = ynew np.unique(y,return_counts=True) # ### Shuffle data from sklearn.utils import shuffle X, y = shuffle(X, y) from sklearn.preprocessing import MinMaxScaler X = MinMaxScaler().fit_transform(X) def inference(X_train,y_train,X_test,y_test,method='expectation_reflection'): if method == 'expectation_reflection': h0,w = ER.fit(X_train,y_train,niter_max=100,regu=0.) y_pred = ER.predict(X_test,h0,w) else: if method == 'logistic_regression': model = LogisticRegression(multi_class='multinomial',solver='saga') if method == 'naive_bayes': model = GaussianNB() if method == 'random_forest': model = RandomForestClassifier(criterion = "gini", random_state = 1, max_depth=3, min_samples_leaf=5,n_estimators=100) if method == 'decision_tree': model = DecisionTreeClassifier() model.fit(X_train, y_train) y_pred = model.predict(X_test) accuracy = accuracy_score(y_test,y_pred) return accuracy def compare_inference(X,y,train_size): npred = 100 accuracy = np.zeros((len(list_methods),npred)) precision = np.zeros((len(list_methods),npred)) recall = np.zeros((len(list_methods),npred)) accuracy_train = np.zeros((len(list_methods),npred)) for ipred in range(npred): #X, y = shuffle(X, y) X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred) idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False) X_train,y_train = X_train0[idx_train],y_train0[idx_train] for i,method in enumerate(list_methods): accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test,method) return accuracy.mean(axis=1),accuracy.std(axis=1) list_train_size = [0.8,0.6,0.4,0.2] list_methods=['logistic_regression','naive_bayes','random_forest','expectation_reflection'] acc = np.zeros((len(list_train_size),len(list_methods))) acc_std = np.zeros((len(list_train_size),len(list_methods))) for i,train_size in enumerate(list_train_size): acc[i,:],acc_std[i,:] = compare_inference(X,y,train_size) print(train_size,acc[i,:]) df = pd.DataFrame(acc,columns = list_methods) 
df.insert(0, "train_size",list_train_size, True) df plt.figure(figsize=(4,3)) plt.plot(list_train_size,acc[:,0],'k--',marker='o',mfc='none',label='Logistic Regression') plt.plot(list_train_size,acc[:,1],'b--',marker='s',mfc='none',label='Naive Bayes') plt.plot(list_train_size,acc[:,2],'r--',marker='^',mfc='none',label='Random Forest') #plt.plot(list_train_size,acc[:,3],'b--',label='Decision Tree') plt.plot(list_train_size,acc[:,-1],'k-',marker='o',label='Expectation Reflection') plt.xlabel('train size') plt.ylabel('accuracy mean') plt.legend()
drug_set2/.ipynb_checkpoints/drug2_prediction_separate01_23_4_quadratic-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %ls # %cd Chapter_1/ # %ls from sklearn import datasets iris = datasets.load_iris() digits = datasets.load_digits() print(digits.data) digits.target digits.images[0] from sklearn import svm clf = svm.SVC(gamma=0.001, C=100.) clf.fit(digits.data[:-1], digits.target[:-1]) clf.predict(digits.data[-1:]) # + print(__doc__) # Code source: <NAME> # Modified for documentation by <NAME> # License: BSD 3 clause from sklearn import datasets import matplotlib.pyplot as plt #Load the digits dataset digits = datasets.load_digits() #Display the first digit plt.figure(1, figsize=(3, 3)) plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest') plt.show() # -
Mathine_Learning/Learning_in_Action/.ipynb_checkpoints/Chapter_1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Opi Python Logo](images/letslearnpython.png) # # # OPI PYTHONIN PERUSTEET # # Tämä interaktiivinen harjoituskokoelma opettaa sinulle Python-ohjelmointikielen alkeet. Opas seurailee PyCon 2014 -tapahtumassa <NAME>en ja <NAME>in pitämän Python-koulutuksen rakennetta. # # > Jos haluat tämän suomenkielisen oppaan sijaan tutustua englanninkieliseen alkuperäismateriaaliin (kalvot, esimerkit ym.), ne ovat ladattavissa täältä: # > https://github.com/mechanicalgirl/young-coders-tutorial # ![Mitä ohjelmointi on](images/whatprog.png) # # Mitä **ohjelmoinnilla** tarkoitetaan: # * **Tietokone** on laite joka paitsi **säilöö**, myös **siirtää**, **järjestää**, **luo** ja **hallitsee** tietoa (eli _dataa_). # * **Tietokoneohjelma** puolestaan on joukko yksikäsitteisiä **ohjeita**, jotka kertovat tietokoneelle mitä datalle pitäisi tehdä. **Ohjelmointikieli** on se erityinen kieli, millä nämä ohjeet kirjoitetaan. # * Alla on joitain esimerkkejä ohjeista, jotka on tarkoitettu meille ihmisille (origamilinnun taitteluohje, pikkuleipäresepti ja kirjan sidontaohje): # # ![Ohje-esimerkki](images/instructions.png) # # * Heti alkuun on hyvä sisäistää, että tietokone on kovin tyhmä. Se ymmärrä yllä olevan esimerkin kaltaisia oheita, vaikka ne ovat ihmiselle päivänselviä. Tietokoneelle ohjeet pitää kirjoittaa todella yksityiskohtaisesti, huolellisesti ja tarkasti, sillä se toimii **täsmälleen** kuten ohjeistettu. Tämä ohjeiden kirjoittaminen jollain ohjelmointikielellä on **ohjelmointia** - eli juuri sitä, mitä tämä opas opettaa. # * **Algoritmilla** tarkoitetaan ohjeita jonkin tietyn asian tekemiseksi tietokoneella - usein tehokkaalla ja nopealla tavalla. Algoritmit ovat kuin reseptejä, eli askel-askeleelta eteneviä ohjeita, joiden lopputulos on haluttu (esim. 
kanelipulla tai aakkosjärjestykseen saatettu lista nimiä). # # # ![Puhu Pythonia](images/talkpy.png) # # Pian kokeilemme kirjoittaa ohjeita tietokoneelle, mutta ensin on hyvä tietää, että ohjelmointikieliä on olemassa valtavasti erilaisia - eri käyttötarkoituksiin. Tässä oppaassa opettelemme ohjelmointikieltä nimeltä **Python**, joka on aloittelijaystävällinen ja silti valtavan tehokas ja ilmaisuvoimainen kieli. # # Python voi näyttäytyä ohjelmoijalle monessa muodossa: # 1. Yksinkertaisin tapa käyttää Pythonia on Python-komentotulkki, jonne kirjoitetaan komentoja, jotka tulkki sitten välittämästi suorittaa. Komentotulkin kanssa vuoropuheluna voi tehdä laskutoimituksia, opiskella Pythonin käyttöä tai kokeilla erilaisten ideoiden toimivuutta. # 1. Komentotulkista on myös kehittyneempi ja käyttäjäystävällisempi versio nimeltä IPython. Se tarjoaa monenlaisia apuvälineitä Pythonin kirjoittamiseen: tiedostojen hallintaa, dokumentaatiota, erilaisia oikoteitä, komentohistorian ja paljon muuta. # 1. IPython Notebookin, jota nyt parhaillaan käytät, lähestymistapa on erilainen: käytössäsi on nettiselaimessa (Chrome, Firefox ym.) toimiva vuorovaikutteinen muistikirjan sivu, jonne voi kirjoittaa paitsi tekstiä, myös Python-ohjelmointikielisiä käskyjä. # 1. Python-ohjelmointikielellä voidaan myös tehdä itsenäisesti toimivia ohjelmia. Tällöin kirjoitetaan `.py`-tiedosto, joka annetaan tietokoneen suoritettavaksi. `.py`-tiedostojen kirjoittamiseen on monenlaisia työkaluja aina muistiosta tehokkaisiin integroituihin kehitysympäristöihin (_eng. IDE_). Näin tehty ohjelma voidaan jopa paketoida suoritettavaksi `.exe`-tiedostoksi. # # ![Erilaisia Python-ympäristöjä](images/pythonviews.png) # # # 1. Laskutoimituksia # Aloitetaan Python-ohjelmointi yksinkertaisella matematiikalla. Pythonia voi nimittäin käyttää kätevänä laskimena. 
# # Naksauta hiirellä alla oleva harmaa laatikko aktiivisiksi ja paina [⇧SHIFT] + [↵ENTER], jolloin Python suorittaa laskutoimituksen, kertoo tuloksen ja siirtyy seuraavaan laatikkoon. 1+2 # Hyvä! Tee sama uudelleen alla olevalle laatikolle. 12-3 # Kokeile nyt kirjoittaa itse alla olevaan harmaaseen laatikkoon seuraava laskutoimitus. # Älä kuitenkaan kopioi `>>>`-merkkejä, sillä ne ovat vain ilmaisemassa, että kyseessä on Python-koodi: # ```python # >>> 9+5-15``` 9+2 # Paina sitten `[CTRL]` + `[↵ENTER]` ja Python laskee laskun, kertoo tuloksen **ja pitää kyseisen laatikon aktiivisena siltä varalta, että haluat muokata sen sisältöä.** Tämä siis erotuksena aiempaan [⇧SHIFT] + [↵ENTER] -komentoon, joka siirtyy heti eteenpäin. Toiminnolle löytyy myös [▶]-nappi yläreunan työkalupalkista. # ### Matemaattiset operaattorit # Pythonin matemaattiset perusoperaattorit ovat: # * yhteenlasku: `+` # * vähennyslasku: `-` # * kertolasku: `*` # * jakolasku: `/` # # Kokeile vaikka näitä: # ```python # >>> 6*5 # >>> 10*5*3 # >>> 6/2 # >>> 20/7 # ``` # Tai voit kokeilla laskea ihan itse keksimilläsi luvuilla. Kokeile myös operaattoria `**`. Keksitkö mitä se tekee? 2**2 2/3.0 # > **Sivuhuomautus:** Pythonin 2.7 ja 3 versiot laskevat jakolaskun eri tavalla. Vanhempi ja käytöstä poistuva 2.7 saattaa tuottaa ikävän yllätyksen: `5/2` palauttaakin arvon `2`, eikä `2.5`. Jos Python 2.7:lla haluaa tarkan jakolaskun tuloksen on kirjoitettava `5.0/2.0`. Tässä pitää olla tarkkana, sillä Python 2.7 -ympäristöihin saattaa törmätä vielä ajoittain. # > **Toinen sivuhuomautus:** Saatat huomata tehdessäsi omia laskuja, että tulos ei ole aivan sitä mitä odotit. Tällöin järjestys, missä Python suorittaa laskuoperaatot saattaa olla eri mitä tarkoitit. Voit pakottaa Pythonin laskemaan haluamassasi järjestyksessä lisäämällä sulkuja. Esimerkiksi `3+4+5/3` ei tuota samaa vastausta kuin `(3+4+5)/3`. 
# ### Vertailuoperaattorit # # <table> # <tr><td> # `== `</td><td>tutkii ovatko luvut yhtäsuuret # </td></tr> # <tr><td> # `!= `</td><td>tutkii ovatko luvut erisuuret # </td></tr> # <tr><td> # `< `</td><td>tarkistaa onko vasemmalla puolella oleva luku pienempi kuin oikealla puolella oleva # </td></tr> # <tr><td> # `> `</td><td>tarkistaa onko vasemmalla puolella oleva luku suurempi kuin oikealla puolella oleva # </td></tr> # <tr><td> # `<= `</td><td>tarkistaa onko vasemmalla puolella oleva luku pienempi *tai yhtäsuuri* kuin oikealla puolella oleva # </td></tr> # <tr><td> # `>= `</td><td>tarkistaa onko vasemmalla puolella oleva luku suurempi *tai yhtäsuuri* kuin oikealla puolella oleva # </td></tr> # </table> # # Kokeile näitä alla. Esimerkiksi: # ```python # >>> 5 < 4 + 3 # >>> 12 + 1 >= 12 # >>> 16 * 2 == 32 # >>> 16 != 16 # >>> 5 >= 6 # ``` # # Vertailuoperaatot ovat tärkeitä ohjelmoinnissa, kuten pian tulemme näkemään. Niiden avulla voimme ohjelmoida tietokoneen tekemään eri asioita eri tilanteissa. # # 2. Merkkijonot # Paitsi lukuja, käsittelevät tietokoneet myös paljon tekstimuotoista tietoa. Tekstimuotoinen tieto on tallennettu tietokoneelle jonona kirjoitusmerkkejä, eli tuttavallisemmin **merkkijonona**. # # Python tulkitsee merkkijonoksi tekstinpätkän, joka on lainausmerkkien välissä. Kokeile: # ```python # >>> "hello world" # >>> "omppu" # >>> omppu # ``` "hello world" omppu # Viimeisen esimerkin, eli sen ompun, jota ei oltu ympyröity lainausmerkein, tuleekin antaa virhe. Virheisiin palaamme pian, mutta tässä esimerkin tarkoitus on iskostaa mieleesi seuraava sääntö: **jos haluat syöttää Pythonille merkkijonon, käytä lainausmerkkejä** # ### Operaatioita merkkijonoille # Kuten luvuille, myös merkkijonoille voi tehdä erilaisia operaatioita: # # * yhdistäminen: `+` # * toistaminen: `*` # # Katso ja kokeile mitä Python tekee kun annat sille seuraavat ohjeet: # ```python # >>> "Hei " + "muukalainen" # >>> "HAHA" * 100 # ``` # # 3. 
Muuttujat # Tutustuimme edellä siihen, miten lukuja ja merkkijonoja käsitellään Pythonilla. Seuraavaksi tutustumme ideaan, jota voi käyttää molempien kanssa - eli muuttujaan. # # Matematiikan tunnilta saatat muistaa **x**:n. Sitä käytettiin merkitsemään jotain tuntematonta (tai tunnettua) lukua. Ohjelmointikielessä nämä **muuttujat** toimivat hieman samalla tavalla. # # Muuttujat ovat kuin laatikoita, joiden sisään voit pakata numeroita, merkkijonoja tai muita "juttuja". # # Laske alla olevassa harmaassa laatikossa lasku: # ```python # >>> 12 * 12 # ``` # Python kuitenkin unohtaa tuloksen heti. Entäs jos haluaisit, että vastaus pysyy muistissa? Voit laittaa tuloksen muistiin **sijoittamalla** sen nimettyyn muuttujaan: # ```python # >>> tulos = 12 * 12 # >>> tulos # ``` # Muuttujat säästävät aikaa ja vaivaa, sillä voit nyt käyttää muuttujaa `tulos` uudelleen ja uudellen tulevissa laskutoimituksissa ja ohjeissa. # # > Huomaa, että `tulos` ei ole lainausmerkeissä. Näin siksi, että se ei ole merkkijono, vaan muuttuja. Yleensä kannattaa käyttää kuvaavia muuttujan nimiä. Näin ei tarvitse turhaan arvailla ja selvitellä, että mitä muuttujassa on sisällä - nimi kertoo sen jo. # Voit uudelleenkäyttää muuttujia. Sijoita vaan sinne uusi arvo. Esimerkiksi: color = 'yellow' color = 12 # Mitä luulet 'color' muuttujassa nyt olevan? Kokeillaan (suorita kaikki tämän esimerkin kolme laatikkoa ylhäältä alas): color # Muuttujat ovat ohjelmoinnissä hyvin tärkeitä. Siksi kertauksena: # * Voit laittaa tulokset talteen muuttujaan ja käyttää näitä tuloksia myöhemmin # * Voit vaihtaa muuttujan arvon vaikka sen nimi pysyy samana # * Muuttujaan sijoitetaan '=' operaattorilla. Tämä erotuksena '==' operaattoriin, joka siis tutkii ovatko luvut **tai muuttujien sisältämät luvut** samat (esimerkki alla). Tässä sekoilu on hyvin yleinen virhe. x = 3 y = 3 x==y # Tässä muuten joitain muita *juttuja*, mitä muuttujilla - ja erityisesti merkkijonomuuttujilla - voi tehdä. 
hedelma = "appelsiini" hedelma[2] indeksi = 4 hedelma[indeksi-2] # Alle voit kokeilla tätä merkkijonoihin *indeksointia* itse: # # 4. Virheet # Eräs ohjelmoinnissa hyvin tärkeä asia ovat **virheet** ja **virheilmoitukset**. Paraskaan ohjelmoija ei osaa kirjoittaa virheetöntä ohjelmakoodia. Siksi onkin hyvä, että työkalu osaa tunnistaa ja kertoa meille suurimmassa osassa tapauksista milloin olemme tehneet jotain väärin - ja mikä vielä tärkeämpää, missä ja mikä virhe oikein oli. Suorita alla olevat koodit. "kaveri"*5 "kaveri"+5 # Mitä ylle ilmaantunut virhe mielestäsi tarkoittaa? Mitä 'str' ja 'int' tarkoittavat? # # Pureskellaan virheilmoitus auki: # * Merkkijonot ('str' lyh. *string*) # * Kokonais**luvut** ('int' lyh. *integer*) # * Molemmat ovat jotain objekteja (objects *suom. **olioita***) # * Mutta Python ei osaa yhdistää niitä, sillä ne ovat eri **tyyppisiä** (TypeError). # # Usein lukemalla virheilmoituksen huolellisesti voi päästä jyvälle siitä mikä menee vikaan. Esimerkkitapauksessamme virhe olisi korjattavissa niin, että annamme myös numeron lainausmerkkien välissä olevana merkkijonona, jolloin yhdistettävät ovat molemmat merkkijonoja. Kokeile: # ```python # >>> "kaveri"+"5" # ``` # ![Python perustietotyypit](images/types.png) # # # Perustietotyypit # Olemme jo tutustuneet kolmeen tietotyyppiin: # * `"Hei!"`, joka on **merkkijono** (eng. *string*) # * 27, joka on **kokonaisluku** (eng. *integer*) # * 3.14, joka on desimaaliluku/**liukuluku** (eng. *float*) # # > **Sivuhuomautus:** Huomaa, että Pythonin desimaalierotin on piste eikä pilkku. Tämä on hyvin yleistä ohjelmointikielissä ja tietojenkäsittelyssä, mutta ensikertalaisen kannattaa kiinnittää asiaan huomiota. # # Python osaa kertoa meille tyypin Pythoniin sisäänrakennetulla `type(...)`-funktiolla. Funktioihin tutustumme lähemmin tuonnempana. tervehdys = "Hei!" 
type(tervehdys) # Hae alla vastaavalla tavalla tyyppikuvaus kokonaisluvulle ja liukuluvulle: type(12) # Voit muuttaa tietoa toiseen muotoon `int(...)`, `str(...)` ja `float(...)` -funktioilla. Esimerkiksi: float(5) # Kokeile muuttaa kokonaisluku merkkijonoksi: # Entäs muuttuuko numeron sisältävä merkkijono desimaaliluvuksi?: # # 5. Listat # Lista voi pitää sisällään sarjan olioita. Esimerkiksi: hedelmat = ["omena", "banaani", "mandariini"] numerot = [3, 17, -4, 8.8, 1] # Arvaa mitä `type(...)`-fuktio osaa kertoa: type(hedelmat), type(numerot) # Voit käyttää kokonaisluku**indeksiä** ja hakasulkuoperaattoria hakeaksesi jonkin listan **alkion**: hedelmat[0] numerot[1] # > Huomaa edellisistä esimerkistä pari seikkaa. Ohjelmointikielissä on tyypillistä, että listan ensimmäinen alkio haetaan indeksillä `0` - se on siis tavallaan "nollas" alkio eikä "ensimmäinen". Näin myös Pythonissa. # # Mihin negatiivinen indeksi -1 osoittaa? Entä -2? Kokeile arvasitko oikein: hedelmat[-1] # ### Tehtävä: # Tee **lista** kolmesta lempiväristäsi: # Käytä **indeksiä** ja hakasulkuoperaattoria hakeaksesi yhden lempiväreistäsi: # **Extratehtävä:** mitä tapahtuu jos indeksoit kahdesti `hedelmat`-listaa tai lempivärilistaa? Miksi tulos on se mikä se on? # hedelmat[1][3] # # 6. Totuusarvot (*eng. booleans*) # Totuusarvo voi olla `True` tai `False` 1==1 15 < 5 # Kokeile mitä tapahtuu jos kirjoitat Pythonille pelkän `True` tai `False`. Entäs pystytkö sijoittamaan totuusarvon muuttujaan? # Mikä on totuusarvon tyyppi? Tarkista se `type(...)`-funktiolla, kuten teimme kokonaisluvuille ja merkkijonoille: # > Huomaa, että `True` ja `False` on kirjoitettu suurella alkukirjaimella, ja että ne eivät ole ympäröity lainausmerkein, eli ne eivät ole merkkijonoja. `True` ja `False` ovatkin Pythonin **varattuja sanoja**, eli kieleen sisäänrakennettuja asioita. # ### and / or / not # Myös `and`, `or` ja `not` ovat Pythonissa varattuja sanoja. 
Niillä voi muuttaa totuusarvoja, sekä yhdistää niitä tuottavia vertailuoperaatioita toisiinsa. # # > Alla risuaidan `#` jälkeen kirjoitettua ei tulkita Python-kieleksi. Ne ovat koodin sekaan kirjoitettuja **kommentteja**, mikä onkin usein tarpeen, jotta koodin tarkoitusta ei tarvitse turhaan kummastella. Kommentit auttavat koodaajaa - ja mikä tärkeämpää - muita koodaajia ymmärtämään mitä koodi tekee. # # 1==1 and 2==2 # molemmat ovat tosia, lopputulos tosi 1==1 and 2!=2 # Vain toinen on tosi, lopputulos epätosi 1==2 and 2==3 # Kummatkaan eivät ole tosia, lopputulos epätosi 1==1 or 2==2 # molemmat ovat tosia, lopputulos tosi 1==1 or 2!=2 # Vain toinen on tosi, lopputulos silti tosi 1==2 or 2==3 # Kummatkaan eivät ole tosia, lopputulos epätosi 1==1 not 1==1 # ### Tehtävä: # Kokeile sijoittaa muutama totuusarvo muuttujiin (esim. `a` ja `b`) ja kokeile itse `and`, `or` ja `not` operaattoreita. # **XOR:** Osaatko tehdä vertailun, joka palauttaa `True` vain jos jompikumpi vertailtavasta on tosi (ns. exclusive-or, xor)? Vinkki: käytä muuttujia ja ryhmittele vertailuoperaattoreita suluilla. Esim: # ```python # >>> not (True and (True or False)) # ``` # **Merkkijonojen vertailu:** Kokeile vielä verrata merkkijonoja keskenään. Kokeile onko suurilla ja pienillä kirjaimilla merkitystä. Yllätyitkö? "test" == "Test" # # 7. Logiikka # Tähän mennessä olemme käyttäneet Pythonia lähinnä moneen hienoon temppuun taipuvana laskukoneena. Jotta pystyisimme kirjoittamaan tietokoneelle pidempiä ohjeita, tulee meidän tutustua keinoihin joilla ohjeiden **logiikka** kuvataan Pythonin kaltaisissa ohjelmointikielissä. Tähän on käytettävissä kaksi perustekniikkaa: **ehdot** ja **simukat**. # ## 7.1. Ehtolause # ![Haarautuva nuoli](images/arrowtree.png) # # `if` -lause, eli ehtolause, on tapa pistää tietokone päättämään mitä sen tulisi koodia tulkitessaan seuraavaksi tehdä. 
# # Alla on muutamia esimerkkejä, joita saattaisit käyttää tosielämän päätöksentekoon: # # "**Jos** olet nälkäinen, mennään lounaalle." # # "**Jos** roskis on täynnä, mene ja tyhjää se." # Alla esimerkki siitä, miten samankaltainen asia ilmaistaisiin Python-koodissa: # # ```python # >>> nimi = "Jussi" # >>> if name == "Jussi": # >>> ····print("Moi Jussi!") # ``` # `Moi Jussi!` # > **Huomattavaa:** Pythonissa ehtolauseen jälkeen tulee kaksoispiste, joka puolestaan kertoo että seuraavalta riviltä alkaa uusi **lohko**. Lohko on pätkä ohjelmakoodia, joka kuuluu yhteen, ja jonka tietokone suorittaa kokonaisuutena rivi kerrallaan (ylhäältä alas). Lohko on sisennetty **neljällä välilyönnillä** ja kaikki samaan tasoon sisennetyt peräkkäiset rivit kuuluvat samaan lohkoon (yllä olevassa esimerkissä pisteet tarkoittavat välilyöntejä, ja ovat siinä näkyvillä vain siksi, että välilyöntien käyttö tulisi selvemmäksi). # Mutta entä jos henkilö ei olekaan Jussi? Pythonissa on varattu sana `else` näitä tilanteita varten: # # ```python # >>> if name=="Jussi": # >>> ····print("<NAME>!") # >>> else: # >>> ····print("Hujari!") # ``` # # Jos taas henkilöitä on enemmän, voidaan käyttää sanaa `elif` tämän toisen ehdon tarkastamiseen: # # ```python # >>> if name=="Jussi": # >>> ····print("<NAME>!") # >>> elif name=="Aki": # >>> ····print("He<NAME>!") # >>> else: # >>> ····print("Kuka sinä olet?") # ``` # # > **Huomattavaa:** # * `elif` -ehto tarkastetaan *vain jos mikään ennen sitä olevista ehdoista ei toteutunut*. # * `else` -lohko suoritetaan *vain jos mikään aiempi ehto ei toteutunut*. # # >Toisinsanoen: `if`/`elif`/`else` rakenteesta suoritetaan aina vain yksi haara (lohko). # ### Tehtävä: # Kirjoita koodinpätkä, joka tulostaa ruudulle "Yeah!" jos muuttuja nimeltä `color` on `"yellow"`: # Lisää ehtoon vielä `elif` ja `else` -haarat, joissa tutkit onko muuttujassa jokin toinen väri ja jos on, tulostat jotain muuta nokkelaa. # # 7.2. 
Silmukat # # ![Silmukkanuoli](images/loop.png) # # Toinen perusrakenne ohjelmointikielissä on **silmukat**. Sitä käytetään kun halutaan, että tietokone toistaa jokin asia monta kertaa. Silmukoita on kahdenlaisia: # * *Laskevat* silmukat, jotka toistetaan tietyn monta kertaa # * *Ehdolliset* silmukat, joita toisteaan kunnes jokin ehto täyttyy # ### Laskeva silmukka # Laskevat silmukat kirjoitetaan pythonissa käyttäen `for`-avainsanaa ja siksi niitä sanotaankin `for`-silmukoiksi. Silmukka vaatii myös nimetyn **laskumuuttujan** (esimerkissä `numero`), `in`-avainsanan ja esim. listan numeroita, jotka silmukan laskumuuttuja saa järjestyksessä yksi toisensa jälkeen. Huomaa myös kaksoispiste rivin lopussa ja uuden lohkon merkiksi tehty sisennys: for numero in [1,2,3,4,5]: print("Hei", numero) # ### Tehtävä: # Kirjoita koodinpätkä, joka laskee kertoman (merkitään matematiikassa huutomerkillä `!`), eli kertoo peräkkäiset numerot keskenään. Esim. # # ```5! = 5*4*3*2*1``` # # Käytä muuttujaa, johon säilöt tuloksen. Pitkien lukulistojen kirjoittaminen on työlästä, joten kannattaa käyttää Pythoniin sisäänrakennettua `range(a,b)`-funktiota, joka tekee sinulle lukulistan `a`:sta `b`:hen. Havainnollistava esimerkki alla: list( range(1,5) ) # voit for-silmukassa jättää list(...)-osan pois. # ### Ehdollinen silmukka # Ehdollinen silmukka pyörii kunnes joku ehto täyttyy (tai jää täyttymättä). Pythonissa tämä on toteutettavissa `while(...):` rakenteella, missä sulkujen sisään tulee tutkittava ehto ja alle toistettava koodilohko. lkm = 1 while (lkm<4): print ('toistojen lukumäärä on', lkm) lkm = lkm+1 # > **Huomattavaa:** on tärkeää, että ehto jää täyttymättä edes joskus, sillä muuten kyseessä on päättymätön silmukka. Niillekin on paikkansa, mutta yleensä ohjelma menee ns. jumiin, jos se jää pyörimään tällaiseen. # ### Tehtävä: # Käy edellä olevaa esimerkkiä rivi riviltä päässäsi läpi suorittaen sitä tietokoneen lailla askel kerrallaan. 
Huomioi missä vaihessa muuttujan arvoa kasvatetaan ja missä vaiheessa ehdon tarkastus tehdään. # # 9. Aliohjelmat # # ![Aliohjelmanuoli](images/functionarrow.png) # # Ajattele keksirepsetiä: # # ![Kuva keksireseptistä raaka-aineineen (7 kpl) ja vaiheistettuine ohjeineen (6 askelta)](images/resepti.png) # # Kumpi on helpompaa, noudattaa reseptiä, vai kysyä äidiltä, että "leipoisitko keksejä"? Ohjelmoinnissa tällainen pyyntö vertautuu aliohjelmakutsuun, sillä **aliohjelma** on joukko yhteen liittyviä ohjeita paketoituna nimetyksi kokonaisuudeksi. # # Tämä on kätevää, sillä vaikka reseptin seuraaminen meiltä onnistuisikin, on pyytäminen **huomattavasti helpompaa**. Ja asioiden tekeminen helpommin, tehokkaammin ja nopeammin on pitkälti se, mistä ohjelmoinnissa on kyse. # # ## Aliohjelmien syntaksi (eli kirjoitustapa) # Pythoniksi muutettuna tämä kaunis pyyntö saada tuoreita pikkuleipiä voitaisiin ilmaista vaikka näin: # # ```!Python # pikkuleivat = leivo_pikkuleipia(montako_tarvitaan) # ``` # # Tässä `leivo_pikkuleipia` on **aliohjelman nimi** ja `montako_tarvitaan` on **parametri**, joka määrää sen kuinka suurelle joukolle (eli kuinka montakertaisen pikkuleipätaikinan) äiti leipoo. # # Pythonissa aliohjelmat **esitellään** `def`-avainsanaa käyttäen, jonka jälkeen annetaan sille nimi ja sulut. Esittelyrivi päättyy *kaksoispisteeseen*, ja seuraavalla rivillä on aliohjelman toteutuksen määrittelevä koodilohko, eli **aliohjelman runko**. # # Aliohjelmaa **kutsutaan** kirjoittamalla sen nimi ja sulut: # # + def moikkaa_mua(): # tämä on aliohjelman esittely print("moi") moikkaa_mua() # tämä on aliohjelmakutsu # - # > **Huomattavaa:** esittelyn ja kutsumisen eron ymmärtäminen on hyvin tärkeää. Siis kertauksena: **esittelyssä** kerrotaan yksityiskohtaisesti miten joku asia tehdään ja **kutsuttaessa** pyydetään tekemään kyseinen asia! # Aliohjelmalle voidaan lisätä parametreja kirjoittamalla ne sulkujen väliin. 
Näitä **parametrimuuttujia** voidaan sitten käyttää aliohjelman lohkon koodissa kuten mitä tahansa muuttujaa. Huomaa, että jos aliohjelmalle on lisätty parametri, tällöin myös aliohjelman kutsussa pitää antaa kyiseiselle parametrimuuttujalle arvo. (suorita alla oleva koodi) # + def moikkaa_kaveria(nimelta): #esittely print("moi", nimelta) moikkaa_kaveria("Jussi") # - # ### Tehtävä: # Kutsu edellä esiteltyä `moikkaa_kaveria`-aliohjelmaa niin, että se moikkaa sinua. Esittele sitten uusi aliohjelma, joka ottaa kaksi parametria (parametrit erotetaan toisistaan pilkulla): tervehdyksen ja listan nimiä ja moikkaa kaikkia listassa olevia henkilöitä (kts. `for`-silmukan ja listan ohjeet yltä, jos et muista miten niitä käytettiin). 1+2 # ## Aliohjelmien paluuarvot # # Käytämme edelleen pikkuleipiä leipovaa äitiä apuna: # # ```!Python # pikkuleivat = leivo_pikkuleipia(montako_tarvitaan) # ``` # # Rivin alkuun on nyt lisätty `pikkuleivat`-muuttuja ja yhtäsuuruusmerkki. Tähän muuttujaan puolestaan sijoitetaan **aliohjelman paluuarvo**, eli valmistuneet pikkuleivät. # # Tässä yhteydessä toinen hyödyllinen tapa ajatella aliohjelmia on *tehdasvertaus*: Aliohjelma on kuin tehdas, minne menee raaka-aineita sisään (parametrit) ja ulos tulee valmis tuote (paluuarvo). Emme nimittäin ole kiinnostuneita siitä *miten* tehdas toimii, kunhan se vain tekee sen mitä lupaa. # # Aliohjelman esittelyn yhteydessä, sen koodilohkossa, käyttämällä `return`-avainsanaa, voidaan lopettaa aliohjelma samalla palauttaen jokin arvo. Esimerkiksi näin (suorita alla oleva koodi): # + def tuplaa(numero): #esittely return numero*2 tuplaa (4) #kutsu # - # ### Tehtävä: # Tuplaa 12 käyttäen `tuplaa`-aliohjelmaa. # # Tai, mitä tapahtuu jos tuplaat oman nimesi? Kokeile. 
# ## Yhteenveto aliohjelmista # Opimme paljon Pythonin aliohjelmista, mutta kerrataan vielä: # # * Aliohjelmat **esitellään** `def`-avainsanalla # * Aliohjelmia **kutsutaan** kirjoittamalla sen nimi ja heti nimen perään **sulut**: `kutsun_nimi()` # * Aliohjelmakutsun sulkujen väliin kirjoitetaan mahdolliset **parametrien** arvot pilkuilla eroteltuna # * Jos aliohjelma palauttaa **paluuarvon**, sen voi ottaa talteen muuttujaan yhtäsuuruusmerkillä (sijoitus) # * Aliohjelman **runkolohkossa** voi lopettaa aliohjelman suorituksen ja palauttaa **paluuarvon** `return`-avainsanalla. # # 10. Syöte # Syöte on tietoa, jota syötämme ohjelmalle. Tämä tieto voidaan sitten vaikkapa välittää aliohjelmalle tai näyttää se ruuudulla. # # Pythonissa on sisäänrakennettu aliohjelma nimeltä `input()`, joka kysyy käyttäjältä jotain. Esimerkki alla (suorita se ja vastaa kysymyksiin): # + def sano_kohteliaisuus(): print("Kirjoita nimesi:") nimi = input() print("Kirjoita lempinumerosi:") vari = int(input()) print( "Hei",nimi+",", vari, "on sitten hyvä numero.") sano_kohteliaisuus() # - # ## Tehtävä: # Kirjoita koodi, joka kysyy kaksi numeroa ja laskee ne yhteen. # # 11. Modulit # *"Pyörää ei kannata keksiä uudelleen"* on sanonta, joka pätee erityisen hyvin koodauksessa. Suuri osa yksinkertaisista koodaustehtävistä on jo tehty, ja usein nopein tapa saada asioita aikaan on uudelleenkäyttää jonkun toisen kirjoittamaa ohjelmakoodia. # # Pythonin tarjoama ratkaisu tähän on modulit. Voit ajatella modulia vaikka kokoelmana yhteen littyviä aliohjelmia, jotka tekevän jonkin tehtävän tekemisestä helppoa. Ne ovat tavallaan ohjelmoinnin leegopalikoita. 
Pythonin mukana tulee koko joukko moduleita, tutustutaan alla niistä muutamaan: # # *Satunnaisen numeron arpominen väliltä 1-100:* # ```python # >>> import random # >>> print( random.randint(1,100) ) # ``` # # *Kellonajan tarkistaminen:* # ```python # >>> from datetime import datetime # >>> print( datetime.now() ) # ``` # * `from X import Y` tarkoittaa tässä sitä, että haemme `datetime` modulista vain `datetime` nimisen "palikan". # # *Kalenterin tulostaminen:* # ```python # >>> import calendar # >>> calendar.prmonth(2015, 9) # ``` # # > Huomattavaa: moduleiden sisällä asustavia aliohjelmia ja muita "juttuja" kutsutaan pistenotaatiolla, eli niin, että ensin tulee modulin nimi, sitten piste, sitten kutsuttavan aliohjelman (tai muun "jutun") nimi, ja aliohjelman ollessa kyseessä sulut ja niiden väissä parametrit. # # ### Tehtävä: # Kokeile itse käyttää esimerkkien moduleita kirjoittamalla niiden koodi alle (muistathan, että "`>>>`" EI ole osa kopioitavaa koodia): # # # Alla esitellään esimerkkien avulla vielä pari mielenkiintoista modulia. Sisäänrakennettuja moduleita voit selata [Pythonin dokumentaation modulilistasta](https://docs.python.org/3/py-modindex.html). Lisäksi netti on puolellaan kolmannen osapuolen moduleita, tarpeeseen jos toiseenkin. import os for tiedosto in os.listdir( os.getcwd() ): print(tiedosto) import urllib.request py_home = urllib.request.urlopen("http://www.python.org") print(py_home.read()) #Tulostaa pythonin kotisivun HTML-muodossa. # ## Tehtävä: # Hyödynnä äsken ja aiemmin oppimaasi ja kirjoita alle peli, joka arpoo satunnaisen numeron (`random`), ja pyytää sinua arvaamaan sen (`print`, `input`) kunnes (`while`) osut oikeaan. Voit rullailla ylöspäin tätä Notebookia, jos et muista jotain yksityiskohtaa. Jos peli tuntuu liian vaikealta, voit laittaa ohjelman tulostamaan vinkkejä, kuten "luku on pienempi" tai "polttaa". # # 12. Kilpikonnagrafiikkaa # # Pythonissa on eräs mielenkiintoinen moduli nimeltä `turtle`, jolla leikimme seuraavaksi. 
Kyseessä on omassa ikkunassaan asuva kilpikonna, jota ohjaat kirjoittamalla koodia. # # Valitettavasti Pythonin sisäänrakennettu kilpikonna ei toimi verkossa olevassa Jupyter-muistilehtiössä (esim. Azure Notebooks), vaan tähän tehtävään tarvitset paikallisen Python-asennuksen. Suositeltavaa on käyttää [Anaconda-jakelua](https://www.anaconda.com/). Asennuspaketti eri alustoille (Windows/OSX/Linux) on ladattavissa oheisen linkin takaa. # Kun olet asentanut Anacondan ja avannut tämän `ipynb`-tiedoston siellä, meillä on toimiva kilpikonna. Pistetäänhän se liikkeelle! # + import turtle turtle.reset() #Kirjoita kilpikonnan ohjauskoodi tähän turtle.fd(100) turtle.Screen().mainloop() # - # Jos saat virheilmoituksen, jokin meni pieleen, eikä Jypyter-ympäristö asentunut tai käynnistynyt koneellasi oikein. Tarkista, että olet käynnistänyt *Jupyter Notebook* -palvelimen käynnistysvalikosta ja että selain on aukaissut seuraavan sivun: [http://127.0.0.1:8888](http://127.0.0.1:8888) # Kun ajat kodin kilpikonna vain kököttää paikallaan. Pistetään siihen vähän liikettä. **ENSIN KUITENKIN SULJE AUENNUT KILPIKONNAIKKUNA RUKSISTA. JOS ET TEE TÄTÄ, ET NÄE MUUTTUNEEN KOODIN VAIKUTUKSIA.** # # Alla joitain "temppuja", mitä kilpikonna osaa: # ```python # turtle.forward(10) # kilppari kulkee eteenpäin annetun määrän askelia # turtle.right(45) # kilppari kääntyy annetun määrän asteita oikealle # turtle.left(90) # kilppari kääntyy annetun määrän asteita oikealle # ``` # # ## Tehtävä: # Käytä näitä kolmea käskyä ja silmukoita tuottaaksesi erilaisia toinen toistaan villimpiä kuvioita. Voit kirjoittaa koodin yllä olevaan runkoon. Huom: Komennot ovat myös lyhennettävissä `fd`, `rt`, `lt` jne. # # Jos jäit koukkuun, [Pythonin turtle-modulin dokumentaatiosta](https://docs.python.org/3/library/turtle.html) voit lukea mitä kaikkia temppuja Python-kilppari osaa. Voit mm. vaihtaa ikkunan taustaväriä, kilpikonnan muotoa, viivan paksuutta ja väriä jne. 
# # ## Lisätehtävä: # Dokumentaatiota lukemalla paljastuu, että kilpikonnaikkunan kautta voi lukea hiiren ja näppäimistön painalluksia. Tämä mahdollistaa jo vaikka mitä! Kokeile mitä alla oleva koodi tekee ja jatkokehitä koodia vaikka peliksi asti! # + from mobilechelonian import Turtle t = Turtle() def painettu_ylos(): t.setheading(90) t.forward(20) def painettu_alas(): t.setheading(-90) t.forward(20) def painettu_vasemmalle(): t.setheading(180) t.forward(20) def painettu_oikealle(): t.setheading(0) t.forward(20) # Kiinnitetään näppäimenpainallukset edellä oleviin aliohjelmiin # kun näppäintä painetaa, aliohjelmaa kutsutaan. screen = turtle.Screen() screen.onkey(painettu_ylos, "w") screen.onkey(painettu_vasemmalle, "a") screen.onkey(painettu_alas, "s") screen.onkey(painettu_oikealle, "d") screen.listen() screen.mainloop() # - # ![Onnittelut](images/congratulations.png) # # Onneksi olkoon, olet nyt oppinut pythonin perusteet ja sitä mukaa kun otat edellä oppimaasi haltuun, voit rakentaa aina vain isompia ja hienompia juttuja. Ai mitä vai? Alla joitain esimerkkejä. Huomaa kuitenkin, että vain osa näistä toimii Jupyter-ympäristössä, joka on tarkoitettu paremminkin tieteelliseen käyttöön ja automaattiseen tietojenkäsittelyyn. Suositeltavaa onkin sovellusten tai kuvan ja äänen kanssa työskennellessä käyttää kehitysympäristöä kuten [Visual Studio Code](https://code.visualstudio.com/docs/languages/python). 
Sen ja modulien avulla: # # * Voit kirjoittaa pelejä (googleta `"pygame-zero"`) # * Toistaa ja tehdä musiikkia ja videoita (googleta `"pymedia"`) # * Rakentaa verkkosivuja ja -sovelluksia (googleta `"python flask"`) # * Rakentaa mobiilisovelluksia ja -pelejä (googleta `"kivy"`) # * Tehdä matematiikan, fysiikan ja muiden aineiden kotitehtäviä (googleta `"scientific python"` tai `"<opetettavan aiheen englanninkielinen avainsana tähän> python"`) # # Tämä on mahdollista, koska lähes kaikkeen löytyy Python -moduli: # ![XKCD Python](http://imgs.xkcd.com/comics/python.png "Lähde: xkcd.com") #
YoungCodersNotebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 9. Minimum-maximum módszer, MinMaxMethod # # A minimum-maximum módszeres kiértékelésre két különböző út van, az interaktív matplotlib ablak, illetve a manuális minimum maximum megadás. Itt a már meglévő interferogramot értékelem ki, illetve szimulált interferogramokon is bemutatok funkciókat. import numpy as np import matplotlib.pyplot as plt import pysprint as ps # + m = ps.MinMaxMethod.parse_raw( 'datasets/ifg.trt', 'datasets/ref.trt', 'datasets/sam.trt', skiprows=8, meta_len=6, delimiter=";", decimal="," ) m.chdomain() m.slice(2, 3.9) m.plot() # - # ### 9.1 A manuális módszer # Pontosan megtalálni a szélsőértékek helyét *csak kódot használva* különböző adatsorok esetén elég nehéz feladat. Manuális beállítás esetén `MinMaxMethod.xmin` és `MinMaxMethod.xmax` paramétereket kell megadni, melyek a minimumok és maximumok helyeire mutató `np.ndarray`-ok. A szélsőértékek megtalálásához a `MinMaxMethod.detect_peak` és a `MinMaxMethpd.detect_peak_cwt` két beépített segédfüggvény. Ez a két függvény a `scipy.signal.find_peaks` és a `scipy.signal.find_peaks_cwt` függvényeket használja a kódon belül. # + xmax, ymax, xmin, ymin = m.detect_peak(pmax=0.5, pmin=4, threshold=0.15) plt.figure() m.plot() plt.plot(xmax, ymax, "ko", label="maximumok") plt.plot(xmin, ymin, "bo", label="minimumok") plt.legend() # - # Látható, hogy bármennyit is állítgatjuk a paraméterek értékeit, tökéletes eredményt a legtöbb esetben nem tudunk elérni. A `detect_peak` és a `detect_peak_cwt` rögzíti automatikusan a minimumok és a maximumok helyét. Ezután két lehetőségünk van. 
Az első, hogy meghívjuk a `build_phase(reference_point, SPP_callbacks=None)` függvényt, amivel a program az eddig megadott szélsőértékek, a referencia pont és az SPP-k helyzetéből meghatározza a fázist (`ps.core.phase.Phase`), majd az kerül visszatérítésre. Fontos megadni az `SPP_callbacks` argumentumot. Ez lehet szám, vagy list számokkal. A programnak fontos tudnia ezekről, hiszen ezek adják meg azokat a pozíciókat, ahol a fázis menete előjelet vált. Ha nem adtuk meg az `SPP_callbacks` argumentumot, akkor a program megnézi, hogy az interferogram objektumon állítottunk-e be állandó fázisú pontot, és amennyiben igen azt fogja használni. fazis = m.build_phase(reference_point=2.355, SPP_callbacks=2.77); fazis.plot() # Látható, hogy ebben az esetben a referencia pont környékén letörés tapasztalható. Ez azért van, mert a program a referencia pontból kiindulva két irányba kezdi felépíteni a fázist, így ott legtöbbször a görbe nem folytonos. Itt két lehetőségünk van: használjuk a `flip_around(value, side="left")` függvényt, és átfordítjuk a fázisgrafikon megfelelő részét, vagy egyszerűen a `slice(start, stop)` függvényt meghívva csak az egyik oldalt használjuk. Itt egyszerűen csak a referencia ponttól nagyobb körfrekvencia értékeket fogom használni. fazis.slice(2.355, None) fazis.fit(reference_point=2.355, order=3); fazis.plot() # Köszönhetően a szélsőértékek pontatlan meghatározásának az eredmény is eléggé pontatlan # (körülbelül GD = $-83 fs$, GDD = $165 fs^2$, TOD = $115 fs^3$ lenne a valós), ráadásul az előjelek sem stimmelnek. # # ### 9.2 Az interaktív módszer # # Vizsgáljuk meg, hogy pontos szélsőértékek esetén hogyan fest a fenti számolás. Ehhez használom az `init_edit_session` függvényt, ahol bejelölöm a szélsőértékeket. Pontot hozzáadni az `i`, törölni a `d` billentyűvel lehet. Ezután a `calculate` metódust fogom használni, amely felépíti a fázist és görbét is illeszt. 
# # # **FONTOS:** # Az állandó fázisú pontokat is be kell jelölni, mint szélsőérték, mivel a program megkeresi a megadott SPP helyzetekhez a legközelebbi szélsőértéket és azt kezeli állandó fázisú pontként. # interaktív módba váltás with ps.interactive(): # az interaktív szélsőérték kereső elindítása m.init_edit_session(threshold=0.3) # + # Itt nem adok meg SPP_callbacks értéket, hanem magára az objektumra állítom be # az állandó fázisú pont helyét. Ekkor ezt fogja a használni a program. # Ha mindkettő adott, akkor az SPP_callbacks argumentum értéke preferált. m.positions = 2.77 m.calculate(reference_point=2.355, order=3, allow_parallel=True, show_graph=True); # - # Fent a `calculate` metódusban az `allow_parallel` argumentumról: Az alapértelmezett értéke `False`, vagyis a metódus csak felépíti a fázist, majd nem végez semmilyen vizsgálatot rajta, hanem csak a megadott görbét illeszti rá, majd abból számolja a diszperziós együtthatókat. Ez akkor lehet jó, amikor a referencia pont és az SPP egybeesik, vagy a referencia pont az adatsor szélén van, mivel ezekben az esetekben a görbében nincs törés. Ha `True`, akkor felbontja a referencia pont mentén a fázisgrafikont, majd külön kiszámolja mindkét oldalra a diszperziós együtthatókat. Ha azok kevéssé térnek el, akkor az együtthatók átlagát adja vissza, természetesen az előjelek egyeztetésével. Ha az együtthatók egy előre meghatározott határnál jobban eltérnek a két oldalon, akkor csak az egyik oldalon számolt együtthatókat adja vissza. Ekkor mindig azt az oldalt használja, ahol több adatpontunk van. Látható, hogy a fenti példában a csak a jobb oldalt használta. 
A teljes output itt is el van rejtve a felhasználó elől, de elérhető: lefuttatom újra, úgy, hogy látható legyen: # + import logging logger = logging.getLogger() logger.setLevel(logging.INFO) m.calculate(reference_point=2.355, order=3, allow_parallel=True, show_graph=True); # - # Itt láthatóvá válik pl., hogy a két oldalra milyen eredmény adódik (GD, GDD, TOD, FOD, QOD az adatok sorrendje) és hány adatot használtunk fel: # > left side evaluated to [34.99186545 -737.69982878 -3030.40892732 0. 0.], used 13 points. # # > right side evaluated to [-74.99734682 155.78155789 122.85346734 0. 0.], used 56 points. # # # Az interaktív felületet használva a kapott együtthatók jóval pontosabbak lettek. Ha már egyszer lefuttattuk a `build_phase` vagy a `calculate` metódusokat, akkor az objektum eltárolja az eredeti fázist. Ezt természetesen módosíthatjuk és számolhatjuk belőle a diszperziós együtthatókat ahogyan egy előző munkafüzetben már bemutattam. A következőképpen érhetjuk el a tárolt fázist: # + eredeti_fazis = m.phase eredeti_fazis.plot() # - eredeti_fazis.data # ### 9.3 Szimuláció több SPP-vel # Bár a minimum-maximum módszer nem túl pontos magasabb rendű diszperzió esetén, a program képes tetszőleges számú SPP esetén is felépíteni a fázist. Erre mutatok itt egy példát. # + #visszaállítom a log szintet az alapbeállításra logger.setLevel(logging.CRITICAL) g = ps.Generator(1.9, 3, 2.355, delay=0, GD=100, GDD=3000, FOD=-500000, normalize=True, resolution=0.005) g.generate() myminmax = ps.MinMaxMethod(*g.data) myminmax.plot() # - # Ezen az interferogramon 2.186, 2.322 és 2.561 PHz körfrekvenciaértéknél van állandó fázisú pont. Az alábbi cellában ismét az interaktív panelt használva bejelölöm a szélsőértékek helyét (az állandó fázisú pontokat is). with ps.interactive(): myminmax.init_edit_session(threshold=0.85) # az állandó fázisú pontok beállítása myminmax.positions = 2.186, 2.322, 2.561 myminmax.calculate(2.355, 4, allow_parallel=True, show_graph=True);
doc/hu_minmax.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Rainbow with Quantile Regression # ## Imports # + import gym import numpy as np import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from IPython.display import clear_output from matplotlib import pyplot as plt # %matplotlib inline from timeit import default_timer as timer from datetime import timedelta import math from utils.wrappers import * from agents.DQN import Model as DQN_Agent from utils.ReplayMemory import PrioritizedReplayMemory from networks.layers import NoisyLinear from utils.hyperparameters import Config # - # ## Hyperparameters # + config = Config() config.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") #Multi-step returns config.N_STEPS = 3 #misc agent variables config.GAMMA=0.99 config.LR=1e-4 #memory config.TARGET_NET_UPDATE_FREQ = 1000 config.EXP_REPLAY_SIZE = 100000 config.BATCH_SIZE = 32 config.PRIORITY_ALPHA=0.3 config.PRIORITY_BETA_START=0.4 config.PRIORITY_BETA_FRAMES = 100000 #epsilon variables config.SIGMA_INIT=0.5 #Learning control variables config.LEARN_START = 10000 config.MAX_FRAMES=1000000 #Quantile Regression Parameters config.QUANTILES=51 # - # ## Network class DuelingQRDQN(nn.Module): def __init__(self, input_shape, num_actions, sigma_init=0.5, quantiles=51): super(DuelingQRDQN, self).__init__() self.input_shape = input_shape self.num_actions = num_actions self.quantiles=quantiles self.conv1 = nn.Conv2d(self.input_shape[0], 32, kernel_size=8, stride=4) self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.adv1 = NoisyLinear(self.feature_size(), 512, sigma_init) self.adv2 = NoisyLinear(512, self.num_actions*self.quantiles, sigma_init) self.val1 = NoisyLinear(self.feature_size(), 512, 
                                sigma_init)
        self.val2 = NoisyLinear(512, 1*self.quantiles, sigma_init)

    def forward(self, x):
        """Return quantile values with shape (batch, num_actions, quantiles)."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(x.size(0), -1)  # flatten conv features per sample

        adv = F.relu(self.adv1(x))
        adv = self.adv2(adv).view(-1, self.num_actions, self.quantiles)

        val = F.relu(self.val1(x))
        val = self.val2(val).view(-1, 1, self.quantiles)

        # Dueling combination: subtract the mean advantage over actions so the
        # value stream carries the shared, state-dependent baseline.
        return val + adv - adv.mean(dim=1).view(-1, 1, self.quantiles)

    def feature_size(self):
        # Push a dummy zero observation through the conv trunk to discover the
        # flattened feature length instead of hard-coding it.
        return self.conv3(self.conv2(self.conv1(torch.zeros(1, *self.input_shape)))).view(1, -1).size(1)

    def sample_noise(self):
        # Resample the noise of every NoisyLinear head layer
        self.adv1.sample_noise()
        self.adv2.sample_noise()
        self.val1.sample_noise()
        self.val2.sample_noise()

# ## Agent

class Model(DQN_Agent):
    """Rainbow-style agent: QR-DQN + dueling + noisy nets + PER + n-step returns."""

    def __init__(self, static_policy=False, env=None, config=None):
        self.num_quantiles = config.QUANTILES
        # Quantile midpoints tau_hat_i = (2i+1)/(2N), used by the quantile Huber loss
        self.cumulative_density = torch.tensor((2 * np.arange(self.num_quantiles) + 1) / (2.0 * self.num_quantiles), device=config.device, dtype=torch.float)
        self.quantile_weight = 1.0 / self.num_quantiles  # uniform weight when averaging quantiles into Q-values

        super(Model, self).__init__(static_policy, env, config)

        self.nsteps=max(self.nsteps, 3)  # enforce at least 3-step returns

    def declare_networks(self):
        # Online and target copies of the dueling quantile network
        self.model = DuelingQRDQN(self.num_feats, self.num_actions, sigma_init=self.sigma_init, quantiles=self.num_quantiles)
        self.target_model = DuelingQRDQN(self.num_feats, self.num_actions, sigma_init=self.sigma_init, quantiles=self.num_quantiles)

    def declare_memory(self):
        self.memory = PrioritizedReplayMemory(self.experience_replay_size, self.priority_alpha, self.priority_beta_start, self.priority_beta_frames)

    def next_distribution(self, batch_vars):
        """Build the n-step target quantile distribution for a batch (no grad)."""
        batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values, indices, weights = batch_vars

        with torch.no_grad():
            # Terminal transitions keep an all-zero next-state distribution
            quantiles_next = torch.zeros((self.batch_size, self.num_quantiles), device=self.device, dtype=torch.float)
            if not empty_next_state_values:
                self.target_model.sample_noise()
                # Action selection uses the online network (Double-DQN style)
                max_next_action = self.get_max_next_state_action(non_final_next_states)
quantiles_next[non_final_mask] = self.target_model(non_final_next_states).gather(1, max_next_action).squeeze(dim=1) quantiles_next = batch_reward + ((self.gamma**self.nsteps)*quantiles_next) return quantiles_next def compute_loss(self, batch_vars): batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values, indices, weights = batch_vars batch_action = batch_action.unsqueeze(dim=-1).expand(-1, -1, self.num_quantiles) self.model.sample_noise() quantiles = self.model(batch_state) quantiles = quantiles.gather(1, batch_action).squeeze(1) quantiles_next = self.next_distribution(batch_vars) diff = quantiles_next.t().unsqueeze(-1) - quantiles.unsqueeze(0) loss = self.huber(diff) * torch.abs(self.cumulative_density.view(1, -1) - (diff < 0).to(torch.float)) loss = loss.transpose(0,1) self.memory.update_priorities(indices, loss.detach().mean(1).sum(-1).abs().cpu().numpy().tolist()) loss = loss * weights.view(self.batch_size, 1, 1) loss = loss.mean(1).sum(-1).mean() return loss def get_action(self, s): with torch.no_grad(): X = torch.tensor([s], device=self.device, dtype=torch.float) self.model.sample_noise() a = (self.model(X) * self.quantile_weight).sum(dim=2).max(dim=1)[1] return a.item() def get_max_next_state_action(self, next_states): next_dist = self.model(next_states) * self.quantile_weight return next_dist.sum(dim=2).max(1)[1].view(next_states.size(0), 1, 1).expand(-1, -1, self.num_quantiles) # ## Plot Results def plot(frame_idx, rewards, losses, sigma, elapsed_time): clear_output(True) plt.figure(figsize=(20,5)) plt.subplot(131) plt.title('frame %s. reward: %s. 
time: %s' % (frame_idx, np.mean(rewards[-10:]), elapsed_time)) plt.plot(rewards) if losses: plt.subplot(132) plt.title('loss') plt.plot(losses) if sigma: plt.subplot(133) plt.title('noisy param magnitude') plt.plot(sigma) plt.show() # ## Training Loop # + start=timer() env_id = "PongNoFrameskip-v4" env = make_atari(env_id) env = wrap_deepmind(env, frame_stack=False) env = wrap_pytorch(env) model = Model(env=env, config=config) episode_reward = 0 observation = env.reset() for frame_idx in range(1, config.MAX_FRAMES + 1): action = model.get_action(observation) prev_observation=observation observation, reward, done, _ = env.step(action) observation = None if done else observation model.update(prev_observation, action, reward, observation, frame_idx) episode_reward += reward if done: model.finish_nstep() model.reset_hx() observation = env.reset() model.save_reward(episode_reward) episode_reward = 0 if np.mean(model.rewards[-10:]) > 19: plot(frame_idx, all_rewards, losses, timedelta(seconds=int(timer()-start))) break if frame_idx % 10000 == 0: plot(frame_idx, model.rewards, model.losses, model.sigma_parameter_mag, timedelta(seconds=int(timer()-start))) model.save_w() env.close() # -
src/model/pytorch/21-RL/DeepRL-Tutorials/10.Quantile-Rainbow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import pyplot as plt import numpy as np import pandas as pd from scipy import stats # + #df = pd.read_csv("final-from-raw/results/ancA5A6_summary.csv",index_col=0) df = pd.read_csv("final-from-raw_alt/results/altAll_summary.csv",index_col=0) df = df[df.E == -4] df.head() # + cutoff = -4.0 num_samples = 1000 divergent = df["hA5"] convergent = df["hA5-hA6"] ancestral = df["aA5A6-hA5"] + df["aA5A6-hA5-hA6"] #divergent = df["hA6"] #convergent = df["hA5-hA6"] #ancestral = df["aA5A6-hA6"] + df["aA5A6-hA5-hA6"] regions = {"divergent":np.sum(divergent[df.E >= cutoff]), "convergent":np.sum(convergent[df.E >= cutoff]), "ancestral":np.sum(ancestral[df.E >= cutoff])} region_names = np.arange(len(regions.keys()),dtype=np.int) fx_vector = np.array([regions[r] for r in regions.keys()]) fx_vector = fx_vector/np.sum(fx_vector) total_peptides_vs_E = divergent + convergent + ancestral # - fx_vector # + out_dict = {"E":[], "obs":[], "mu":[], "sigma":[], "ninetyfive":[], "p":[], "region":[]} for i, num_pep in enumerate(total_peptides_vs_E): # What did we see in each region what_was_seen = np.array((divergent[i],convergent[i],ancestral[i])) out = np.zeros((num_samples,len(region_names)),dtype=int) for j in range(num_samples): # Draw randomly, with replacement, from the region names with fx_vector # probabilities num_pep times. 
        s = np.bincount(np.random.choice(region_names,
                                         size=num_pep,
                                         replace=True,
                                         p=fx_vector))
        # bincount drops trailing empty regions; pad into the fixed-width row
        out[j,:len(s)] = s

    # Mean and standard deviations of draw distributions
    mu = np.mean(out,axis=0)
    sigma = np.std(out,axis=0)

    # Calculate probability of seeing what_was_seen under the null
    # (normal approximation to the bootstrap distribution)
    Z = (what_was_seen - mu)/sigma
    p = stats.norm.sf(abs(Z))  # one-tailed survival function

    # Record one row per region for this peptide count
    for j, r in enumerate(regions.keys()):
        out_dict["E"].append(df.E.iloc[i])
        out_dict["obs"].append(what_was_seen[j])
        out_dict["mu"].append(mu[j])
        out_dict["sigma"].append(sigma[j])
        # NOTE(review): 2.92*sigma is wider than the usual 1.96*sigma 95% band
        # despite the "ninetyfive" name -- confirm the intended interval.
        out_dict["ninetyfive"].append(sigma[j]*2.92)
        out_dict["p"].append(p[j])
        out_dict["region"].append(r)

out_df = pd.DataFrame(out_dict)
# -

# Scratch calculation (pairs per half-matrix)
4992913/(3382*3382/2)

# +
fig, ax = plt.subplots(figsize=(5.5,5))

def add_series(df,region,color,ax):
    """Plot one region's observed counts plus a shaded mu +/- ninetyfive band."""
    a = df[df.region == region]
    ax.plot(a.E,a.obs,color=color,linewidth=3)
    # Closed polygon for fill(): upper band forward, lower band reversed
    x = np.concatenate((a.E,a.E[::-1]))
    y = np.concatenate((a.mu + a.ninetyfive,(a.mu - a.ninetyfive)[::-1]))
    ax.fill(x,y,color=color,alpha=0.5)

#add_series(out_df,"ancestral","gray",ax)
#add_series(out_df,"divergent","purple",ax)
#add_series(out_df,"convergent","pink",ax)

add_series(out_df,"ancestral","green",ax)
add_series(out_df,"divergent","orange",ax)
add_series(out_df,"convergent","pink",ax)

ax.set_ylim(0,900)
ax.set_xlabel("E")
ax.set_ylabel("counts")

#fig.savefig("/Users/harmsm/Desktop/hA6-counts-95.pdf")
# -

out_df[out_df.E == -5.5]

43 + 19 + 15
fig_4/high-e-stats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 1 import random import time import gym # In probability theory, the law of large numbers is a theorem that describes the result of performing the same experiment a large number of times. According to the law, the average of the results obtained from a large number of trials should be close to the expected value, and will tend to become closer as more trials are performed. episodes = [5,100,10000,2500000] for episode in episodes: h = 0; t = 0 for i in range(episode): if random.random() > 0.5: h += 1 else: t += 1 avg_h = h/episode avg_t = t/episode print("Episodes: {:8d} Avg.Heads: {:.4f} Average Tails: {:.4f}".format(episode, avg_h, avg_t)) # You create an environment using gym.make() env = gym.make("Taxi-v2") # Here we have created a taxi environment, but before we can act in this environment, we must reset it. When the environment is reset, it will return a state. You can render the environment using env.render(). print(env.reset()) env.render() # Gym also allows you to access information from the environment such as the number of states and actions. This is extremely useful when programming an agent to solve an environment. n_actions = env.action_space.n n_states = env.observation_space.n print("Available Actions: {} \nPossible States: {}".format(n_actions, n_states)) # In this environment we can overide the current state. env.env.s = 362 env.render() # In the Taxi environment there are 500 possible states (0-499) and 6 possible actions (0-5). To perform an action, we use the env.step() method. When you perform a step Gym will return a tuple including the new state, the reward, a boolean stating if the environment has terminated and lastly info used for debugging (but is considered cheating if the agent uses). 
# Here we take action 0 and the state changes to 462, we receive a reward of -1, the environment has not terminated and our state transition happened with a probability of 1.

env.step(0)

# The aim of the Taxi environment is to move to the passenger (marked in blue), pick them off and drop off at the destination (marked in purple). With enough random actions you can solve this environment, though you will not get a very good reward.

# +
state = env.reset()
counter = 0   # number of random steps taken
G = 0         # cumulative (undiscounted) return
reward = None

# Reward 20 is only granted on a successful drop-off, so loop until solved.
while reward != 20:
    state, reward, done, info = env.step(env.action_space.sample())
    G += reward
    counter += 1

print("Solved in {} steps with a total reward of {}".format(counter, G))
# -

env = gym.make("MsPacman-v0")
state = env.reset()

# For the Atari 2600 environments we can get the action meanings.

meanings = env.env.get_action_meanings()
for i in range(env.action_space.n):
    print("Action {}: {}".format(i, meanings[i]))

# We can try playing pacman using completely random actions, but that will not get us very far.

# Play one episode with uniformly random actions, rendering each frame.
state = env.reset()
done = None
while done != True:
    state, reward, done, info = env.step(env.action_space.sample())
    time.sleep(0.05)  # slow the render loop to roughly real time
    env.render()

# To get a complete list of all the environments.

from gym import envs
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
# NOTE(review): the loop variable `env` shadows the MsPacman env object above.
for env in env_ids:
    print(env)
Chapter01/Chapter 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 데이터셋을 좀 더 쉽게 다룰 수 있도록 유용한 도구로서 torch.utils.data.Dataset과 torch.utils.data.DataLoader를 제공 한다. # 이를 사용하면 미니 배치 학습, 데이터 셔플(shuffle), 병렬 처리까지 간단히 수행할 수 있다. 기본적인 사용 방법은 Dataset을 정의하고, 이를 DataLoader에 전달하는 것이다 # # # 커스텀 데이터셋(Custom Dataset) # torch.utils.data.Dataset을 상속받아 직접 커스텀 데이터셋(Custom Dataset)을 만들 수 있다. # Dataset을 상속받아 다음 메소드들을 오버라이드 하여 커스텀 데이터셋을 생성해 보자. # 커스텀 데이터셋을 만들 때, 일단 가장 기본적인 뼈대는 아래와 같다. # ``` # class CustomDataset(torch.utils.data.Dataset): # def __init__(self): # # def __len__(self): # # def __getitem__(self, idx): # ``` import numpy as np import pandas as pd import torch from torch.utils.data import Dataset # ### Diabetes dataset # Diabetes dataset은 총 442명의 당뇨병 환자에 대한 자료이다. # age, sex, body mass index, average blood pressure, 6개의 혈청값으로 이루어져 있다. # 442명의 당뇨병 환자를 대상으로한 검사 결과를 나타내는 데이터이다. # # - 타겟 데이터 : 1년 뒤 측정한 당뇨병의 진행률 # # - 특징 데이터 (이 데이터셋의 특징 데이터는 모두 정규화된 값이다.) # # - Age # - Sex # - Body mass index # - Average blood pressure # - S1 # - S2 # - S3 # - S4 # - S5 # - S6 # df = pd.read_csv('diabetes.csv') df.info() # ### 문제 1 # 다음을 참조하여 `diabetes.csv` 파일을 읽고 custom dataset으로 생성해 보시오. # # * len(dataset)을 했을 때 데이터셋의 크기를 리턴할 len # * dataset[i]을 했을 때 i번째 샘플을 가져오도록 하는 인덱싱을 위한 get_item # # TODO : 다음의 코드를 완성하시오. # class CustomDataset(torch.utils.data.Dataset): # def __init__(self): # # 여기에 코드를 작성하시오. # def __len__(self): # # 여기에 코드를 작성하시오. # def __getitem__(self, idx): # # 여기에 코드를 작성하시오. 
class CustomDataset(Dataset):
    """Diabetes dataset loaded from ``diabetes.csv``.

    Each row holds ten normalized features (age, sex, bmi, bp, s1..s6)
    followed by the target (disease progression one year later).
    """

    def __init__(self):
        # skiprows=1 skips the CSV header line
        data = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32, skiprows=1)
        self.x_data = torch.from_numpy(data[:, :-1])             # age,sex,bmi,bp,s1,s2,s3,s4,s5,s6
        self.y_data = torch.from_numpy(data[:, -1]).view(-1,1)   # target, shaped (N, 1) for MSELoss

    def __getitem__(self, idx):
        """Return the (features, target) pair at position ``idx``."""
        x = self.x_data[idx]
        y = self.y_data[idx]
        return x, y

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.x_data)

# ### Problem 2
# Create the dataset and inspect a sample via the `__getitem__()` method.

# +
dataset = CustomDataset()

dataset.__getitem__([0])
# -

# ### Problem 3
# Wrap the dataset in a `DataLoader` so it can feed a neural-network model.
# Choose a suitable batch_size.

# +
from torch.utils.data import DataLoader

dataloader = DataLoader(dataset, batch_size=10, shuffle=True)

# FIX: Python 3 iterators have no .next() method (iter(dataloader).next()
# only worked on very old PyTorch loader iterators); use the builtin next().
x, y = next(iter(dataloader))
print(x.shape, y.shape)
# -

# ### Problem 4
# Build a neural-network model that predicts diabetes progression.

# +
import torch
from torch import nn

model = nn.Sequential(nn.Linear(10, 1))
print(model)
# -

# ### Problem 5
#
# Set up the loss function and the optimizer, choosing a suitable learning_rate.

criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)

# ### Problem 6
# Train the model, choosing a suitable number of epochs.

# +
epochs = 1000
for epoch in range(epochs):
    running_loss = 0
    for data, target in dataloader:
        optimizer.zero_grad()
        pred = model(data)
        loss = criterion(pred, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: runs after the inner loop finishes each epoch
        if epoch % 100 == 0 :
            print(f"Training loss: {running_loss/len(dataloader)}")
# -

# ### Problem 7
# ```
# new_var = torch.FloatTensor([[ 0.0381, 0.0507, 0.0617, 0.0219, -0.0442, -0.0348,
# -0.0434, -0.0026, 0.0199, -0.0176]])
# ```
# Predict diabetes progression for this new data point.

# +
new_var = torch.FloatTensor([[ 0.0381, 0.0507, 0.0617, 0.0219, -0.0442, -0.0348,
                               -0.0434, -0.0026, 0.0199, -0.0176]])

with torch.no_grad():
    y_hat = model(new_var)
    print(y_hat)
# -
02DNN/04Custom_Dataset_sol.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf print(tf.__version__) from data_utils import load_tiny_imagenet from tensorflow import keras print(keras.__version__) import numpy as np import pandas as pd import matplotlib.pyplot as pyplot import image import cv2 import os scale=5 df=pd.read_csv('words-tiny-dataframe.txt', delimiter="\t", header=None) flist=os.listdir(df[0][0]+'/images') # + print(type(flist)) # + x_train=[] y_train=np.full(500,df[1][0]) i=1 for img in flist: image=cv2.imread(df[0][0]+'/images/'+img) image=np.asarray(image) x_train.append(image) # - cdf=pd.DataFrame() dpath='/home/kartik/'+df[0][0]+'/images/' print(dpath) cdf['filename']=flist cdf['class']=df[1][0] cdf x_train=np.array(x_train) y_train=np.array(y_train) df[1][0] datagen=keras.preprocessing.image.ImageDataGenerator(rescale=1./255, horizontal_flip=True, vertical_flip=True) x_train=x_train.reshape((x_train.shape[0],64,64,3)) wres=scale*x_train.shape[2] hres=scale*x_train.shape[1] npic=x_train.shape[0] nchan=x_train.shape[3] x_train_aug=np.full((npic,wres,hres,nchan),0) for i in range(0,(x_train.shape[0])): pic=x_train[i] pic=cv2.resize(pic,(wres,hres),interpolation=cv2.INTER_AREA) pic=np.expand_dims(pic, axis=2) pic=cv2.cvtColor(pic,cv2.COLOR_GRAY2RGB) x_train_aug[i]=pic datagen.fit(x_train) os.makedirs('images') for x_batch, y_batch in datagen.flow_from_dataframe(cdf, directory=dpath, x_col="filename", y_col="class", class_mode='binary', batch_size=25, save_to_dir='images', target_size=(hres,wres), save_prefix='aug', save_format='png'): for i in range(25): pyplot.figure(figsize=(10,10)) pyplot.subplot(5,5,i+1) pyplot.xticks([]) pyplot.yticks([]) pyplot.grid=(False) pyplot.imshow(x_batch[i], cmap=pyplot.cm.binary) pyplot.xlabel(y_train[i]) if i == 25: print(i) break break x_batch.shape
aug_tiny_imagenet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# %pylab inline
import sacc
import lmfit
import emcee

# %cd ../../

from lss_like import LSSLikelihood
from lss_theory import LSSTheory

# We are going to test the sampler varying $\Omega_c$, $\Omega_b$, $h_{0}$ and the bias. We use `lmfit`

p = lmfit.Parameters()
p.add_many(('omega_c', 0.3, True, 1e-3,0.7), ('omega_b', 0.05,True,1e-4,0.1), ('h0', 0.8,True,0.1,2),
           ('sigma_8', 0.8, False),('n_s',0.97,False),
           ('omega_k',0,False),('omega_nu',0,False),('w0',-1.0,False),
           ('wa',0.0,False),('des_gals_z_b_1',0.05,False),('des_gals_b_b_1',1.,True,0.1,10.),('des_gals_z_b_2',0.5,False),
           ('des_gals_b_b_2',1.,True,0.1,10.),('des_gals_z_b_3',2.,False),('des_gals_b_b_3',1.,True,0.1,10))

# We define here the likelihood function that is going to be connected to `lmfit` (or any MCMC sampler)

#This is particular to LMFit. The parameters cannot be vectors
#I had to create an intermediate step to fiddle with the bias
#This is kind of slow because it has to open the data file twice we should optimize this
#Also ccl is utterly slow...
def loglike(params,data_file):
    """Likelihood function that connects lmfit with LSSLike

    Args:
    -----
    params (lmfit.Parameters object): Input cosmological parameters.
        Some of them like omega_c, omega_b or h0 are required. More info at CCL.
    data_file (string): Path to the sacc file to analyze.

    Returns:
    --------
    Likelihood (double)
    """
    ll=LSSLikelihood(data_file)
    # Flat dict of scalar parameters; the per-bin z_b/b_b entries are regrouped
    # into per-tracer lists below, which is what LSSTheory expects.
    dic_par = params.valuesdict()
    for tr in ll.s.tracers :
        if tr.type == 'point' :
            dic_par[tr.exp_sample+'_z_b']=[]
            dic_par[tr.exp_sample+'_b_b']=[]
            # Bug fix: iterate the keys in sorted order. Plain dict iteration has
            # no guaranteed order (notably on the Python 2 kernel this notebook
            # declares), so the per-bin values could be appended as e.g.
            # [b_b_2, b_b_1, b_b_3], scrambling the bin <-> bias association.
            # Lexicographic sorting yields _1, _2, _3 (correct for < 10 bins).
            for pkey in sorted(dic_par.keys()):
                if tr.exp_sample+'_z_b_' in pkey:
                    dic_par[tr.exp_sample+'_z_b'].append(dic_par[pkey])
                if tr.exp_sample+'_b_b_' in pkey:
                    dic_par[tr.exp_sample+'_b_b'].append(dic_par[pkey])
    theory = LSSTheory(data_file)
    return ll(theory.get_prediction(dic_par))

loglike(p,'sim_sample/sims/sim_mean.sacc')

mini = lmfit.Minimizer(loglike,p,fcn_args=['sim_sample/sims/sim_mean.sacc'])

res = mini.emcee(burn=10, steps=100, thin=10, params=p)
doc/notebooks/LSSLike_tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Expanding DRS Bundles - SRA Example # Aside from the question of the schema in a bundle covered in the SRA_IDs_and_bundling example the following looks at the capability in the DRS request to expand a bundle. # # To ask for a bundle to be expanded a parameter is added to the request, for example for the SRA server: # ``` # https://locate.be-md.ncbi.nlm.nih.gov/idx/v1/objects/<id>?expand=true # ``` # # DRSClient provides a Python binding for that capability and is used in the examples that follow. # ### Bundling at the experiment level. An SRX accession # # SRA's data model has a number of levels. In descending order they are. # * SRP - Project, a project in which sequencing has been done # * SRS - Sample, a physical sample from the project. What it represents depnds on scientific investigation in the Project. # * SRX - Experiment, the application of a particular sequencing technology to some Sample # * SRR - Run, the run, on a sequencer, of material from the Experiment # # In the following example, SRADRSClient IDentity eXchange service (IDX) is called to get the DRS id which corresponds to a sequencing experiment (SRX). A DRS getObject call with expand=True results in the Run (SRR) nested within the Experiment to be expanded. # + from fasp.loc import SRADRSClient # Set up a client to access NCBI's DRS Server for the Sequence Read Archive (SRA) drsClient = SRADRSClient('https://locate.be-md.ncbi.nlm.nih.gov', public=True) res = drsClient.acc2drs('SRX719843') print(res) drsClient.getObject('16139c5b6f36034eb09768c17a90fd23', expand=True) # - # As a reminder, the unexpanded call looks like this, with only the DRS id provided for the Run. drsClient.getObject('16139c5b6f36034eb09768c17a90fd23') # ### What is the correct expansion? 
# Note that the SRA DRS Server does not expand the bundle all the way down to the actual files. An additional DRS call with each of the file DRS ids would be needed. # # The [DRS 1.1 spec](https://ga4gh.github.io/data-repository-service-schemas/preview/release/drs-1.1.0/docs/) does seems to suggest that one would expect expansion all the way down to the actual objects. It would make sense given the intent of ?expand=true. However, it is possible to see how some readings of the spec might come to the conclusion that expansion is only intended to go to the bundle level. # ## Bundling at a higher level. An SRP accession # # In the SRA_IDs_and_bundling example we saw how to get the DRS id for SRP048601. As of when the notebook was run the DRS id was 5d8b77dd974e1b7c9de4040cbf9a24c7 # # Expanding an SRA project bundle explores the challenges of scaling expansion. # # Here is the unexpanded version # %%time drsRes = drsClient.getObject('5d8b77dd974e1b7c9de4040cbf9a24c7') print(len(drsRes['contents'])) # The full bundle is not printed here. The following is a truncated example. 
# # ```json # {'checksums': [{'checksum': '5d8b77dd974e1b7c9de4040cbf9a24c7', # 'type': 'md5'}], # 'contents': [{'id': 'f2b7f3f7c123a38eb904c5412ce48757', 'name': 'SRX719457'}, # {'id': '16139c5b6f36034eb09768c17a90fd23', 'name': 'SRX719843'}, # {'id': '8fa664d99d3cc9fb701d15e026e14950', 'name': 'SRX719844'}, # {'id': '<KEY>', 'name': 'SRX719845'}, # {'id': '<KEY>', 'name': 'SRX719846'}, # {'id': '4b995cc57ff3d4ebeac9684f2b9f7f7f', 'name': 'SRX719847'}, # {'id': 'b488ab01ce3fa83addea057153ec449c', 'name': 'SRX719848'}, # {'id': 'b3dd0d947f7e901bedf9f5789565ed07', 'name': 'SRX719849'}, # {'id': '<KEY>', 'name': 'SRX719850'}, # {'id': '<KEY>', 'name': 'SRX719851'}, # # ...], # 'created_time': '2012-11-15T14:00:55Z', # 'id': '5d8b77dd974e1b7c9de4040cbf9a24c7', # 'name': 'SRP048601', # 'self_url': 'drs://locate.md-be.ncbi.nlm.nih.gov/5d8b77dd974e1b7c9de4040cbf9a24c7', # 'size': 87447929899239} # ``` # ### The expanded version of an SRA Project bundle # The first thing to note is that it took almost four minutes to expand the bundle, compared with 586 ms without expansion. # %%time drsRes = drsClient.getObject('5d8b77dd974e1b7c9de4040cbf9a24c7', expand=True) # With the same 5070 sequence experiments in the result it is too verbose to list. The following is sufficient to illustrate. Note the following # * Three levels of hierarchy within the expanded bundle # * In this SRA project there is only one run per experiment (see code in next step) # * The content for a run is of a variable nature. i.e. the data model/schema is different. 
# # # ```json # { # "checksums": [ # { # "checksum": "5d8b77dd974e1b7c9de4040cbf9a24c7", # "type": "md5" # } # ], # "contents": [ # { # "contents": [ # { # "contents": [ # { # "id": "60a098a596e5a0155043d4eb42833460", # "name": "NA20362.mapped.ILLUMINA.bwa.ASW.low_coverage.20130415.bam" # }, # { # "id": "81c5d083909a6fe8e23fc55edb9e0d5a", # "name": "NA20362.unmapped.ILLUMINA.bwa.ASW.low_coverage.20130415.bam" # } # ], # "id": "662aecc9370a4efa7af7c926ed411a06", # "name": "SRR1596219" # } # ], # "id": "f2b7f3f7c123a38eb904c5412ce48757", # "name": "SRX719457" # }, # ... # { # "contents": [ # { # "contents": [ # { # "id": "26b441c4bd1909e4303ba409cc6397e3", # "name": "HG01170.unmapped.ILLUMINA.bwa.PUR.exome.20120522.bam" # }, # { # "id": "4aa3aef815edb4aa27f1a3ef4ba7499a", # "name": "HG01170.mapped.ILLUMINA.bwa.PUR.exome.20120522.bam.bai" # }, # { # "id": "6b31d85cf5416c28bca6bb2f4870b5c8", # "name": "HG01170.mapped.ILLUMINA.bwa.PUR.exome.20120522.bam" # } # ], # "id": "673cfcfcefe0e55078efd1408a1eb9d8", # "name": "SRR1597062" # } # ], # "id": "<KEY>", # "name": "SRX720267" # }, # ... 
# {
#     "contents": [
#       {
#         "contents": [
#           {
#             "id": "27efc8168c68f1cb121e42e857900524",
#             "name": "HG01171.unmapped.ILLUMINA.bwa.PUR.exome.20120522.bam"
#           },
#           {
#             "id": "eef85d6d50ca6ad75678ee32167628af",
#             "name": "HG01171.mapped.ILLUMINA.bwa.PUR.exome.20120522.bam"
#           }
#         ],
#         "id": "601e6c573db750028d189b7429d02dd8",
#         "name": "SRR1597064"
#       }
#     ],
#     "id": "<KEY>",
#     "name": "SRX720269"
#   }
# ],
# "created_time": "2012-11-15T14:00:55Z",
# "id": "5d8b77dd974e1b7c9de4040cbf9a24c7",
# "name": "SRP048601",
# "self_url": "drs://locate.md-be.ncbi.nlm.nih.gov/5d8b77dd974e1b7c9de4040cbf9a24c7",
# "size": 87447929899239
# }
# ```
#
#

# Count (and display) the experiments whose expanded bundle holds more than one run.
multi_run_count = 0
for experiment in drsRes['contents']:
    runs = experiment['contents']
    if len(runs) > 1:
        multi_run_count += 1
        print(experiment)
print("No of experiments with more than one run: {}".format(multi_run_count))

# ### In conclusion
#
# * If bundles are to be used (not confirmed) then bundle expansion is a useful capability
# * The value of that capability would be greatest if expansion were to go to the file (binary object) level. It would help to confirm that was the intent of the spec.
# * Expanding bundles may not scale well. The SRA server is helpful in providing a working example. It has been suggested that the option should be provided for a server to respond indicating that expansion is not permitted for a given id.
#
notebooks/drs/Bundle expansion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "-"} # # Recursive decisions and multi-objective optimisation: optimising reservoir release scheduling under conflicting objectives # In this Notebook we will see how simulation models and optimisation algorithms can be used to assist the operation of a water reservoir system. # # <left><img src="../../util/images/Dam1.gif" width = "800px"><left> # # We consider a simple illustrative system where a reservoir is operated to supply water to a domestic consumption node, while ensuring a minimum environmental flow in the downstream river (also called “environmental compensation flow”) and maintaining the water level in the reservoir within prescribed limits. We use a mathematical model to link all the key variables that represent the reservoir dynamics (inflow, storage and outflows) and use model simulation/optimisation to determine the reservoir release scheduling that will best meet the water demand over a coming period of time, given the predicted (or assumed) scenario of future inflows. 
# <left> <img src="../../util/images/system_representation_IO0.png" width = "600px"><left>
# -

# ## Import libraries
# Before getting started, let's import some libraries that will be used throughout the Notebook: (🚨 in order to run the code like in the box below, place the mouse pointer in the cell, then click on “run cell” button above or press shift + enter)

from bqplot import pyplot as plt
from bqplot import *
from bqplot.traits import *
import numpy as np
import ipywidgets as widgets
from IPython.display import display
from Modules.Interactive_release_schedule import Interactive_release_single,Interactive_release_double, Interactive_Pareto_front
# Bug fix: `warnings` was used below without being imported, which raises a
# NameError as soon as this cell is run.
import warnings
warnings.filterwarnings('ignore') # to ignore warning messages

# ## The reservoir model
#
# The mathematical model of the reservoir essentially consists of a water balance equation, where the storage (***s***) at a future time step (for example, at the beginning of the next week) is predicted from the storage at the current time (the beginning of this week) by adding and subtracting the inflows and outflows that will occur during the temporal interval ahead:
#
# $s(t+1) = s(t) + I(t) – E(t) – env(t) - spill(t) – Qreg(t)$
#
# Where
#
# ***s(t)*** = reservoir storage at time-step t, in Vol (for example: ML)
#
# ***I(t)*** = reservoir inflows in the interval [t,t+1], in Vol/time (for example: ML/week). This is usually provided by a flow forecasting system or assumed by looking, for example, at historical inflow records for the relevant time of year
#
# ***E(t)*** = evaporation from the reservoir surface area in the interval [t,t+1], in Vol/time (for example: ML/week).
This is calculated internally to the model, by multipling the evaporation rate for unit surface area (which depends on air temperature) by the reservoir surface area (which is derived from the storage S given that the reservoir geometry is known) # # ***env(t)*** = environmental compensation flow in the interval [t,t+1], in Vol/time (for example: ML/week). This is usually set to the value that was agreed upon with the environemtal regulator ([Learn more about the rational behind environmental flows](https://www.youtube.com/watch?v=cbUrrYq9BmU)) # # ***spill(t)*** = outflow through spillways (if any) in the interval [t,t+1], in Vol/time (for example: ML/week). This is calculated internally to the model, and is equal to the excess volume with respect to the maximum reservoir capacity (so most of the time ***spill(t)*** is equal to 0 as the maximum capacity is not reached, but it occasionally is >0 so that the capacity is never exceeded) # # ***Qreg(t)*** = regulated reservoir release for water supply in the interval [t,t+1], in Vol/time (for example: ML/week). 
# This is a completely free variable that the reservoir operator will need to specify

# <left> <img src="../../util/images/system_representation_IO1.png" width = "600px"><left>

# #### Implementation of the reservoir simulation function
# Here we define a function that implements the reservoir simulation, that is, iteratively apply the mass balance equation and reconstruct the temporal evolution of the reservoir variables over the simulation period

# + code_folding=[]
def syst_sim(N,I,e,d,S0,Smax,env_min,Qreg):
    """Simulate the reservoir water balance over N time-steps.

    Args:
        N (int): number of simulation time-steps (weeks)
        I (sequence): inflow per step (ML/week)
        e (sequence): evaporation per step (ML/week)
        d (sequence): demand per step (ML/week)
        S0 (float): initial storage (ML)
        Smax (float): reservoir capacity (ML)
        env_min (float): target environmental compensation flow (ML/week)
        Qreg (list): requested release per step (ML/week). NOTE: this list is
            modified in place (releases are capped by the resource available)
            and is also returned.

    Returns:
        (S, env, spill, Qreg): storage (length N+1), environmental flow,
        spillage and actual release (each length N), all in ML(/week).
    """
    # Declare output variables
    S = [0]*(N+1) # reservoir storage in ML
    spill = [0]*N # spillage in ML
    env = [env_min]*N # environmental compensation flow
    S[0] = S0 # initial storage
    for t in range(N): # Loop for each time-step (week)
        # If at week t the inflow (I) is lower than the minimum environmental compensation (env_min),
        # then the environmental compensation (env) = inflow (I)
        if env_min >= I[t] :
            env[t] = I[t]
        # If the minimum environmental compensation is higher than the water resource available (S + I - E)
        # then the environmental compensation is equal to the higher value between 0 and the resource available
        if env_min >= S[t] + I[t] - e[t]:
            env[t] = max(0,S[t] + I[t] - e[t])
        # Bug fix: the release can never exceed the resource available after
        # evaporation and environmental flow, whatever the demand is. The previous
        # version only applied this cap when the demand exceeded the available
        # resource, so a requested release larger than availability (but with a
        # lower demand) could drive the storage negative. Capping unconditionally
        # is equivalent in every other case.
        Qreg[t] = min(Qreg[t],max(0,S[t] + I[t] - e[t] - env[t]))
        # The spillage is equal to the higher value between 0 and the resource available exceeding the reservoir capacity
        spill[t] = max(0,S[t] + I[t] - Qreg[t] - env[t] - e[t] - Smax)
        # The final storage (initial storage in the next step) is equal to the storage + inflow - outflows
        S[t+1] = S[t] + I[t] - Qreg[t] - env[t]- e[t] - spill[t]
    return S,env,spill,Qreg
# -

# ## Determining the release scheduling by trial and error (manual optimisation)
# Here we want to use the reservoir model to assist the reservoir operator
in determining the best scheduling of regulated reservoir releases (Qreg) in response to a certain scenario of inflows. The goal is to minimise the deficit with respect to a prescribed water demand, that is, to minimise the objective function: # # $$TSD = \sum_{t=1}^{N} [ \ max( \ 0, \ d(t)-Qreg(t) \ ) \ ]^2 $$ # # where N is the length of the simulation period that we are considering, and d(t) is the water demand for each time-interval in that period, and TSD stands for Total Squared Deficit. Notice that the function $max(0,...)$ enables us to only count the difference between demand d and release u when this is positive, that is, when the release u is smaller than the demand d, and a water shortage is indeed produced. Also, the squaring is a 'mathematical trick' to make sure that larger deficit amounts are given more weight than smaller ones. This translates the fact that small deficit amounts are easier to mitigate and hence more acceptable, while larger ones can cause disproportionately severe impacts and should be avoided as much as possible. # #### Definition of inflow and demand scenarios # Let's assume we want to look at the next 8 weeks the number of weeks (so ***N=8***), and assume we have forecasts of inflows and demand for this period. 
# + N = 8 # (weeks) length of the simulation period I_fore = np.array([15,17,19,11,9,4,3,8]) # (ML/week) time series of inflow forecasts T_fore = np.array([13,13,17,18,20,22,25,26]) # (degC) time series of temperature forecasts d_fore = np.array([15]*N)*T_fore/15 # (ML/week) time series of demand forecasts, estimated as a function of temperature # (this is onviously a very simplified approach, and one could use a much more sophisticated demand model) # - # Plot the inflow and demand forecasts: # Axis characterisitcs x_sc_1 = LinearScale();y_sc_1 = LinearScale(min=0,max=35) x_ax_1 = Axis(label='week', scale=x_sc_1);y_ax_1 = Axis(label='ML/week', scale=y_sc_1, orientation='vertical') # Bar plot inflow_plot = plt.bar(np.arange(1,N+1),I_fore,colors=['blue'],stroke = 'lightgray',scales={'x': x_sc_1, 'y': y_sc_1}, labels = ['inflow'], display_legend = True) #Figure characteristics fig_1a = plt.Figure(marks = [inflow_plot],title = 'Inflow forecast for the next 8 weeks', axes=[x_ax_1, y_ax_1], layout={'min_width': '1000px', 'max_height': '250px'}, legend_style = {'fill': 'white', 'opacity': 0.5}) widgets.VBox([fig_1a]) # Bar plot (we use the same axis as the weekly inflows figure) demand_plot = plt.bar(np.arange(1,N+1),d_fore,colors=['gray'],stroke = 'lightgray',opacities = [1]*N, labels = ['demand'], display_legend = True, stroke_width = 1,scales={'x': x_sc_1, 'y': y_sc_1}) #Figure characteristics fig_1b = plt.Figure(marks = [demand_plot],title = 'Demand forecast for the next 8 weeks', axes=[x_ax_1, y_ax_1], layout={'min_width': '1000px', 'max_height': '250px'}, legend_style = {'fill': 'white', 'opacity': 1}) widgets.VBox([fig_1b]) # #### Definition of other input parameters # Let's define other variables that are needed for the reservoir system simulation, such as the reservoir storage capacity, the environmental compensation flow, etc. 
# + ### Constraints ### s_max = 150 # (ML) Maximum storage (=reservoir capacity) s_min = 0 # (ML) Minimum storage (set to zero for now) env_min = 2 # (ML/week) # Environmental compensation flow ### Initial conditions ### s_0 = 80 # (ML) # Storage volume at the beginning of the simulation period e_fore = T_fore*0.1 # (ML/week) Time series of evaporation from the reservoir (this is a very simplified # approach and could be replaced by a more realistic estimation approach) # - # #### Determining the optimal release scheduling via interactive visualisation # # Use the slider to set the release amount for each week in the simulation period, and in doing so try to minimise the Total Squared Deficit. # Interactive release scheduling fig_2a,fig_2b,release1,release2,release3,release4,release5,release6,release7,release8 = Interactive_release_single( N,I_fore,e_fore,d_fore,s_0,s_max,env_min, demand_plot) HBox_layout = widgets.Layout(justify_content='center') widgets.VBox([widgets.HBox( [release1,release2,release3,release4,release5,release6,release7,release8],layout=HBox_layout),fig_2b,fig_2a]) # **Comment**: clearly it is not possible to fully meet the demand at all times in the simulation period. For example if we fully cover the demand for the first 7 weeks, we drawdown the reservoir to a point that we are forced to dramatically reduce the release in the last week, causing a very severe deficit. A more effective approach is to cause smaller deficits across all time steps, that is, tolerate some small deficits even when in principle we may fully cover the demand, in order to prevent more severe deficits in the later period. This type of approach is called ***hedging*** (see for example [You and Cai 2008](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2006WR005481)). 
# # Btw, the minimum TSD value that can be achieved is **49, try to beat it!** # ## From single to multi-objective optimization # Now let's assume that, besides minimising supply deficits, we are also interested in minimising the chances that the reservoir level go below a minimum threshold. This could be, for example, because the quality of the water deteriorates when levels are low, requiring more costly treatment. We measure how well this criterion is satisfied by the following objective function: # # $$MSV = \sum_{t=1}^{N} max ( \ rc - S(t) , \ 0)$$ # # where, again, N is the length of the simulation period, S is the reservoir storage, and rc is the minimum reservoir storage threshold that should preferably not be transpassed (MSV stands for Minimum Storage Violation). # # For our case, let's set this threshold to 30 ML: # Minimum storage threshold ms = np.array([30]*(N+1)) # in ML # Now use the slider to set the release amount for each week in the simulation period, in a way that jointly minimise the Total Squared Deficit and the Minimum Storage Violation. fig_3a,fig_3b,release1,release2,release3,release4,release5,release6,release7,release8 = Interactive_release_double( N,I_fore,e_fore,d_fore,s_0,s_max,ms,env_min, demand_plot) HBox_layout = widgets.Layout(justify_content='center') widgets.VBox([widgets.HBox( [release1,release2,release3,release4,release5,release6,release7,release8],layout=HBox_layout),fig_3b,fig_3a]) # ***Comment*** It is possible to find a release scheduling that produce no violation of the minimum storage threshold, although it will produce some supply deficit - the record is **305, can you beat it?**. However, one could also allow some violation of the storage threshold in order to reduce the deficits. The two objectives are conflicting: improving on one of them implies doing worse on the other. 
# ## From manual to automatic optimization approach
# As we have seen, when we deal with two conflicting objectives, we cannot find a solution that optimises both simultaneously. If we prioritize one objective, the other one is deteriorated: there is a trade-off between the two. It would then be interesting to explore this tradeoff, and find all the release schedules that produce a different optimal combination of the two objectives. However, this is too cumbersome to do manually. Here we then use a multi-objective optimisation algorithm to do that for us.
#
# To this end, we use the Python Platypus package, and the NSGAII algorithm implemented in it. For more information about these methods and tools, see [Deb et al, 2002](https://ieeexplore.ieee.org/document/996017) and the [Platypus webpage](https://platypus.readthedocs.io). The code to run the optimisation is the following:

# + code_folding=[]
# Optimizer
from platypus import NSGAII, Problem, Real, Integer

def auto_optim(vars):
    """Evaluate one candidate release schedule proposed by the optimiser.

    `vars` holds the N weekly releases; the two returned values are the
    objectives to minimise: the Total Squared Deficit and the Minimum
    Storage Violation.
    """
    # The decision vector is the weekly release schedule, week 1 ... week N.
    Qreg = [vars[k] for k in range(N)]
    s, env, spill, Qreg = syst_sim(N, I_fore, e_fore, d_fore, s_0, s_max, env_min, Qreg)
    # Objective 1: Total Squared Deficit with respect to the demand forecast.
    sdpen = (np.sum((np.maximum(d_fore - Qreg, [0]*N))**2)).astype('int')
    # Objective 2: total violation of the minimum storage threshold.
    lspen = (np.sum((np.maximum(ms - s, [0]*(N+1))))).astype('int')
    return [sdpen, lspen]

# Two objectives, N real-valued decision variables (one release per week),
# each constrained to the range 0-40 ML/week.
problem = Problem(N, 2)
problem.types[:] = [Real(0, 40) for _ in range(N)]
problem.function = auto_optim
population_size = 20
algorithm = NSGAII(problem, population_size)
algorithm.run(10000) # Number of iterations

# Collect the objective values and release schedules of the final population.
results1_optim_relea = np.array([algorithm.result[i].objectives[0] for i in range(population_size)])
results2_optim_relea = np.array([algorithm.result[i].objectives[1] for i in range(population_size)])
solutions_optim_relea = [algorithm.result[i].variables[0:N] for i in range(population_size)]
# -

# #### Plot the optimisation results
# We can visualise the tradeoffs between the two objectives in one plot, called Pareto front, which displays the combination of the two objective values in correspondence to a set of optimised solutions. Click on one point in the Pareto front to visualise the release scheduling that generates that performance, and associated storage time series. What do you think would be a balanced solution?

# Interactive Pareto front
fig_4a,fig_4b,fig_pf = Interactive_Pareto_front(N,I_fore,e_fore,d_fore,s_0,s_max,ms,env_min,
                                                demand_plot,solutions_optim_relea,results1_optim_relea,results2_optim_relea)
widgets.VBox([widgets.HBox([widgets.VBox([fig_4b,fig_4a]),fig_pf])])

# ### References
#
# Deb, K. et al (2002) A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on Evolutionary Computation, 6(2), 182-197, doi:10.1109/4235.996017.
#
# You, J.-Y. and Cai, X. (2008) Hedging rule for reservoir operations: 1. A theoretical analysis, Water Resour. Res., 44, W01415, doi:10.1029/2006WR005481.

# #### Let's go to the next section!: [3.b. Decision making under uncertainty: optimising reservoir pumped inflow scheduling under uncertain hydrological forecasts](3.b.%20Decision%20making%20under%20uncertainty.ipynb)
iRONS/Notebooks/A - Knowledge transfer/3.a. Recursive decisions and multi-objective optimisation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
"""
Implementing decoder"""

"""Imports"""
import numpy as np
# nltk is only needed by the commented-out dataset() prototype below; guard the
# import so the live code still runs where nltk is not installed.
try:
    from nltk import sent_tokenize, word_tokenize
except ImportError:
    sent_tokenize = word_tokenize = None

"""Global definitons"""
_start = 'S_START'
_end = 'S_END'
_unk = 'UNK'

# +
""" util definitions"""
def hyperbolic(net):
    # Element-wise tanh activation.
    return np.tanh(net)

def relu(net):
    return np.maximum(0,net)

def softmax(net):
    # Subtract the max before exponentiating: mathematically identical result,
    # but avoids float overflow for large activations.
    _exp = np.exp(net - np.max(net))
    return _exp/np.sum(_exp)

def predict(scores):
    return np.argmax(scores)

# +
# """ Word preprocessing """  (unfinished prototype, kept for reference;
# note the original signature repeated `_fi1` twice and used `count++`)
# def dataset(_fi1='/Users/preethikapachaiyappa/Documents/MachineLearning/Data/English/9-11-in-perspective.txt', _fi2='/Users/preethikapachaiyappa/Documents/MachineLearning/Data/French/9-11-in-perspective.txt'):
#     file_in_english = open(_fi1)
#     file_in_french = open(_fi2)
#     #file_out = open(_fo,'wb')
#     words = [] #stores unique words encountered in the document as WordItem objects
#     _dict = {} #temporary dictionary to maintain count of each word
#     _dict['UNK'] = 0
#     sentence_embeddings = []
#     count = 0
#     for l in file_in_english:
#         print count, l
#         count += 1
#         l = _start+' '+l+' '+_end
#         split = word_tokenize(l)
#         for w in split:
#             if w in _vocab :
#                 word_index = _vocab[w]
#             else :
#                 word_index = _vocab['UNK']
#             np.append(sentence_embeddings,W_Embedding[word_index],axis=0)
#         backpropogate(sentence_embeddings)
#     file_in.close()
#     #file_out.close()
#     return _vocab, words
# -

class RNNlayer:
    """ RNN nodes for decoder
    hidden state at time step t of decoder is conditioned on hidden state at time step t-1,
    output at time step t-1 and input at time step t
    """

    def __init__(self, inputSize, outputSize, bptt_truncate = 5, hiddenDim = 10):
        """
        inputSize = dimensions of the input embedding
        outputSize = vocabulary size
        hiddenDim = size of the hidden unit in RNN
        bptt_truncate = truncate the number of time steps we calculate the gradient during backpropagation
        """
        self.inputSize = inputSize
        self.outputSize = outputSize
        self.hiddenDim = hiddenDim
        self.bptt_truncate = bptt_truncate
        # Weights drawn uniformly in +/- 1/sqrt(fan-in), the usual RNN initialisation.
        self.w_in = np.random.uniform(-np.sqrt(1./inputSize), np.sqrt(1./inputSize),(hiddenDim, inputSize))
        self.w_hh = np.random.uniform(-np.sqrt(1./hiddenDim), np.sqrt(1./hiddenDim),(hiddenDim, hiddenDim))
        #self.w_outH = np.random.uniform(-np.sqrt(1./hiddenDim), np.sqrt(1./hiddenDim),(outputSize, hiddenDim))
        self.w_out = np.random.uniform(-np.sqrt(1./hiddenDim), np.sqrt(1./hiddenDim),(outputSize, hiddenDim))

    def forwardProp(self, inSentence, expSent):
        """
        inSentence: word embeddings of the input sentence (one vector per time step)
        expSent: word indices in target language vocabulary
        """
        #Total number of time steps equal to number of words in the sentence
        T = len(expSent)
        #Saving all hidden states and outputs during forward propagation
        _h = np.zeros((T,self.hiddenDim))
        _o = np.zeros((T,self.outputSize))
        #Initializing initial output as the start token
        #_o[-1] =
        #For each time step calculating hidden state and output
        # (at t=0, _h[t-1] reads _h[-1], the all-zeros last row: the usual
        # zero initial-hidden-state trick)
        for t in np.arange(T):
            #outIdx = predict(_o[t-1])
            _h[t] = hyperbolic(self.w_in.dot(inSentence[t]) + self.w_hh.dot(_h[t-1])) #+ self.w_outH[:,outIdx:outIdx+1])
            _o[t] = softmax(self.w_out.dot(_h[t]))
        return _o, _h

    def calculateLoss(self, inSentence, expSentence):
        """Cross-entropy loss of a single sentence."""
        # Bug fix: the first argument was `inSentencecontext`, a name defined
        # nowhere (NameError as soon as this method was called).
        o, h = self.forwardProp(inSentence, expSentence)
        # Probability assigned to the correct word at every time step
        correctPred = o[np.arange(len(expSentence)), expSentence]
        #Loss for each sentence
        l = -1 * np.sum(np.log(correctPred))
        return l

    def calculateTotalLoss(self, inSentence, expSentences):
        """Summed cross-entropy loss over a batch of sentences."""
        L = 0.0
        # Bug fix: was `for i in len(inSentence)` (iterating an int raises
        # TypeError) and indexed the undefined `inSentencecontext`.
        for i in range(len(inSentence)):
            L += self.calculateLoss(inSentence[i], expSentences[i])
        return L

    def backPropTT(self, inSentence, expSentence):
        """Backpropagation through time; returns the gradients of w_in, w_hh and w_out."""
        # Total number of time steps equal to number of words in the sentence
        T = len(expSentence)
        # Performing forward propagation
        o, h = self.forwardProp(inSentence, expSentence)
        # Defining gradient variables
        dLdin = np.zeros(self.w_in.shape)
        dLdhh = np.zeros(self.w_hh.shape)
        #dLdoutH = np.zeros(self.w_outH.shape)
        dLdout = np.zeros(self.w_out.shape)
        # Calculating the difference between output and actual output
        # (delta_o aliases o, which is not used again below)
        delta_o = o
        delta_o[np.arange(T), expSentence] -= 1
        # Debug print kept from the original, rewritten so it works on both
        # Python 2 (the declared kernel) and Python 3.
        print('delta_o %s' % delta_o)
        # Calculating gradients backwards through time
        for t in np.arange(T)[::-1]:
            #Output gradient is only dependent on time step t
            dLdout += np.outer(delta_o[t], h[t])
            # Initial delta calculation propagating gradients from output
            delta_t = self.w_out.T.dot(delta_o[t]) * (1 - (h[t] ** 2))
            # Backpropagation through time (for at most self.bptt_truncate steps)
            for bptt_step in np.arange(max(0, t-self.bptt_truncate), t+1)[::-1]:
                # print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
                # Add to gradients at each previous step
                dLdhh += np.outer(delta_t, h[bptt_step-1])
                dLdin += np.outer(delta_t, inSentence[bptt_step-1])
                #dLdoutH += np.outer(delta_t, o[bptt_step-1])
                # Update delta for next step dL/dz at t-1
                delta_t = self.w_hh.T.dot(delta_t) * (1 - h[bptt_step-1] ** 2)
        """TODO review backprop implementation"""
        #return dLdin, dLdhh, dLdoutH, dLdout
        return dLdin, dLdhh, dLdout

    def sgd_step(self, inSentence, expSentence, learningRate):
        """ Performs a single stochastic gradient step"""
        # Calculating gradients
        #dLdin, dLdhh, dLdoutH, dLdout = self.backPropTT(inSentence, expSentence)
        dLdin, dLdhh, dLdout = self.backPropTT(inSentence, expSentence)
        # Updating parameters
        self.w_in -= learningRate * dLdin
        self.w_hh -= learningRate * dLdhh
        #self.w_outH -= learningRate * dLdoutH
        self.w_out -= learningRate * dLdout

    def train_Decoder_With_SGD(self, X_train, Y_train, learningRate = 0.05, nepochs = 20):
        """TODO evaluate losses and update learning rate if required"""
        for epoch in range(nepochs):
            for i in range(len(Y_train)):
                self.sgd_step(X_train[i], Y_train[i], learningRate)
        # Prints rewritten in a form valid on both Python 2 and 3.
        print('W_in %s' % self.w_in)
        print('W_hh %s' % self.w_hh)
        print('W_out %s' % self.w_out)

# Toy data: one "sentence" of three identical 10-dim embeddings, all mapped
# to target index 2.
inSentence = [[[1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1]]]
expSentence = [[2,2,2]]

# +
vocabSize = 5
embSize = 10
W_out = np.random.randn(vocabSize, embSize)
W_hh = np.random.randn(embSize, embSize)
W_in = np.random.randn(embSize)
a = RNNlayer(10,5)
a.train_Decoder_With_SGD(inSentence, expSentence)

# +
# Scratch cell exploring fancy indexing / outer products (prints made
# version-portable: single-argument print() behaves the same on Py2 and Py3).
a = np.array([[1,2,3],[4,5,6]])
a[np.arange(2),[1,2]] -= 1
print(a[1].T)
print(np.outer(a[0],a[1].T))

a = np.array([[1,2],[1,1]])
b = np.array([2,2])
print(a)
print(b)
print(a.dot(b))
Translate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:python-tutorial]
#     language: python
#     name: conda-env-python-tutorial-py
# ---

# # The Basics of Python
#
# This tutorial is meant for people who already know a little something about programming and have used an interpreted language already, something like IDL, Matlab, NCL or R.
#
# We'll start by talking about "objects" in Python. We won't talk about how to create your own _custom_ object (which means being able to write your own `class`) here. We'll leave that for a different tutorial. Then, we'll go further into built-in data types and functions.

# <div class="alert alert-block alert-success">
# <p>Next: <a href="01_objects.ipynb">Objects</a></p>
# </div>
notebooks/bytopic/python-basics/00_introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Demonstration of QAD variants in 2D parameter space
#
# This notebook takes about a minute on a laptop computer as is.
# Should this be too long, you may consider to reduce `num_points` in the last cell,
# which determines the resolution of the landscapes in the plots and influences the
# runtime quadratically.

# + tags=[]
import numpy as np
import matplotlib.pyplot as plt
import pennylane as qml
import lib
import matplotlib as mpl

# + tags=[]
# Set up a qubit device with 2 qubits.
dev = qml.device('default.qubit', wires=2)

# Initialize two parameters. This will correspond to the position
# in the two-dimensional parameter space around which we attempt/perform
# the reconstruction with the various QAD variants.
par = np.array([0.33, 0.43])

# + tags=[]
# Set up the first circuit, the cost function of which is displayed in the first row in Figure 4.
# This circuit depends on two parameters that feed into one gate with a single frequency each.
@qml.qnode(dev)
def circuit_1(param):
    qml.RX(param[0], wires=[0])
    qml.CNOT(wires=[0,1])
    qml.RY(param[1], wires=[1])
    return qml.expval(qml.PauliY(0)@qml.PauliZ(1))

# Build the original QAD model that coincides with the landscape only locally.
QAD_1 = lib.build_qad_model(circuit_1, par)

# Build the extended QAD model that coincides with the landscape globally, because the
# parameter space is only two dimensional.
extQAD_1 = lib.build_extended_qad_model(circuit_1, par)

# Build the generalized QAD model. It coincides with the extended model for gates
# with a single frequency like in circuit_1, and thus with the original cost function.
genQAD_1 = lib.trig_interpolation_qad(circuit_1, par, R=[1, 1])

# + tags=[]
# Set up the second circuit, the cost function of which is displayed in the second row in Figure 4.
# This circuit depends on two parameters that feed into multiple gates with a single frequency each,
# leading to a dependency of the cost function with two frequencies per parameter.
@qml.qnode(dev)
def circuit_2(param):
    qml.RX(param[0], wires=[0])
    qml.RX(param[0], wires=[1])
    qml.CNOT(wires=[0,1])
    qml.RY(param[1], wires=[0])
    qml.RY(param[1], wires=[1])
    return qml.expval(qml.PauliZ(0)@qml.PauliX(1))

# Build the original QAD model that coincides with the landscape only locally.
# Due to the higher frequency contributions, it deviates from the original
# cost function more quickly than for `circuit_1`.
QAD_2 = lib.build_qad_model(circuit_2, par)

# Build the extended QAD model. It does no longer coincide with the original cost
# function globally, because the latter contains higher frequency contributions
# that are not taken into account by this model. This model may not even reproduce
# the local landscape well.
extQAD_2 = lib.build_extended_qad_model(circuit_2, par)

# Build the generalized QAD model. It no longer coincides with the extended model
# but instead is able to reproduce the full cost landscape for two parameters.
genQAD_2 = lib.trig_interpolation_qad(circuit_2, par, R=[2, 2])

# + tags=[]
# Some plot options to make the plot pretty
alpha = 0.6
green = "#209494"
orange = "#ED7D31"
red = "xkcd:brick red"
blue = "xkcd:ocean blue"
bg_alpha = 0.1
bg_red = mpl.colors.to_rgb(red)+(bg_alpha,)
bg_green = mpl.colors.to_rgb(green)+(bg_alpha,)
colors = [green, red, orange, blue]
layout_kwargs = {"pad": 2, "w_pad": 2.5}

def plot_cost_and_model(funs, models, params, equal_signs, shift_radius=5 * np.pi / 8, num_points=20):
    """Plot original cost functions and model functions

    Args:
        funs (list[callable]): Original function per row, plotted on the left-most subplots.
        models (list[callable]): Model functions per row, plotted from left to right.
        params (array): (2D-)Parameters at which to plot the functions and models.
        equal_signs (array[str]): Latex strings of equal/unequal signs to insert between subplots
            (indexed as equal_signs[row, column], so a 2D numpy array is expected).
        shift_radius (float): maximal distance per parameter that determines the plot range.
            (Radius in the \ell_\infinity-norm sense)
        num_points (int): Number of points to evaluate the function on, per parameter direction.
            The total number of evaluations per function/model will be num_points ** 2.

    Returns:
        array: The grid of matplotlib 3D axes, one row per entry of ``funs``.
    """
    coords = np.linspace(-shift_radius, shift_radius, num_points)
    X, Y = np.meshgrid(coords + params[0], coords + params[1])
    num_funs = len(funs)
    num_models = len(models[0])
    # Compute the original cost function and the model on the grid.
    Z_original = np.zeros((num_funs, num_points, num_points))
    Z_model = np.zeros((num_funs, num_models, num_points, num_points))
    for j, (fun, models_) in enumerate(zip(funs, models)):
        Z_original[j] = np.array([[fun(params + np.array([t1, t2])) for t2 in coords] for t1 in coords])
        for i, model in enumerate(models_):
            # NOTE(review): the models are evaluated at the raw shift (t1, t2) while the
            # original functions are evaluated at params + shift — presumably the models
            # built by `lib` are parameterized relative to `params`; confirm against lib.
            Z_model[j, i] = np.array([[model(np.array([t1, t2])) for t2 in coords] for t1 in coords])
    # Set up subplots.
    figsize = (4*(num_models+1), 4*num_funs)
    fig, axs = plt.subplots(num_funs, num_models+1, subplot_kw={"projection": "3d"}, figsize=figsize)
    # Iterate over rows
    for j, row in enumerate(axs):
        # Plot original function values
        row[-1].plot_surface(X, Y, Z_original[j], color=colors[-1], alpha=alpha)
        # Iterate over model columns
        for i, ax in enumerate(row[:-1]):
            # Plot models
            ax.plot_surface(X, Y, Z_model[j, i], color=colors[i], alpha=alpha)
            fig.text(0.25 * (i + 1), 0.75 - 0.5 * j, equal_signs[j, i], fontsize=25, ha='center')
    plt.tight_layout(**layout_kwargs)
    return axs

# + tags=[]
# Resolution for the plot, quadratically determines runtime of this cell.
# Suggestion: 60 (45 secs on Laptop)
num_points = 60

# Equality and un-equality signs for the plot, per row.
equal_signs = np.array([['$\\neq$', '$=$', '$=$'],['$\\neq$', '$\\neq$', '$=$']])
# Original cost functions per row
funs = [circuit_1, circuit_2]
# Model functions per row
models = [[QAD_1, extQAD_1, genQAD_1],[QAD_2, extQAD_2, genQAD_2]]
# Plot the cost function as well as the models, for both rows.
axs = plot_cost_and_model(funs, models, par, equal_signs, num_points=num_points)
plt.savefig(f"gfx/QAD_landscapes_ticks.pdf")
[[ax.set(xticks=[], yticks=[], zticks=[]) for ax in row] for row in axs]
plt.savefig(f"gfx/QAD_landscapes_noticks.pdf")
plot_QAD_landscapes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.0 # language: julia # name: julia-1.0 # --- # Helpful packages for working with images and factorizations # using Pkg; Pkg.add("Images") # using Pkg; Pkg.add("ImageMagick") # And this allows us to load JPEG-encoded images using Images, LinearAlgebra # ### Using a SVD to compress an image # # In this exercise, we'll use a singular value decomposition (SVD) to compress an image -- so that we can store an image without keeping around "unnecessary" information. # # To start, let's define a singular value decomposition. In a SVD, we take a matrix $A$ and factorize it so that # # $$A = USV^T$$ # # where matrices $U$ and $V$ are unitary and hold our singular vectors. Matrix $S$ is diagonal and stores our singular values in decreasing order from top/left to bottom/right. # # In Julia, our images are stored as arrays, so we can think of `yellowbanana` as a matrix yellowbanana = load("images/104_100.jpg") size(yellowbanana) yellowbanana[60,50] # Each element in the array is a color # That means we can take the SVD of this image. So, we can store this picture of a banana as sets of singular vectors and singular values. # # **The reason this is important** is that we'll find that we do **not** need to keep track of *all* the singular vectors and *all* the singular values to store an image that still looks like a banana! This means we can choose to keep only the important information, throw away the rest, and thereby "compress" the image. # # Working with grayscale images is a bit easier, so let's work with the gray version of this banana. banana = Gray.(yellowbanana) # Note that `banana` is a (100 x 100) matrix that stores a pixel -- information about the color -- at each entry. 
size(banana) # If we take the `channelview` of `banana`, we'll see the numbers that describe the color at each point in the array: channelview(banana) # Note that you can call `Gray` on the `channelview` of an image to display the image! Gray.(channelview(banana)) # **So how can we use a SVD to determine what information in an image is really important?** # # The singular values tell us! # # If we have matrices $U$, $S$, and $V$ from our image, we can rebuild that image with the matrix product $USV^T$. # # Taking this matrix product is the same as adding together the outer products of each corresponding pair of vectors from $U$ and $V$, scaled by a singular value ($\sigma$) from $S$. In other words, for a (100 x 100) pixel image, # # $$A_{image} = USV^T = \sum_{i = 1}^{100} \sigma_i \mathbf{u_i}\mathbf{v_i'} $$ # # Every outer product $u_i * v_i'$ creates a (100 x 100) matrix. Here we're summing together one hundred (100 x 100) matrices in order to create the original matrix $A_{image}$. The matrices at the beginning of the series -- those that are scaled by **large** singular values -- will be **much** more important in recreating the original matrix $A_{image}$. # # This means we can approximate $A_{image}$ as # # $$A_{image} \approx \sum_{i = 1}^{n} \sigma_i \mathbf{u_i}\mathbf{v_i'}$$ # # where $n < 100$. # # For example, if we rebuild our image of a banana using only 30 (instead of all 100) singular values, we get # # <img src="images/banana_30svals.png" alt="Drawing" style="width: 100px;"/> # # Using 10, 5, and then 3 singular values, we get # # <img src="images/banana_10svals.png" alt="Drawing" style="width: 100px;"/> # <img src="images/banana_5svals.png" alt="Drawing" style="width: 100px;"/> # <img src="images/banana_3svals.png" alt="Drawing" style="width: 100px;"/> # # #### Exercise # # Write a function called `compress_image`. Its input arguments should be an image and the factor by which you want to compress the image. 
A compressed grayscale image should display when `compress_image` is called. # # For example, # # ```julia # compress_image("images/104_100.jpg", 33) # ``` # # will return a compressed image of a grayscale banana built using 3 singular values. (This image has 100 singular values, so use `fld(100, 33)` to determine how many singular values to keep. `fld` performs "floor" division.) # # *Hints*: # # * Perform the SVD on the `channelview` of a grayscale image. # * In an empty input cell, execute `?svd` to find a function that will perform an SVD for you.
introductory-tutorials/intro-to-julia/compressing_an_image.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Preprocessing - No Lemmatization and No stemming and Removal of stop words
#
# ## Count Vectorizer

import numpy as np
import pandas as pd
import os
import nltk
import re
import sklearn
import multiprocessing
import xgboost as xg
import string
import joblib
from scipy import interp
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from collections import Counter
from sklearn.metrics import confusion_matrix,roc_curve, precision_recall_fscore_support
from sklearn.metrics import auc, accuracy_score, f1_score,precision_score,recall_score, make_scorer
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC,SVC
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import KFold,train_test_split,GridSearchCV
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
# %matplotlib inline


# +
# function to extract data from the file
def read_file(df_new):
    """Split each raw row of *df_new* into a (Post, class) pair.

    Rows that do not split into exactly two comma-separated fields are dropped.
    The class field is stripped of stray quote/bracket characters and cast to int.
    Returns a new DataFrame with columns 'Post' and 'class'.
    """
    print("Started extracting data from file", df_new.shape)
    dfnew = pd.DataFrame()
    dfnew.insert(0, 'Post', None)
    dfnew.insert(1, 'class', None)
    for val in df_new.values:
        appList = []
        sp = np.array_str(val).split(",")
        if len(sp) == 2:
            appList.append(sp[0])
            appList.append(sp[1])
            dfnew.loc[len(dfnew)] = appList
    for i in range(0, dfnew.shape[0]):
        dfnew.values[i][1] = int(dfnew.values[i][1].strip("\'|]|\""))
    print(dfnew['class'].value_counts())
    print("Finished extracting data from file", dfnew.shape)
    return dfnew


# +
# performing data cleaning on the formspring.me dataset
def post_tokenizing_dataset1(df):
    """Clean formspring.me posts: strip markup/entities, keep alphabetic
    tokens of length >= 3, and return a DataFrame of (Post, class) rows
    for posts that still contain at least one token.
    """
    print("Started cleaning data in dataframe", df.shape)
    #print(df.head(5))
    wpt = nltk.WordPunctTokenizer()
    stop_words = nltk.corpus.stopwords.words('english')
    lancaster_stemmer = LancasterStemmer()
    wordnet_lemmatizer = WordNetLemmatizer()
    token_df = pd.DataFrame()
    token_df.insert(0, 'Post', None)
    token_df.insert(1, 'class', None)
    for val in df.values:
        append_list = []
        filter_val = re.sub(r'Q:', '', val[0])
        filter_val = re.sub(r'&#039;[a-z]{1}', '', filter_val)
        filter_val = re.sub('<[a-z]+>', ' ', filter_val).lower()
        # FIX: re.I|re.A was being passed as the positional `count` argument of
        # re.sub, not as flags; pass it via the `flags` keyword.
        filter_val = re.sub(r'[^a-zA-Z\s]', '', filter_val, flags=re.I | re.A)
        filter_val = [token for token in wpt.tokenize(filter_val)]
        filter_val = [word for word in filter_val if word.isalpha()]
        #tokens=[wordnet_lemmatizer.lemmatize(token) for token in filter_val if token not in stop_words and len(token)>=3]
        tokens = [word for word in filter_val if len(word) >= 3]
        if tokens:
            append_list.append(' '.join(tokens))
            append_list.append(val[1])
            token_df.loc[len(token_df)] = append_list
    print("Finished cleaning data in dataframe", token_df.shape)
    #print(token_df.head(5))
    return token_df


# +
# performing data cleaning on the twitter dataset
def post_tokenizing_dataset3(df):
    """Clean tweets: drop @mentions, HTML entities and URLs, keep alphabetic
    tokens of length >= 3, and return a DataFrame of (Post, class) rows
    for tweets that still contain at least one token.
    """
    print("Started cleaning data in dataframe", df.shape)
    #print(df.head(5))
    wpt = nltk.WordPunctTokenizer()
    stop_words = nltk.corpus.stopwords.words('english')
    lancaster_stemmer = LancasterStemmer()
    wordnet_lemmatizer = WordNetLemmatizer()
    token_df = pd.DataFrame()
    token_df.insert(0, 'Post', None)
    token_df.insert(1, 'class', None)
    for val in df.values:
        filter_val = []
        value = re.sub(r'@\w*', '', val[0])
        value = re.sub(r'&.*;', '', value)
        value = re.sub(r'http[s?]?:\/\/.*[\r\n]*', '', value)
        tokens = [token for token in wpt.tokenize(value)]
        tokens = [word for word in tokens if word.isalpha()]
        #tokens=[wordnet_lemmatizer.lemmatize(token) for token in tokens if token not in stop_words and len(token)>=3]
        tokens = [word for word in tokens if len(word) >= 3]
        if len(tokens) != 0:
            filter_val.append(' '.join(tokens).lower())
            filter_val.append(val[1])
            token_df.loc[len(token_df)] = filter_val
    print("Finished cleaning data in dataframe", token_df.shape)
    #print(token_df.head(5))
    return token_df


# +
# counting the number of unique words in the corpora
def counter_word(text):
    """Return a Counter mapping each word in the corpus *text*
    (a Series of space-joined posts) to its total frequency.
    """
    print("Started counting words")
    count = Counter()
    for i in text.values:
        for word in i.split():
            count[word] += 1
    print("Finished post vector calculation :")
    return count


# +
# removal of words which occur once
def remove_less_occurent_words(token_df, counter):
    """Return a copy of *token_df* with all words that occur exactly once
    in the whole corpus (per *counter*) removed from each post.
    """
    print("Started removing less occurent words", token_df.shape)
    token_df_2 = pd.DataFrame()
    token_df_2.insert(0, 'Post', None)
    token_df_2.insert(1, 'class', None)
    # FIX: use a set for O(1) membership tests instead of scanning a list.
    rare_words = {key for key, cnt in counter.items() if cnt == 1}
    for val in token_df.values:
        # FIX: the original removed items from the list while iterating over it,
        # which skips the element after each removal; filter into a new list instead.
        kept = [word for word in val[0].split(' ') if word not in rare_words]
        token_df_2.loc[len(token_df_2)] = [' '.join(kept), val[1]]
    print("Finished removing less occurent words", token_df_2.shape)
    return token_df_2


# +
# getting the data from csv files
df_data_1 = read_file(pd.read_csv("../../post.csv", sep="\t"))
df_data_2 = read_file(pd.read_csv("../../new_data.csv", sep=","))
df_data_3 = pd.read_csv("../../dataset_4.csv", sep=",")

# +
# calling the function post_tokenizing_dataset1() and post_tokenizing_dataset3() for cleaning
df_data_1 = post_tokenizing_dataset1(df_data_1)
tk = df_data_3[df_data_3['class'] == 1].iloc[0:7500, ]
post_tk = post_tokenizing_dataset3(tk)
post_tk = post_tk.append(df_data_1[df_data_1['class'] == 0].iloc[0:7500, ], ignore_index=True)
print(post_tk['class'].value_counts())
post_tk = sklearn.utils.shuffle(post_tk)
counter_tk = counter_word(post_tk['Post'])
print(len(counter_tk))
token_tk = remove_less_occurent_words(post_tk, counter_tk)
print(tk.shape)
# -

X_train, X_test, y_train, y_test = train_test_split(token_tk['Post'], token_tk['class'], test_size=0.2, stratify=token_tk['class'], random_state=42)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)

vectorizer = CountVectorizer()
X_v = vectorizer.fit_transform(X_train)
Xt_v = vectorizer.transform(X_test)
print(X_v.shape)
print(Xt_v.shape)

y_train = y_train.astype('int')
y_test = y_test.astype('int')

# ## Evaluation Metrics

mean_fpr = np.linspace(start=0, stop=1, num=100)


def model_evaluation(X_test, y_test, model):
    """Evaluate a fitted classifier on (X_test, y_test).

    Returns (accuracy, f1, precision, recall, auc, tpr) where precision,
    recall and f1 are computed for the positive class (label 1) and tpr is
    the ROC curve interpolated onto the shared `mean_fpr` grid.
    """
    _probabilities = model.predict_proba(X_test)[:, 1]
    _predicted_values = model.predict(X_test)
    _accuracy = accuracy_score(y_test, _predicted_values)
    _precision, _recall, _f1_score, _ = precision_recall_fscore_support(y_test, _predicted_values, labels=[1])
    _fpr, _tpr, _ = roc_curve(y_test, _probabilities)
    _tpr_transformed = np.array([interp(mean_fpr, _fpr, _tpr)])
    _auc = auc(_fpr, _tpr)
    if X_test.shape[0] != 1:
        tn, fp, fn, tp = confusion_matrix(y_test, _predicted_values).ravel()
        print("Confusion matrix: tn={tn}, fp={fp}, fn={fn}, tp={tp}".format(tn=tn, fp=fp, fn=fn, tp=tp))
    else:
        # a single sample cannot produce a full 2x2 confusion matrix
        print(confusion_matrix(y_test, _predicted_values).ravel())
    return _accuracy, _f1_score[0], _precision[0], _recall[0], _auc, _tpr_transformed


def param_tuning(model, param_dict, X_train, y_train, X_test, y_test):
    """Grid-search *model* over *param_dict*, evaluate the best estimator on
    the test split, print a report and return
    (grid_object, accuracy, f1, precision, recall, auc, tpr).
    """
    grid_object = GridSearchCV(estimator=model, param_grid=param_dict, cv=2, iid=False, refit=True)
    grid_fit = grid_object.fit(X_train, y_train)
    best_model = grid_fit.best_estimator_
    # FIX: removed a redundant `model.fit(...).predict(...)` call — GridSearchCV
    # with refit=True already fits the best estimator; the extra fit was unused.
    accuracy, f1score, precision, recall, auc, tpr = model_evaluation(X_test, y_test, best_model)
    print(model.__class__.__name__)
    print("\nOptimized Model\n------")
    print("Best Parameters: {}".format(grid_fit.best_params_))
    print("Accuracy: {:.4f}".format(accuracy))
    print("F1-score: {:.4f}".format(f1score))
    print("Precision: {:.4f}".format(precision))
    print("Recall: {:.4f}".format(recall))
    print("AUC: {:.4f}".format(auc))
    #tn, fp, fn, tp = confusion_matrix(y_test, best_model.predict(X_test)).ravel()
    #print("Confusion matrix: tn={tn}, fp={fp}, fn={fn}, tp={tp}".format(tn=tn,fp=fp,fn=fn,tp=tp))
    return grid_object, accuracy, f1score, precision, recall, auc, tpr


# ## ADA BOOST

pipe = Pipeline([('fs', SelectKBest()), ('ada', AdaBoostClassifier())])
param_grid = {
    'ada__n_estimators': [1, 2, 4, 6, 8, 32, 100, 200, 500],
    'ada__random_state': [42],
    'ada__learning_rate': [1, 0.5, 0.25, 0.05, 0.01],
    'fs__k': [1000],
    'fs__score_func': [chi2]
}
model_ada, ADA_accuracy, ADA_f1_score, ADA_precision, ADA_recall, ADA_auc, ADA_tpr = param_tuning(pipe, param_grid, X_v, y_train, Xt_v, y_test)

# ## SVM

# +
param_grid = {
    'svc__C': [0.4],
    'svc__gamma': [0.001, 0.009, 0.1, 1],
    'svc__kernel': ['linear', 'rbf'],
    'fs__k': [1000],
    'fs__score_func': [chi2]
}
pipe = Pipeline([('fs', SelectKBest()), ('svc', SVC(probability=True))])
model_svc, SVC_accuracy, SVC_f1_score, SVC_precision, SVC_recall, SVC_auc, SVC_tpr = param_tuning(pipe, param_grid, X_v, y_train, Xt_v, y_test)
# -

# ## RandomForest Classifier

# +
param_grid = {
    'rfc__n_estimators': [500],
    'rfc__min_samples_leaf': [4],
    'rfc__min_samples_split': [2],
    'rfc__criterion': ['entropy'],
    'rfc__max_depth': [300, 400],
    'fs__k': [1000],
    'fs__score_func': [chi2]
}
pipe = Pipeline([('fs', SelectKBest()), ('rfc', RandomForestClassifier())])
model_rfc, RFC_accuracy, RFC_f1_score, RFC_precision, RFC_recall, RFC_auc, RFC_tpr = param_tuning(pipe, param_grid, X_v, y_train, Xt_v, y_test)
# -

ADA_metrics = np.array([ADA_accuracy, ADA_f1_score, ADA_precision, ADA_recall])
SVM_metrics = np.array([SVC_accuracy, SVC_f1_score, SVC_precision, SVC_recall])
# FIX: RF_metrics previously used a different ordering (accuracy, precision,
# recall, f1) than ADA/SVM; use the same (accuracy, f1, precision, recall).
RF_metrics = np.array([RFC_accuracy, RFC_f1_score, RFC_precision, RFC_recall])
# FIX: the index labels now match the actual ordering of the metric arrays —
# previously F1 was plotted under the 'precision' label, etc.
index = ['accuracy', 'F1-score', 'precision', 'recall']
df_metrics = pd.DataFrame({'ADA': ADA_metrics, 'SVM': SVM_metrics, 'Random Forest': RF_metrics}, index=index)
df_metrics.plot.bar(rot=0)
plt.legend(loc="lower right")
plt.show()

plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=0.8)
plt.plot(mean_fpr, SVC_tpr[0, :], lw=2, color='blue', label='SVM (AUC = %0.2f)' % (SVC_auc), alpha=0.8)
plt.plot(mean_fpr, RFC_tpr[0, :], lw=2, color='orange', label='Random Forest (AUC = %0.2f)' % (RFC_auc), alpha=0.8)
plt.plot(mean_fpr, ADA_tpr[0, :], lw=2, color='red', label='ADA BOOST (AUC = %0.2f)' % (ADA_auc), alpha=0.8)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curves for multiple classifiers')
plt.legend(loc="lower right")
plt.show()

# ## Testing

test_data = pd.DataFrame()
t1 = df_data_2[df_data_2['class'] == 0].iloc[0:1000, ]
t1 = post_tokenizing_dataset1(t1)
t2 = df_data_3[df_data_3['class'] == 1].iloc[8000:9000, ]
t2 = post_tokenizing_dataset3(t2)
test_data = test_data.append(t1, ignore_index=True)
test_data = test_data.append(t2, ignore_index=True)
test_data = sklearn.utils.shuffle(test_data)
print(test_data['class'].value_counts())


def measure_model(token_df_test, model, vectorizer):
    """Vectorize the held-out posts with the already-fitted *vectorizer* and
    evaluate *model* on them; return (accuracy, f1, precision, recall).
    """
    token_list_test = [val[0] for val in token_df_test.values]
    X = vectorizer.transform(token_list_test)
    y = token_df_test['class']
    y = y.astype('int')
    accuracy, f1, precision, recall, _, _ = model_evaluation(X, y, model)
    return accuracy, f1, precision, recall


# ## SVM

SVC_accuracy_test, SVC_f1_score_test, SVC_precision_test, SVC_recall_test = measure_model(test_data, model_svc, vectorizer)
print("accuracy:{a}, precision:{p}, recall:{r}, f1:{f}".format(a=SVC_accuracy_test, p=SVC_precision_test, r=SVC_recall_test, f=SVC_f1_score_test))

# ## ADA BOOST

ADA_accuracy_test, ADA_f1_score_test, ADA_precision_test, ADA_recall_test = measure_model(test_data, model_ada, vectorizer)
print("accuracy:{a}, precision:{p}, recall:{r}, f1:{f}".format(a=ADA_accuracy_test, p=ADA_precision_test, r=ADA_recall_test, f=ADA_f1_score_test))

# ## RandomForest Classifier

RFC_accuracy_test, RFC_f1_score_test, RFC_precision_test, RFC_recall_test = measure_model(test_data, model_rfc, vectorizer)
print("accuracy:{a}, precision:{p}, recall:{r}, f1:{f}".format(a=RFC_accuracy_test, p=RFC_precision_test, r=RFC_recall_test, f=RFC_f1_score_test))

ADA_metrics_test = np.array([ADA_accuracy_test, ADA_f1_score_test, ADA_precision_test, ADA_recall_test])
SVM_metrics_test = np.array([SVC_accuracy_test, SVC_f1_score_test, SVC_precision_test, SVC_recall_test])
RF_metrics_test = np.array([RFC_accuracy_test, RFC_f1_score_test, RFC_precision_test, RFC_recall_test])
# FIX: labels match the (accuracy, f1, precision, recall) array ordering.
index = ['accuracy', 'F1-score', 'precision', 'recall']
df_metrics = pd.DataFrame({'ADA': ADA_metrics_test, 'SVM': SVM_metrics_test, 'Random Forest': RF_metrics_test}, index=index)
df_metrics.plot.bar(rot=0)
plt.legend(loc="lower right")
plt.show()
CountVectorizer with SVM, ADA Boost and Random Forest/CountVectorizer- No Lemma-No stem- Removing SW.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Deep Markov Model # # ## Introduction # # We're going to build a deep probabilistic model for sequential data: the deep markov model. The particular dataset we want to model is composed of snippets of polyphonic music. Each time slice in a sequence spans a quarter note and is represented by an 88-dimensional binary vector that encodes the notes at that time step. # # Since music is (obviously) temporally coherent, we need a model that can represent complex time dependencies in the observed data. It would not, for example, be appropriate to consider a model in which the notes at a particular time step are independent of the notes at previous time steps. One way to do this is to build a latent variable model in which the variability and temporal structure of the observations is controlled by the dynamics of the latent variables. # # One particular realization of this idea is a markov model, in which we have a chain of latent variables, with each latent variable in the chain conditioned on the previous latent variable. This is a powerful approach, but if we want to represent complex data with complex (and in this case unknown) dynamics, we would like our model to be sufficiently flexible to accommodate dynamics that are potentially highly non-linear. Thus a deep markov model: we allow for the transition probabilities governing the dynamics of the latent variables as well as the emission probabilities that govern how the observations are generated by the latent dynamics to be parameterized by (non-linear) neural networks.
# # The specific model we're going to implement is based on the following reference: # # [1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp; # <NAME>, <NAME>, <NAME> # # Please note that while we do not assume that the reader of this tutorial has read the reference, it's definitely a good place to look for a more comprehensive discussion of the deep markov model in the context of other time series models. # # We've described the model, but how do we go about training it? The inference strategy we're going to use is variational inference, which requires specifying a parameterized family of distributions that can be used to approximate the posterior distribution over the latent random variables. Given the non-linearities and complex time-dependencies inherent in our model and data, we expect the exact posterior to be highly non-trivial. So we're going to need a flexible family of variational distributions if we hope to learn a good model. Happily, together Pytorch and Pyro provide all the necessary ingredients. As we will see, assembling them will be straightforward. Let's get to work. # ## The Model # # A convenient way to describe the high-level structure of the model is with a graphical model. # + raw_mimetype="text/html" active="" # <center><figure><img src="_static/img/model.png" style="width: 500px;"><figcaption> <font size="+1"><b>Figure 1</b>: The model rolled out for T=3 time steps.</font></figcaption></figure></center> # - # Here, we've rolled out the model assuming that the sequence of observations is of length three: $\{{\bf x}_1, {\bf x}_2, {\bf x}_3\}$. Mirroring the sequence of observations we also have a sequence of latent random variables: $\{{\bf z}_1, {\bf z}_2, {\bf z}_3\}$. The figure encodes the structure of the model. 
The corresponding joint distribution is # # $$p({\bf x}_{123} , {\bf z}_{123})=p({\bf x}_1|{\bf z}_1)p({\bf x}_2|{\bf z}_2)p({\bf x}_3|{\bf z}_3)p({\bf z}_1)p({\bf z}_2|{\bf z}_1)p({\bf z}_3|{\bf z}_2)$$ # # Conditioned on ${\bf z}_t$, each observation ${\bf x}_t$ is independent of the other observations. This can be read off from the fact that each ${\bf x}_t$ only depends on the corresponding latent ${\bf z}_t$, as indicated by the downward pointing arrows. We can also read off the markov property of the model: each latent ${\bf z}_t$, when conditioned on the previous latent ${\bf z}_{t-1}$, is independent of all previous latents $\{ {\bf z}_{t-2}, {\bf z}_{t-3}, ...\}$. This effectively says that everything one needs to know about the state of the system at time $t$ is encapsulated by the latent ${\bf z}_{t}$. # # We will assume that the observation likelihoods, i.e. the probability distributions $p({{\bf x}_t}|{{\bf z}_t})$ that control the observations, are given by the bernoulli distribution. This is an appropriate choice since our observations are all 0 or 1. For the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ that control the latent dynamics, we choose (conditional) gaussian distributions with diagonal covariances. This is reasonable since we assume that the latent space is continuous. # # # # The solid black squares represent non-linear functions parameterized by neural networks. This is what makes this a _deep_ markov model. Note that the black squares appear in two different places: in between pairs of latents and in between latents and observations. The non-linear function that connects the latent variables ('Trans' in Fig. 1) controls the dynamics of the latent variables. Since we allow the conditional probability distribution of ${\bf z}_{t}$ to depend on ${\bf z}_{t-1}$ in a complex way, we will be able to capture complex dynamics in our model. 
# Similarly, the non-linear function that connects the latent variables to the observations ('Emit' in Fig. 1) controls how the observations depend on the latent dynamics.
#
# Some additional notes:
# - we can freely choose the dimension of the latent space to suit the problem at hand: small latent spaces for simple problems and larger latent spaces for problems with complex dynamics
# - note the parameter ${\bf z}_0$ in Fig. 1. as will become more apparent from the code, this is just a convenient way for us to parameterize the probability distribution $p({\bf z}_1)$ for the first time step, where there are no previous latents to condition on.
#
# ### The Gated Transition and the Emitter
#
# Without further ado, let's start writing some code. We first define the two Pytorch Modules that correspond to the black squares in Fig. 1. First the emission function:

class Emitter(nn.Module):
    """
    Emission network: maps a latent state z_t to the probability vector
    that parameterizes the bernoulli observation likelihood p(x_t | z_t)
    """
    def __init__(self, input_dim, z_dim, emission_dim):
        super(Emitter, self).__init__()
        # the three linear maps used in the forward pass:
        # latent -> hidden -> hidden -> observation probabilities
        self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)
        self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)
        self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)
        # the two non-linearities shared by the forward pass
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, z_t):
        """
        Given the latent z at time step t, return the vector of bernoulli
        probabilities `ps` that parameterizes p(x_t | z_t)
        """
        hidden = self.relu(self.lin_z_to_hidden(z_t))
        hidden = self.relu(self.lin_hidden_to_hidden(hidden))
        # sigmoid squashes each entry into [0, 1] so `ps` is a valid
        # vector of probabilities
        ps = self.sigmoid(self.lin_hidden_to_input(hidden))
        return ps

# In the constructor we define the linear transformations that will be used in our emission function. Note that `emission_dim` is the number of hidden units in the neural network. We also define the non-linearities that we will be using.
# The forward call defines the computational flow of the function. We take in the latent ${\bf z}_{t}$ as input and do a sequence of transformations until we obtain a vector of length 88 that defines the emission probabilities of our bernoulli likelihood. Because of the sigmoid, each element of `ps` will be between 0 and 1 and will define a valid probability. Taken together the elements of `ps` encode which notes we expect to observe at time $t$ given the state of the system (as encoded in ${\bf z}_{t}$).

# Now we define the gated transition function:

class GatedTransition(nn.Module):
    """
    Parameterizes the gaussian latent transition probability p(z_t | z_{t-1})
    See section 5 in the reference for comparison.
    """
    def __init__(self, z_dim, transition_dim):
        super(GatedTransition, self).__init__()
        # initialize the six linear transformations used in the neural network
        self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_sig = nn.Linear(z_dim, z_dim)
        self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
        # modify the default initialization of lin_z_to_mu
        # so that it starts out as the identity function
        self.lin_z_to_mu.weight.data = torch.eye(z_dim)
        self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
        # initialize the three non-linearities used in the neural network
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()

    def forward(self, z_t_1):
        """
        Given the latent z_{t-1} corresponding to the time step t-1
        we return the mean and sigma vectors that parameterize the
        (diagonal) gaussian distribution p(z_t | z_{t-1})
        """
        # compute the gating function and one minus the gating function
        gate_intermediate = self.relu(self.lin_gate_z_to_hidden(z_t_1))
        gate = self.sigmoid(self.lin_gate_hidden_to_z(gate_intermediate))
        # NOTE(review): `ng_ones` is a helper from the pyro 0.x era (a ones
        # tensor that does not require gradients) — confirm it is in scope.
        one_minus_gate = ng_ones(gate.size()).type_as(gate) - gate
        # compute the 'proposed mean'
        proposed_mean_intermediate = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))
        proposed_mean = self.lin_proposed_mean_hidden_to_z(proposed_mean_intermediate)
        # assemble the actual mean used to sample z_t, which mixes a linear transformation
        # of z_{t-1} with the proposed mean modulated by the gating function
        mu = one_minus_gate * self.lin_z_to_mu(z_t_1) + gate * proposed_mean
        # compute the sigma used to sample z_t, using the proposed mean from above as input
        # the softplus ensures that sigma is positive
        sigma = self.softplus(self.lin_sig(self.relu(proposed_mean)))
        # return mu, sigma which can be fed into Normal
        return mu, sigma

# This mirrors the structure of `Emitter` above, with the difference that the computational flow is a bit more complicated. This is for two reasons. First, the output of `GatedTransition` needs to define a valid (diagonal) gaussian distribution. So we need to output two parameters: the mean `mu`, and the (square root) covariance `sigma`. These both need to have the same dimension as the latent space. Second, we don't want to _force_ the dynamics to be non-linear. Thus our mean `mu` is a sum of two terms, only one of which depends non-linearily on the input `z_t_1`. This way we can support both linear and non-linear dynamics (or indeed have the dynamics of part of the latent space be linear, while the remainder of the dynamics is non-linear).

# ### Model - a Pyro Stochastic Function
#
# So far everything we've done is pure Pytorch. To finish translating our model into code we need to bring Pyro into the picture. Basically we need to implement the stochastic nodes (i.e. the circles) in Fig. 1. To do this we introduce a callable `model()` that contains the Pyro primitives `pyro.sample` and `pyro.observe`. The `sample` statements will be used to specify the joint distribution over the latents ${\bf z}_{1:T}$.
The `observe` statements will specify how the observations ${\bf x}_{1:T}$ depend on the latents. Before we look at the complete code for `model()`, let's look at a stripped down version that contains the main logic: def model(...): z_prev = self.z_0 # sample the latents z and observed x's one time step at a time for t in range(1, T_max + 1): # the next two lines of code sample z_t ~ p(z_t | z_{t-1}) # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1}) z_mu, z_sigma = self.trans(z_prev) # then sample z_t according to dist.Normal(z_mu, z_sigma) z_t = pyro.sample("z_%d" % t, dist.Normal, z_mu, z_sigma) # compute the probabilities that parameterize the bernoulli likelihood emission_probs_t = self.emitter(z_t) # the next statement instructs pyro to observe x_t according to the # bernoulli distribution p(x_t|z_t) pyro.observe("obs_x_%d" % t, dist.bernoulli, mini_batch[:, t - 1, :], emission_probs_t) # the latent sampled at this time step will be conditioned upon # in the next time step so keep track of it z_prev = z_t # The first thing we need to do is sample ${\bf z}_1$. Once we've sampled ${\bf z}_1$, we can sample ${\bf z}_2 \sim p({\bf z}_2|{\bf z}_1)$ and so on. This is the logic implemented in the `for` loop. The parameters `z_mu` and `z_sigma` that define the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ are computed using `self.trans`, which is just an instance of the `GatedTransition` module defined above. For the first time step at $t=1$ we condition on `self.z_0`, which is a (trainable) `Parameter`, while for subsequent time steps we condition on the previously drawn latent. Note that each random variable `z_t` is assigned a unique name by the user. # # Once we've sampled ${\bf z}_t$ at a given time step, we need to observe the datapoint ${\bf x}_t$. So we pass `z_t` through `self.emitter`, an instance of the `Emitter` module defined above to obtain `emission_probs_t`. 
Together with the argument `dist.bernoulli` in the `observe` statement, these probabilities fully specify the observation likelihood. Finally, we also specify the slice of observed data ${\bf x}_t$: `mini_batch[:, t - 1, :]`. # # This fully specifies our model and encapsulates it in a callable that can be passed to Pyro. Before we move on let's look at the full version of `model()` and go through some of the details we glossed over in our first pass. def model(self, mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor=1.0): # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all pytorch (sub)modules with pyro pyro.module("dmm", self) # set z_prev = z_0 to setup the recursive conditioning z_prev = self.z_0 # sample the latents z and observed x's one time step at a time for t in range(1, T_max + 1): # the next three lines of code sample z_t ~ p(z_t | z_{t-1}) # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1}) z_mu, z_sigma = self.trans(z_prev) # then sample z_t according to dist.Normal(z_mu, z_sigma) z_t = pyro.sample("z_%d" % t, dist.Normal, z_mu, z_sigma, log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t]) # compute the probabilities that parameterize the bernoulli likelihood emission_probs_t = self.emitter(z_t) # the next statement instructs pyro to observe x_t according to the # bernoulli distribution p(x_t|z_t) pyro.observe("obs_x_%d" % t, dist.bernoulli, mini_batch[:, t - 1, :], emission_probs_t, log_pdf_mask=mini_batch_mask[:, t - 1:t]) # the latent sampled at this time step will be conditioned upon # in the next time step so keep track of it z_prev = z_t # The first thing to note is that `model()` takes a number of arguments. For now let's just take a look at `mini_batch` and `mini_batch_mask`. 
`mini_batch` is a three dimensional tensor, with the first dimension being the batch dimension, the second dimension being the temporal dimension, and the final dimension being the features (88-dimensional in our case). To speed up the code, whenever we run `model` we're going to process an entire mini-batch of sequences (i.e. we're going to take advantage of vectorization). # # This is sensible because our model is implicitly defined over a single observed sequence. The probability of a set of sequences is just given by the products of the individual sequence probabilities. In other words, given the parameters of the model the sequences are conditionally independent. # # This vectorization introduces some complications because sequences can be of different lengths. This is where `mini_batch_mask` comes in. `mini_batch_mask` is a two dimensional 0/1 mask of dimensions `mini_batch_size` x `T_max`, where `T_max` is the maximum length of any sequence in the mini-batch. This encodes which parts of `mini_batch` are valid observations. # # So the first thing we do is grab `T_max`: we have to unroll our model for at least this many time steps. Note that this will result in a lot of 'wasted' computation, since some of the sequences will be shorter than `T_max`, but this is a small price to pay for the big speed-ups that come with vectorization. We just need to make sure that none of the 'wasted' computations 'pollute' our model computation. We accomplish this by passing the mask appropriate to time step $t$ as an argument `log_pdf_mask` to both the `sample` and `observe` statements. # # Finally, the line `pyro.module("dmm", self)` is equivalent to a bunch of `pyro.param` statements for each parameter in the model. This lets Pyro know which parameters are part of the model. Just like for `sample` and `observe` statements, we give the module a unique name. This name will be incorporated into the name of the `Parameters` in the model. 
We leave a discussion of the KL annealing factor for later. # ## Inference # # At this point we've fully specified our model. The next step is to set ourselves up for inference. As mentioned in the introduction, our inference strategy is going to be variational inference (see [SVI Part I](svi_part_i.html) for an introduction). So our next task is to build a family of variational distributions appropriate to doing inference in a deep markov model. However, at this point it's worth emphasizing that nothing about the way we've implemented `model()` ties us to variational inference. In principle we could use _any_ inference strategy available in Pyro. For example, in this particular context one could imagine using some variant of Sequential Monte Carlo (although this is not currently supported in Pyro). # # ### Guide # # The purpose of the guide (i.e. the variational distribution) is to provide a (parameterized) approximation to the exact posterior $p({\bf z}_{1:T}|{\bf x}_{1:T})$. Actually, there's an implicit assumption here which we should make explicit, so let's take a step back. # Suppose our dataset $\mathcal{D}$ consists of $N$ sequences # $\{ {\bf x}_{1:T_1}^1, {\bf x}_{1:T_2}^2, ..., {\bf x}_{1:T_N}^N \}$. Then the posterior we're actually interested in is given by # $p({\bf z}_{1:T_1}^1, {\bf z}_{1:T_2}^2, ..., {\bf z}_{1:T_N}^N | \mathcal{D})$, i.e. we want to infer the latents for _all_ $N$ sequences. Even for small $N$ this is a very high-dimensional distribution that will require a very large number of parameters to specify. In particular if we were to directly parameterize the posterior in this form, the number of parameters required would grow (at least) linearly with $N$. One way to avoid this nasty growth with the size of the dataset is *amortization* (see the analogous discussion in [SVI Part II](http://pyro.ai/examples/svi_part_ii.html)). # # #### Aside: Amortization # # This works as follows. 
# Instead of introducing variational parameters for each sequence in our dataset, we're going to learn a single parametric function $f({\bf x}_{1:T})$ and work with a variational distribution that has the form $\prod_{n=1}^N q({\bf z}_{1:T_n}^n | f({\bf x}_{1:T_n}^n))$. The function $f(\cdot)$&mdash;which basically maps a given observed sequence to a set of variational parameters tailored to that sequence&mdash;will need to be sufficiently rich to capture the posterior accurately, but now we can handle large datasets without having to introduce an obscene number of variational parameters.
#
# So our task is to construct the function $f(\cdot)$. Since in our case we need to support variable-length sequences, it's only natural that $f(\cdot)$ have a RNN in the loop. Before we look at the various component parts that make up our $f(\cdot)$ in detail, let's look at a computational graph that encodes the basic structure: <p>

# + raw_mimetype="text/html" active=""
# <center><figure><img src="_static/img/guide.png" style="width: 400px;"><figcaption> <font size="+1"><b>Figure 2</b>: The guide rolled out for T=3 time steps. </font></figcaption></figure></center>
# -

# At the bottom of the figure we have our sequence of three observations. These observations will be consumed by a RNN that reads the observations from right to left and outputs three hidden states $\{ {\bf h}_1, {\bf h}_2,{\bf h}_3\}$. Note that this computation is done _before_ we sample any latent variables. Next, each of the hidden states will be fed into a `Combiner` module whose job is to output the mean and covariance of the conditional distribution $q({\bf z}_t | {\bf z}_{t-1}, {\bf x}_{t:T})$, which we take to be given by a diagonal gaussian distribution. (Just like in the model, the conditional structure of ${\bf z}_{1:T}$ in the guide is such that we sample ${\bf z}_t$ forward in time.)
# In addition to the RNN hidden state, the `Combiner` also takes the latent random variable from the previous time step as input, except for $t=1$, where it instead takes the trainable (variational) parameter ${\bf z}_0^{\rm{q}}$.
#
# #### Aside: Guide Structure
# Why do we set up the RNN to consume the observations from right to left? Why not left to right? With this choice our conditional distribution $q({\bf z}_t |...)$ depends on two things:
#
# - the latent ${\bf z}_{t-1}$ from the previous time step; and
# - the observations ${\bf x}_{t:T}$, i.e. the current observation together with all future observations
#
# We are free to make other choices; all that is required is that the guide is a properly normalized distribution that plays nice with autograd. This particular choice is motivated by the dependency structure of the true posterior: see reference [1] for a detailed discussion. In brief, while we could, for example, condition on the entire sequence of observations, because of the markov structure of the model everything that we need to know about the previous observations ${\bf x}_{1:t-1}$ is encapsulated by ${\bf z}_{t-1}$. We could condition on more things, but there's no need; and doing so will probably tend to dilute the learning signal. So running the RNN from right to left is the most natural choice for this particular model.
#
# So much for the high-level structure of the guide. Let's look at the component parts in detail. First, the `Combiner` module:

class Combiner(nn.Module):
    """
    Parameterizes q(z_t | z_{t-1}, x_{t:T}), which is the basic building block
    of the guide (i.e. the variational distribution). The dependence on x_{t:T} is
    through the hidden state of the RNN (see the pytorch module `rnn` below)
    """
    def __init__(self, z_dim, rnn_dim):
        super(Combiner, self).__init__()
        # initialize the three linear transformations used in the neural network
        self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)
        self.lin_hidden_to_mu = nn.Linear(rnn_dim, z_dim)
        self.lin_hidden_to_sigma = nn.Linear(rnn_dim, z_dim)
        # initialize the two non-linearities used in the neural network
        self.tanh = nn.Tanh()
        self.softplus = nn.Softplus()

    def forward(self, z_t_1, h_rnn):
        """
        Given the latent z at a particular time step t-1 as well as the hidden
        state of the RNN h(x_{t:T}) we return the mean and sigma vectors that
        parameterize the (diagonal) gaussian distribution q(z_t | z_{t-1}, x_{t:T})
        """
        # combine the rnn hidden state with a transformed version of z_t_1
        h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)
        # use the combined hidden state to compute the mean used to sample z_t
        mu = self.lin_hidden_to_mu(h_combined)
        # use the combined hidden state to compute the sigma used to sample z_t
        sigma = self.softplus(self.lin_hidden_to_sigma(h_combined))
        # return mu, sigma which can be fed into Normal
        return mu, sigma

# This module has the same general structure as `Emitter` and `GatedTransition` in the model. The only thing of note is that because the `Combiner` needs to consume two inputs at each time step, it transforms the inputs into a single combined hidden state `h_combined` before it computes the outputs.
#
# Apart from the RNN, we now have all the ingredients we need to construct our guide distribution.
# Happily, Pytorch has great built-in RNN modules, so we don't have much work to do here. We'll see where we instantiate the RNN later. Let's instead jump right into the definition of the stochastic function `guide()`.
def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor=1.0): # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all pytorch (sub)modules with pyro pyro.module("dmm", self) # if on gpu we need the fully broadcast view of the rnn initial state # to be in contiguous gpu memory h_0_contig = self.h_0 if not self.use_cuda \ else self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous() # push the observed x's through the rnn; # rnn_output contains the hidden state at each time step rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig) # reverse the time-ordering in the hidden state and un-pack it rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths) # set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...) z_prev = self.z_q_0 # sample the latents z one time step at a time for t in range(1, T_max + 1): # get the parameters for the distribution q(z_t | z_{t-1}, x_{t:T}) z_mu, z_sigma = self.combiner(z_prev, rnn_output[:, t - 1, :]) # sample z_t from the distribution q(z_t|...) z_t = pyro.sample("z_%d" % t, dist.Normal, z_mu, z_sigma, log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t]) # the latent sampled at this time step will be conditioned upon in the next time step # so keep track of it z_prev = z_t # The high-level structure of `guide()` is very similar to `model()`. First note that the model and guide take the same arguments: this is a general requirement for model/guide pairs in Pyro. As in the model, there's a call to `pyro.module` that registers all the parameters with Pyro. Also, the `for` loop has the same structure as the one in `model()`, with the difference that the guide only needs to sample latents (there are no `observe` statements). Finally, note that the names of the latent variables in the guide exactly match those in the model. 
# This is how Pyro knows to correctly align random variables.
#
# The RNN logic should be familiar to Pytorch users, but let's go through it quickly. First we prepare the initial state of the RNN, `h_0`. Then we invoke the RNN via its forward call; the resulting tensor `rnn_output` contains the hidden states for the entire mini-batch. Note that because we want the RNN to consume the observations from right to left, the input to the RNN is `mini_batch_reversed`, which is a copy of `mini_batch` with all the sequences running in _reverse_ temporal order. Furthermore, `mini_batch_reversed` has been wrapped in a Pytorch `rnn.pack_padded_sequence` so that the RNN can deal with variable-length sequences. Since we do our sampling in latent space in normal temporal order, we use the helper function `pad_and_reverse` to reverse the hidden state sequences in `rnn_output`, so that we can feed the `Combiner` RNN hidden states that are correctly aligned and ordered. This helper function also unpacks the `rnn_output` so that it is no longer in the form of a Pytorch `rnn.pack_padded_sequence`.

# ## Packaging the Model and Guide as a Pytorch Module
#
# At this juncture, we're ready to proceed to inference. But before we do so let's quickly go over how we packaged the model and guide as a single Pytorch Module. This is generally good practice, especially for larger models.
class DMM(nn.Module):
    """
    This pytorch Module encapsulates the model as well as the
    variational distribution (the guide) for the Deep Markov Model
    """
    def __init__(self, input_dim=88, z_dim=100, emission_dim=100,
                 transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0,
                 num_iafs=0, iaf_dim=50, use_cuda=False):
        super(DMM, self).__init__()
        # instantiate pytorch modules used in the model and guide below
        self.emitter = Emitter(input_dim, z_dim, emission_dim)
        self.trans = GatedTransition(z_dim, transition_dim)
        self.combiner = Combiner(z_dim, rnn_dim)
        self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim,
                          nonlinearity='relu', batch_first=True,
                          bidirectional=False, num_layers=1,
                          dropout=rnn_dropout_rate)

        # define (trainable) parameters z_0 and z_q_0 that help define the probability
        # distributions p(z_1) and q(z_1)
        # (since for t = 1 there are no previous latents to condition on)
        self.z_0 = nn.Parameter(torch.zeros(z_dim))
        self.z_q_0 = nn.Parameter(torch.zeros(z_dim))
        # define a (trainable) parameter for the initial hidden state of the rnn
        self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))

        self.use_cuda = use_cuda
        # if on gpu cuda-ize all pytorch (sub)modules
        if use_cuda:
            self.cuda()

    # the model p(x_{1:T} | z_{1:T}) p(z_{1:T})
    def model(...):

        # ... as above ...

    # the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution)
    def guide(...):

        # ... as above ...

# Since we've already gone over `model` and `guide`, our focus here is on the constructor. First we instantiate the four Pytorch modules that we use in our model and guide. On the model-side: `Emitter` and `GatedTransition`. On the guide-side: `Combiner` and the RNN.
#
# Next we define Pytorch `Parameter`s for the initial state of the RNN as well as `z_0` and `z_q_0`, which are fed into `self.trans` and `self.combiner`, respectively, in lieu of the non-existent random variable $\bf z_0$.
# # The important point to make here is that all of these `Module`s and `Parameter`s are attributes of `DMM` (which itself inherits from `nn.Module`). This has the consequence they are all automatically registered as belonging to the module. So, for example, when we call `parameters()` on an instance of `DMM`, Pytorch will know to return all the relevant parameters. It also means that when we invoke `pyro.module("dmm", self)` in `model()` and `guide()`, all the parameters of both the model and guide will be registered with Pyro. Finally, it means that if we're running on a GPU, the call to `cuda()` will move all the parameters into GPU memory. # # ## Stochastic Variational Inference # # With our model and guide at hand, we're finally ready to do inference. Before we look at the full logic that is involved in a complete experimental script, let's first see how to take a single gradient step. First we instantiate an instance of `DMM` and setup an optimizer. # + # instantiate the dmm dmm = DMM(input_dim, z_dim, emission_dim, transition_dim, rnn_dim, args.rnn_dropout_rate, args.num_iafs, args.iaf_dim, args.cuda) # setup optimizer adam_params = {"lr": args.learning_rate, "betas": (args.beta1, args.beta2), "clip_norm": args.clip_norm, "lrd": args.lr_decay, "weight_decay": args.weight_decay} optimizer = ClippedAdam(adam_params) # - # Here we're using an implementation of the Adam optimizer that includes gradient clipping. This mitigates some of the problems that can occur when training recurrent neural networks (e.g. vanishing/exploding gradients). Next we setup the inference algorithm. # setup inference algorithm svi = SVI(dmm.model, dmm.guide, optimizer, "ELBO", trace_graph=False) # The inference algorithm `SVI` uses a stochastic gradient estimator to take gradient steps on an objective function, which in this case is given by the ELBO (the evidence lower bound). As the name indicates, the ELBO is a lower bound to the log evidence: $\log p(\mathcal{D})$. 
As we take gradient steps that maximize the ELBO, we move our guide $q(\cdot)$ closer to the exact posterior. # # The argument `trace_graph=False` indicates that we're using a version of the gradient estimator that doesn't need access to the dependency structure of the model and guide. Since all the latent variables in our model are reparameterizable, this is the appropriate gradient estimator for our use case. (It's also the default option.) # # Assuming we've prepared the various arguments of `dmm.model` and `dmm.guide`, taking a gradient step is accomplished by calling svi.step(mini_batch, ...) # That's all there is to it! # # Well, not quite. This will be the main step in our inference algorithm, but we still need to implement a complete training loop with preparation of mini-batches, evaluation, and so on. This sort of logic will be familiar to any deep learner but let's see how it looks in PyTorch/Pyro. # ## The Black Magic of Optimization # # Actually, before we get to the guts of training, let's take a moment and think a bit about the optimization problem we've setup. We've traded Bayesian inference in a non-linear model with a high-dimensional latent space&mdash;a hard problem&mdash;for a particular optimization problem. Let's not kid ourselves, this optimization problem is pretty hard too. Why? Let's go through some of the reasons: # - the space of parameters we're optimizing over is very high-dimensional (it includes all the weights in all the neural networks we've defined). # - our objective function (the ELBO) cannot be computed analytically. so our parameter updates will be following noisy Monte Carlo gradient estimates # - data-subsampling serves as an additional source of stochasticity: even if we wanted to, we couldn't in general take gradient steps on the ELBO defined over the whole dataset (actually in our particular case the dataset isn't so large, but let's ignore that). 
# - given all the neural networks and non-linearities we have in the loop, our (stochastic) loss surface is highly non-trivial # # The upshot is that if we're going to find reasonable (local) optima of the ELBO, we better take some care in deciding how to do optimization. This isn't the time or place to discuss all the different strategies that one might adopt, but it's important to emphasize how decisive a good or bad choice in learning hyperparameters (the learning rate, the mini-batch size, etc.) can be. # # Before we move on, let's discuss one particular optimization strategy that we're making use of in greater detail: KL annealing. In our case the ELBO is the sum of two terms: an expected log likelihood term (which measures model fit) and a sum of KL divergence terms (which serve to regularize the approximate posterior): # # $\rm{ELBO} = \mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$ # # This latter term can be a quite strong regularizer, and in early stages of training it has a tendency to favor regions of the loss surface that contain lots of bad local optima. One strategy to avoid these bad local optima, which was also adopted in reference [1], is to anneal the KL divergence terms by multiplying them by a scalar `annealing_factor` that ranges between zero and one: # # $\mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \rm{annealing\_factor} \times \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$ # # The idea is that during the course of training the `annealing_factor` rises slowly from its initial value at/near zero to its final value at 1.0. The annealing schedule is arbitrary; below we will use a simple linear schedule. 
# # Finally, we should mention that the main difference between the DMM implementation described here and the one used in reference [1] is that they take advantage of the analytic formula for the KL divergence between two gaussian distributions (whereas we rely on Monte Carlo estimates). This leads to lower variance gradient estimates of the ELBO, which makes training a bit easier. We can still train the model without making this analytic substitution, but training probably takes somewhat longer because of the higher variance. Support for analytic KL divergences in Pyro is something we plan to add in the near future. # ## Data Loading, Training, and Evaluation # # First we load the data. There are 229 sequences in the training dataset, each with an average length of ~60 time steps. jsb_file_loc = "./data/jsb_processed.pkl" data = pickle.load(open(jsb_file_loc, "rb")) training_seq_lengths = data['train']['sequence_lengths'] training_data_sequences = data['train']['sequences'] test_seq_lengths = data['test']['sequence_lengths'] test_data_sequences = data['test']['sequences'] val_seq_lengths = data['valid']['sequence_lengths'] val_data_sequences = data['valid']['sequences'] N_train_data = len(training_seq_lengths) N_train_time_slices = np.sum(training_seq_lengths) N_mini_batches = int(N_train_data / args.mini_batch_size + int(N_train_data % args.mini_batch_size > 0)) # For this dataset we will typically use a `mini_batch_size` of 20, so that there will be 12 mini-batches per epoch. 
# Next we define the function `process_minibatch` which prepares a mini-batch for training and takes a gradient step:

def process_minibatch(epoch, which_mini_batch, shuffled_indices):
    if args.annealing_epochs > 0 and epoch < args.annealing_epochs:
        # compute the KL annealing factor appropriate for the current mini-batch in the current epoch
        min_af = args.minimum_annealing_factor
        annealing_factor = min_af + (1.0 - min_af) * \
            (float(which_mini_batch + epoch * N_mini_batches + 1) /
             float(args.annealing_epochs * N_mini_batches))
    else:
        # by default the KL annealing factor is unity
        annealing_factor = 1.0

    # compute which sequences in the training set we should grab
    mini_batch_start = (which_mini_batch * args.mini_batch_size)
    mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data])
    mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end]
    # grab the fully prepped mini-batch using the helper function in the data loader
    mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \
        = poly.get_mini_batch(mini_batch_indices, training_data_sequences,
                              training_seq_lengths, cuda=args.cuda)
    # do an actual gradient step
    loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask,
                    mini_batch_seq_lengths, annealing_factor)
    # keep track of the training loss
    return loss

# We first compute the KL annealing factor appropriate to the mini-batch (according to a linear schedule as described earlier). We then compute the mini-batch indices, which we pass to the helper function `get_mini_batch`.
This helper function takes care of a number of different things: # - it sorts each mini-batch by sequence length # - it calls another helper function to get a copy of the mini-batch in reversed temporal order # - it packs each reversed mini-batch in a `rnn.pack_padded_sequence`, which is then ready to be ingested by the RNN # - it cuda-izes all tensors if we're on a GPU # - it calls another helper function to get an appropriate 0/1 mask for the mini-batch # # We then pipe all the return values of `get_mini_batch()` into `elbo.step(...)`. Recall that these arguments will be further piped to `model(...)` and `guide(...)` during construction of the gradient estimator in `elbo`. Finally, we return a float which is a noisy estimate of the loss for that mini-batch. # # We now have all the ingredients required for the main bit of our training loop: times = [time.time()] for epoch in range(args.num_epochs): # accumulator for our estimate of the negative log likelihood # (or rather -elbo) for this epoch epoch_nll = 0.0 # prepare mini-batch subsampling indices for this epoch shuffled_indices = np.arange(N_train_data) np.random.shuffle(shuffled_indices) # process each mini-batch; this is where we take gradient steps for which_mini_batch in range(N_mini_batches): epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices) # report training diagnostics times.append(time.time()) epoch_time = times[-1] - times[-2] log("[training epoch %04d] %.4f \t\t\t\t(dt = %.3f sec)" % (epoch, epoch_nll / N_train_time_slices, epoch_time)) # At the beginning of each epoch we shuffle the indices pointing to the training data. We then process each mini-batch until we've gone through the entire training set, accumulating the training loss as we go. Finally we report some diagnostic info. Note that we normalize the loss by the total number of time slices in the training set (this allows us to compare to reference [1]). 
# ## Evaluation # This training loop is still missing any kind of evaluation diagnostics. Let's fix that. First we need to prepare the validation and test data for evaluation. Since the validation and test datasets are small enough that we can easily fit them into memory, we're going to process each dataset batchwise (i.e. we will not be breaking up the dataset into mini-batches). [_Aside: at this point the reader may ask why we don't do the same thing for the training set. The reason is that additional stochasticity due to data-subsampling is often advantageous during optimization: in particular it can help us avoid local optima._] And, in fact, in order to get a lessy noisy estimate of the ELBO, we're going to compute a multi-sample estimate. The simplest way to do this would be as follows: val_loss = svi.evaluate_loss(val_batch, ..., num_particles=5) # This, however, would involve an explicit `for` loop with five iterations. For our particular model, we can do better and vectorize the whole computation. The only way to do this currently in Pyro is to explicitly replicate the data `n_eval_samples` many times. This is the strategy we follow: # + # package repeated copies of val/test data for faster evaluation # (i.e. set us up for vectorization) def rep(x): return np.repeat(x, n_eval_samples, axis=0) # get the validation/test data ready for the dmm: pack into sequences, etc. 
val_seq_lengths = rep(val_seq_lengths) test_seq_lengths = rep(test_seq_lengths) val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch( np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences), val_seq_lengths, volatile=True, cuda=args.cuda) test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = poly.get_mini_batch( np.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences), test_seq_lengths, volatile=True, cuda=args.cuda) # - # Note that we make use of the same helper function `get_mini_batch` as before, except this time we select the entire datasets. Also, we mark the data as `volatile`, which lets Pytorch know that we won't be computing any gradients; this results in further speed-ups. With the test and validation data now fully prepped, we define the helper function that does the evaluation: def do_evaluation(): # put the RNN into evaluation mode (i.e. turn off drop-out if applicable) dmm.rnn.eval() # compute the validation and test loss val_nll = svi.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths) / np.sum(val_seq_lengths) test_nll = svi.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths) / np.sum(test_seq_lengths) # put the RNN back into training mode (i.e. turn on drop-out if applicable) dmm.rnn.train() return val_nll, test_nll # We simply call the `evaluate_loss` method of `elbo`, which takes the same arguments as `step()`, namely the arguments that are passed to the model and guide. Note that we have to put the RNN into and out of evaluation mode to account for dropout. We can now stick `do_evaluation()` into the training loop; see `dmm.py` for details. # ## Results # # Let's make sure that our implementation gives reasonable results. We can use the numbers reported in reference [1] as a sanity check. 
For the same dataset and a similar model/guide setup (dimension of the latent space, number of hidden units in the RNN, etc.) they report a normalized negative log likelihood (NLL) of `6.93` on the testset (lower is better$)^{\S}$. This is to be compared to our result of `6.87`. These numbers are very much in the same ball park, which is reassuring. It seems that, at least for this dataset, not using analytic expressions for the KL divergences doesn't degrade the quality of the learned model (although, as discussed above, the training probably takes somewhat longer). # + raw_mimetype="text/html" active="" # <figure><img src="_static/img/test_nll.png" style="width: 400px;"><center><figcaption> <font size="-1"><b>Figure 3</b>: Progress on the test set NLL as training progresses for a sample training run. </font></figcaption></figure></center> # - # In the figure we show how the test NLL progresses during training for a single sample run (one with a rather conservative learning rate). Most of the progress is during the first 3000 epochs or so, with some marginal gains if we let training go on for longer. On a GeForce GTX 1080, 5000 epochs takes about 20 hours. # # # | `num_iafs` | test NLL | # |---|---| # | `0` | `6.87` | # | `1` | `6.82` | # | `2` | `6.80` | # # Finally, we also report results for guides with normalizing flows in the mix (details to be found in the next section). # # ${ \S\;}$ Actually, they seem to report two numbers—6.93 and 7.03—for the same model/guide and it's not entirely clear how the two reported numbers are different. # ## Bells, whistles, and other improvements # # ### Inverse Autoregressive Flows # # One of the great things about a probabilistic programming language is that it encourages modularity. Let's showcase an example in the context of the DMM. We're going to make our variational distribution richer by adding normalizing flows to the mix (see reference [2] for a discussion). 
**This will only cost us four additional lines of code!** # # First, in the `DMM` constructor we add iafs = [InverseAutoregressiveFlow(z_dim, iaf_dim) for _ in range(num_iafs)] self.iafs = nn.ModuleList(iafs) # This instantiates `num_iafs` many normalizing flows of the `InverseAutoregressiveFlow` type (see references [3,4]); each normalizing flow will have `iaf_dim` many hidden units. We then bundle the normalizing flows in a `nn.ModuleList`; this is just the PyTorchy way to package a list of `nn.Module`s. Next, in the guide we add the lines if self.iafs.__len__() > 0: z_dist = TransformedDistribution(z_dist, self.iafs) # Here we're taking the base distribution `z_dist`, which in our case is a conditional gaussian distribution, and using the `TransformedDistribution` construct we transform it into a non-gaussian distribution that is, by construction, richer than the base distribution. Voila! # ### Checkpointing # # If we want to recover from a catastrophic failure in our training loop, there are two kinds of state we need to keep track of. The first is the various parameters of the model and guide. The second is the state of the optimizers (e.g. in Adam this will include the running average of recent gradient estimates for each parameter). # # In Pyro, the parameters can all be found in the `ParamStore`. However, Pytorch also keeps track of them for us via the `parameters()` method of `nn.Module`. So one simple way we can save the parameters of the model and guide is to make use of the `state_dict()` method of `dmm` in conjunction with `torch.save()`; see below. In the case that we have `InverseAutoregressiveFlow`'s in the loop, this is in fact the only option at our disposal. This is because the `InverseAutoregressiveFlow` module contains what are called 'persistent buffers' in PyTorch parlance. These are things that carry state but are not `Parameter`s. The `state_dict()` and `load_state_dict()` methods of `nn.Module` know how to deal with buffers correctly. 
# # To save the state of the optimizers, we have to use functionality inside of `pyro.optim.PyroOptim`. Recall that the typical user never interacts directly with PyTorch `Optimizers` when using Pyro; since parameters can be created dynamically in an arbitrary probabilistic program, Pyro needs to manage `Optimizers` for us. In our case saving the optimizer state will be as easy as calling `optimizer.save()`. The loading logic is entirely analagous. So our entire logic for saving and loading checkpoints only takes a few lines: # saves the model and optimizer states to disk def save_checkpoint(): log("saving model to %s..." % args.save_model) torch.save(dmm.state_dict(), args.save_model) log("saving optimizer states to %s..." % args.save_opt) optimizer.save(args.save_opt) log("done saving model and optimizer checkpoints to disk.") # loads the model and optimizer states from disk def load_checkpoint(): assert exists(args.load_opt) and exists(args.load_model), \ "--load-model and/or --load-opt misspecified" log("loading model from %s..." % args.load_model) dmm.load_state_dict(torch.load(args.load_model)) log("loading optimizer states from %s..." % args.load_opt) optimizer.load(args.load_opt) log("done loading model and optimizer states.") # ## Some final comments # # A deep markov model is a relatively complex model. Now that we've taken the effort to implement a version of the deep markov model tailored to the polyphonic music dataset, we should ask ourselves what else we can do. What if we're handed a different sequential dataset? Do we have to start all over? # # Not at all! The beauty of probalistic programming is that it enables&mdash;and encourages&mdash;modular approaches to modeling and inference. Adapting our polyphonic music model to a dataset with continuous observations is as simple as changing the observation likelihood. The vast majority of the code could be taken over unchanged. 
This means that with a little bit of extra work, the code in this tutorial could be repurposed to enable a huge variety of different models. # # ## References # # [1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp; # <NAME>, <NAME>, <NAME> # # [2] `Variational Inference with Normalizing Flows`, # <br />&nbsp;&nbsp;&nbsp;&nbsp; # <NAME>, <NAME> # # [3] `Improving Variational Inference with Inverse Autoregressive Flow`, # <br />&nbsp;&nbsp;&nbsp;&nbsp; # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # [4] `MADE: Masked Autoencoder for Distribution Estimation Mathieu`, # <br />&nbsp;&nbsp;&nbsp;&nbsp; # Germain, <NAME>, <NAME>, <NAME> # # [5] `Modeling Temporal Dependencies in High-Dimensional Sequences:` # <br />&nbsp;&nbsp;&nbsp;&nbsp; # `Application to Polyphonic Music Generation and Transcription`, # <br />&nbsp;&nbsp;&nbsp;&nbsp; # <NAME>., <NAME>. and <NAME>.
tutorial/source/dmm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import random

import matplotlib.pyplot as plt

if __name__ == '__main__':
    # Draw 1000 random integers in [0, 100], sort them, and append them to
    # 1a.csv, one value per line.
    # NOTE(review): "a" mode accumulates values across runs -- confirm that
    # duplicating data on re-run is intended; otherwise open with "w".
    numbers = sorted(random.randint(0, 100) for _ in range(1000))
    # `with` guarantees the file is closed even if a write fails
    with open("1a.csv", "a") as f:
        f.writelines(str(num) + '\n' for num in numbers)

    # visualize the raw sample
    plt.hist(numbers, bins=32, alpha=0.5, label="0-100")
    plt.legend(loc='upper right')
    plt.xlabel("Number", size=14)
    plt.ylabel("Count", size=14)
    plt.title("Histograms of Random Numbers")
    plt.show()

if __name__ == '__main__':
    # Apply the affine map y = 3x + 6 to every value in 1a.csv and append
    # the results to 1b.csv.
    numbers = []
    with open("1a.csv", "r") as input_file, open("1b.csv", "a") as output_file:
        # iterate the file lazily instead of materializing readlines()
        for line in input_file:
            y = 3 * int(line.strip()) + 6
            output_file.write(str(y) + '\n')
            numbers.append(y)

    # visualize the transformed sample
    plt.hist(numbers, bins=32, alpha=0.5, label="3x+6")
    plt.legend(loc='upper right')
    plt.xlabel("Number", size=14)
    plt.ylabel("Count", size=14)
    plt.title("Histograms of Random Numbers")
    plt.show()

if __name__ == '__main__':
    # Overlay the two histograms and save the figure. The with-blocks fix the
    # original's leaked file handles (1a/1b were opened here but never closed).
    with open("1a.csv", "r") as data_1a:
        x_1a = [int(row) for row in data_1a]
    with open("1b.csv", "r") as data_1b:
        y_1b = [int(row) for row in data_1b]

    plt.hist(x_1a, bins=32, alpha=0.5, label="0-100")
    plt.hist(y_1b, bins=32, alpha=0.5, label="3x+6")
    plt.legend(loc='upper right')
    plt.xlabel("Number", size=14)
    plt.ylabel("Count", size=14)
    plt.title("Overlapping Histograms of Random Numbers")
    plt.savefig('number_histogram.png')
    plt.show()
random_number_generator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # 梯度下降 # :label:`sec_gd` # # 尽管*梯度下降*(gradient descent)很少直接用于深度学习, # 但了解它是理解下一节随机梯度下降算法的关键。 # 例如,由于学习率过大,最优化问题中可能会出现分歧,这种现象早已在梯度下降中出现。 # 同样地,*预处理*(preconditioning)是梯度下降中的一种常用技术, # 还被沿用到更高级的算法中。 # 让我们从简单的一维梯度下降开始。 # # ## 一维梯度下降 # # 为什么梯度下降算法可以优化目标函数? # 一维中的梯度下降给我们很好的启发。 # 考虑一类连续可微实值函数$f: \mathbb{R} \rightarrow \mathbb{R}$, # 利用泰勒展开,我们可以得到 # # $$f(x + \epsilon) = f(x) + \epsilon f'(x) + \mathcal{O}(\epsilon^2).$$ # :eqlabel:`gd-taylor` # # 即在一阶近似中,$f(x+\epsilon)$可通过$x$处的函数值$f(x)$和一阶导数$f'(x)$得出。 # 我们可以假设在负梯度方向上移动的$\epsilon$会减少$f$。 # 为了简单起见,我们选择固定步长$\eta > 0$,然后取$\epsilon = -\eta f'(x)$。 # 将其代入泰勒展开式我们可以得到 # # $$f(x - \eta f'(x)) = f(x) - \eta f'^2(x) + \mathcal{O}(\eta^2 f'^2(x)).$$ # :eqlabel:`gd-taylor-2` # # 如果其导数$f'(x) \neq 0$没有消失,我们就能继续展开,这是因为$\eta f'^2(x)>0$。 # 此外,我们总是可以令$\eta$小到足以使高阶项变得不相关。 # 因此, # # $$f(x - \eta f'(x)) \lessapprox f(x).$$ # # 这意味着,如果我们使用 # # $$x \leftarrow x - \eta f'(x)$$ # # 来迭代$x$,函数$f(x)$的值可能会下降。 # 因此,在梯度下降中,我们首先选择初始值$x$和常数$\eta > 0$, # 然后使用它们连续迭代$x$,直到停止条件达成。 # 例如,当梯度$|f'(x)|$的幅度足够小或迭代次数达到某个值时。 # # 下面我们来展示如何实现梯度下降。为了简单起见,我们选用目标函数$f(x)=x^2$。 # 尽管我们知道$x=0$时$f(x)$能取得最小值, # 但我们仍然使用这个简单的函数来观察$x$的变化。 # # + origin_pos=2 tab=["pytorch"] # %matplotlib inline import numpy as np import torch from d2l import torch as d2l # + origin_pos=4 tab=["pytorch"] def f(x): # 目标函数 return x ** 2 def f_grad(x): # 目标函数的梯度 (导数) return 2 * x # + [markdown] origin_pos=5 # 接下来,我们使用$x=10$作为初始值,并假设$\eta=0.2$。 # 使用梯度下降法迭代$x$共10次,我们可以看到,$x$的值最终将接近最优解。 # # + origin_pos=6 tab=["pytorch"] def gd(eta, f_grad): x = 10.0 results = [x] for i in range(10): x -= eta * f_grad(x) results.append(float(x)) print(f'epoch 10, x: {x:f}') return results results = gd(0.2, f_grad) # + [markdown] origin_pos=7 # 对进行$x$优化的过程可以绘制如下。 # # + origin_pos=8 tab=["pytorch"] def show_trace(results, f): n = max(abs(min(results)), 
abs(max(results))) f_line = torch.arange(-n, n, 0.01) d2l.set_figsize() d2l.plot([f_line, results], [[f(x) for x in f_line], [ f(x) for x in results]], 'x', 'f(x)', fmts=['-', '-o']) show_trace(results, f) # + [markdown] origin_pos=9 # ### 学习率 # :label:`subsec_gd-learningrate` # # *学习率*(learning rate)决定目标函数能否收敛到局部最小值,以及何时收敛到最小值。 # 学习率$\eta$可由算法设计者设置。 # 请注意,如果我们使用的学习率太小,将导致$x$的更新非常缓慢,需要更多的迭代。 # 例如,考虑同一优化问题中$\eta = 0.05$的进度。 # 如下所示,尽管经过了10个步骤,我们仍然离最优解很远。 # # + origin_pos=10 tab=["pytorch"] show_trace(gd(0.05, f_grad), f) # + [markdown] origin_pos=11 # 相反,如果我们使用过高的学习率,$\left|\eta f'(x)\right|$对于一阶泰勒展开式可能太大。 # 也就是说,:eqref:`gd-taylor` 中的$\mathcal{O}(\eta^2 f'^2(x))$可能变得显著了。 # 在这种情况下,$x$的迭代不能保证降低$f(x)$的值。 # 例如,当学习率为$\eta=1.1$时,$x$超出了最优解$x=0$并逐渐发散。 # # + origin_pos=12 tab=["pytorch"] show_trace(gd(1.1, f_grad), f) # + [markdown] origin_pos=13 # ### 局部最小值 # # 为了演示非凸函数的梯度下降,考虑函数$f(x) = x \cdot \cos(cx)$,其中$c$为某常数。 # 这个函数有无穷多个局部最小值。 # 根据我们选择的学习率,我们最终可能只会得到许多解的一个。 # 下面的例子说明了高学习率会如何导致局部最小值的解的“不切实际”。 # # + origin_pos=14 tab=["pytorch"] c = torch.tensor(0.15 * np.pi) def f(x): # 目标函数 return x * torch.cos(c * x) def f_grad(x): # 目标函数的梯度 return torch.cos(c * x) - c * x * torch.sin(c * x) show_trace(gd(2, f_grad), f) # + [markdown] origin_pos=15 # ## 多元梯度下降 # # 现在我们对单变量的情况有了更好的理解,让我们考虑一下$\mathbf{x} = [x_1, x_2, \ldots, x_d]^\top$的情况。 # 即目标函数$f: \mathbb{R}^d \to \mathbb{R}$将向量映射成标量。 # 相应地,它的梯度也是多元的:它是一个由$d$个偏导数组成的向量: # # $$\nabla f(\mathbf{x}) = \bigg[\frac{\partial f(\mathbf{x})}{\partial x_1}, \frac{\partial f(\mathbf{x})}{\partial x_2}, \ldots, \frac{\partial f(\mathbf{x})}{\partial x_d}\bigg]^\top.$$ # # 梯度中的每个偏导数元素$\partial f(\mathbf{x})/\partial x_i$代表了当输入$x_i$时$f$在$\mathbf{x}$处的变化率。 # 和先前单变量的情况一样,我们可以对多变量函数使用相应的Taylor近似来思考。 # 具体来说, # # $$f(\mathbf{x} + \boldsymbol{\epsilon}) = f(\mathbf{x}) + \mathbf{\boldsymbol{\epsilon}}^\top \nabla f(\mathbf{x}) + \mathcal{O}(\|\boldsymbol{\epsilon}\|^2).$$ # :eqlabel:`gd-multi-taylor` # # 换句话说,在$\boldsymbol{\epsilon}$的二阶项中, # 
最陡下降的方向由负梯度$-\nabla f(\mathbf{x})$得出。 # 选择合适的学习率$\eta > 0$来生成典型的梯度下降算法: # # $$\mathbf{x} \leftarrow \mathbf{x} - \eta \nabla f(\mathbf{x}).$$ # # 这个算法在实践中的表现如何呢? # 我们构造一个目标函数$f(\mathbf{x})=x_1^2+2x_2^2$, # 并有二维向量$\mathbf{x} = [x_1, x_2]^\top$作为输入, # 标量作为输出。 # 梯度由$\nabla f(\mathbf{x}) = [2x_1, 4x_2]^\top$给出。 # 我们将从初始位置$[-5, -2]$通过梯度下降观察$\mathbf{x}$的轨迹。 # # 我们还需要两个辅助函数: # 第一个是update函数,并将其应用于初始值20次; # 第二个函数会显示$\mathbf{x}$的轨迹。 # # + origin_pos=16 tab=["pytorch"] def train_2d(trainer, steps=20, f_grad=None): #@save """用定制的训练机优化2D目标函数。""" # `s1` 和 `s2` 是稍后将使用的内部状态变量 x1, x2, s1, s2 = -5, -2, 0, 0 results = [(x1, x2)] for i in range(steps): if f_grad: x1, x2, s1, s2 = trainer(x1, x2, s1, s2, f_grad) else: x1, x2, s1, s2 = trainer(x1, x2, s1, s2) results.append((x1, x2)) print(f'epoch {i + 1}, x1: {float(x1):f}, x2: {float(x2):f}') return results def show_trace_2d(f, results): #@save """显示优化过程中2D变量的轨迹。""" d2l.set_figsize() d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e') x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1), torch.arange(-3.0, 1.0, 0.1)) d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4') d2l.plt.xlabel('x1') d2l.plt.ylabel('x2') # + [markdown] origin_pos=17 # 接下来,我们观察学习率$\eta = 0.1$时优化变量$\mathbf{x}$的轨迹。 # 可以看到,经过20步之后,$\mathbf{x}$的值接近其位于$[0, 0]$的最小值。 # 虽然进展相当顺利,但相当缓慢。 # # + origin_pos=18 tab=["pytorch"] def f_2d(x1, x2): # 目标函数 return x1 ** 2 + 2 * x2 ** 2 def f_2d_grad(x1, x2): # 目标函数的梯度 return (2 * x1, 4 * x2) def gd_2d(x1, x2, s1, s2, f_grad): g1, g2 = f_grad(x1, x2) return (x1 - eta * g1, x2 - eta * g2, 0, 0) eta = 0.1 show_trace_2d(f_2d, train_2d(gd_2d, f_grad=f_2d_grad)) # + [markdown] origin_pos=19 # ## 自适应方法 # # 正如我们在 :numref:`subsec_gd-learningrate` 中所看到的,选择“恰到好处”的学习率$\eta$是很棘手的。 # 如果我们把它选得太小,就没有什么进展;如果太大,得到的解就会振荡,甚至可能发散。 # 如果我们可以自动确定$\eta$,或者完全不必选择学习率,会怎么样? 
# 除了考虑目标函数的值和梯度、还考虑它的曲率的二阶方法可以帮我们解决这个问题。 # 虽然由于计算成本的原因,这些方法不能直接应用于深度学习,但它们为如何设计高级优化算法提供了有用的思维直觉,这些算法可以模拟下面概述的算法的许多理想特性。 # # ### 牛顿法 # # 回顾一些函数$f: \mathbb{R}^d \rightarrow \mathbb{R}$的泰勒展开式,事实上我们可以把它写成 # # $$f(\mathbf{x} + \boldsymbol{\epsilon}) = f(\mathbf{x}) + \boldsymbol{\epsilon}^\top \nabla f(\mathbf{x}) + \frac{1}{2} \boldsymbol{\epsilon}^\top \nabla^2 f(\mathbf{x}) \boldsymbol{\epsilon} + \mathcal{O}(\|\boldsymbol{\epsilon}\|^3).$$ # :eqlabel:`gd-hot-taylor` # # 为了避免繁琐的符号,我们将$\mathbf{H} \stackrel{\mathrm{def}}{=} \nabla^2 f(\mathbf{x})$定义为$f$的Hessian,是$d \times d$矩阵。 # 当$d$的值很小且问题很简单时,$\mathbf{H}$很容易计算。 # 但是对于深度神经网络而言,考虑到$\mathbf{H}$可能非常大, # $\mathcal{O}(d^2)$个条目的存储成本会很高, # 此外通过反向传播进行计算可能雪上加霜。 # 然而,我们姑且先忽略这些考量,看看会得到什么算法。 # # 毕竟,$f$的最小值满足$\nabla f = 0$。 # 遵循 :numref:`sec_calculus` 中的微积分规则, # 通过取$\boldsymbol{\epsilon}$对 :eqref:`gd-hot-taylor`的导数, # 再忽略不重要的高阶项,我们便得到 # # $$\nabla f(\mathbf{x}) + \mathbf{H} \boldsymbol{\epsilon} = 0 \text{ and hence } # \boldsymbol{\epsilon} = -\mathbf{H}^{-1} \nabla f(\mathbf{x}).$$ # # 也就是说,作为优化问题的一部分,我们需要反转Hessian $\mathbf{H}$。 # # 举一个简单的例子,对于$f(x) = \frac{1}{2} x^2$,我们有$\nabla f(x) = x$和$\mathbf{H} = 1$。 # 因此,对于任何$x$,我们可以获得$\epsilon = -x$。 # 换言之,单单一步就足以完美地收敛,而无须任何调整。 # 我们在这里比较幸运:泰勒展开式是确切的,因为$f(x+\epsilon)= \frac{1}{2} x^2 + \epsilon x + \frac{1}{2} \epsilon^2$。 # # 让我们看看其他问题。 # 给定一个凸双曲余弦函数$c$,其中$c$为某些常数, # 我们可以看到经过几次迭代后,得到了$x=0$处的全局最小值。 # # + origin_pos=20 tab=["pytorch"] c = torch.tensor(0.5) def f(x): # O目标函数 return torch.cosh(c * x) def f_grad(x): # 目标函数的梯度 return c * torch.sinh(c * x) def f_hess(x): # 目标函数的Hessian return c**2 * torch.cosh(c * x) def newton(eta=1): x = 10.0 results = [x] for i in range(10): x -= eta * f_grad(x) / f_hess(x) results.append(float(x)) print('epoch 10, x:', x) return results show_trace(newton(), f) # + [markdown] origin_pos=21 # 现在让我们考虑一个非凸函数,比如$f(x) = x \cos(c x)$,$c$为某些常数。 # 请注意在牛顿法中,我们最终将除以Hessian。 # 这意味着如果二阶导数是负的,$f$的值可能会趋于增加。 # 这是这个算法的致命缺陷! 
# 让我们看看实践中会发生什么。 # # + origin_pos=22 tab=["pytorch"] c = torch.tensor(0.15 * np.pi) def f(x): # 目标函数 return x * torch.cos(c * x) def f_grad(x): # 目标函数的梯度 return torch.cos(c * x) - c * x * torch.sin(c * x) def f_hess(x): # 目标函数的Hessian return - 2 * c * torch.sin(c * x) - x * c**2 * torch.cos(c * x) show_trace(newton(), f) # + [markdown] origin_pos=23 # 这发生惊人的错误。我们怎样才能修正它? # 一种方法是用取Hessian的绝对值来修正,另一个策略是重新引入学习率。 # 这似乎违背了初衷,但不完全是——拥有二阶信息可以使我们在曲率较大时保持谨慎,而在目标函数较平坦时则采用较大的学习率。 # 让我们看看在学习率稍小的情况下它是如何生效的,比如$\eta = 0.5$。 # 如我们所见,我们有了一个相当高效的算法。 # # + origin_pos=24 tab=["pytorch"] show_trace(newton(0.5), f) # + [markdown] origin_pos=25 # ### 收敛性分析 # # 在此,我们以三次可微的目标凸函数$f$为例,分析它的牛顿法收敛速度。 # 假设它们的二阶导数不为零,即$f'' > 0$。 # # 用$x^{(k)}$表示$x$在第$k^\mathrm{th}$次迭代时的值, # 令$e^{(k)} \stackrel{\mathrm{def}}{=} x^{(k)} - x^*$表示$k^\mathrm{th}$迭代时与最优性的距离。 # 通过泰勒展开,我们得到条件$f'(x^*) = 0$可以写成 # # $$0 = f'(x^{(k)} - e^{(k)}) = f'(x^{(k)}) - e^{(k)} f''(x^{(k)}) + \frac{1}{2} (e^{(k)})^2 f'''(\xi^{(k)}),$$ # # 这对某些$\xi^{(k)} \in [x^{(k)} - e^{(k)}, x^{(k)}]$成立。 # 将上述展开除以$f''(x^{(k)})$得到 # # $$e^{(k)} - \frac{f'(x^{(k)})}{f''(x^{(k)})} = \frac{1}{2} (e^{(k)})^2 \frac{f'''(\xi^{(k)})}{f''(x^{(k)})}.$$ # # 回想之前的方程$x^{(k+1)} = x^{(k)} - f'(x^{(k)}) / f''(x^{(k)})$。 # 插入这个更新方程,取两边的绝对值,我们得到 # # $$\left|e^{(k+1)}\right| = \frac{1}{2}(e^{(k)})^2 \frac{\left|f'''(\xi^{(k)})\right|}{f''(x^{(k)})}.$$ # # 因此,每当我们处于有界区域$\left|f'''(\xi^{(k)})\right| / (2f''(x^{(k)})) \leq c$, # 我们就有一个二次递减误差 # # $$\left|e^{(k+1)}\right| \leq c (e^{(k)})^2.$$ # # 另一方面,优化研究人员称之为“线性”收敛,而$\left|e^{(k+1)}\right| \leq \alpha \left|e^{(k)}\right|$这样的条件称为“恒定”收敛速度。 # 请注意,我们无法估计整体收敛的速度,但是一旦我们接近极小值,收敛将变得非常快。 # 另外,这种分析要求$f$在高阶导数上表现良好,即确保$f$在变化他的值方面没有任何“超常”的特性。 # # ### 预处理 # # 计算和存储完整的Hessian非常昂贵,而改善这个问题的一种方法是“预处理”。 # 它回避了计算整个Hessian,而只计算“对角线”项,即如下的算法更新: # # $$\mathbf{x} \leftarrow \mathbf{x} - \eta \mathrm{diag}(\mathbf{H})^{-1} \nabla f(\mathbf{x}).$$ # # 虽然这不如完整的牛顿法精确,但它仍然比不使用要好得多。 # 为什么预处理有效呢? 
# 假设一个变量以毫米表示高度,另一个变量以公里表示高度的情况。 # 假设这两种自然尺度都以米为单位,那么我们的参数化就出现了严重的不匹配。 # 幸运的是,使用预处理可以消除这种情况。 # 梯度下降的有效预处理相当于为每个变量选择不同的学习率(矢量$\mathbf{x}$的坐标)。 # 我们将在后面一节看到,预处理推动了随机梯度下降优化算法的一些创新。 # # ### 梯度下降和线搜索 # # 梯度下降的一个关键问题是我们可能会超过目标或进展不足, # 解决这一问题的简单方法是结合使用线搜索和梯度下降。 # 也就是说,我们使用$\nabla f(\mathbf{x})$给出的方向, # 然后对以学习率$\eta$取$f(\mathbf{x} - \eta \nabla f(\mathbf{x}))$最小值的结果进行二进制搜索。 # # 有关分析和证明,此算法收敛迅速(请参见 :cite:`Boyd.Vandenberghe.2004`)。 # 然而,对深度学习而言,这并不太可行。 # 因为线搜索的每一步都需要评估整个数据集上的目标函数,实现它的方式太昂贵了。 # # ## 小结 # # * 学习率的大小很重要:学习率太大会使模型发散,学习率太小会没有进展。 # * 梯度下降会在求局部极小值中陷住,而得不到全局最小值。 # * 在高维模型中,调整学习率是很复杂的,预处理有助于调节比例。 # * 牛顿法在凸问题中一旦开始正常工作,速度就会快得多。 # * 对于非凸问题,不要不作任何调整就使用牛顿法。 # # ## 练习 # # 1. 用不同的学习率和目标函数进行梯度下降实验。 # 1. 在区间$[a, b]$中实现线搜索以最小化凸函数。 # 1. 你是否需要导数来进行二进制搜索,即,决定是选择$[a, (a+b)/2]$还是$[(a+b)/2, b]$。 # 1. 算法的收敛速度有多快? # 1. 实现该算法,并将其应用于求$\log (\exp(x) + \exp(-2x -3))$的最小值。 # 1. 设计一个定义在$\mathbb{R}^2$上的目标函数,它的梯度下降非常缓慢。提示:对不同的坐标使用不同的比例。 # 1. 使用预处理实现牛顿方法的轻量级版本: # 1. 使用对角Hessian作为预条件。 # 1. 使用它的绝对值,而不是实际值(可能有符号)。 # 1. 将此应用于上述问题。 # 1. 将上述算法应用于多个目标函数(凸或非凸)。如果你把坐标旋转$45$度会怎么样? # # + [markdown] origin_pos=27 tab=["pytorch"] # [Discussions](https://discuss.d2l.ai/t/3836) #
d2l/chapter_optimization/gd.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Announcements
# - __Please familiarize yourself with the term projects, and sign up for your (preliminary) choice__ using [this form](https://forms.gle/ByLLpsthrpjCcxG89). _You may revise your choice, but I'd recommend settling on a choice well before Thanksgiving._
# - Problem Set 5 posted on D2L, due Oct 20.
# - __Outlook__: algorithms for solving high-dimensional linear and non-linear equations; then Boundary Value Problems and Partial Differential Equations.
# - Conference for Undergraduate Women in Physics: online event in 2021, [applications accepted until 10/25](https://www.aps.org/programs/women/cuwip/)

# This notebook presents a selection of topics from the book "Numerical Linear Algebra" by Trefethen and Bau (SIAM, 1997), and uses notebooks by <NAME>.

# # Conditioning and Stability
#
# Once an approximation to a linear system is constructed the next question is how much trust can we put in the approximation? Since the true solution is not known, one of the few tools we have is to ask how well the approximation matches the original equation. In other words, we seek a solution to a system,
# $$
# \vec{f}(\vec{x}) = \vec{b}.
# $$
#
# We do not have $\vec{x}$ but instead have an approximation, $\hat{x}$, and we hope that
# $$
# \vec{f}(\hat{x}) \approx \vec{b}.
# $$
# In this section the question we explore is to try to determine a bound on the relative error, $\frac{||\vec{x}-\hat{x}||}{||\vec{x}||}$ given the matrix, $A$.
#
# This leads to the notion of conditioning. Conditioning is the behavior of a problem when the solution is changed a small bit (perturbed), and it is a mathematical (analytic) property of the original system of equations.
Stability, on the other hand, is concerned with how the algorithm used to obtain an approximation behaves when the approximation is perturbed. # # ## Conditioning and Condition Numbers # # A **well-conditioned** problem is one where a small perturbation to the original problem leads to only small changes in the solution. # # Formally we can think of a function $f$ which maps $x$ to $y$ # # $$ # f(x) = y \quad \text{or} \quad f: X \rightarrow Y. # $$ # # Let $x \in X$ where we perturb $x$ with $\delta x$ and we ask how the result $y$ changes: # # $$ # ||f(x) - f(x + \delta x)|| \leq C ||x - (x+\delta x)|| # $$ # # for some constant $C$ possible dependent on $\delta x$ depending on the type of conditioning we are considering. # # ### Absolute Condition Number # # If we let $\delta x$ be the small perturbation to the input and $\delta f = f(x + \delta x) - f(x)$ be the result the **absolute condition number** $\hat{~\kappa}$ can be defined as # # $$ # \hat{\!\kappa} = \sup_{\delta x} \frac{||\delta f||}{||\delta x||} # $$ # # for most problems (assuming $\delta f$ and $\delta x$ are both infinitesimal). # # When $f$ is differentiable we can evaluate the condition number via the Jacobian. Recall that the derivative of a multi-valued function can be termed in the form of a Jacobian $J(x)$ where # $$ # [J(x)]_{ij} = \frac{\partial f_i}{\partial x_j}(x). # $$ # # This allows us to write the infinitesimal $\delta f$ as # $$ # \delta f \approx J(x) \delta x # $$ # with equality when $||\delta x|| \rightarrow 0$. Then we can write the condition number as # $$ # \hat{\!\kappa} = ||J(x)|| # $$ # where the norm is the one induced by the spaces $X$ and $Y$. # # ### Relative Condition Number # # The **relative condition number** is defined similarly and is related to the difference before between the absolute error and relative error as defined previously. 
With the same caveats as before it can be defined as # $$ # \kappa = \sup_{\delta x} \left( \frac{\frac{||\delta f||}{||f(x)||}}{\frac{||\delta x||}{||x||}} \right). # $$ # # Again if $f$ is differentiable we can use the Jacobian $J(x)$ to evaluate the relative condition number as # $$ # \kappa = \frac{||J(x)||}{||f(x)|| ~/ ~||x||}. # $$ # # #### Example # Calculate the relative condition number of $\sqrt{x}$ for $x > 0$. # # $$ # f(x) = \sqrt{x}, \quad J(x) = f'(x) = \frac{1}{2 \sqrt{x}} \\ # \kappa = \frac{||J(x)||}{||f(x)|| / ||x||} = \frac{1}{2 \sqrt{x}} \frac{x}{\sqrt{x}} = \frac{1}{2} # $$ # The condition number of a function was discussed in general terms above. Now, we examine the more specific case of a linear function, a matrix-vector multiplication. Here we let $\vec{f}(\vec{x})=Ax$ and determine the condition number by perturbing $x$. # # We begin with the definition above, # $$\begin{aligned} # \kappa &= \sup_{\delta x} \left ( \frac{||A (\vec{x}+\delta x) - A \vec{x}||}{||A\vec{x}||} \frac{||\vec{x}||}{||\delta x||}\right ), \\ # &= \sup_{\delta x} \frac{ ||A \delta x||}{||\delta x||} \frac{||\vec{x}||}{||A\vec{x}||}, \\ # &= ||A|| \frac{||\vec{x}||}{||A \vec{x}||}, # \end{aligned}$$ # where $\delta x$ is a vector. # # If $A$ has an inverse, then we note that # $$ # \begin{align} # \vec{x} &= A^{-1}A \vec{x}, \\ # \Rightarrow ||\vec{x}|| &= || A^{-1}A \vec{x} ||, \\ # &\leq ||A^{-1}|| || A \vec{x} ||, # \end{align} # $$ # which implies that # $$ # \frac{||x||}{||A x||} \leq ||A^{-1}||. # $$ # _We can now bound the condition number for a matrix by_ # $$ # \kappa \leq ||A|| ||A^{-1}||. # $$ # ### Condition Number of a Matrix # # The condition number of a matrix is defined by the product # $$ # \kappa(A) = ||A||~||A^{-1}||. # $$ # where here we are thinking about the matrix rather than a problem. If $\kappa$ is small than $A$ is said to be **well-conditioned**. If $A$ is singular we assign $\kappa(A) = \infty$ as the matrix's condition number. 
# # When we are considering the $\ell_2$ norm then we can write the condition number as # # $$ # \kappa(A) = \frac{\sqrt{\rho(A^\ast A)}}{\sqrt{\rho((A^\ast A)^{-1})}} = \frac{\sqrt{\max |\lambda|}}{\sqrt{\min |\lambda|}}. # $$ # ### Condition Number of a System of Equations # # Another way to think about the conditioning of a problem we have looked at before is that the matrix $A$ itself is an input to the problem. Consider than the system of equations $A\vec{x} = \vec{b}$ where we will perturb both $A$ and $\vec{x}$ resulting in # $$ # (A + \delta A)(\vec{x} + \delta x) = \vec{b}. # $$ # # Assuming we solve the problem exactly we know that $A\vec{x} = \vec{b}$ and that the infinitesimals multiplied $\delta A \delta x$ are smaller than the other term, and the above expression can be approximation by # $$ # \begin{aligned} # (A + \delta A)(\vec{x} + \delta x) &= \vec{b}, \\ # A\vec{x} + \delta Ax + A \delta x + \delta A \delta \vec{x} &= \vec{b} \\ # \delta A\vec{x} + A \delta x & = 0 # \end{aligned} # $$ # # Solving for $\delta x$ leads to # $$ # \delta x = -A^{-1} \delta A \vec{x} # $$ # implying # $$ # ||\delta x|| \leq ||A^{-1}|| ~ ||\delta A|| ~ ||\vec{x}|| # $$ # and therefore # $$ # \frac{\frac{||\delta x||}{||\vec{x}||}}{\frac{||\delta A||}{||A||}} \leq ||A^{-1}||~||A|| = \kappa(A). # $$ # # We can also say the following regarding the condition number of a system of equations then # # **Theorem:** Let $\vec{b}$ be fixed and consider the problem of computing $\vec{x}$ in $A\vec{x} = \vec{b}$ where $A$ is square and non-singular. The condition number of this problem with respect to perturbations in $A$ is the condition number of the matrix $\kappa(A)$. # ## Stability # # We now return to the consideration of the fact that we are interested not only in the well-conditioning of a mathematical problem but in how we might solve it on a finite precision machine. 
In some sense conditioning describes how well we can solve a problem in exact arithmetic and stability how well we can solve the problem in finite arithmetic. # # ### Accuracy and Stability # # As we have defined before we will consider **absolute error** as # $$ # ||F(x) - f(x)|| # $$ # where $F(x)$ is the approximation to the true solution $f(x)$. Similarly we can define **relative error** as # $$ # \frac{||F(x) - f(x)||}{||f(x)||}. # $$ # In the ideal case we would like the relative error to be $\mathcal{O}(\epsilon_{\text{machine}})$. # # #### Forwards Stability # # A **forward stable** algorithm for $x \in X$ has # # $$ # \frac{||F(x) - f(x)||}{||f(x)||} = \mathcal{O}(\epsilon_{\text{machine}}) # $$ # # In other words # > A forward stable algorithm gives almost the right answer to exactly the right question. # # #### Backwards Stability # # A stronger notion of stability can also be defined which is satisfied by many approaches in numerical linear algebra. We say that an algorithm $F$ is **backward stable** if for $x \in X$ we have # # $$ # F(x) = f(\hat{\!x}) # $$ # # for some $\hat{\!x}$ with # # $$ # \frac{||\hat{\!x} - x||}{||x||} = \mathcal{O}(\epsilon_{\text{machine}}). # $$ # # In other words # > A backward stable algorithm gives exactly the right answer to nearly the right question. # # Combining these ideas along with the idea that we should not expect to be able to accurately compute the solution to a poorly conditioned problem we can form the mixed forward-backward sense of stability as for $x \in X$ if # # $$ # \frac{||F(x) - f(\hat{\!x})||}{||f(\hat{\!x})||} = \mathcal{O}(\epsilon_{\text{machine}}) # $$ # # for some $\hat{\!x}$ with # # $$ # \frac{||\hat{\!x} - x||}{||x||} = \mathcal{O}(\epsilon_{\text{machine}}). # $$ # # In other words # > A stable algorithm gives nearly the right answer to nearly the right question. # # _An important aspect of the above statement is that we can not necessarily guarantee an accurate result. 
If the condition number $\kappa(x)$ is small we would expect that a stable algorithm would give us an accurate result (by definition). This is reflected in the following theorem._ # # **Theorem:** Suppose a backward stable algorithm is applied to solve a problem $f: X \rightarrow Y$ with condition number $\kappa$ on a finite precision machine, then the relative errors satisfy # $$ # \frac{||F(x) - f(\hat{\!x})||}{||f(\hat{\!x})||} = \mathcal{O}(\kappa(x) ~ \epsilon_{\text{machine}}). # $$ # # **Proof:** By the definition of the condition number of a problem we can write # $$ # \frac{||F(x) - f(\hat{\!x})||}{||f(\hat{\!x})||} \leq (\kappa(x) + \mathcal{O}(\epsilon_{\text{machine}}))\frac{||\hat{\!x} - x||}{||x||}. # $$ # Combining this with the definition of backwards stability we can arrive at the statement of the theorem. # # To summarize: # > **Backward Error Analysis** - Process of using the condition number of the problem and stability of the algorithm to determine the error. # # > **Forward Error Analysis** - Considers the accrual of error at each step of an algorithm given slightly perturbed input. # # Eigenproblems # # ## Overview # # We will now consider eigenproblems of the form # $$ # A x = \lambda x # $$ # where $A \in \mathbb C^{m \times m}$, $x \in \mathbb C^m$ and $\lambda \in \mathbb C$. The vector $x$ is known as the **eigenvector** and $\lambda$ the **eigenvalue**. The set of all eigenvalues is called the **spectrum** of $A$. # # ### Eigenvalue Decomposition # Similar to QR factorization, an eigendecomposition is possible such that $A$ can be written as # # $$ # A = X \Lambda X^{-1} # $$ # # where $X$ is the matrix formed by the eigenvectors $x$ as its columns and $\Lambda$ is a diagonal matrix with the eigenvalues along its diagonal. # # This equation comes from the similar equation $A X = X \Lambda$ which is of course related to the original problem statement. 
This latter equation can be written out as # # $$ # \begin{bmatrix} # & & & & \\ # & & & & \\ # & & A & & \\ # & & & & \\ # & & & & # \end{bmatrix} # \begin{bmatrix} # & & & & \\ # & & & & \\ # x_1 & x_2 & \cdots & x_{m-1} & x_m \\ # & & & & \\ # & & & & # \end{bmatrix} = # \begin{bmatrix} # & & & & \\ # & & & & \\ # x_1 & x_2 & \cdots & x_{m-1} & x_m \\ # & & & & \\ # & & & & # \end{bmatrix} # \begin{bmatrix} # \lambda_1 & & & & \\ # & \lambda_2 & & & \\ # & & \ddots & & \\ # & & & \lambda_{m-1} & \\ # & & & & \lambda_m # \end{bmatrix} # $$ # # Here we note that the eigenpair $(x_j, \lambda_j)$ are matched as the $j$th column of $X$ and the $j$th element of $\Lambda$ on the diagonal. # # **Algebraic multiplicity** is the number of times overall an eigenvalue repeats itself. # # **Geometric multiplicity** is defined as the number of linearly independent eigenvectors that belong to each eigenvalue. # # If the algebraic multiplicity is equal to the geometric multiplicity for all $\lambda$ then we can say that there is a full eigenspace. # #### Example: Computing Multiplicities # # Compute the geometric and algebraic multiplicities for the following matrices. What is the relationship between the algebraic and geometric multiplicities? # # $$A = \begin{bmatrix} # 2 & & \\ # & 2 & \\ # & & 2 # \end{bmatrix}$$ # # $$B = \begin{bmatrix} 2 # & 1 & \\ # & 2 & 1 \\ # & & 2 # \end{bmatrix}$$ # # 1. The characteristic polynomial of $A$ is # # $$ # \mathcal{P}_A(z) = (2 - z)(2 - z)(2 - z) = (2 - z)^3 # $$ # # so the eigenvalues are all $\lambda = 2$ so we know the algebraic multiplicity is 3 of this eigenvalue. The geometric multiplicity is determined by the number of linearly independent eigenvectors. For this matrix we have three eigenvectors that are all linearly independent which happen to be the unit vectors in each direction (check!). This means that the geometric multiplicity is also 3. # # 1. 
The characteristic polynomial of $B$ is the same as $A$ so again we know $\lambda = 2$ but now we need to be a bit careful about the eigenvectors. In this case the only eigenvector is a scalar multiple of $e_1$ so the geometric multiplicity is 1. # # ### Interpretations of the Eigenspace # # One way to interpret the eigenproblem is that of one that tries to find the subspaces of $\mathbb C^m$ which act like scalar multiplication by $\lambda$. The eigenvectors associated with one eigenvalue then form a subspace of $S \subseteq \mathbb C^m$. # # When an eigenvalue has algebraic multiplicity that equals its geometric then it is called non-defective and otherwise defective. This property is also inherited to the matrix so in the above example $A$ and $B$ are non-defective and defective matrices respectively. # # ### Determinant and Trace # # Two important properties of matrices have important relationships with their eigenvalues, namely the determinant and trace. The determinant we have seen, the **trace** is defined as the sum of the elements on the diagonal of a matrix, in other words # $$ # \text{tr}(A) = \sum^m_{i=1} A_{ii}. # $$ # # The relationship between the determinant and the eigenvalues is not difficult to guess due to the nature of the characteristic polynomial. The trace of a diagonal matrix is clear and provides another suggestion to the relationship. # # **Theorem:** The determinant $\det(A)$ and trace $\text{trace}(A)$ are equal to the product and sum of the eigenvalues of $A$ respectively counting algebraic multiplicity. # # ### Similarity Transformations # # The relationship between a matrix's eigenvalues and its determinant and trace are due to the special relationship between the eigenvalue decomposition and what is called similarity transformations. A **similarity transformation** is defined as a transformation that takes A and maps it to $X^{-1} A X$ (assuming $X$ is non-singular). 
Two matrices are said to be **similar** if there is a similarity transformation between them. # # The most important property of similar matrices is that they have the same characteristic polynomial, eigenvalues, and multiplicities. # # This allows us to relate geometric and algebraic multiplicity as # # **Theorem:** The algebraic multiplicity of an eigenvalue $\lambda$ is at least as great as its geometric multiplicity. # # ### Schur Factorization # # A **Schur factorization** of a matrix $A$ is defined as # # $$ #     A = Q T Q^\ast # $$ # # where $Q$ is unitary and $T$ is upper-triangular.  In particular note that due to the structure of the resulting characteristic polynomial that $A$ and $T$ have identical eigenvalues. # # **Theorem:** Every matrix $A \in \mathbb C^{m \times m}$ has a Schur factorization. # # Note that the above results imply the following # - An eigen-decomposition $A = X \Lambda X^{-1}$ exists if and only if $A$ is non-defective (it has a complete set of eigenvectors) # - A unitary transformation $A = Q \Lambda Q^\ast$ exists if and only if $A$ is normal ($A^\ast A = A A^\ast$) # - A Schur factorization always exists # # Note that each of these leads to a means for isolating the eigenvalues of a matrix and will be useful when considering algorithms for finding them. # ## Condition Number of a Simple Eigenvalue # # Before we discuss a number of approaches to computing eigenvalues it is good to consider what the condition number of a given eigenproblem is. # # Let # $$ #     Ax = \lambda x # $$ # define the eigenvalue problem in question.  Here we will introduce a related problem # $$ #     y^\ast A = \lambda y^\ast # $$ # where $y$ is the **left eigenvector** and from before $x$ is the **right eigenvector**.  These vectors also can be shown to have the relationship $y^\ast x \neq 0$ for a simple eigenvalue. # # Now consider the perturbed problem # $$ #     (A + \delta A) (x + \delta x) = (\lambda + \delta \lambda) (x + \delta x). 
# $$ # Expanding this and throwing out quadratic terms and removing the eigenproblem we have # $$ #     \delta A x + A \delta x = \delta \lambda x + \lambda \delta x. # $$ # # Multiply both sides of the above by the left eigenvector and use $y^\ast x \neq 0$ to find # $$\begin{aligned} #     y^\ast \delta A x + y^\ast A \delta x &= y^\ast \delta \lambda x + y^\ast \lambda \delta x \\ #     y^\ast \delta A x &= y^\ast \delta \lambda x # \end{aligned}$$ # where we again use the slightly different definition of the eigenproblem.  We can then solve for $\delta \lambda$ to find # $$ #     \delta \lambda = \frac{y^\ast \delta A x}{y^\ast x} # $$ # meaning that the size of the perturbation term $y^\ast \delta A x$ relative to the dot-product of the left and right eigenvectors $y^\ast x$ then forms a bound on the expected error in the simple eigenvalue. # ## Computing Eigenvalues # # The most obvious approach to computing eigenvalues is a direct computation of the roots of the characteristic polynomial.  Unfortunately the following theorem suggests this is not a good way to compute eigenvalues: # # **Theorem:** For any $m \geq 5$ there is a polynomial $\mathcal{P}(z)$ of degree $m$ with rational coefficients that has a real root $\mathcal{P}(z_0) = 0$ with the property that $z_0$ cannot be written using any expression involving rational numbers, addition, subtraction, multiplication, division, and $k$th roots. # # Not all is lost however, we just cannot use any direct methods to solve for the eigenvalues.  Instead we must use an iterative approach, in other words we want to construct a sequence that converges to the eigenvalues.  How does this relate to how we found roots previously? # # Almost all approaches to computing eigenvalues do so through the computation of the Schur factorization.  The Schur factorization, as we have seen, will preserve the eigenvalues.  The steps to compute the Schur factorization are usually broken down into two steps # 1. 
Directly transform $A$ into a **Hessenberg** matrix, a matrix that contains zeros below its first sub-diagonal, directly using Householder reflections. # 1. Use an iterative method to change the sub-diagonal into all zeros # # ### Hessenberg and Tridiagonal form # # What we want to do is construct a sequence of unitary matrices that turns $A$ into a Hessenberg matrix to start. We can use Householder reflections to do this with the important distinction that we only want to remove zeros below the first sub-diagonal. The sequence would look something like # # $$ # \begin{bmatrix} # \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x} & \text{x} & \text{x} # \end{bmatrix} \overset{Q_1}{\rightarrow} # \begin{bmatrix} # \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & \text{x} & \text{x}& \text{x} & \text{x} # \end{bmatrix} \overset{Q_2}{\rightarrow} # \begin{bmatrix} # \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & 0 & \text{x}& \text{x} & \text{x} \\ # 0 & 0 & \text{x}& \text{x} & \text{x} # \end{bmatrix} \overset{Q_3}{\rightarrow} # \begin{bmatrix} # \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\ # \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & \text{x} & \text{x}& \text{x} & \text{x} \\ # 0 & 0 & \text{x}& \text{x} & \text{x} \\ # 0 & 0 & 0 & \text{x} & \text{x} # \end{bmatrix} # $$ # # so we have the sequence $Q^\ast_1 A Q_1$. Note we need both to preserve the entries of the first column that are not being transformed to zeros. 
# # One important special case of this sequence of transformations is that if the matrix $A$ is hermitian (the matrix is its own conjugate transpose, $A = A^\ast$, or symmetric in the real case) then the Hessenberg matrix is tridiagonal. # # We now will focus on how to formulate the iteration step of the eigenproblem. We will also restrict our attention to symmetric, real matrices. This implies that all eigenvalues will be real and have a complete set of orthogonal eigenvectors. Generalizations can be made of many of the following algorithms but is beyond the scope of this class. # ## Rayleigh Quotient and Inverse Iteration # # There are a number of classical approaches to computing the iterative step above which we will review here. Inverse power iteration in particular is today still the dominant means of finding the eigenvectors once the eigenvalues are known. # # ### Rayleigh Quotient # # The **Rayleigh quotient** of a vector $x \in \mathbb R^m$ is the scalar # $$ # r(x) = \frac{x^T A x}{x^T x}. # $$ # The importance of the Rayleigh quotient is made clear when we evaluate $r(x)$ at an eigenvector. When this is the case the quotient evaluates to the corresponding eigenvalue. # # The Rayleigh quotient can be motivated by asking the question, given an eigenvector $x$, what value $\alpha$ acts most like an eigenvalue in an $\ell_2$ sense: # $$ # \min_\alpha ||A x - \alpha x||_2. # $$ # # This can be reformulated as a least-squares problem noting that $x$ is the "matrix", $\alpha$ is the unknown vector (scalar) and $Ax$ is the right-hand side so we have # $$ # (x^T x) \alpha = x^T (A x) # $$ # which can be solved so that # $$ # \alpha = r(x) = \frac{x^T A x}{x^T x}. # $$ # # ### Power Iteration # # Power iteration is a straight forward approach to finding the eigenvector of the largest eigenvalue of $A$. 
The basic idea is that the sequence # $$ #     \frac{x}{||x||}, \frac{Ax}{||Ax||}, \frac{A^2x}{||A^2x||}, \frac{A^3x}{||A^3x||}, \ldots # $$ # will converge (although very slowly) to the desired eigenvector. # # We implement this method by initializing the algorithm with some vector $v$ with $||v|| = 1$.  We then apply the sequence of multiplications. # # The reason why this works can be seen by considering the initial vector $v$ as a linear combination of the orthonormal eigenvectors (which we have assumed exist) such that # # $$ #     v^{(0)} = a_1 q_1 + a_2 q_2 + \cdots + a_m q_m. # $$ # # Multiplying $v^{(0)}$ by $A$ then leads to # # $$\begin{aligned} #     Av^{(0)} = v^{(1)} &= a_1 A q_1 + a_2 A q_2 + \cdots + a_m A q_m \\ #     &= c_1 (a_1 \lambda_1 q_1 + a_2 \lambda_2 q_2 + \cdots + a_m \lambda_m q_m) \\ # \end{aligned}$$ # # where $c_1$ is some constant due to the fact the eigenvectors are not uniquely specified.  Repeating this $k$ times we have # # $$\begin{aligned} #     A^k v^{(0)} = v^{(k)} &= a_1 A^k q_1 + a_2 A^k q_2 + \cdots + a_m A^k q_m \\ #     &= c_k (a_1 \lambda_1^k q_1 + a_2 \lambda_2^k q_2 + \cdots + a_m \lambda_m^k q_m) \\ #     &= c_k \lambda_1^k \left(a_1 q_1 + a_2 \frac{\lambda_2^k}{\lambda_1^k} q_2 + \cdots + a_m \frac{\lambda_m^k}{\lambda_1^k} q_m \right) # \end{aligned}$$ # # Since $\lambda_1 > \lambda_i$ for $i = 2, \ldots, m$, the ratios $\frac{\lambda_i^k}{\lambda_1^k}$ vanish as $k \rightarrow \infty$ and the sequence therefore converges to a multiple of the dominant eigenvector $q_1$. # ### Inverse Iteration # # Inverse iteration uses a similar approach with the difference being that we can use it to find any of the eigenvectors for the matrix $A$. # # Consider the matrix # # $$ #     (A - \mu I)^{-1}, # $$ # # the eigenvectors of this matrix are the same as $A$ with the eigenvalues # # $$ #     (\lambda_j - \mu)^{-1} # $$ # # where $\lambda_j$ are the eigenvalues of $A$. # # If $\mu$ is close to a particular $\lambda_j$, say $\lambda_J$, then # # $$ #     (\lambda_J - \mu)^{-1} # $$ # # will be larger than any of the other $(\lambda_j - \mu)^{-1}$.  In this way we effectively have picked out the eigenvalue we want to consider in the power iteration! 
# # ### Rayleigh Quotient Iteration # # By themselves the above approaches are not particularly useful but combining them we can iterate back and forth to find the eigenvalue, eigenvector pair: # 1. Compute the Rayleigh quotient and find an estimate for $\lambda_j$ # 1. Compute one step of inverse iteration to approximate $x_j$ # 1. Repeat... # + import numpy import matplotlib.pyplot as plt m = 3 A = numpy.array([[2, 1, 1], [1, 3, 1], [1, 1, 4]]) num_steps = 10 v = numpy.empty((num_steps, m)) lam = numpy.empty(num_steps) v[0, :] = numpy.array([1, 1, 1]) v[0, :] = v[0, :] / numpy.linalg.norm(v[0, :], ord=2) lam[0] = numpy.dot(v[0,:], numpy.dot(A, v[0, :])) for k in range(1, num_steps): w = numpy.linalg.solve(A - lam[k-1] * numpy.identity(m), v[k-1, :]) v[k, :] = w / numpy.linalg.norm(w, ord=2) lam[k] = numpy.dot(v[k,:], numpy.dot(A, v[k, :])) fig = plt.figure() axes = fig.add_subplot(1, 1, 1) axes.semilogy(range(10), numpy.abs(lam - numpy.linalg.eigvals(A)[0]), 'o') axes.set_title("Convergence of eigenvalue") axes.set_xlabel("Step") axes.set_ylabel("Error") plt.show() # - # ## QR Algorithm # # The most basic use of a $QR$ factorization to find eigenvalues is to iteratively compute the factorization and multiply the resulting $Q$ and $R$ in the reverse order. This sequence will eventually converge to the Schur decomposition of the matrix $A$. # + # %precision 6 m = 3 A = numpy.array([[2, 1, 1], [1, 3, 1], [1, 1, 4]]) MAX_STEPS = 10 for i in range(MAX_STEPS): Q, R = numpy.linalg.qr(A) A = numpy.dot(R, Q) print() print("A(%s) =" % (i)) print(A) print() print("True eigenvalues: ") print(numpy.linalg.eigvals(A)) print() print("Computed eigenvalues: ") for i in range(m): print(A[i, i]) # - # So why does this work? The first step is to find the $QR$ factorization of $A^{(k-1)}$ which is equivalent to finding # # $$ # (Q^{(k)})^T A^{(k-1)} = R^{(k)} # $$ # # and multiplying on the right leads to # # $$ # (Q^{(k)})^T A^{(k-1)} Q^{(k)} = R^{(k)} Q^{(k)}. 
# $$ # # In this way we can see that this is a similarity transformation of the matrix $A^{(k-1)}$ since the $Q^{(k)}$ is an orthogonal matrix ($Q^{-1} = Q^T$). This of course is not a great idea to do directly but works great in this case as we iterate to find the upper triangular matrix $R^{(k)}$ which is exactly where the eigenvalues appear. # # In practice this basic algorithm is modified to include a few additions: # # 1. Before starting the iteration $A$ is reduced to tridiagonal form. # 1. Motivated by the inverse power iteration we observed we instead consider a shifted matrix $A^{(k)} - \mu^{(k)} I$ for factoring. The $\mu$ picked is related to the estimate given by the Rayleigh quotient. Here we have # # $$ # \mu^{(k)} = \frac{(q_m^{(k)})^T A q_m^{(k)}}{(q_m^{(k)})^T q_m^{(k)}} = (q_m^{(k)})^T A q_m^{(k)}. # $$ # # 1. Deflation is used to reduce the matrix $A^{(k)}$ into smaller matrices once (or when we are close to) finding an eigenvalue to simplify the problem. # # This has been the standard approach until recently for finding eigenvalues of a matrix. # ## Alternatives # # ### Jacobi # # Jacobi iteration employs the idea that we know the eigenvalues of a matrix of size equal to or less than 4 (we know the roots of the characteristic polynomial directly). Jacobi iteration therefore attempts to break the matrix down into at most 4 by 4 matrices along the diagonal via a series of similarity transformations based on only diagonalizing sub-matrices 4 by 4 or smaller. # # ### Bisection # # It turns out if you do not want all of the eigenvalues of a matrix that using a bisection method to find some subset of the eigenvalues is often the most efficient way to get these. This avoids the pitfall of trying to find the eigenvalues via other root-finding approaches by only needing evaluations of the function and if a suitable initial guess is provided can find the eigenvalue quickly that is closest to the initial bracket provided. 
# # ### Divide-and-conquer # # This algorithm is actually the one used most often used if both eigenvalues and eigenvectors are needed and performs up to twice as fast as the $QR$ approach. The basic idea is to split the matrix into two pieces at every iteration by introducing zeros on the appropriate off-diagonals which neatly divides the problem into two pieces.
Lectures/Lecture 21/Lecture21_LA_stability_eigen.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ETL # ### Cargar Librerias import numpy as np import pandas as pd import warnings warnings.filterwarnings("ignore") # ### Cargar Datos # Datos extraidos de la página: https://www.kaggle.com/wendykan/lending-club-loan-data loan_data_backup = pd.read_csv('../1_Data/loan.csv') loan_data = loan_data_backup.copy() #Se realiza una copia de los datos para ser modificados pd.options.display.max_columns = None # Modificación de las opciones de pandas para mostrar todas las columnas del dataframe loan_data.head(3) loan_data.shape # 145 variables y 2.260.668 muestras (filas) # ### Transformación de Datos # Verificación de campos loan_data.columns.values # Se deberían entender las 145 variables que contiene la base de datos, para lo cual se requiere un experto en el dominio de conocimiento. Para el siguiene análisis se dividirán en varaibles continuas y discretas. Para el análisis de riesgo y con el fin de que los modelos puedan ser explicativos, se transformaran a variables discretas tipo dummy # ### Principales variables a usar # # #### Variables 'continuas' # - emp_lenght se transforma a emp_lenght_int: años de experiencia laboral. 
Variable categoria que se transformará a números para ser input de los modelos # - earliest_cr_line se transforma a earliest_cr_line_date y finalmente a mths_since_earliest_cr_line: Fecha de la primera linea de crédito se transforma luego a meses desde esa primera linea de crédito hasta la fecha de dic 2017 (suponiendo que el analisi sse hace en dicha fecha) # - term: duración en meses del crédito # - issue_d se transforma a issue_d_date y luego a mths_since_issue_d: Se transforma de fecha a meses hasta una fecha de corte # # #### Variables 'Discretas' # se crean varaibles dummy de las variables: # # grade, sub_grade, home_ownership, verification_status, loan_status, purpose, addr_state, initial_list_status # # ### Transformación de Algunas Variables Continuas loan_data['emp_length'].unique() # Displays unique values of a column. #Se usa str.replace para dejar solo los numeros (aún como strings). \ es un caracter de escape loan_data['emp_length_int'] = loan_data['emp_length'].str.replace('\+ years', '') loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace('< 1 year', str(0)) loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace('nan', str(0)) loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace(' years', '') loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace(' year', '') loan_data['emp_length_int'].unique() type(loan_data['emp_length_int'][0]) #verificar el tipo de datos => sabemos que es string loan_data['emp_length_int'] = pd.to_numeric(loan_data['emp_length_int']) #transformar de str a num print(type(loan_data['emp_length_int'][0])) loan_data['earliest_cr_line'][0:5] type(loan_data['earliest_cr_line'][0]) loan_data['earliest_cr_line_date'] = pd.to_datetime(loan_data['earliest_cr_line']) print(type(loan_data['earliest_cr_line_date'][0])) # Se transforma de tipo objeto a fecha con formato estandar de pandas type(loan_data['earliest_cr_line_date'][0]) loan_data['earliest_cr_line_date'].describe() 
# Para el modelo de crédito es más util el calculo de dias, meses o periodos entre fechas que el dato de la fecha, se calculará entonces los días entre la fecha y la fecha final del dataset (o fecha de uso del dataset) # Asumiendo que estamos en dic de 2017 #Se realiza una resta entre fechas y las respuesta es en días. estos dias se pasan a numeros y luego #se usa función de numpy para transformar los días a meses. Finalmente se redondea a entero loan_data['mths_since_earliest_cr_line'] = round(pd.to_numeric((pd.to_datetime('2017-12-01') - loan_data['earliest_cr_line_date']) / np.timedelta64(1, 'M'))) loan_data['mths_since_earliest_cr_line'][0:5] loan_data['mths_since_earliest_cr_line'].describe() loan_data['term'][0:5] loan_data['term'].describe() loan_data['term_int'] = pd.to_numeric(loan_data['term'].str.replace(' months', '')) type(loan_data['term_int'][0]) loan_data['issue_d'][0:3] # Asumiendo que estamos en dic 2017 loan_data['issue_d_date'] = pd.to_datetime(loan_data['issue_d']) loan_data['mths_since_issue_d'] = round(pd.to_numeric((pd.to_datetime('2017-12-01') - loan_data['issue_d_date']) / np.timedelta64(1, 'M'))) loan_data['mths_since_issue_d'].describe() # ### Transformación de Algunas Variables Discretas loan_data['grade'].unique() loan_data_dummies = [pd.get_dummies(loan_data['grade'], prefix = 'grade', prefix_sep = ':'), pd.get_dummies(loan_data['sub_grade'], prefix = 'sub_grade', prefix_sep = ':'), pd.get_dummies(loan_data['home_ownership'], prefix = 'home_ownership', prefix_sep = ':'), pd.get_dummies(loan_data['verification_status'], prefix = 'verification_status', prefix_sep = ':'), pd.get_dummies(loan_data['loan_status'], prefix = 'loan_status', prefix_sep = ':'), pd.get_dummies(loan_data['purpose'], prefix = 'purpose', prefix_sep = ':'), pd.get_dummies(loan_data['addr_state'], prefix = 'addr_state', prefix_sep = ':'), pd.get_dummies(loan_data['initial_list_status'], prefix = 'initial_list_status', prefix_sep = ':')] # Se crean varaibles dummy con 
las 8 varaibles originales y se coloca un prefijo para no perder el nombre de la variable # Note that we are using a particular naming convention for all variables: original variable name, colon, category name. #los datos se guardan en una lista de python type(loan_data_dummies) loan_data_dummies = pd.concat(loan_data_dummies, axis = 1) #se transforma la lista en un data frame print(loan_data_dummies.head()) print(loan_data_dummies.shape) # Las 8 variables categoricas originales se transformaron en 127 varibles tipo Dummy. Se debe tener en cuenta que no se ha eliminado una de ellas en cada dummy lo cual se debe hacer en la modelación loan_data = pd.concat([loan_data, loan_data_dummies], axis = 1) #las nueva variables se adicionan al dataframe original # ### Se depuran los datos vacios pd.options.display.max_rows = None # Opción para que Panda mueste todas las filas de los dataframes loan_data.isnull().sum() #Verificar si existen valores nulos para cada variable pd.options.display.max_rows = 100 # Solo mostrar 10 filas loan_data['total_rev_hi_lim'].fillna(loan_data['funded_amnt'], inplace=True) # se llenan los datos vacios de total_rev_hi_lim con los datos de funded_amnt para la misma fila loan_data['annual_inc'].fillna(loan_data['annual_inc'].mean(), inplace=True) #los datos vacios de annual_inc se llenan con la media loan_data['mths_since_earliest_cr_line'].fillna(0, inplace=True) loan_data['acc_now_delinq'].fillna(0, inplace=True) loan_data['total_acc'].fillna(0, inplace=True) loan_data['pub_rec'].fillna(0, inplace=True) loan_data['open_acc'].fillna(0, inplace=True) loan_data['inq_last_6mths'].fillna(0, inplace=True) loan_data['delinq_2yrs'].fillna(0, inplace=True) loan_data['emp_length_int'].fillna(0, inplace=True) # Se llenan los datos vacios con cero # ### Preparación de la Variable Dependiente loan_data['loan_status'].unique() loan_data['loan_status'].value_counts() loan_data['loan_status'].value_counts() / loan_data['loan_status'].count() # Se transforma a 
una varaible binaria - good - bad #las categorias 'Charged Off', 'Default','Does not meet the credit policy. Status:Charged Off', 'Late (31-120 days)' #Se consideran Bad=0, las demás o estan al día o están pagadas se condireran good=1 loan_data['good_bad'] = np.where(loan_data['loan_status'].isin(['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'Late (31-120 days)']), 0, 1) loan_data['good_bad'].unique()
2_Train_Model/1_ETL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="urkaWe-kSPNy" colab_type="code" outputId="980681fa-eacd-407b-9daa-5a9bbde1cc23" colab={"base_uri": "https://localhost:8080/", "height": 72} # !pip install -q tensorflow==2.0.0-beta1 # + id="EJxH9yutSfuA" colab_type="code" colab={} import tensorflow as tf import numpy as np import librosa import librosa.display import matplotlib.pyplot as plt import pandas as pd import os # + id="9x29knKvSpyC" colab_type="code" outputId="560365c6-caec-4461-d087-029513bdb677" colab={"base_uri": "https://localhost:8080/", "height": 35} tf.__version__ # + id="S7zr0vAaTfZG" colab_type="code" outputId="cfe7e615-fa72-453a-cbb0-309bd15bb29d" colab={"base_uri": "https://localhost:8080/", "height": 54} data_root = tf.keras.utils.get_file('Audio', 'https://os.unil.cloud.switch.ch/fma/fma_small.zip' , extract=True) # + id="T2R_yQcKXlNB" colab_type="code" colab={} # !cp -r ~/.keras/datasets/fma_small /content # + id="RaJXAtnLxjNO" colab_type="code" outputId="ac8b421b-b22c-4279-cead-e24dcfbc3bf3" colab={"base_uri": "https://localhost:8080/", "height": 54} meta_data = tf.keras.utils.get_file('Meta' , 'https://os.unil.cloud.switch.ch/fma/fma_metadata.zip' , extract=True) # + id="4D9JJexGcVDA" colab_type="code" colab={} # !cp ~/.keras/datasets/fma_metadata/tracks.csv /content # + id="mT1ga4uncbM0" colab_type="code" outputId="b74b24e3-a84f-4d37-afbc-53bb1c1f524c" colab={"base_uri": "https://localhost:8080/", "height": 196} genres_df = pd.read_csv('genres.csv') genres_df.head() # + id="T6wv_dVmdRtA" colab_type="code" outputId="b25ac38e-8724-4f30-cbfc-6b530b119b42" colab={"base_uri": "https://localhost:8080/", "height": 258} tracks_df = pd.read_csv('tracks.csv' , index_col= 0 , header = [0,1]) keep_cols = [('set', 'split'), ('set', 'subset'),('track', 'genre_top')] df_all = tracks_df[keep_cols] 
df_all = df_all[df_all[('set', 'subset')] == 'small'] df_all['track_id'] = df_all.index df_all.head() # + id="hFyFF7T4dcBO" colab_type="code" outputId="773a3ebc-d1dc-4068-e341-97e557a7bd27" colab={"base_uri": "https://localhost:8080/", "height": 54} df_all[('track' , 'genre_top')].unique() # + id="agxWfNGljQuP" colab_type="code" colab={} def readAudioFile(trackid): track = '{:06d}'.format(trackid) return os.path.join('fma_small' , track[:3] , track + '.mp3') # + id="MtoAijdWjMSc" colab_type="code" colab={} def createSpectrogram(trackid): audio = readAudioFile(trackid) y, sr = librosa.load(audio , duration=2.97) spect = librosa.feature.melspectrogram(y=y, sr=sr) return spect # + id="hrUjpGP2kajj" colab_type="code" colab={} def plotSpectrogram(trackid): spectrogram = createSpectrogram(trackid) print(spectrogram.shape) librosa.display.specshow(spectrogram , y_axis='mel', x_axis='time') # + id="C4Bg7jpJlLWe" colab_type="code" colab={} def getTracks(audio_dir): tids = [] for _, dirnames, files in os.walk(audio_dir): if dirnames == []: tids.extend(int(file[:-4]) for file in files) return tids # + id="z4rSZPpclSkd" colab_type="code" colab={} track_ids = getTracks('fma_small') # + id="GN-zkCEpk2t8" colab_type="code" colab={} plotSpectrogram(3400) # + id="jpADQprkqZgq" colab_type="code" outputId="5d7ecd28-f465-4592-91c3-2a2794c9a63c" colab={"base_uri": "https://localhost:8080/", "height": 35} df_train = df_all[df_all[('set', 'split')]=='training'] df_valid = df_all[df_all[('set', 'split')]=='validation'] df_test = df_all[df_all[('set', 'split')]=='test'] print(df_train.shape, df_valid.shape, df_test.shape) # + id="XF5YTLbSiqXq" colab_type="code" colab={} def create_dataset(df): genres = [] X_spect = np.empty((0 , 128 , 128)) count = 0 for index, row in df.iterrows(): try: count += 1 track_id = int(row['track_id']) genre = str(row[('track', 'genre_top')]) spect = createSpectrogram(track_id) X_spect = np.append(X_spect, [spect], axis=0) genres.append(genre) if count % 100 == 
0: print("Currently processing: ", count) except: print("Not processed : " , count) continue y_arr = np.array(genres) return X_spect, y_arr # + id="2M09vJ7dUu4O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 163} outputId="2ed90cf8-8b38-4d7a-ad19-20e487e9f223" X_valid , y_valid = create_dataset(df_valid) # + id="vIW1dTX4q5we" colab_type="code" colab={} X_train = create_dataset(df_train) # + id="CbhFjgP_rQ4r" colab_type="code" outputId="e2c7e68e-d3ad-4926-8c4e-3c2f2e80206f" colab={"base_uri": "https://localhost:8080/", "height": 35} X_train.shape # + id="c9aggbz4rg9P" colab_type="code" colab={} label_dict = {'Electronic':0, 'Experimental':1, 'Folk':2, 'Hip-Hop':3, 'Instrumental':4,'International':5, 'Pop' :6, 'Rock': 7 } # + id="MFiZQblesK41" colab_type="code" colab={} genres = [] # + id="1IGILcotqgKe" colab_type="code" colab={} for label in y_valid: genres.append(label_dict[label]) # + id="Y_5p_WbplA1E" colab_type="code" colab={} y_valid = np.array(genres) # + id="5pFF_4MCqnzI" colab_type="code" colab={} labels = np.array(genres) # + id="t-9p1yunuHfb" colab_type="code" outputId="869fa652-312c-446b-8f6f-3c78470d8c60" colab={"base_uri": "https://localhost:8080/", "height": 33} labels.shape # + id="CUNjN3pDkvwN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3cadddd0-5a5d-444f-9aa5-45918879312d" y_valid.shape # + id="nqmzzlv4kzjC" colab_type="code" colab={} y_valid = tf.keras.utils.to_categorical(y_valid , 8) # + id="0U6vmLaiuRlt" colab_type="code" colab={} labels = tf.keras.utils.to_categorical(labels , 8) # + id="N-TxrOP1uYp6" colab_type="code" outputId="02fe34f1-7886-4524-da02-eda06218860c" colab={"base_uri": "https://localhost:8080/", "height": 33} labels[0] # + id="UL71HkacvcLb" colab_type="code" colab={} def getGenre(prob_vector): value = np.argmax(prob_vector) for genre , index in label_dict.items(): if index == value: return genre # + id="3NrTDI_svutQ" colab_type="code" 
outputId="7c09e824-e1f2-4e75-8f39-c996595931e4" colab={"base_uri": "https://localhost:8080/", "height": 35} print(getGenre([0., 0., 0., 1., 0., 0., 0., 0.])) # + id="-9Ymzapj3x2I" colab_type="code" colab={} labels = np.load('drive/My Drive/labels.npy') # + id="RgpIPDb-w94K" colab_type="code" outputId="fbbdebf1-9dec-4d07-e37b-c781326f3766" colab={"base_uri": "https://localhost:8080/", "height": 728} z = 0 fig , ax = plt.subplots(3 , 2 , figsize=(10 , 10)) fig.tight_layout() for i in range(0,3): for j in range(0,2): plt.subplot(3 , 2 , z+1) librosa.display.specshow(X_train[z]) plt.title(getGenre(labels[z])) z = z + 1 # + id="rxfBDeikwaPQ" colab_type="code" colab={} np.save('spectrograms.npy' , X_train) np.save('labels.npy' , labels) # + id="L37vvNO3EMgv" colab_type="code" colab={} # !cp labels.npy drive/My\ Drive/ # + id="uJC-Zfcu1Nzh" colab_type="code" colab={} X_valid = np.array([x.reshape((128 , 128 , 1)) for x in X_valid]) # + id="Tf9ClBoo2JmW" colab_type="code" outputId="b6f55af1-4c7b-4bdc-b55f-38cac450d376" colab={"base_uri": "https://localhost:8080/", "height": 35} X_valid.shape # + id="SQucaoac2040" colab_type="code" colab={} model = tf.keras.Sequential() input_shape = (128 , 128 , 1) model.add(tf.keras.layers.Conv2D(24 , (5 , 5) , strides=(1 , 1) , input_shape=input_shape)) model.add(tf.keras.layers.MaxPooling2D((4,2) , strides=(4 , 2))) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Conv2D(48 , (5 , 5) , padding='valid')) model.add(tf.keras.layers.MaxPooling2D((4 , 2) , strides=(4 , 2))) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Conv2D(48 , (5 , 5) , padding='valid')) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(64)) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(8)) model.add(tf.keras.layers.Activation('softmax')) # + 
id="Wbgb0Ij43Sb6" colab_type="code" outputId="52a980a5-b237-45d6-cbc7-cc27b13a3f4d" colab={"base_uri": "https://localhost:8080/", "height": 708} model.summary() # + id="TOoImYObWd3T" colab_type="code" colab={} callbacks = [ tf.keras.callbacks.EarlyStopping(patience=10, verbose=1), tf.keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=10, min_lr=0.00001, verbose=1), tf.keras.callbacks.ModelCheckpoint('model.h5', verbose=1, save_best_only=True, save_weights_only=True), ] # + id="xuVByM0U4-aP" colab_type="code" colab={} model.compile( optimizer='Adam', loss='categorical_crossentropy', metrics = ['accuracy'] ) # + id="ike-yRIi5TpY" colab_type="code" outputId="c2473d1e-eb2d-4a8c-ac33-572f8dcfd774" colab={"base_uri": "https://localhost:8080/", "height": 1000} model.fit( x=X_train, y=labels, epochs=50, batch_size=128, validation_data=(X_valid , y_valid), callbacks=callbacks, ) # + id="huECGjnR-3Jy" colab_type="code" colab={} for index , row in df_test.iterrows(): track_id = int(row['track_id']) gen1 = str(row[('track', 'genre_top')]) spect1 = createSpectrogram(track_id) break # + id="p7Ys369N_bCZ" colab_type="code" colab={} test = np.array(specter).reshape((128 , 128 , 1)) # + id="hwyUYKr6__r7" colab_type="code" colab={} test1 = np.expand_dims(test , axis=0) # + id="RwAODiCmAJum" colab_type="code" outputId="21e0c3d4-dc71-4bf8-b2f9-1ddf872ae755" colab={"base_uri": "https://localhost:8080/", "height": 35} test1.shape # + id="KUVYTCWX5nHW" colab_type="code" colab={} res = model.predict(test2) # + id="iL7VxkPE_8TO" colab_type="code" outputId="329fcfcc-3bdf-4a3a-ee72-27be7b335361" colab={"base_uri": "https://localhost:8080/", "height": 35} getGenre(res) # + id="g1RBNEHjC9cK" colab_type="code" colab={} def getGenreFromValue(value): for i , j in label_dict.items(): if j == value: return i # + id="03YEq9NJAPSc" colab_type="code" outputId="2e57dc84-6130-4c68-a0b3-bd2d396f6e27" colab={"base_uri": "https://localhost:8080/", "height": 308} for i in range(0,8): print('{} : 
{}\n'.format(getGenreFromValue(i) , res[0][i])) # + id="LXhjDvTkAVVD" colab_type="code" colab={} y, sr = librosa.load('Coldplay - Hymn For The Weekend (Official Video).mp3' , offset=120.0 , duration=2.97) specter = librosa.feature.melspectrogram(y=y, sr=sr) # + id="6Dq3dUrjBUun" colab_type="code" colab={} model.save('genre-model.h5') # + id="iRdID1qY4T21" colab_type="code" colab={} model = tf.keras.models.load_model('genre-model.h5') # + id="HgrtJu_34og2" colab_type="code" colab={} model.load_weights('model.h5') # + id="BDS38YI26RZJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ab872e50-8925-40e9-86f4-b0635e0995b2" model.outputs # + id="YJJENlB2FS6-" colab_type="code" colab={} tf.saved_model.save(model , "/tmp/genre/1/") # + id="-4Ir-HT9Fdgn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="d470c467-3a85-42e3-8fe2-6ad6146e2a51" # !saved_model_cli show --dir /tmp/genre/1 --tag_set serve --signature_def serving_default # + id="79uG1cTbHAn1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="173928e5-2372-4f71-f6d0-1be1de455014" loaded = tf.saved_model.load("/tmp/genre/1/") print(list(loaded.signatures.keys())) # + id="HMhsXAfpHM1Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b5ef5929-4903-4668-e256-03c997c05676" infer = loaded.signatures["serving_default"] print(infer.structured_outputs) # + id="TKbE78QUHtQo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="11bbc14c-daed-468e-d807-aabe1fb55e73" model.output_names[0] # + id="Q3blYzRcIJ36" colab_type="code" colab={} test2 = test1.astype('float32') # + id="ZAQFC0RFIZUX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7961a580-dbf9-4cc7-cb7c-72b7a200c0f7" test2.shape # + id="gBjDJ5YWHQgQ" colab_type="code" colab={} labeling = infer(tf.constant(test2))[model.output_names[0]] # + 
id="UjLttAzgHwLd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5706bf63-6a0e-4b18-a472-77e640afacd5" getGenre(labeling) # + id="9nSgxfLXIuxy" colab_type="code" colab={} # !nohup tensorflow_model_server \ # --rest_api_port=8501 \ # --model_name=genre \ # --model_base_path="/tmp/genre" >server.log 2>&1 # + id="pu9ZWO-MI8HV" colab_type="code" colab={} # !pip install -q requests import json import numpy import requests data = json.dumps({"signature_name": "serving_default", "instances": test2.tolist()}) headers = {"content-type": "application/json"} json_response = requests.post('http://localhost:8501/v1/models/genre:predict', data=data, headers=headers) predictions = numpy.array(json.loads(json_response.text)["predictions"]) # + id="QotL1ZYtLMCF" colab_type="code" colab={} import subprocess # + id="DTtq4Ft8JVcI" colab_type="code" colab={} # !echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | tee /etc/apt/sources.list.d/tensorflow-serving.list && curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add - # !apt update # + id="sOZHhMvLJ-gr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="34bfcfbf-211b-48be-d06c-476ebdd9d5ba" # !apt-get install tensorflow-model-server # + id="oYb50na-KIdE" colab_type="code" colab={} # !cp -r /tmp /content/drive/My\ Drive/ # + id="d5wFo3hDMs04" colab_type="code" colab={}
music_genre_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 第5章 深層学習に基づく統計的パラメトリック音声合成 # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/r9y9/ttslearn/blob/master/notebooks/ch05_DNNTTS.ipynb) # + [markdown] tags=[] # ## 準備 # - # ### Python version # !python -VV # ### ttslearn のインストール # %%capture try: import ttslearn except ImportError: # !pip install ttslearn import ttslearn ttslearn.__version__ # ### パッケージのインポート # %pylab inline # %load_ext autoreload # %autoreload import IPython from IPython.display import Audio import os import numpy as np import torch import librosa import librosa.display # シードの固定 from ttslearn.util import init_seed init_seed(1234) # ### 描画周りの設定 from ttslearn.notebook import get_cmap, init_plot_style, savefig cmap = get_cmap() init_plot_style() # ## 5.3 フルコンテキストラベルとは? # ### モノフォンラベル # + from nnmnkwii.io import hts import ttslearn from os.path import basename labels = hts.load(ttslearn.util.example_label_file(mono=True)) print(labels[:6]) # - # 秒単位に変換 # NOTE: 100ナノ秒単位: 100 * 1e-9 = 1e-7 for s,e,l in labels[:6]: print(s*1e-7, e*1e-7, l) # ### フルコンテキストラベル labels = hts.load(ttslearn.util.example_label_file(mono=False)) for start_time, end_time, context in labels[:6]: print(f"{start_time} {end_time} {context}") # ## 5.4 言語特徴量の抽出 # ### Open JTalk による言語特徴量の抽出 # + import pyopenjtalk pyopenjtalk.g2p("今日もいい天気ですね", kana=True) # - pyopenjtalk.g2p("今日もいい天気ですね", kana=False) labels = pyopenjtalk.extract_fullcontext("今日") for label in labels: print(label) # ### HTS 形式の質問ファイル qst_path = ttslearn.util.example_qst_file() # ! cat $qst_path | grep QS | head -1 # ! cat $qst_path | grep CQS | head -1 # ! head {ttslearn.util.example_qst_file()} # ! 
tail {ttslearn.util.example_qst_file()} # ### HTS 形式の質問ファイルの読み込み # + from nnmnkwii.io import hts import ttslearn binary_dict, numeric_dict = hts.load_question_set(ttslearn.util.example_qst_file()) # 1番目の質問を確認します name, ex = binary_dict[0] print("二値特徴量の数:", len(binary_dict)) print("数値特徴量の数:", len(numeric_dict)) print("1 つ目の質問:", name, ex) # - # ### フルコンテキストラベルからの数値表現への変換 # + from nnmnkwii.frontend import merlin as fe labels = hts.load(ttslearn.util.example_label_file()) feats = fe.linguistic_features(labels, binary_dict, numeric_dict) print("言語特徴量(音素単位)のサイズ:", feats.shape) # - feats # + [markdown] tags=[] # ### 言語特徴量をフレーム単位に展開 # - feats_phoneme = fe.linguistic_features(labels, binary_dict, numeric_dict, add_frame_features=False) feats_frame = fe.linguistic_features(labels, binary_dict, numeric_dict, add_frame_features=True) print("言語特徴量(音素単位)のサイズ:", feats_phoneme.shape) print("言語特徴量(フレーム単位)のサイズ:", feats_frame.shape) # ### 言語特徴量の可視化 (bonus) # + # 可視化用に正規化 in_feats = feats_frame / np.maximum(1, np.abs(feats_frame).max(0)) fig, ax = plt.subplots(figsize=(8,4)) mesh = ax.imshow(in_feats.T, aspect="auto", interpolation="none", origin="lower", cmap=cmap) fig.colorbar(mesh, ax=ax) ax.set_xlabel("Time [frame]") ax.set_ylabel("Context") plt.tight_layout() # - # ## 5.5 音響特徴量の抽出 # ### 対数基本周波数 # + from scipy.io import wavfile import pyworld from nnmnkwii.preprocessing.f0 import interp1d # 基本周波数を対数基本周波数へ変換する関数 def f0_to_lf0(f0): lf0 = f0.copy() nonzero_indices = np.nonzero(f0) lf0[nonzero_indices] = np.log(f0[nonzero_indices]) return lf0 # 音声ファイルの読み込み sr, x = wavfile.read(ttslearn.util.example_audio_file()) x = x.astype(np.float64) # DIO による基本周波数推定 f0, timeaxis = pyworld.dio(x, sr) # 基本周波数を対数基本周波数に変換 lf0 = f0_to_lf0(f0) # 対数基本周波数に対して線形補間 clf0 = interp1d(lf0, kind="linear") # 可視化 fig, ax = plt.subplots(figsize=(8, 3)) ax.plot(timeaxis, np.exp(lf0), linewidth=2, label="F0") ax.plot(timeaxis, np.exp(clf0), "--", linewidth=2, label="Continuous F0") ax.set_xlabel("Time [sec]") 
ax.set_xticks(np.arange(0.3, 1.4, 0.2)) ax.set_xlim(0.28, 1.43) ax.set_ylabel("Frequency [Hz]") ax.legend() plt.tight_layout() # 図5-6 savefig("fig/dnntts_cf0") # - # ### 有声/無声フラグ # + # DIO による基本周波数推定 f0, timeaxis = pyworld.dio(x, sr) # 有声/無声フラグ の計算 vuv = (f0 > 0).astype(np.float32) hop_length = int(sr * 0.005) fig, ax = plt.subplots(2, 1, figsize=(8,4)) librosa.display.waveplot(x, sr=sr, x_axis="time", ax=ax[0]) ax[1].plot(timeaxis, vuv) ax[1].set_ylim(-0.1, 1.1) ax[0].set_title("Waveform") ax[1].set_title("V/UV") ax[0].set_xlabel("Time [sec]") ax[0].set_ylabel("Amplitude") ax[1].set_xlabel("Time [sec]") ax[1].set_ylabel("Binary value") for a in ax: a.set_xlim(0.28, 1.43) a.set_xticks(np.arange(0.3, 1.4, 0.2)) plt.tight_layout() # 図5-7 savefig("fig/dnntts_vuv") # - # ### メルケプストラム # + import pysptk # DIO による基本周波数の推定 f0, timeaxis = pyworld.dio(x, sr) # CheapTrick によるスペクトル包絡の推定 # 返り値は、パワースペクトルであることに注意 (振幅が 2 乗されている) spectrogram = pyworld.cheaptrick(x, f0, timeaxis, sr) # 線形周波数軸をメル周波数尺度に伸縮し、その後ケプストラムに変換 # alpha は周波数軸の伸縮のパラメータを表します alpha = pysptk.util.mcepalpha(sr) # FFT 長は、サンプリング周波数が 48kHz の場合は 2048 fftlen = pyworld.get_cheaptrick_fft_size(sr) # メルケプストラムの次元数は、 mgc_order + 1 となります # NOTE: メル一般化ケプストラム (Mel-generalized cepstrum) の頭文字を取り、 # 変数名を mgc とします mgc_order = 59 mgc = pysptk.sp2mc(spectrogram, mgc_order, alpha) # メルケプストラムから元のスペクトル包絡を復元 # スペクトルの次元数は、 fftlen//2 + 1 = 1025 spectrogram_reconstructed = pysptk.mc2sp(mgc, alpha, fftlen) # 可視化 hop_length = int(sr * 0.005) fig, ax = plt.subplots(3, 1, figsize=(8,8)) ax[0].set_title("Mel-cepstrum") ax[1].set_title("Reconstructed spectral envelope from Mel-cepstrum") ax[2].set_title("Spectral envelope of natural speech") mesh = librosa.display.specshow(mgc.T, sr=sr, hop_length=hop_length, x_axis="time", cmap=cmap, ax=ax[0]) fig.colorbar(mesh, ax=ax[0]) ax[0].set_yticks(np.arange(mgc_order+2)[::10]) log_sp_reconstructed = librosa.power_to_db(np.abs(spectrogram_reconstructed), ref=np.max) mesh = 
librosa.display.specshow(log_sp_reconstructed.T, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[1]) fig.colorbar(mesh, ax=ax[1], format="%+2.f dB") log_sp = librosa.power_to_db(np.abs(spectrogram), ref=np.max) mesh = librosa.display.specshow(log_sp.T, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[2]) fig.colorbar(mesh, ax=ax[2], format="%+2.f dB") ax[1].set_ylim(0, 12000) ax[2].set_ylim(0, 12000) for a in ax: a.set_xlabel("Time [sec]") a.set_xlim(0.28, 1.43) a.set_xticks(np.arange(0.3, 1.4, 0.2)) ax[0].set_ylabel("Mel channel") ax[1].set_ylabel("Frequency [Hz]") ax[2].set_ylabel("Frequency [Hz]") plt.tight_layout() # 図5-8 savefig("fig/dnntts_mcep_reconstructed") # - print("圧縮率:", spectrogram.shape[1]/mgc.shape[1]) # ### 帯域非周期性指標 # + # DIO による基本周波数の推定 f0, timeaxis = pyworld.dio(x, sr) # D4C による非周期性指標の推定 aperiodicity= pyworld.d4c(x, f0, timeaxis, sr) # 帯域別の非周期性指標に圧縮 bap = pyworld.code_aperiodicity(aperiodicity, sr) # 可視化 hop_length = int(sr * 0.005) fig, ax = plt.subplots(2, 1, figsize=(8,6)) mesh = librosa.display.specshow(20*np.log10(aperiodicity).T, sr=sr, hop_length=hop_length, x_axis="time", y_axis="linear", cmap=cmap, ax=ax[0]) ax[0].set_title("Aperiodicity") fig.colorbar(mesh, ax=ax[0], format="%+2.f dB") mesh = librosa.display.specshow(bap.T, sr=sr, hop_length=hop_length, x_axis="time", cmap=cmap, ax=ax[1]) fig.colorbar(mesh, ax=ax[1], format="%+2.f dB") ax[1].set_title("Band-aperiodicity") for a in ax: a.set_xlabel("Time [sec]") a.set_ylabel("Frequency [Hz]") a.set_xlim(0.28, 1.43) a.set_xticks(np.arange(0.3, 1.4, 0.2)) ax[1].set_yticks(np.arange(5+1)) ax[1].set_ylabel("Frequency band") plt.tight_layout() # 図5-9 savefig("fig/dnntts_bap") # - print("圧縮率:", aperiodicity.shape[1]/bap.shape[1]) # ### 動的特徴量 def compute_delta(x, w): y = np.zeros_like(x) # 特徴量の次元ごとに動的特徴量を計算 for d in range(x.shape[1]): y[:, d] = np.correlate(x[:, d], w, mode="same") return y # + import librosa # スペクトル包絡の推定 f0, timeaxis = 
pyworld.dio(x, sr) spectrogram = pyworld.cheaptrick(x, f0, timeaxis, sr) # パワースペクトルを対数に変換 spectrogram = librosa.power_to_db(spectrogram, ref=np.max) # 動的特徴量の計算 delta_window1 = [-0.5, 0.0, 0.5] # 1 次動的特徴量に対する窓 delta_window2 = [1.0, -2.0, 1.0] # 2 次動的特徴量に対する窓 # 1 次動的特徴量 delta = compute_delta(spectrogram, delta_window1) # 2 次動的特徴量 deltadelta = compute_delta(spectrogram, delta_window2) # スペクトル包絡に対して動的特徴量を計算して可視化 hop_length = int(sr * 0.005) fig, ax = plt.subplots(3, 1, figsize=(8,8)) ax[0].set_title("Static features") ax[1].set_title("Dynamic features (1st order)") ax[2].set_title("Dynamic features (2nd order)") mesh = librosa.display.specshow(spectrogram.T, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[0]) fig.colorbar(mesh, ax=ax[0], format="%+2.f dB") mesh = librosa.display.specshow(delta.T, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[1]) fig.colorbar(mesh, ax=ax[1], format="%+2.f dB") mesh = librosa.display.specshow(deltadelta.T, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[2]) fig.colorbar(mesh, ax=ax[2], format="%+2.f dB") for a in ax: a.set_xlabel("Time [sec]") a.set_ylabel("Frequency [Hz]") a.set_ylim(0, 8000) a.set_xlim(0.28, 1.43) a.set_xticks(np.arange(0.3, 1.4, 0.2)) plt.tight_layout() # 図5-10 savefig("fig/dnntts_dynamic_features") # - # ### 音響特徴量の結合 # + from nnmnkwii.preprocessing import delta_features # WORLD による音声パラメータの推定 f0, timeaxis = pyworld.dio(x, sr) spectrogram = pyworld.cheaptrick(x, f0, timeaxis, sr) aperiodicity = pyworld.d4c(x, f0, timeaxis, sr) # スペクトル包絡をメルケプストラムに変換 mgc_order = 59 alpha = pysptk.util.mcepalpha(sr) mgc = pysptk.sp2mc(spectrogram, mgc_order, alpha) # 有声/無声フラグの計算 vuv = (f0 > 0).astype(np.float32) # 連続対数基本周波数系列 lf0 = interp1d(f0_to_lf0(f0), kind="linear") # 帯域非周期性指標 bap = pyworld.code_aperiodicity(aperiodicity, sr) # 基本周波数と有声/無声フラグを2次元の行列の形にしておく lf0 = lf0[:, np.newaxis] if len(lf0.shape) == 1 else lf0 vuv = vuv[:, np.newaxis] if 
len(vuv.shape) == 1 else vuv # 動的特徴量を計算するための窓 windows = [ [1.0], # 静的特徴量に対する窓 [-0.5, 0.0, 0.5], # 1次動的特徴量に対する窓 [1.0, -2.0, 1.0], # 2次動的特徴量に対する窓 ] # 静的特徴量と動的特徴量を結合した特徴量の計算 mgc = delta_features(mgc, windows) lf0 = delta_features(lf0, windows) bap = delta_features(bap, windows) # すべての特徴量を結合した特徴量を作成 feats = np.hstack([mgc, lf0, vuv, bap]) print(f"メルケプストラムの次元数: {mgc.shape[1]}") print(f"連続対数基本周波数の次元数: {lf0.shape[1]}") print(f"有声 / 無声フラグの次元数: {vuv.shape[1]}") print(f"帯域非周期性指標の次元数: {bap.shape[1]}") print(f"結合された音響特徴量の次元数: {feats.shape[1]}") # - # ## 5.6 音声波形の合成 # + from nnmnkwii.paramgen import mlpg from IPython.display import Audio import IPython from ttslearn.dnntts.multistream import get_windows, split_streams from ttslearn.dsp import world_spss_params # 音声ファイルの読み込み sr, x = wavfile.read(ttslearn.util.example_audio_file()) x = x.astype(np.float64) # 音響特徴量抽出のパラメータ mgc_order = 59 alpha = pysptk.util.mcepalpha(sr) fftlen = pyworld.get_cheaptrick_fft_size(sr) # 音響特徴量の抽出 feats = world_spss_params(x, sr, mgc_order) # パラメータ生成に必要な特徴量の分散 # 第6章で解説しますが、実際には学習データ全体に対して計算します feats_var = np.var(feats, axis=1) # 結合された特徴量から各特徴量の分離 stream_sizes = [(mgc_order+1)*3, 3, 1, pyworld.get_num_aperiodicities(sr)*3] mgc, lf0, vuv, bap = split_streams(feats, stream_sizes) start_ind = np.hstack(([0], np.cumsum(stream_sizes)[:-1])) end_ind = np.cumsum(stream_sizes) # パラメータ生成に必要な、動的特徴量の計算に利用した窓 windows = get_windows(num_window=3) # パラメータ生成 mgc = mlpg(mgc, feats_var[start_ind[0]:end_ind[0]], windows) lf0 = mlpg(lf0, feats_var[start_ind[1]:end_ind[1]], windows) bap = mlpg(bap, feats_var[start_ind[3]:end_ind[3]], windows) # メルケプストラムからスペクトル包絡への変換 spectrogram = pysptk.mc2sp(mgc, alpha, fftlen) # 連続対数基本周波数から基本周波数への変換 f0 = lf0.copy() f0[vuv < 0.5] = 0 f0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)]) # 帯域非周期指標から非周期性指標への変換 aperiodicity = pyworld.decode_aperiodicity(bap.astype(np.float64), sr, fftlen) # WORLD による音声波形の合成 y = pyworld.synthesize( f0.flatten().astype(np.float64), spectrogram.astype(np.float64), 
aperiodicity.astype(np.float64), sr ) # オーディオプレイヤーの表示 IPython.display.display(Audio(x.astype(np.float32), rate=sr)) IPython.display.display(Audio(y.astype(np.float32), rate=sr)) # 可視化 fig, ax = plt.subplots(2, 1, figsize=(8,4), sharey=True) ax[0].set_title("Natural speech") ax[1].set_title("Reconstructed speech by acoustic features") librosa.display.waveplot(x.astype(np.float32), sr, ax=ax[0]) librosa.display.waveplot(y.astype(np.float32), sr, ax=ax[1]) for a in ax: a.set_xlabel("Time [sec]") a.set_ylabel("Amplitude") plt.tight_layout() # + n_fft = 1024 frame_shift = int(sr * 0.005) X = librosa.stft(x.astype(np.float32), n_fft=n_fft, win_length=n_fft, hop_length=frame_shift, window="hann") logX = librosa.amplitude_to_db(np.abs(X), ref=np.max) Y = librosa.stft(y.astype(np.float32), n_fft=n_fft, win_length=n_fft, hop_length=frame_shift, window="hann") log_Y = librosa.amplitude_to_db(np.abs(Y), ref=np.max) fig, ax = plt.subplots(2, 1, figsize=(8, 6)) ax[0].set_title("Natural spectrogram") ax[1].set_title("Reconstructed spectrogram from acoustic features") mesh = librosa.display.specshow(logX, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[0]) fig.colorbar(mesh, ax=ax[0], format="%+2.f dB") mesh = librosa.display.specshow(log_Y, sr=sr, hop_length=hop_length, x_axis="time", y_axis="hz", cmap=cmap, ax=ax[1]) fig.colorbar(mesh, ax=ax[1], format="%+2.f dB") for a in ax: a.set_xlabel("Time [sec]") a.set_ylabel("Frequency [Hz]") a.set_ylim(0, 8000) a.set_xlim(0.28, 1.43) a.set_xticks(np.arange(0.3, 1.4, 0.2)) plt.tight_layout() # 図5-13 savefig("fig/dnntts_waveform_reconstruction")
notebooks/ch05_DNNTTS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fit halo mass to shear profile: 1. ideal data # # _the LSST-DESC CLMM team_ # # # This notebook demonstrates how to use `clmm` to estimate a WL halo mass of a galaxy cluster in the ideal case: i) all galaxies on a single source plane, ii) no redshift errors, iii) no shape noise. The steps below correspond to: # - Setting things up, with the proper imports. # - Generating an ideal mock dataset. # - Computing the binned reduced tangential shear profile for two different binning scheme. # - Setting up the model to be fitted to the data. # - Perform a simple fit using NumCosmo tools to compute the best-fit and the Fisher Matrix, and visualize the results. # # Note that this notebook is equivalent to `Example1_Fit_Halo_Mass_to_Shear_Catalog.ipynb` except for the statistical analysis, where here we use some NumCosmo tools. # ## Setup # First, we import some standard packages. # + # For NumCosmo import os import sys import gi gi.require_version('NumCosmo', '1.0') gi.require_version('NumCosmoMath', '1.0') from gi.repository import GObject from gi.repository import NumCosmo as Nc from gi.repository import NumCosmoMath as Ncm os.environ['CLMM_MODELING_BACKEND'] = 'nc' __name__ = "NcContext" Ncm.cfg_init () Ncm.cfg_set_log_handler (lambda msg: sys.stdout.write (msg) and sys.stdout.flush ()) # + try: import clmm except: import notebook_install notebook_install.install_clmm_pipeline(upgrade=False) import clmm import numpy as np import matplotlib.pyplot as plt from numpy import random # - # Next, we import `clmm`'s core modules. 
import clmm import clmm.dataops as da import clmm.galaxycluster as gc import clmm.theory as theory from clmm import Cosmology # Make sure we know which version we're using clmm.__version__ # We then import support modules for a specific data set. # `clmm` includes support modules that enable the user to generate mock data in a format compatible with `clmm`. # We also provide support modules for processing other specific data sets for use with `clmm`. # Any existing support module can be used as a template for creating a new support module for another data set. # If you do make such a support module, please do consider making a pull request so we can add it for others to use. # ## Making mock data from clmm.support import mock_data as mock np.random.seed(11) # To create mock data, we need to define a true cosmology. mock_cosmo = Cosmology(H0 = 70.0, Omega_dm0 = 0.27 - 0.045, Omega_b0 = 0.045, Omega_k0 = 0.0) # We now set some parameters for a mock galaxy cluster. cosmo = mock_cosmo cluster_id = "Awesome_cluster" cluster_m = 1.e15 # M200,m [Msun] cluster_z = 0.3 # Cluster's redshift src_z = 0.8 # Background galaxies' redshifts (single source plane) concentration = 4 ngals = 10000 # Number of galaxies cluster_ra = 0.0 cluster_dec = 0.0 # Then we use the `mock_data` support module to generate a new galaxy catalog. ideal_data = mock.generate_galaxy_catalog(cluster_m, cluster_z, concentration, cosmo, src_z, ngals=ngals) # This galaxy catalog is then converted to a `clmm.GalaxyCluster` object. gc_object = clmm.GalaxyCluster(cluster_id, cluster_ra, cluster_dec, cluster_z, ideal_data) # A `clmm.GalaxyCluster` object can be pickled and saved for later use. gc_object.save('mock_GC.pkl') # Any saved `clmm.GalaxyCluster` object may be read in for analysis. 
# + cl = clmm.GalaxyCluster.load('mock_GC.pkl') print("Cluster info = ID:", cl.unique_id, "; ra:", cl.ra, "; dec:", cl.dec, "; z_l :", cl.z) print("The number of source galaxies is :", len(cl.galcat)) # Lens position and redshift ra_l = cl.ra dec_l = cl.dec z = cl.z # Galaxies: ellipticities, position (RA, DEC), redshift e1 = cl.galcat['e1'] e2 = cl.galcat['e2'] ra_s = cl.galcat['ra'] dec_s = cl.galcat['dec'] z_s = cl.galcat['z'] # - # We can visualize the distribution of galaxies on the sky. # + fsize = 15 fig = plt.figure(figsize=(10, 6)) hb = fig.gca().hexbin(ra_s, dec_s, gridsize=50) cb = fig.colorbar(hb) cb.set_label('Number of sources in bin', fontsize=fsize) plt.gca().set_xlabel(r'$\Delta RA$', fontsize=fsize) plt.gca().set_ylabel(r'$\Delta Dec$', fontsize=fsize) plt.gca().set_title('Source Galaxies', fontsize=fsize) plt.show() # - # `clmm` separates cosmology-dependent and cosmology-independent functionality. # ## Deriving observables # # We first demonstrate a few of the procedures one can perform on data without assuming a cosmology. # ### Computing shear # `clmm.dataops.compute_tangential_and_cross_components` calculates the tangential and cross shears for each source galaxy in the cluster. theta, g_t, g_x = da.compute_tangential_and_cross_components(ra_l, dec_l, ra_s, dec_s, e1, e2, geometry="flat") # We can visualize the shear field at each galaxy location. # + fig = plt.figure(figsize=(10, 6)) fig.gca().loglog(theta, g_t, '.') plt.ylabel("reduced shear", fontsize=fsize) plt.xlabel("angular distance [rad]", fontsize=fsize) # - # ### Radially binning the data # Here we compare the reconstructed mass under two different bin definitions. # # Note binning would cause fitted mass to be slightly larger than input mass. The reason is that g(r), the tangential reduced shear along cluster radius, is a convex function -- the function value after binning would be larger, but the bias becomes smaller as bin number increases. 
bin_edges1 = da.make_bins(0.01, 3.7, 50) bin_edges2 = da.make_bins(0.01, 3.7, 10) # `clmm.dataops.make_radial_profile` evaluates the average shear of the galaxy catalog in bins of radius. res1 = da.make_radial_profile( [g_t, g_x, z_s], theta, "radians", "Mpc", bins=bin_edges1, cosmo=cosmo, z_lens=z, include_empty_bins=False) res2 = da.make_radial_profile( [g_t, g_x, z_s], theta, "radians", "Mpc", bins=bin_edges2, cosmo=cosmo, z_lens=z, include_empty_bins=False) # Note that we set `include_empty_bins=False` explicitly here even though it is the default behavior. Setting the argument to `True` would also return empty bins (that is, bins with *at most one* data point in them), which would have to be excluded manually when fitting, though it might be useful e.g., when combining datasets. To clarify the behavior, consider the following comparison: res_with_empty = da.make_radial_profile( [g_t, g_x, z_s], theta, "radians", "Mpc", bins=1000, cosmo=cosmo, z_lens=z, include_empty_bins=True) # this is the default behavior res_without_empty = da.make_radial_profile( [g_t, g_x, z_s], theta, "radians", "Mpc", bins=1000, cosmo=cosmo, z_lens=z, include_empty_bins=False) res_with_empty['n_src'].size, res_without_empty['n_src'].size # i.e., 108 bins have fewer than two sources in them and are excluded by default (when setting the random seed to 11). # For later use, we'll define some variables for the binned radius and tangential shear. # + gt_profile1 = res1['p_0'] r1 = res1['radius'] z1 = res1['p_2'] gt_profile2 = res2['p_0'] r2 = res2['radius'] z2 = res2['p_2'] # - # We visualize the radially binned shear for our mock galaxies. 
# + fig = plt.figure(figsize=(10, 6)) fig.gca().loglog(r1, gt_profile1, '.', label='50 bins') fig.gca().loglog(r2, gt_profile2, '+', markersize=15, label='10 bins') plt.legend(fontsize=fsize) plt.gca().set_title(r'Binned shear of source galaxies', fontsize=fsize) plt.gca().set_xlabel(r'$r\;[Mpc]$', fontsize=fsize) plt.gca().set_ylabel(r'$g_t$', fontsize=fsize) # - # You can also run `make_radial_profile` direct on a `clmm.GalaxyCluster` object. cl.compute_tangential_and_cross_components() # You need to add the shear components first cl.make_radial_profile("Mpc", bins=1000, cosmo=cosmo, include_empty_bins=False) pass # After running `clmm.GalaxyCluster.make_radial_profile` object, the object acquires the `clmm.GalaxyCluster.profile` attribute. for n in cl.profile.colnames: cl.profile[n].format = "%6.3e" cl.profile.pprint(max_width=-1) # ## Modeling the data # # We next demonstrate a few of the procedures one can perform once a cosmology has been chosen. # ### Choosing a halo model # Here we model using the OO inteface, we also use NumCosmo statistical framework to perform the analysis. Below we create an object based on NumCosmo NcmDataGaussDiag (Gaussian likelihood with a diagonal covariance matrix) object. To connect with the C interface the object must implement the methods: `do_get_length`, `do_get_dof`, `do_begin`, `do_prepare` and `do_mean_func`. The last method is responsible to compute the theoretical predictions. In the param_set_ftype calls below one can change between FREE/FIXED to include/exclude the parameter from the analysis. 
# + class GaussGammaT (Ncm.DataGaussDiag): z_cluster = GObject.Property (type = float, flags = GObject.PARAM_READWRITE) z_source = GObject.Property (type = Ncm.Vector, flags = GObject.PARAM_READWRITE) r_source = GObject.Property (type = Ncm.Vector, flags = GObject.PARAM_READWRITE) def __init__ (self, z_cluster, r_source, z_source, gt_profile, moo = None): Ncm.DataGaussDiag.__init__ (self, n_points = len (gt_profile)) self.moo = moo if moo else clmm.Modeling () assert len (gt_profile) == len (z_source) assert len (gt_profile) == len (r_source) self.set_size (len (gt_profile)) self.props.z_cluster = z_cluster self.props.z_source = Ncm.Vector.new_array (z_source) self.props.r_source = Ncm.Vector.new_array (r_source) self.y.set_array (gt_profile) self.sigma.set_all (1.0e-2) # Diagonal covariance matrix: all points have the same standard deviation value self.set_init (True) # Once the NcmDataGaussDiag is initialized, its parent class variable np is set with the n_points value. def do_get_length (self): return self.np def do_get_dof (self): return self.np def do_begin (self): pass def do_prepare (self, mset): self.moo.set_mset (mset) def do_mean_func (self, mset, vp): vp.set_array (self.moo.eval_reduced_tangential_shear (self.props.r_source.dup_array (), self.props.z_cluster, self.props.z_source.dup_array ())) return GObject.type_register (GaussGammaT) # - # Defining the model set (NcmMset), data set (NcmDataset) and NcmLikelihood objects to carry out a statistical analysis. # # The method `param_set_ftype` defines the parameters that can be fitted: `mid` - to which model set the parameter belongs to, `pid` - parameters' id, NcmParamType (FREE or FIXED) to say if the parameter will be fitted or not. 
# + moo1 = clmm.Modeling (massdef = 'mean', delta_mdef = 200, halo_profile_model = 'nfw') moo1.set_cosmo (cosmo) moo1.set_concentration (4.0) moo2 = clmm.Modeling (massdef = 'mean', delta_mdef = 200, halo_profile_model = 'nfw') moo2.set_cosmo (cosmo) moo2.set_concentration (4.0) ggt1 = GaussGammaT (z_cluster = cluster_z, r_source = r1, z_source = z1, gt_profile = gt_profile1, moo = moo1) ggt2 = GaussGammaT (z_cluster = cluster_z, r_source = r2, z_source = z2, gt_profile = gt_profile2, moo = moo2) mset1 = ggt1.moo.get_mset () mset2 = ggt2.moo.get_mset () #Parameters: cluster mass (log base 10) and concentration log10MDelta_pi = mset1.param_get_by_full_name ("NcHaloDensityProfile:log10MDelta") cDelta_pi = mset1.param_get_by_full_name ("NcHaloDensityProfile:cDelta") mset1.param_set_ftype (log10MDelta_pi.mid, log10MDelta_pi.pid, Ncm.ParamType.FREE) mset1.param_set_ftype (cDelta_pi.mid, cDelta_pi.pid, Ncm.ParamType.FIXED) mset1.prepare_fparam_map () mset2.param_set_ftype (log10MDelta_pi.mid, log10MDelta_pi.pid, Ncm.ParamType.FREE) mset2.param_set_ftype (cDelta_pi.mid, cDelta_pi.pid, Ncm.ParamType.FIXED) mset2.prepare_fparam_map () dset1 = Ncm.Dataset.new () dset1.append_data (ggt1) lh1 = Ncm.Likelihood.new (dset1) dset2 = Ncm.Dataset.new () dset2.append_data (ggt2) lh2 = Ncm.Likelihood.new (dset2) # - # ### Fitting parameters: Fisher Matrix # # The NcmFit object receives the NcmLikelihood and NcmMset objects. The user also indicates the fitting algorithm and the numerical differentiation method. # Functions `run` and `fisher` computes the best-fit and the fisher matrix, respectively. `log_info` prints the complete information about the data used, models and its parameters, and `log_covar` prints the best-fit along with the error-bar and the covariance matrix. 
# + fit1 = Ncm.Fit.new (Ncm.FitType.NLOPT, "ln-neldermead", lh1, mset1, Ncm.FitGradType.NUMDIFF_FORWARD) fit2 = Ncm.Fit.new (Ncm.FitType.NLOPT, "ln-neldermead", lh2, mset2, Ncm.FitGradType.NUMDIFF_FORWARD) fit1.run (Ncm.FitRunMsgs.SIMPLE) fit1.fisher () fit1.log_info () fit1.log_covar () fit2.run (Ncm.FitRunMsgs.SIMPLE) fit2.fisher () fit2.log_info () fit2.log_covar () # - # Next, we calculate the reduced tangential shear predicted by the two models. # + rr = np.logspace(-2, np.log10(5), 100) gt_model1 = moo1.eval_reduced_tangential_shear (rr, cluster_z, src_z) gt_model2 = moo2.eval_reduced_tangential_shear (rr, cluster_z, src_z) m_est1 = 10**(mset1.param_get (log10MDelta_pi.mid, log10MDelta_pi.pid)) m_est2 = 10**(mset2.param_get (log10MDelta_pi.mid, log10MDelta_pi.pid)) print ("mest1 % 22.15g mest2 % 22.15g" % (m_est1, m_est2)) # - # We visualize the two predictions of reduced tangential shear. # + fig = plt.figure(figsize=(10, 6)) fig.gca().scatter(r1, gt_profile1, color='orange', label='binned mock data 1, M_input = %.3e Msun/h' % cluster_m) fig.gca().plot(rr, gt_model1, color='orange', label='best fit model 1, M_fit = %.3e' % m_est1) fig.gca().scatter(r2, gt_profile2, color='blue', alpha=0.5, label='binned mock data 2, M_input = %.3e Msun/h' % cluster_m) fig.gca().plot(rr, gt_model2, color='blue', linestyle='--', alpha=0.5, label='best fit model 2, M_fit = %.3e' % m_est2) plt.semilogx() plt.semilogy() plt.legend() plt.xlabel('R [Mpc]', fontsize=fsize) plt.ylabel('reduced tangential shear', fontsize=fsize) # -
examples/NumCosmo/Example1_Fit_Halo_Mass_to_Shear_Catalog_NC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:wildfires] *
#     language: python
#     name: conda-env-wildfires-python3-ffmpeg
# ---

# ## Setup

# NOTE: every name used below (get_data, cube_plotting, figure_saver, mpl,
# iris, the dataset classes, ...) comes from this star import of the
# project-local `specific` module — nothing here is standard library.
from specific import *

# ### Get unshifted data

# +
# XXX:
# data_memory.clear()
(
    endog_data,
    exog_data,
    master_mask,
    filled_datasets,
    masked_datasets,
    land_mask,
) = get_data()
# -

# Datasets used without any temporal shifting.
selection_datasets = [
    AvitabileThurnerAGB(),
    Copernicus_SWI(),
    ERA5_Temperature(),
    ESA_CCI_Landcover_PFT(),
    GFEDv4(),
    HYDE(),
    WWLLN(),
]
# These datasets will potentially be shifted.
datasets_to_shift = [
    ERA5_DryDayPeriod(),
    MOD15A2H_LAI_fPAR(),
    VODCA(),
    GlobFluo_SIF(),
]
selection_datasets += datasets_to_shift

# Index [2] picks one element of dataset_times' return value —
# presumably an overlap/summary table; TODO confirm against its definition.
dataset_times(selection_datasets, lat_lon=True)[2]

# Visualise the combined validity mask.
_ = cube_plotting(master_mask.astype("float"))

# Print the overall time bounds covered by each masked dataset.
for pretty_name in sort_features(masked_datasets.pretty_variable_names):
    cube = masked_datasets.cubes[
        masked_datasets.pretty_variable_names.index(pretty_name)
    ]
    bounds = []
    for i in range(cube.shape[0]):
        bounds.extend(cube.coord("time").cell(i).bound)
    print(
        f"{pretty_name:<30} {ensure_datetime(min(bounds)):%Y-%m-%d} {ensure_datetime(max(bounds)):%Y-%m-%d}"
    )

# Plot each variable's temporal mean (log scale for the heavily skewed
# variables) together with its mask.
for pretty_name in sort_features(masked_datasets.pretty_variable_names):
    cube = masked_datasets.cubes[
        masked_datasets.pretty_variable_names.index(pretty_name)
    ]
    if any(
        name in pretty_name
        for name in ("Dry Day Period", "GFED4 BA", "popd", "lightning")
    ):
        log = True
    else:
        log = False
    cube_plotting(cube.collapsed("month_number", iris.analysis.MEAN), log=log)
    cube_plotting(cube.data.mask.astype("float"), title=f"{pretty_name} Mask")

# ### Get shifted data

# Same unpacking as above, but with the temporally-shifted variables included.
(
    endog_data,
    exog_data,
    master_mask,
    filled_datasets,
    masked_datasets,
    land_mask,
) = get_offset_data()

# ## Mapping

# Regions where both FAPAR and Dry Day Period are high.
with figure_saver("high_fapar_high_dry_day_period", sub_directory="map_plots"):
    mpl.rc("figure", figsize=(11, 4))
    constrained_map_plot(
        {"FAPAR": (0.36, None), "Dry Day Period": (18, None)},
        exog_data,
        master_mask,
        plot_variable="FAPAR",
        coastline_kwargs={"linewidth": 0.5},
    )

# Long antecedent dry periods combined with intermediate tree biomass.
with figure_saver("high_dry_day_period_18_medium_agbtree", sub_directory="map_plots"):
    mpl.rc("figure", figsize=(11, 4))
    constrained_map_plot(
        {"Dry Day Period -18 - -6 Month": (22, None), "AGB Tree": (0.9, 20)},
        exog_data,
        master_mask,
        plot_variable="AGB Tree",
        coastline_kwargs={"linewidth": 0.5},
    )

# Predominantly cropland cells.
with figure_saver("high_pftCrop", sub_directory="map_plots"):
    mpl.rc("figure", figsize=(11, 4))
    constrained_map_plot(
        {"pftCrop": (0.6, None)},
        exog_data,
        master_mask,
        plot_variable="pftCrop",
        coastline_kwargs={"linewidth": 0.5},
    )

# ## Correlation Plot

X_corr = exog_data

# Correlations restricted to (presumably) lags up to 9 months —
# TODO confirm filter_by_month(None, 9) semantics.
with figure_saver("corr_plot"):
    corr_plot(
        shorten_columns(
            X_corr[sort_features(filter_by_month(X_corr.columns, None, 9))]
        ),
        fig_kwargs={"figsize": (12, 8)},
    )

# Correlations across all columns.
with figure_saver("corr_plot_full"):
    corr_plot(
        shorten_columns(X_corr[sort_features(X_corr.columns)]),
        fig_kwargs={"figsize": (14, 10)},
    )

# +
# Sanity-check plot of the raw AGB dataset.
from wildfires.data.datasets import *

agb = AvitabileThurnerAGB()
_ = cube_plotting(agb.cube)
analyses/seasonality_paper_nn/all/variable_diagnostics.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

print('hello world!')

# Read the raw player statistics; header = TRUE treats the first row as
# column names (the previous `header = 1` relied on implicit coercion).
baseball2 <- read.table("baseball.txt", header = TRUE)
head(baseball2, n = 10L)

# Install only when the package is missing, instead of unconditionally
# re-installing on every run.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)

# Keep regular players only (more than 100 hits).
baseball <- baseball2[baseball2$hits > 100, ]

# Standardise the numeric performance columns to z-scores. The order of
# this vector fixes the order in which the *_z columns are appended, which
# matches the original one-assignment-per-column code.
z_vars <- c("age", "games", "at_bats", "runs", "hits", "doubles", "triples",
            "homeruns", "RBIs", "walks", "strikeouts", "on_base_pct",
            "slugging_pct", "caught_stealing", "bat_ave", "stolen_bases")
for (v in z_vars) {
  # as.numeric(scale(x)) == (x - mean(x)) / sd(x), as before.
  baseball[[paste0(v, "_z")]] <- as.numeric(scale(baseball[[v]]))
}

if (!requireNamespace("psych", quietly = TRUE)) install.packages("psych")
library(psych)

head(baseball)

z_cols <- paste0(z_vars, "_z")
# Peek at the first and last standardised columns
# (was the brittle positional selection baseball[, c(20, 35)]).
baseball[, z_cols[c(1, length(z_cols))]]

# Unrotated PCA on the standardised columns, selected by name rather than
# by the positional index 20:35 used previously (which silently breaks if
# baseball.txt gains or loses a column).
pca1 <- principal(baseball[, z_cols], nfactors = 8, rotate = "none", scores = TRUE)
pca1$values
pca1$loadings
# Title fixed: this is the baseball data, not the houses data.
plot(pca1$values, type = "b", main = "Scree Plot for Baseball Data")

name <- c("age", "games", "at_bats", "runs", "hits", "doubles", "triples",
          "homeruns", "RBIs", "walks", "strikeouts", "bat_ave", "on_base_pct",
          "slugging_pct", "stolen_bases", "caught_stealing")
# firstname, lastname and team are categorical variables and are excluded.
baseball$age_zz <- scale(baseball$age)  # z-score standardisation
Code/Chpter4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ReddyNick/Practical_DL/blob/spring21/homework01/homework_modules.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="3qYfewwns6uf"
import numpy as np

# + [markdown] id="qrZRtCBps6uk"
# **Module** is an abstract class which defines fundamental methods necessary for a training a neural network. You do not need to change anything here, just read the comments.

# + id="4mQeHAqOs6ul"
class Module(object):
    """
    Basically, you can think of a module as of a something (black box)
    which can process `input` data and produce `output` data.
    This is like applying a function which is called `forward`:

        output = module.forward(input)

    The module should be able to perform a backward pass: to differentiate the `forward` function.
    More, it should be able to differentiate it if is a part of chain (chain rule).
    The latter implies there is a gradient from previous step of a chain rule.

        gradInput = module.backward(input, gradOutput)
    """

    def __init__(self):
        self.output = None
        self.gradInput = None
        self.training = True

    def forward(self, input):
        """
        Takes an input object, and computes the corresponding output of the module.
        """
        return self.updateOutput(input)

    def backward(self, input, gradOutput):
        """
        Performs a backpropagation step through the module, with respect to the given input.

        This includes
         - computing a gradient w.r.t. `input` (is needed for further backprop),
         - computing a gradient w.r.t. parameters (to update parameters while optimizing).
        """
        self.updateGradInput(input, gradOutput)
        self.accGradParameters(input, gradOutput)
        return self.gradInput

    def updateOutput(self, input):
        """
        Computes the output using the current parameter set of the class and input.
        This function returns the result which is stored in the `output` field.

        Make sure to both store the data in `output` field and return it.
        """
        # The easiest case:
        # self.output = input
        # return self.output
        pass

    def updateGradInput(self, input, gradOutput):
        """
        Computing the gradient of the module with respect to its own input.
        This is returned in `gradInput`. Also, the `gradInput` state variable is updated accordingly.

        The shape of `gradInput` is always the same as the shape of `input`.

        Make sure to both store the gradients in `gradInput` field and return it.
        """
        # The easiest case:
        # self.gradInput = gradOutput
        # return self.gradInput
        pass

    def accGradParameters(self, input, gradOutput):
        """
        Computing the gradient of the module with respect to its own parameters.
        No need to override if module has no parameters (e.g. ReLU).
        """
        pass

    def zeroGradParameters(self):
        """
        Zeroes `gradParams` variable if the module has params.
        """
        pass

    def getParameters(self):
        """
        Returns a list with its parameters.
        If the module does not have parameters return empty list.
        """
        return []

    def getGradParameters(self):
        """
        Returns a list with gradients with respect to its parameters.
        If the module does not have parameters return empty list.
        """
        return []

    def train(self):
        """
        Sets training mode for the module.
        Training and testing behaviour differs for Dropout, BatchNorm.
        """
        self.training = True

    def evaluate(self):
        """
        Sets evaluation mode for the module.
        Training and testing behaviour differs for Dropout, BatchNorm.
        """
        self.training = False

    def __repr__(self):
        """
        Pretty printing. Should be overrided in every module if you want
        to have readable description.
        """
        return "Module"


# + [markdown] id="ladTp7lVs6um"
# # Sequential container

# + [markdown] id="U9PHj2B1s6um"
# **Define** a forward and backward pass procedures.

# + id="RobIdtSTs6un"
class Sequential(Module):
    """
    This class implements a container, which processes `input` data sequentially.

    `input` is processed by each module (layer) in self.modules consecutively.
    The resulting array is called `output`.
    """

    def __init__(self):
        super(Sequential, self).__init__()
        self.modules = []

    def add(self, module):
        """
        Adds a module to the container.
        """
        self.modules.append(module)

    def updateOutput(self, input):
        """
        Basic workflow of FORWARD PASS:

            y_0    = module[0].forward(input)
            y_1    = module[1].forward(y_0)
            ...
            output = module[n-1].forward(y_{n-2})

        Just write a little loop.
        """
        # Thread the activation through the layers in order.
        y = input
        for module in self.modules:
            y = module.forward(y)
        self.output = y
        return self.output

    def backward(self, input, gradOutput):
        """
        Workflow of BACKWARD PASS:

            g_{n-1} = module[n-1].backward(y_{n-2}, gradOutput)
            g_{n-2} = module[n-2].backward(y_{n-3}, g_{n-1})
            ...
            g_1 = module[1].backward(y_0, g_2)
            gradInput = module[0].backward(input, g_1)

        !!!

        To each module you need to provide the input, module saw while forward pass,
        it is used while computing gradients.
        Make sure that the input for `i-th` layer the output of `module[i]` (just the same
        input as in forward pass) and NOT `input` to this Sequential module.

        !!!
        """
        # Walk the layers in reverse; layer i receives the cached output of
        # layer i-1 as its forward-time input. The first layer gets the
        # original `input`.
        grad = gradOutput
        for idx in range(len(self.modules) - 1, 0, -1):
            grad = self.modules[idx].backward(self.modules[idx - 1].output, grad)

        self.gradInput = self.modules[0].backward(input, grad)
        return self.gradInput

    def zeroGradParameters(self):
        for module in self.modules:
            module.zeroGradParameters()

    def getParameters(self):
        """
        Should gather all parameters in a list.
        """
        return [x.getParameters() for x in self.modules]

    def getGradParameters(self):
        """
        Should gather all gradients w.r.t parameters in a list.
        """
        return [x.getGradParameters() for x in self.modules]

    def __repr__(self):
        string = "".join([str(x) + '\n' for x in self.modules])
        return string

    def __getitem__(self, x):
        return self.modules.__getitem__(x)

    def train(self):
        """
        Propagates training parameter through all modules
        """
        self.training = True
        for module in self.modules:
            module.train()

    def evaluate(self):
        """
        Propagates training parameter through all modules
        """
        self.training = False
        for module in self.modules:
            module.evaluate()


# + [markdown] id="dN64iHWfs6uo"
# # Layers

# + [markdown] id="ZSmK1of1s6uo"
# ## 1. Linear transform layer
# Also known as dense layer, fully-connected layer, FC-layer, InnerProductLayer (in caffe), affine transform
# - input:   **`batch_size x n_feats1`**
# - output: **`batch_size x n_feats2`**

# + id="fX9RMYlcs6up"
class Linear(Module):
    """
    A module which applies a linear transformation
    A common name is fully-connected layer, InnerProductLayer in caffe.

    The module should work with 2D input of shape (n_samples, n_feature).
    """

    def __init__(self, n_in, n_out):
        super(Linear, self).__init__()

        # This is a nice initialization
        stdv = 1. / np.sqrt(n_in)
        self.W = np.random.uniform(-stdv, stdv, size=(n_out, n_in))
        self.b = np.random.uniform(-stdv, stdv, size=n_out)

        self.gradW = np.zeros_like(self.W)
        self.gradb = np.zeros_like(self.b)

    def updateOutput(self, input):
        # y = x W^T + b for the whole batch at once.
        self.output = np.add(input.dot(self.W.T), self.b)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # dL/dx = dL/dy . W
        self.gradInput = gradOutput.dot(self.W)
        return self.gradInput

    def accGradParameters(self, input, gradOutput):
        # dL/dW = dL/dy^T . x ; dL/db sums the batch axis.
        self.gradW = gradOutput.T.dot(input)
        self.gradb = np.sum(gradOutput, axis=0)
        pass

    def zeroGradParameters(self):
        self.gradW.fill(0)
        self.gradb.fill(0)

    def getParameters(self):
        return [self.W, self.b]

    def getGradParameters(self):
        return [self.gradW, self.gradb]

    def __repr__(self):
        s = self.W.shape
        q = 'Linear %d -> %d' % (s[1], s[0])
        return q


# + [markdown] id="cNYwAjeus6up"
# ## 2. SoftMax
# - input:   **`batch_size x n_feats`**
# - output: **`batch_size x n_feats`**
#
# $\text{softmax}(x)_i = \frac{\exp x_i} {\sum_j \exp x_j}$
#
# Recall that $\text{softmax}(x) == \text{softmax}(x - \text{const})$. It makes possible to avoid computing exp() from large argument.

# + id="JOFf5le3s6uq"
class SoftMax(Module):
    def __init__(self):
        super(SoftMax, self).__init__()

    def updateOutput(self, input):
        # start with normalization for numerical stability
        self.output = np.subtract(input, input.max(axis=1, keepdims=True))
        self.output = np.exp(self.output) / np.exp(self.output).sum(axis=1, keepdims=True)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # The softmax Jacobian is dS_i/dx_j = S_i * (delta_ij - S_j), so
        #   gradInput_j = S_j * (gradOutput_j - sum_i gradOutput_i * S_i).
        # This closed form replaces the previous per-row n x n Jacobian
        # construction (apply_along_axis + einsum), which cost O(batch * n^2)
        # time and memory for identical results.
        self.gradInput = self.output * (
            gradOutput - np.sum(gradOutput * self.output, axis=1, keepdims=True)
        )
        return self.gradInput

    def __repr__(self):
        return "SoftMax"


# + [markdown] id="VeK5ZPE8s6uq"
# ## 3. LogSoftMax
# - input:   **`batch_size x n_feats`**
# - output: **`batch_size x n_feats`**
#
# $\text{logsoftmax}(x)_i = \log\text{softmax}(x)_i = x_i - \log {\sum_j \exp x_j}$
#
# The main goal of this layer is to be used in computation of log-likelihood loss.
# + id="IeNDBprks6uq"
class LogSoftMax(Module):
    def __init__(self):
        super(LogSoftMax, self).__init__()

    def updateOutput(self, input):
        # start with normalization for numerical stability
        self.output = np.subtract(input, input.max(axis=1, keepdims=True))
        self.output = self.output - np.log(np.exp(self.output).sum(axis=1, keepdims=True))
        return self.output

    def updateGradInput(self, input, gradOutput):
        # d logsoftmax_i / d x_j = delta_ij - softmax_j, hence
        #   gradInput_j = gradOutput_j - softmax_j * sum_i gradOutput_i.
        # The softmax is computed from max-shifted logits: the previous code
        # evaluated exp(input) directly, which overflows for large logits —
        # exactly what this "stable" layer is meant to avoid — and also
        # materialised a batch of n x n Jacobians (O(batch * n^2)).
        shifted = np.subtract(input, input.max(axis=1, keepdims=True))
        softmax = np.exp(shifted)
        softmax /= softmax.sum(axis=1, keepdims=True)
        self.gradInput = gradOutput - softmax * gradOutput.sum(axis=1, keepdims=True)
        return self.gradInput

    def __repr__(self):
        return "LogSoftMax"


# + [markdown] id="SD7L2RVfs6ur"
# ## 4. Batch normalization
# One of the most significant recent ideas that impacted NNs a lot is [**Batch normalization**](http://arxiv.org/abs/1502.03167). The idea is simple, yet effective: the features should be whitened ($mean = 0$, $std = 1$) all the way through NN. This improves the convergence for deep models letting it train them for days but not weeks. **You are** to implement the first part of the layer: features normalization. The second part (`ChannelwiseScaling` layer) is implemented below.
#
# - input:   **`batch_size x n_feats`**
# - output: **`batch_size x n_feats`**
#
# The layer should work as follows. While training (`self.training == True`) it transforms input as $$y = \frac{x - \mu}  {\sqrt{\sigma + \epsilon}}$$
# where $\mu$ and $\sigma$ - mean and variance of feature values in **batch** and $\epsilon$ is just a small number for numerical stability.
# Also during training, layer should maintain exponential moving average values for mean and variance:
# ```
#     self.moving_mean = self.moving_mean * alpha + batch_mean * (1 - alpha)
#     self.moving_variance = self.moving_variance * alpha + batch_variance * (1 - alpha)
# ```
# During testing (`self.training == False`) the layer normalizes input using moving_mean and moving_variance.
#
# Note that decomposition of batch normalization on normalization itself and channelwise scaling here is just a common **implementation** choice. In general "batch normalization" always assumes normalization + scaling.

# + id="kKPT3pYJs6ur"
class BatchNormalization(Module):
    EPS = 1e-3

    def __init__(self, alpha=0.):
        super(BatchNormalization, self).__init__()
        self.alpha = alpha
        self.moving_mean = None
        self.moving_variance = None

    def updateOutput(self, input):
        # use self.EPS please
        # Evaluation mode: normalise with the tracked running statistics
        # (falls through to batch statistics if the layer was never trained).
        if not self.training and self.moving_mean is not None and self.moving_variance is not None:
            self.output = (input - self.moving_mean) / np.sqrt(self.moving_variance + self.EPS)
            return self.output

        batch_mean = np.mean(input, axis=0)
        batch_var = np.var(input, axis=0)

        if self.moving_mean is None or self.moving_variance is None:
            # First batch seeds both running statistics.
            # BUG FIX: this previously assigned `self.batch_var = batch_var`,
            # leaving self.moving_variance as None — the EMA update below
            # would then crash on the second batch and evaluation mode could
            # never use the running variance.
            self.moving_mean = batch_mean
            self.moving_variance = batch_var
        else:
            self.moving_mean = self.moving_mean * self.alpha + batch_mean * (1 - self.alpha)
            self.moving_variance = self.moving_variance * self.alpha + batch_var * (1 - self.alpha)

        self.output = (input - batch_mean) / np.sqrt(batch_var + self.EPS)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # Standard batch-norm backward pass expressed through the normalised
        # output (self.output must hold the result of the last forward call).
        batch_size = input.shape[0]
        self.gradInput = (1. / batch_size) / np.sqrt(np.var(input, axis=0) + self.EPS) * \
            (batch_size * gradOutput - np.sum(gradOutput, axis=0) -
             self.output * np.sum(gradOutput * self.output, axis=0))
        return self.gradInput

    def __repr__(self):
        return "BatchNormalization"


# + id="amLBgA-7s6ur"
class ChannelwiseScaling(Module):
    """
    Implements linear transform of input
        y = \gamma * x + \beta
    where \gamma, \beta - learnable vectors of length x.shape[-1]
    """

    def __init__(self, n_out):
        super(ChannelwiseScaling, self).__init__()

        stdv = 1. / np.sqrt(n_out)
        self.gamma = np.random.uniform(-stdv, stdv, size=n_out)
        self.beta = np.random.uniform(-stdv, stdv, size=n_out)

        self.gradGamma = np.zeros_like(self.gamma)
        self.gradBeta = np.zeros_like(self.beta)

    def updateOutput(self, input):
        self.output = input * self.gamma + self.beta
        return self.output

    def updateGradInput(self, input, gradOutput):
        self.gradInput = gradOutput * self.gamma
        return self.gradInput

    def accGradParameters(self, input, gradOutput):
        self.gradBeta = np.sum(gradOutput, axis=0)
        self.gradGamma = np.sum(gradOutput * input, axis=0)

    def zeroGradParameters(self):
        self.gradGamma.fill(0)
        self.gradBeta.fill(0)

    def getParameters(self):
        return [self.gamma, self.beta]

    def getGradParameters(self):
        return [self.gradGamma, self.gradBeta]

    def __repr__(self):
        return "ChannelwiseScaling"


# + [markdown] id="M0X-rZPzs6us"
# Practical notes. If BatchNormalization is placed after a linear transformation layer (including dense layer, convolutions, channelwise scaling) that implements function like `y = weight * x + bias`, than bias adding become useless and could be omitted since its effect will be discarded while batch mean subtraction.
# If BatchNormalization (followed by `ChannelwiseScaling`) is placed before a layer that propagates scale (including ReLU, LeakyReLU) followed by any linear transformation layer then parameter `gamma` in `ChannelwiseScaling` could be frozen since it could be absorbed into the linear transformation layer.

# + [markdown] id="nRwRQ3b7s6us"
# ## 5. Dropout
# Implement [**dropout**](https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf). The idea and implementation is really simple: just multiply the input by $Bernoulli(p)$ mask. Here $p$ is probability of an element to be zeroed.
#
# This has proven to be an effective technique for regularization and preventing the co-adaptation of neurons.
#
# While training (`self.training == True`) it should sample a mask on each iteration (for every batch), zero out elements and multiply elements by $1 / (1 - p)$. The latter is needed for keeping mean values of features close to mean values which will be in test mode. When testing this module should implement identity transform i.e. `self.output = input`.
# # - input: **`batch_size x n_feats`** # - output: **`batch_size x n_feats`** # + id="77YJq4VMs6us" class Dropout(Module): def __init__(self, p=0.5): super(Dropout, self).__init__() self.p = p self.mask = None def updateOutput(self, input): if self.training: self.mask = np.random.binomial(1, 1 - self.p, input.shape) / (1 - self.p) self.output = np.multiply(input, self.mask) else: self.output = input return self.output def updateGradInput(self, input, gradOutput): self.gradInput = np.multiply(gradOutput, self.mask) return self.gradInput def __repr__(self): return "Dropout" # + [markdown] id="aAcTRJDZs6ut" # # Activation functions # + [markdown] id="hnCatiCks6ut" # Here's the complete example for the **Rectified Linear Unit** non-linearity (aka **ReLU**): # + id="jxnZg-kUs6ut" class ReLU(Module): def __init__(self): super(ReLU, self).__init__() def updateOutput(self, input): self.output = np.maximum(input, 0) return self.output def updateGradInput(self, input, gradOutput): self.gradInput = np.multiply(gradOutput , input > 0) return self.gradInput def __repr__(self): return "ReLU" # + [markdown] id="huehLqUws6ut" # ## 6. Leaky ReLU # Implement [**Leaky Rectified Linear Unit**](http://en.wikipedia.org/wiki%2FRectifier_%28neural_networks%29%23Leaky_ReLUs). Expriment with slope. # + id="_mrarmyos6uu" class LeakyReLU(Module): def __init__(self, slope = 0.03): super(LeakyReLU, self).__init__() self.slope = slope def updateOutput(self, input): # Your code goes here. ################################################ self.output = input.copy() np.multiply(input, self.slope, out=self.output, where=input < 0) return self.output def updateGradInput(self, input, gradOutput): # Your code goes here. ################################################ self.gradInput = gradOutput.copy() np.multiply(gradOutput, self.slope, out=self.gradInput, where=input < 0) return self.gradInput def __repr__(self): return "LeakyReLU" # + [markdown] id="3MgIY5Jfs6uu" # ## 7. 
# ELU
# Implement [**Exponential Linear Units**](http://arxiv.org/abs/1511.07289) activations.

# + id="ItmQtoTHs6uu"
class ELU(Module):
    # alpha * (exp(x) - 1) for x < 0, identity otherwise.
    def __init__(self, alpha=1.0):
        super(ELU, self).__init__()

        self.alpha = alpha

    def updateOutput(self, input):
        self.output = input.copy()
        self.output[input < 0] = self.alpha * (np.exp(input[input < 0]) - 1)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # For x < 0 the derivative is alpha * exp(x) == output + alpha,
        # reusing the forward result (forward must be called first).
        self.gradInput = gradOutput.copy()
        self.gradInput[input < 0] *= self.output[input < 0] + self.alpha
        return self.gradInput

    def __repr__(self):
        return "ELU"


# + [markdown] id="t-jrMSL7s6uu"
# ## 8. SoftPlus
# Implement [**SoftPlus**](https://en.wikipedia.org/wiki%2FRectifier_%28neural_networks%29) activations. Look, how they look a lot like ReLU.

# + id="5DZFrAmRs6uu"
class SoftPlus(Module):
    # log(1 + exp(x)): a smooth approximation of ReLU.
    def __init__(self):
        super(SoftPlus, self).__init__()

    def updateOutput(self, input):
        # np.logaddexp(0, x) == log(1 + exp(x)) computed stably; the naive
        # np.log(1 + np.exp(input)) overflows to inf for large inputs.
        self.output = np.logaddexp(0, input)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # Derivative is the sigmoid. The tanh identity
        # sigmoid(x) = 0.5 * (1 + tanh(x / 2)) avoids the overflow of
        # exp(-input) for very negative inputs.
        self.gradInput = gradOutput * 0.5 * (1.0 + np.tanh(0.5 * input))
        return self.gradInput

    def __repr__(self):
        return "SoftPlus"


# + [markdown] id="vsCQJPFxs6uu"
# # Criterions

# + [markdown] id="3IzqtdLUs6uv"
# Criterions are used to score the models answers.

# + id="s5RUDSI4s6uv"
class Criterion(object):
    def __init__(self):
        self.output = None
        self.gradInput = None

    def forward(self, input, target):
        """
        Given an input and a target, compute the loss function
        associated to the criterion and return the result.

        For consistency this function should not be overrided,
        all the code goes in `updateOutput`.
        """
        return self.updateOutput(input, target)

    def backward(self, input, target):
        """
        Given an input and a target, compute the gradients of the loss function
        associated to the criterion and return the result.

        For consistency this function should not be overrided,
        all the code goes in `updateGradInput`.
        """
        return self.updateGradInput(input, target)

    def updateOutput(self, input, target):
        """
        Function to override.
        """
        return self.output

    def updateGradInput(self, input, target):
        """
        Function to override.
        """
        return self.gradInput

    def __repr__(self):
        """
        Pretty printing. Should be overrided in every module if you want
        to have readable description.
        """
        return "Criterion"


# + [markdown] id="tQsRgydCs6uv"
# The **MSECriterion**, which is basic L2 norm usually used for regression, is implemented here for you.
# - input:   **`batch_size x n_feats`**
# - target: **`batch_size x n_feats`**
# - output: **scalar**

# + id="brmw9YRks6uv"
class MSECriterion(Criterion):
    def __init__(self):
        super(MSECriterion, self).__init__()

    def updateOutput(self, input, target):
        # Sum of squared errors averaged over the batch dimension only.
        self.output = np.sum(np.power(input - target, 2)) / input.shape[0]
        return self.output

    def updateGradInput(self, input, target):
        self.gradInput = (input - target) * 2 / input.shape[0]
        return self.gradInput

    def __repr__(self):
        return "MSECriterion"


# + [markdown] id="aR2X4ZJFs6uv"
# ## 9. Negative LogLikelihood criterion (numerically unstable)
# You task is to implement the **ClassNLLCriterion**. It should implement [multiclass log loss](http://scikit-learn.org/stable/modules/model_evaluation.html#log-loss). Nevertheless there is a sum over `y` (target) in that formula,
# remember that targets are one-hot encoded. This fact simplifies the computations a lot. Note, that criterions are the only places, where you divide by batch size. Also there is a small hack with adding small number to probabilities to avoid computing log(0).
# - input:   **`batch_size x n_feats`** - probabilities
# - target: **`batch_size x n_feats`** - one-hot representation of ground truth
# - output: **scalar**
#
#

# + id="6UYM83ANs6uv"
class ClassNLLCriterionUnstable(Criterion):
    # Multiclass NLL computed on raw probabilities; clipping keeps log()
    # away from 0 and 1 but the formulation is still numerically fragile.
    EPS = 1e-15

    def __init__(self):
        # (removed a dead `a = super(...)` assignment that was never used)
        super(ClassNLLCriterionUnstable, self).__init__()

    def updateOutput(self, input, target):
        # Use this trick to avoid numerical errors
        input_clamp = np.clip(input, self.EPS, 1 - self.EPS)

        # One-hot targets reduce the double sum to picking the true-class
        # log-probability per sample; average over the batch.
        self.output = -1 / input.shape[0] * np.sum(np.log(input_clamp) * target)
        return self.output

    def updateGradInput(self, input, target):
        # Use this trick to avoid numerical errors
        input_clamp = np.clip(input, self.EPS, 1 - self.EPS)

        # d/dp of -log(p) is -1/p, only at the true-class positions.
        self.gradInput = -1 / input.shape[0] * target * 1 / input_clamp
        return self.gradInput

    def __repr__(self):
        return "ClassNLLCriterionUnstable"


# + [markdown] id="F_GrkxTMs6uw"
# ## 10. Negative LogLikelihood criterion (numerically stable)
# - input:   **`batch_size x n_feats`** - log probabilities
# - target: **`batch_size x n_feats`** - one-hot representation of ground truth
# - output: **scalar**
#
# Task is similar to the previous one, but now the criterion input is the output of log-softmax layer. This decomposition allows us to avoid problems with computation of forward and backward of log().

# + id="pjTkhIxrs6uw"
class ClassNLLCriterion(Criterion):
    # Stable variant: input is already log-probabilities (LogSoftMax output).
    def __init__(self):
        # (removed a dead `a = super(...)` assignment that was never used)
        super(ClassNLLCriterion, self).__init__()

    def updateOutput(self, input, target):
        # -mean over batch of the true-class log-probability.
        self.output = -1 / input.shape[0] * np.sum(input * target)
        return self.output

    def updateGradInput(self, input, target):
        # Linear in the log-probabilities, so the gradient is constant.
        self.gradInput = -1 / input.shape[0] * target
        return self.gradInput

    def __repr__(self):
        return "ClassNLLCriterion"


# + [markdown] id="YqjuL9EHs6uw"
# # Optimizers

# + [markdown] id="C0cx4lrxs6uw"
# ### SGD optimizer with momentum
# - `variables` - list of lists of variables (one list per layer)
# - `gradients` - list of lists of current gradients (same structure as for `variables`, one array for each var)
# - `config` - dict with optimization parameters (`learning_rate` and `momentum`)
# - `state` - dict with optimizator state (used to save accumulated gradients)

# + id="WJ-zkYjhs6uw"
def sgd_momentum(variables, gradients, config, state):
    """Classical momentum SGD step, updating `variables` in place.

    The accumulated (velocity) buffers live in state['accumulated_grads'],
    keyed by a flat per-variable index.
    """
    # 'variables' and 'gradients' have complex structure, accumulated_grads will be stored in a simpler one
    state.setdefault('accumulated_grads', {})

    var_index = 0
    for current_layer_vars, current_layer_grads in zip(variables, gradients):
        for current_var, current_grad in zip(current_layer_vars, current_layer_grads):
            old_grad = state['accumulated_grads'].setdefault(var_index, np.zeros_like(current_grad))

            # v <- momentum * v + lr * g, written in place with np.add.
            np.add(config['momentum'] * old_grad, config['learning_rate'] * current_grad, out=old_grad)

            current_var -= old_grad
            var_index += 1


# + [markdown] id="_DX0SiGCs6uw"
# ## 11.
# [Adam](https://arxiv.org/pdf/1412.6980.pdf) optimizer
# - `variables` - list of lists of variables (one list per layer)
# - `gradients` - list of lists of current gradients (same structure as for `variables`, one array for each var)
# - `config` - dict with optimization parameters (`learning_rate`, `beta1`, `beta2`, `epsilon`)
# - `state` - dict with optimizator state (used to save 1st and 2nd moment for vars)
#
# Formulas for optimizer:
#
# Current step learning rate: $$\text{lr}_t = \text{learning_rate} * \frac{\sqrt{1-\beta_2^t}} {1-\beta_1^t}$$
# First moment of var: $$\mu_t = \beta_1 * \mu_{t-1} + (1 - \beta_1)*g$$
# Second moment of var: $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2)*g*g$$
# New values of var: $$\text{variable} = \text{variable} - \text{lr}_t * \frac{\mu_t}{\sqrt{v_t} + \epsilon}$$

# + id="6Q4OmoDMs6ux"
def adam_optimizer(variables, gradients, config, state):
    """One Adam step over all variables, updating them in place.

    First/second moment buffers are kept in state['m'] / state['v'] keyed by
    a flat per-variable index; state['t'] counts the steps taken and drives
    the bias correction folded into lr_t.
    """
    # Moment buffers use a flat index even though variables/gradients are nested.
    state.setdefault('m', {})  # first moment vars
    state.setdefault('v', {})  # second moment vars
    state.setdefault('t', 0)   # timestamp
    state['t'] += 1
    for k in ['learning_rate', 'beta1', 'beta2', 'epsilon']:
        assert k in config, config.keys()

    lr = config['learning_rate']
    beta1 = config['beta1']
    beta2 = config['beta2']
    eps = config['epsilon']
    t = state['t']

    # Bias-corrected step size for this timestep.
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)

    var_index = 0
    for layer_vars, layer_grads in zip(variables, gradients):
        for var, grad in zip(layer_vars, layer_grads):
            m = state['m'].setdefault(var_index, np.zeros_like(grad))
            v = state['v'].setdefault(var_index, np.zeros_like(grad))

            # EMA updates written back into the stored buffers via out=,
            # so the objects held in `state` are mutated, not replaced.
            np.add(beta1 * m, (1 - beta1) * grad, out=m)
            np.add(beta2 * v, (1 - beta2) * np.square(grad), out=v)
            var -= lr_t * m / (np.sqrt(v) + eps)

            # small checks that you've updated the state; use np.add for rewriting np.arrays values
            assert m is state['m'].get(var_index)
            assert v is state['v'].get(var_index)
            var_index += 1


# + [markdown] id="lP_dtHrTs6ux"
# # Layers for advanced track homework
# You **don't need** to implement it if you are working on `homework_main-basic.ipynb`

# + [markdown] id="hC-qExBSs6ux"
# ## 12. Conv2d [Advanced]
# - input:   **`batch_size x in_channels x h x w`**
# - output: **`batch_size x out_channels x h x w`**
#
# You should implement something like pytorch `Conv2d` layer with `stride=1` and zero-padding outside of image using `scipy.signal.correlate` function.
#
# Practical notes:
# - While the layer name is "convolution", the most of neural network frameworks (including tensorflow and pytorch) implement operation that is called [correlation](https://en.wikipedia.org/wiki/Cross-correlation#Cross-correlation_of_deterministic_signals) in signal processing theory. So **don't use** `scipy.signal.convolve` since it implements [convolution](https://en.wikipedia.org/wiki/Convolution#Discrete_convolution) in terms of signal processing.
# - It may be convenient to use `numpy.pad` for zero-padding.
# - It's rather ok to implement convolution over 4d array using 2 nested loops: one over batch size dimension and another one over output filters dimension
# - Having troubles with understanding how to implement the layer?
# - Check the last year video of lecture 3 (starting from ~1:14:20)
# - May the google be with you

# + id="VSH7D7o4s6ux"
import scipy as sp
import scipy.signal
import skimage


class Conv2d(Module):
    """2D correlation layer with stride 1 and zero padding that preserves
    the spatial size (pad = kernel_size // 2, kernel_size odd)."""

    def __init__(self, in_channels, out_channels, kernel_size):
        super(Conv2d, self).__init__()
        assert kernel_size % 2 == 1, kernel_size

        stdv = 1. / np.sqrt(in_channels)
        self.W = np.random.uniform(-stdv, stdv, size=(out_channels, in_channels, kernel_size, kernel_size))
        self.b = np.random.uniform(-stdv, stdv, size=(out_channels,))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size

        self.gradW = np.zeros_like(self.W)
        self.gradb = np.zeros_like(self.b)

    def updateOutput(self, input):
        pad_size = self.kernel_size // 2
        batch_size, _, h, w = input.shape
        # Zero-pad the spatial dims only.
        padded = np.pad(
            input,
            ((0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)),
            mode="constant",
        )
        self.output = np.empty((batch_size, self.out_channels, h, w))
        for b in range(batch_size):
            for o in range(self.out_channels):
                # 3D 'valid' correlation over (channels, H, W): the channel
                # axis collapses, summing over input channels in one call.
                self.output[b, o] = (
                    sp.signal.correlate(padded[b], self.W[o], mode="valid")[0]
                    + self.b[o]
                )
        return self.output

    def updateGradInput(self, input, gradOutput):
        pad_size = self.kernel_size // 2
        padded_grad = np.pad(
            gradOutput,
            ((0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)),
            mode="constant",
        )
        # Correlating the padded upstream gradient with the 180-degree
        # rotated kernels implements the full convolution that
        # backpropagates to the input; axes swapped to (in, out, k, k).
        rotated = self.W[:, :, ::-1, ::-1].swapaxes(0, 1)
        self.gradInput = np.empty(input.shape)
        for b in range(gradOutput.shape[0]):
            for i in range(self.in_channels):
                self.gradInput[b, i] = sp.signal.correlate(
                    padded_grad[b], rotated[i], mode="valid"
                )[0]
        return self.gradInput

    def accGradParameters(self, input, gradOutput):
        pad_size = self.kernel_size // 2
        padded = np.pad(
            input,
            ((0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)),
            mode="constant",
        )
        for o in range(self.out_channels):
            for i in range(self.in_channels):
                # 3D 'valid' correlation with the batch as the leading axis:
                # the batch dimension collapses, giving the (k, k) kernel
                # gradient summed over the batch. Assigned (not accumulated)
                # to stay consistent with Linear.accGradParameters.
                self.gradW[o, i] = sp.signal.correlate(
                    padded[:, i], gradOutput[:, o], mode="valid"
                )[0]
        self.gradb = gradOutput.sum(axis=(0, 2, 3))
        pass

    def zeroGradParameters(self):
        self.gradW.fill(0)
        self.gradb.fill(0)

    def getParameters(self):
        return [self.W, self.b]

    def getGradParameters(self):
        return [self.gradW, self.gradb]

    def __repr__(self):
        s = self.W.shape
        q = 'Conv2d %d -> %d' % (s[1], s[0])
        return q


# + [markdown] id="weqS2w2Es6ux"
# ## 13.
MaxPool2d [Advanced] # - input: **`batch_size x n_input_channels x h x w`** # - output: **`batch_size x n_output_channels x h // kern_size x w // kern_size`** # # You are to implement simplified version of pytorch `MaxPool2d` layer with stride = kernel_size. Please note, that it's not a common case that stride = kernel_size: in AlexNet and ResNet kernel_size for max-pooling was set to 3, while stride was set to 2. We introduce this restriction to make implementation simplier. # # Practical notes: # - During forward pass what you need to do is just to reshape the input tensor to `[n, c, h / kern_size, kern_size, w / kern_size, kern_size]`, swap two axes and take maximums over the last two dimensions. Reshape + axes swap is sometimes called space-to-batch transform. # - During backward pass you need to place the gradients in positions of maximal values taken during the forward pass # - In real frameworks the indices of maximums are stored in memory during the forward pass. It is cheaper than to keep the layer input in memory and recompute the maximums. # + id="bY1y-4T5s6ux" class MaxPool2d(Module): def __init__(self, kernel_size): super(MaxPool2d, self).__init__() self.kernel_size = kernel_size self.gradInput = None def updateOutput(self, input): input_h, input_w = input.shape[-2:] # your may remove these asserts and implement MaxPool2d with padding assert input_h % self.kernel_size == 0 assert input_w % self.kernel_size == 0 # YOUR CODE ############################# # self.output = ... # self.max_indices = ... return self.output def updateGradInput(self, input, gradOutput): # YOUR CODE ############################# # self.gradInput = ... return self.gradInput def __repr__(self): q = 'MaxPool2d, kern %d, stride %d' %(self.kernel_size, self.kernel_size) return q # + [markdown] id="m168iS1Us6uy" # ### Flatten layer # Just reshapes inputs and gradients. It's usually used as proxy layer between Conv2d and Linear. 
# + id="JwY7ZPsGs6uy"
class Flatten(Module):
    """Reshape layer: collapses each sample in the batch to a 1-D vector.

    Typically sits between Conv2d and Linear.  The forward pass keeps the
    batch axis and folds every remaining axis into one; the backward pass
    simply restores the gradient to the input's original shape.
    """

    def __init__(self):
        super(Flatten, self).__init__()

    def updateOutput(self, input):
        # Keep the batch axis, merge all remaining axes into a single one.
        batch_size = len(input)
        self.output = input.reshape(batch_size, -1)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # Invert the flattening: give the gradient the input's shape back.
        original_shape = input.shape
        self.gradInput = gradOutput.reshape(original_shape)
        return self.gradInput

    def __repr__(self):
        return "Flatten"


# + id="5ioAztfQs6uy"
homework01/homework_modules.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# https://github.com/sidhantx/LinearRegression

# ## Linear Regression PSET
#
# data source : https://www.kaggle.com/ishaanv/ISLR-Auto?select=Advertising.csv

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# +
# Load the advertising dataset (TV, Radio, Newspaper spend and Sales).
df = pd.read_csv("advertising.csv")

#display the top 5 rows of the df
print("\n\nthe first few rows of our data:")
#your code here

# +
#create a variable called X with features TV, Radio, Newspaper, and y with the response Sales
#your code here
X = ___
y = ___
# -

#split the data into 80% train and 20% test. set the random state=1
X_train, _______ = _______________

# +
#Fit a linear regression model on feature Newspaper, and response Sales
# Instantiate a linear regression model
model1 = __________

#fit the training data to the model. use the Newspaper feature only
_______________

#find the rsq aka score for this model on both test and train
score_train = ___________
score_test = ____________
# -

# #### Visualizing your model

# +
fig, axes = plt.subplots(figsize=(12,6))

# A dense 500-point grid over (and slightly beyond) the observed Newspaper
# range, used to draw the fitted regression line.
x = np.linspace(X_test['Newspaper'].min()-20, X_test['Newspaper'].max()+50,500)

# Scatter of the raw data.
axes.plot(df["Newspaper"],df["Sales"], "o" ,alpha =0.4)

# BUGFIX: the regression line must be drawn over the same grid its predictions
# were computed on.  The original plotted X_train["Newspaper"] (n_train points)
# against model1.predict(x) (500 points), which raises a ValueError because the
# x and y arrays have different lengths.
axes.plot(x, model1.predict(x.reshape(-1,1)))

axes.set_xlabel("Newspaper advertising(1000$)")
axes.set_ylabel("Sales(1000$)")
axes.set_title("Newspaper Advertising vs Sales")
plt.show()
# -

# ### Answer the Questions:
#
# What are the intercept and slope of the line?
#
# What is the equation of this line? Express it in terms of Sales and Newspaper.
# +
#your code here
# -

# ### Now let's train a model on the whole data

# +
# Instantiate a linear regression model
model = ___

#fit the training data to the model. use all features
_______________

#predict on train
y_pred_train = _______

#predict on test
y_pred_test = ______

#print the test, and train R^2
# -

# ### Answer the Questions:
#
# What are the intercept and slope of the line?
#
# What is the equation of this line? Express it in terms of Sales, TV, Radio and Newspaper.

# ### We show the regression line on TV and Sales only even though we trained our model on 3 features

# +
fig, axes = plt.subplots(figsize=(12,8))

# One 500-point grid per feature so the 3-feature model can be evaluated along
# a path through feature space; only TV is used for the x-axis of the plot.
x1 = np.linspace(X_test['TV'].min(), X_test['TV'].max(),500)
x2 = np.linspace(X_test['Radio'].min(), X_test['Radio'].max(),500)
x3 = np.linspace(X_test['Newspaper'].min(), X_test['Newspaper'].max(),500)
temp = pd.DataFrame(x1,columns=["TV"])
temp['Radio'] = x2
temp["Newspaper"] = x3

axes.plot(X_train['TV'],y_train, "o" ,alpha =0.4, label="Train")
axes.plot(X_test['TV'],y_test,"^",alpha=0.4, label="Test" )
axes.plot(x1, model.predict(temp) , ".", color='black', label="y_avg", alpha=1, linewidth=1, markersize=4)
axes.set_xlabel("TV(1000)")
axes.set_ylabel("Sales(1000)")
# BUGFIX: the title read "Sales vs Sales"; this chart shows TV advertising
# against Sales.
axes.set_title("TV Advertising vs Sales")
# BUGFIX: the three series set label= but the legend was never drawn.
axes.legend()
# -

# ### Calculate the mse for both train, and test data, and report it.

# +
#your code here
# -
pset_linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Explore the characteristics of the news corpus

# Path to the news dump; columns include 'content' and 'source'.
fname = 'F:\\NLP_CV\\lec\\7lesson\\sqlResult_1558435.csv'

import pandas as pd

content = pd.read_csv(fname, encoding='gb18030')

content.head()

# Articles published by Xinhua News Agency ('新华社').
xinhua_news = content[content['source'] == '新华社']

# Fraction of the corpus that is Xinhua content.
len(xinhua_news) / len(content)

xinhua_news['content'][:10]

# Drop rows missing either the article text or its source.
content = content.dropna(subset=['content', 'source'])

import jieba


def cut(string):
    """Tokenize a Chinese string into a list of words with jieba."""
    return jieba.lcut(string)


import re
from pyltp import SentenceSplitter
from sklearn.feature_extraction.text import TfidfVectorizer

# ### Corpus cleaning

# Strip carriage returns, ideographic spaces, literal "\n" sequences, real
# newlines and '?' from each article; rows whose text is empty after cleaning
# map to None and are dropped below.  `cmd_df` keeps the original source label.
cmd_df = content.apply(lambda x: pd.Series([re.sub(r'[\r\u3000\\n\n?]','',x['content']), x['source']] if bool(re.sub(r'[\r\u3000\\n\n?.]','',x['content'])) else None, index=['content', 'source']), axis=1)
cmd_df = cmd_df.dropna(subset=['content', 'source'])
cmd_df

# `raw_df` carries a binary label instead: 1 = Xinhua, 0 = everything else.
raw_df = content.apply(lambda x: pd.Series([re.sub(r'[\r\u3000\\n\n?]','',x['content']), (1 if x['source'] == '新华社' else 0)] if bool(re.sub(r'[\r\u3000\\n\n?.]','',x['content'])) else None, index=['content', 'label']), axis=1)
raw_df = raw_df.dropna(subset=['content', 'label'])
raw_df

# ### Corpus vectorization

def vectz_content(texts):
    """TF-IDF vectorize an iterable of raw texts.

    Each text is stripped of non-word characters, tokenized with jieba and
    re-joined with spaces so TfidfVectorizer can split on whitespace.
    Returns the sparse TF-IDF matrix.

    NOTE(review): texts that clean to an empty string are skipped, so the
    matrix can have fewer rows than `texts` and row i would no longer align
    with label i in the X/y split below — confirm alignment on this dataset.
    """
    vectorizer = TfidfVectorizer()
    #return vectorizer.fit_transform([[' '.join(cut(re.sub('\W','',sent))) for sent in SentenceSplitter.split(text) if ' '.join(cut(re.sub('\W','',sent))) !=''] for text in texts])
    #return vectorizer.fit_transform([' '.join([' '.join(cut(re.sub('\W','',sent))) for sent in SentenceSplitter.split(text) if ' '.join(cut(re.sub('\W','',sent))) !='']) for text in texts])
    #return [vectorizer.fit_transform([' '.join(cut(re.sub('\W','',sent))) for sent in SentenceSplitter.split(text) if ' '.join(cut(re.sub('\W','',sent))) !='']) for text in texts]
    data = []
    for index,text in enumerate(texts):
        print('\r Current : {}/{}'.format(index+1,len(texts)),end = '')
        if ' '.join(cut(re.sub('\W','',text))):
            data += [' '.join(cut(re.sub('\W','',text)))]
    print(data[:3])
    vect = vectorizer.fit_transform(data)
    return vect


# Smoke test on the first 20 articles.
vectz_content(content['content'][:20])

X = raw_df['content']
y = raw_df['label']

X[:10]

x = vectz_content(X)

type(x)

x.shape

# ### Build the training and test sets

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)

X_train.shape,y_train.shape,X_test.shape,y_test.shape

# ### Train and predict with KNN

from sklearn import neighbors

n_neighbors = 5
knn = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')

# y_ holds this K=5 model's predictions on the test set; several cells below
# reuse this global.
y_ = knn.fit(X_train, y_train).predict(X_test)

knn.score(X_test,y_test)

# ### Evaluate the model

from sklearn.metrics import accuracy_score

y_.shape

accuracy_score(y_test, y_)

from sklearn.metrics import precision_score

# ### precision = tp / (tp + fp)
# ### precision: intuitively, the model's ability to label only true positives as positive

precision_score(y_test, y_)

from sklearn.metrics import recall_score

# ### recall = tp / (tp + fn)
# ### recall: the model's ability to find all of the truly positive samples

recall_score(y_test, y_)

# ### F1 = 2 * (precision * recall) / (precision + recall)
# ### f1 score: a weighted (harmonic) mean of precision and recall

from sklearn.metrics import f1_score

f1_score(y_test, y_)

# ### Tune hyper-parameters and evaluate the new model

from sklearn.model_selection import GridSearchCV

# Grid over K (and the Minkowski power p for the distance-weighted variant).
param_grid = [
    {
        'weights': ['uniform'],
        'n_neighbors':[i for i in range(1,6)]
    },
    {
        'weights':['distance'],
        'n_neighbors':[i for i in range(1,6)],
        'p':[i for i in range(1,6)]
    }
]
knn_ini = neighbors.KNeighborsClassifier()
grid_search = GridSearchCV(knn_ini,param_grid)

grid_search.fit(X_train,y_train)


def knns(ns = [1,2,3,4,5],weights = 'distance'):
    """Fit one KNN classifier per K in `ns` and tabulate evaluation metrics.

    Returns a DataFrame with columns K / Score / Accuracy / Precision /
    Recall / F1.  Uses the global X_train/X_test/y_train/y_test split.

    NOTE(review): a mutable default argument (`ns=[1,2,3,4,5]`) is risky;
    a tuple or None-sentinel would be safer.
    """
    #ns = [1,2,3,4,5]
    scores,ys_,accuracys,precisions,recalls,f1s =[],[],[],[],[],[]
    for n_neighbors in ns:
        print('K:{} Training begins:'.format(n_neighbors))
        knn_ = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
        ys_ += [knn_.fit(X_train, y_train).predict(X_test)]
        scores += [knn_.score(X_test,y_test)]
        # NOTE(review): the four metrics below are computed against the global
        # `y_` (the K=5 model trained earlier), not against this iteration's
        # predictions `ys_[-1]` — so Accuracy/Precision/Recall/F1 come out
        # identical for every K.  Likely a bug; confirm intent.
        accuracys += [accuracy_score(y_test,y_)]
        precisions += [precision_score(y_test,y_)]
        recalls += [recall_score(y_test,y_)]
        f1s += [f1_score(y_test,y_)]
    d = {'K': ns,'Score': scores,'Accuracy': accuracys,'Precision': precisions,'Recall': recalls,'F1': f1s}
    df = pd.DataFrame(d)
    return df


knns([1,2,3,4,5])

# NOTE(review): `ns` is an int here but knns iterates over `ns` — this call
# raises TypeError.  Probably meant ns=[5].
knns(ns = 5,weights = 'uniform' )

# ### Collect the misclassified samples (predicted Xinhua, actually not)

# NOTE(review): despite the name "fn", the filter pred == 1 and true == 0
# selects false *positives* for the Xinhua class — articles the model
# attributes to Xinhua that were published elsewhere (plagiarism suspects).
predp = ['新华社' for index,(pred,true) in enumerate(zip(y_,y_test)) if int(pred) == 1 and int(true) == 0]
fn = [cmd_df['source'][y_test.index[index]] for index,(pred,true) in enumerate(zip(y_,y_test)) if int(pred) == 1 and int(true) == 0]
fn_index = [y_test.index[index] for index,(pred,true) in enumerate(zip(y_,y_test)) if int(pred) == 1 and int(true) == 0]

fn_index

fn[:10]

neg_content = [cmd_df['content'][y_test.index[index]] for index,(pred,true) in enumerate(zip(y_,y_test)) if int(pred) == 1 and int(true) == 0]

fn_dict = {'content': neg_content,'prediction': predp,'true': fn}
fn_df = pd.DataFrame(fn_dict,index = fn_index)
fn_df

# ### Data thinking vs. machine-learning thinking
# #### Data thinking: fitting-driven — observe regularities in the data, posit a functional form, then fit the data to obtain the target function.
# #### Machine-learning thinking: the goal is to produce an algorithmic model — pour in large amounts of training data and let the model emerge.

# ### Use edit distance to locate copied or lightly modified wording in suspected-plagiarism texts

fn_df['content'][0]

xinhua_df = cmd_df[cmd_df['source'] == '新华社']
xinhua_df

fn_proc = fn_df.apply(lambda x: pd.Series([x['content'], x['true']], index=['content', 'source']), axis=1)
fn_proc

fn_proc.iloc[0]

len(fn_proc)

# Append the suspect rows after the genuine Xinhua rows so one TF-IDF space
# covers both.
# NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed in
# 2.0; this O(n^2) loop should become pd.concat([xinhua_df, fn_proc]).
tf = xinhua_df
for i in range(len(fn_proc)):
    tf = tf.append(fn_proc.iloc[i],ignore_index=False)
tf

len(tf)

tf_vect = vectz_content(tf['content'])

import numpy as np
from sklearn.decomposition import TruncatedSVD
# NOTE(review): `svd` is created here but never used below.
svd = TruncatedSVD(n_components=20, n_iter=7, random_state=42)

import time


def most_similar_news():
    """For each suspect article (the last len(fn_proc) rows of tf_vect), find
    the most cosine-similar genuine Xinhua article.

    Returns a list of (suspect_text, best_matching_xinhua_text, similarity).
    O(#suspects * #xinhua) pairwise comparisons — slow on the full corpus.

    NOTE(review): cosine_similarity is imported only in a later cell; running
    the notebook strictly top-to-bottom raises NameError here.
    """
    #id_similar = [max([(i,j,cosine_similarity(tf_fn,v)[0][0]) for i,v in enumerate(tf_vect[:len(tf)-len(fn_proc)])],key = lambda x:x[2]) for j,tf_fn in enumerate(tf_vect[len(tf)-len(fn_proc):])]
    fake_and_true = []
    for j,tf_fn in enumerate(tf_vect[len(tf)-len(fn_proc):]):
        print('\r Current : {}/{}'.format(j+1,len(fn_proc)),end = '')
        fn_idx = tf.index[j+len(tf)-len(fn_proc)]
        similar_degree = []
        for i,v in enumerate(tf_vect[:len(tf)-len(fn_proc)]):
            tf_idx = tf.index[i]
            similar_degree += [(fn_idx,tf_idx,cosine_similarity(tf_fn,v)[0][0])]
        fake_idx,true_idx, similar = max(similar_degree,key = lambda x:x[2])
        fake_text = tf['content'][fake_idx]
        true_text = tf['content'][true_idx]
        fake_and_true += [(fake_text,true_text,similar)]
    #texts = [(tf_df['content'][tf_df.index[i]],tf_df['content'][tf_df.index[j]]) for i,j,similar in id_similar]
    return fake_and_true


# NOTE(review): time.clock() was removed in Python 3.8 — use
# time.perf_counter() instead.  The '/3600' converts seconds to hours.
start = time.clock()
fake_true_similar = most_similar_news()
end = time.clock()
print(' wall:',(end-start)/3600)

fake_true_similar[4]

# Keep only strongly similar pairs — the likely plagiarism candidates.
fake_true_samples = [(i,j,s) for i,j,s in fake_true_similar if s > 0.8]

len(fake_true_samples)

from sklearn.metrics.pairwise import cosine_similarity
import jieba.posseg as pseg


def plagiarize_detect(text1,text2,scheme = 'tfidf'):
    """Sentence-level comparison of a suspect text against its source.

    scheme='tfidf': TF-IDF + cosine similarity per sentence pair; returns a
    dict with keys '抄袭文字' (copied, similarity > 0.9) and '修改文字'
    (modified).
    scheme='ED': pairs each sentence of text1 with the minimum-edit-distance
    sentence of text2; returns a list of (sent1, sent2, distance) tuples.

    NOTE(review): the 'ED' branch calls edit_distance, which is defined in a
    later cell — NameError if this cell is executed first.
    """
    #get sentences from text1 and text2
    #tfidf version
    if scheme == 'tfidf':
        corpus1 = [' '.join(cut(re.sub('\W','',sent))) for sent in SentenceSplitter.split(text1)]
        corpus2 = [' '.join(cut(re.sub('\W','',sent))) for sent in SentenceSplitter.split(text2)]
        v = TfidfVectorizer()
        x = v.fit_transform(corpus1+corpus2)
        copy = []
        modify = []
        result = {}
        for i ,f in enumerate(x[:len(corpus1)]):
            similar_list = []
            for j,t in enumerate(x[len(corpus1):]):
                similar_list += [(corpus1[i],corpus2[j],cosine_similarity(f,t)[0][0])]
            # Best-matching sentence of text2 for this sentence of text1.
            stole = max(similar_list,key = lambda x:x[2])
            if stole[2] > 0.9:
                copy += [stole]
            else:
                modify += [stole]
            #result += [max(similar_list,key = lambda x:x[2])]
        result['抄袭文字'] = copy
        result['修改文字'] = modify
        return result
    elif scheme == 'ED':
        corpus1 = [sent for sent in SentenceSplitter.split(text1)]
        corpus2 = [sent for sent in SentenceSplitter.split(text2)]
        copy = []
        modify = []
        result = []
        for i ,f in enumerate(corpus1):
            similar_list = []
            for j,t in enumerate(corpus2):
                similar_list += [(corpus1[i],corpus2[j],edit_distance(f,t))]
            stole = min(similar_list,key = lambda x:x[2])
            #if stole[2] > 0.9:
            #    copy += [stole]
            #else:
            modify += [stole]
            result += [stole]
        #result['抄袭文字'] = copy
        #result['修改文字'] = modify
        return result
    #v1 = TfidfVectorizer()
    #v2 = TfidfVectorizer()
    #x1 = v1.fit_transform(cut(re.sub('\W','',text1)))
    #words1 = list(set(v1.get_feature_names()))
    #print(words1)
    #x2 = v2.fit_transform(cut(re.sub('\W','',text2)))
    #words2 = list(set(v2.get_feature_names()))
    #print(words2)
    #result = []
    #for w1 in words1:
    #    if w1 in words2:
    #        result += [(w1,w1)]
    #    else:
    #        for w2 in words2:
    #            wd_dist = edit_distance(w1,w2)
    #            if wd_dist <= max(len(w2),len(w1)) and bool(set(w1)&set(w2)) and ([tag for w,tag in pseg.cut(w1)][0] == [tag for w,tag in pseg.cut(w2)][0]):
    #                result += [(w1,w2)]
    #return result


sample = fake_true_samples[0]
sample

plagiarize_detect(sample[0],sample[1])

plagiarize_detect(sample[0],sample[1],scheme='ED')

from collections import defaultdict

# Memo tables shared across calls: `solution` records the operation chosen
# for each (s1, s2) subproblem, `lru` caches distances.  Both grow without
# bound for the life of the kernel.
solution = defaultdict(str)
lru = defaultdict(int)


def lru_cache(f):
    """Memoizing decorator for a two-string function, backed by the global
    `lru` dict.

    NOTE(review): this shadows functools.lru_cache and is never actually
    applied to edit_distance below — without memoization the recursion is
    exponential in the string lengths.
    """
    def pro(str1,str2):
        if (str1,str2) in lru:
            return lru[(str1,str2)]
        distance = f(str1,str2)
        lru[(str1,str2)] = distance
        return distance
    return pro


def edit_distance(string1, string2):
    """Levenshtein distance between string1 and string2.

    Also records the final edit operation ('ADD x' / 'DEL x' / 'SUB x => y'
    or '' for a match) for each subproblem in the global `solution` dict so
    the edit script can be reconstructed afterwards.
    """
    #if len(string1) == 0:
    #    return len(string2)
    #if len(string2) == 0:
    #    return len(string1)
    if len(string1) == 0 and len(string2) == 0:
        return 0
    #tail_s1 = string1[-1]
    #tail_s2 = string2[-1]
    tail_s1 = ''
    tail_s2 = ''
    candidates = []
    #candidates = [
    #    (edit_distance(string1[:-1], string2) + 1, 'DEL {}'.format(tail_s1)),  # string 1 delete tail
    #    (edit_distance(string1, string2[:-1]) + 1, 'ADD {}'.format(tail_s2)),  # string 1 add tail of string2
    #]
    if len(string1) == 0:
        # string1 exhausted: the only move left is inserting string2's tail.
        tail_s1 = string1
        tail_s2 = string2[-1]
        candidates = [(edit_distance(string1, string2[:-1]) + 1, 'ADD {}'.format(tail_s2)), # string 1 add tail of string2
                      ]
    elif len(string2) == 0:
        # string2 exhausted: the only move left is deleting string1's tail.
        tail_s1 = string1[-1]
        tail_s2 = string2
        candidates = [(edit_distance(string1[:-1], string2) + 1, 'DEL {}'.format(tail_s1)), # string 1 delete tail
                      ]
    else:
        tail_s1 = string1[-1]
        tail_s2 = string2[-1]
        candidates = [
            (edit_distance(string1[:-1], string2) + 1, 'DEL {}'.format(tail_s1)), # string 1 delete tail
            (edit_distance(string1, string2[:-1]) + 1, 'ADD {}'.format(tail_s2)), # string 1 add tail of string2
        ]
        # NOTE(review): indentation reconstructed from a mangled paste — the
        # match/substitute branch is taken to live inside this else-block
        # (both strings non-empty), matching the commented-out copy left at
        # function level below.  Confirm against the original notebook.
        if tail_s1 == tail_s2:
            both_forward = (edit_distance(string1[:-1], string2[:-1]) + 0, '')
        else:
            both_forward = (edit_distance(string1[:-1], string2[:-1]) + 1, 'SUB {} => {}'.format(tail_s1, tail_s2))
        candidates.append(both_forward)
    #if tail_s1 == tail_s2:
    #    both_forward = (edit_distance(string1[:-1], string2[:-1]) + 0, '')
    #else:
    #    both_forward = (edit_distance(string1[:-1], string2[:-1]) + 1, 'SUB {} => {}'.format(tail_s1, tail_s2))
    #candidates.append(both_forward)
    min_distance, operation = min(candidates, key=lambda x: x[0])
    solution[(string1, string2)] = operation
    return min_distance
nlp-backyard/assignment07.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import sys
import datetime as dt
import argparse
import re
import copy
import os
from itertools import chain

# NOTE(review): sys, re and copy are imported but never used in this script.


#function to break skills, order them and concatenate. order is important so that skill_a+skill_b is the same as skill_b+skill_a
def concetanete_skills(skills):
    """Normalize a multi-skill cell: split on '~~', sort, re-join with '+'.

    Sorting makes the concatenation order-independent.  None/NaN cells are
    returned unchanged.

    NOTE(review): the name misspells "concatenate" (renaming would also touch
    the call site below), and `skills is np.nan` relies on pandas handing back
    the np.nan singleton — pd.isna(skills) would be the robust check.
    """
    if skills is None or skills is np.nan:
        return skills
    skills = skills.split('~~')
    skills = sorted(skills)
    return '+'.join(skills)


def logProgressToWfl(progressMsg):
    """Append a timestamped "%Progress::" line to the workflow log file.

    NOTE(review): opening without a context manager leaks the handle if
    write() raises; `with open(...) as logFile:` would be safer.
    """
    logFile = open("multiskillConverterLog.wfl", "a")
    now = dt.datetime.now()
    progressPrepend = "%Progress::"
    logFile.write(progressPrepend + "@" + str(now) + "@" + progressMsg + "\n");
    logFile.close();


# +
# Example invocations (from the WorkflowComponents test harness):
#C:/ProgramData/Anaconda3/Python multiskill_converter.py -programDir . -workingDir . -userId 1 -kcModelsToConvert_nodeIndex 0 -kcModelsToConvert_fileIndex 0 -kcModelsToConvert "KC (CCSS)" -kcModelsToConvert_nodeIndex 0 -kcModelsToConvert_fileIndex 0 -kcModelsToConvert "KC (MATHia New)" -multiskillConversionMethod "Concatenate" -node 0 -fileIndex 0 C:\WPIDevelopment\dev06_dev\WorkflowComponents\MultiskillConverter\test\test_data\test.txt -inputFile test.txt
#C:/ProgramData/Anaconda3/Python multiskill_converter.py -programDir . -workingDir . -userId 1 -kcModelToConvert_nodeIndex 0 -kcModelToConvert_fileIndex 0 -kcModelToConvert "KC (MATHia New)" -multiskillConversionMethod "Split to Multiple Rows" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Correct Step Duration (sec)" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Step Duration (sec)" -node 0 -fileIndex 0 C:\WPIDevelopment\dev06_dev\WorkflowComponents\MultiskillConverter\test\test_data\test.txt -inputFile test.txt
#C:/ProgramData/Anaconda3/Python multiskill_converter.py -programDir . -workingDir . -userId 1 -kcModelToConvert_nodeIndex 0 -kcModelToConvert_fileIndex 0 -kcModelToConvert "KC (MATHia New)" -multiskillConversionMethod "Split to Multiple Rows" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Step End Time" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Step Duration (sec)" -node 0 -fileIndex 0 C:\WPIDevelopment\dev06_dev\WorkflowComponents\MultiskillConverter\test\test_data\test.txt -inputFile test.txt

#command line
parser = argparse.ArgumentParser(description='Process datashop file.')
parser.add_argument('-programDir', type=str, help='the component program directory')
parser.add_argument('-workingDir', type=str, help='the component instance working directory')
parser.add_argument("-node", nargs=1, action='append')
parser.add_argument("-fileIndex", nargs=2, action='append')
parser.add_argument('-multiskillConversionMethod', choices=["Concatenate", "Split to Multiple Rows"], help='Method to handle multiskill steps(default="Concatenate")', default="Concatenate")
parser.add_argument('-kcModelsToConvert', nargs=1, action='append', type=str, help='KC models to convert when concatenating; e.g., "Item"')
parser.add_argument('-kcModelToConvert', nargs=1, type=str, help='KC model to convert when Split to Multiple Rows; e.g., "Item"')
# NOTE(review): help text copy-pasted from the KC-model option — this flag
# actually names value columns to divide across the split rows.
parser.add_argument('-valuesToBeSplit', nargs=1, action='append', type=str, help='KC model to convert when Split to Multiple Rows;')
# NOTE(review): default="Concatenate" is not one of choices ["Yes", "No"].
# argparse does not validate defaults, and the check below treats anything
# other than "Yes" as False, so the effective default is "No" — almost
# certainly a copy/paste slip for default="No".
parser.add_argument('-averageColumnValues', choices=["Yes", "No"], help='If any column value should be averaged(default="No")', default="Concatenate")
parser.add_argument('-inputFile', type=str, help='data file containing multi-skill steps')
parser.add_argument('-userId', type=str, help='placeholder for WF', default='')

args, option_file_index_args = parser.parse_known_args()

filename = args.inputFile
modification_method = args.multiskillConversionMethod

# Each repeatable option arrives as a list of 1-element lists; flatten it.
kcms_to_change = args.kcModelsToConvert
if kcms_to_change is not None:
    kcms_to_change = list(chain.from_iterable(kcms_to_change))
kcm_to_split = args.kcModelToConvert
if kcm_to_split is not None:
    kcm_to_split = kcm_to_split[0]
columns_value_to_be_split = args.valuesToBeSplit
if columns_value_to_be_split is not None:
    columns_value_to_be_split = list(chain.from_iterable(columns_value_to_be_split))

# Normalize the Yes/No flag to a real boolean.
average_column_values = args.averageColumnValues
if average_column_values is not None and average_column_values == "Yes":
    average_column_values = True
else:
    average_column_values = False
# -

# Manual-testing override: flip to True to run in a notebook without argv.
if False:
    filename = 'test.txt'
    #modification_method = 'Concatenate'
    kcms_to_change = ['KC (CCSS)', 'KC (MATHia New)']
    modification_method = 'Split to Multiple Rows'
    kcm_to_split = 'KC (MATHia New)'
    columns_value_to_be_split = ['Step Duration (sec)', 'Correct Step Duration (sec)']
    average_column_values = True

# +
df = pd.read_csv(filename, dtype=str, na_values = ['null', 'na', 'NA', 'n/a', 'nan'], sep="\t", encoding = "ISO-8859-1")

if modification_method == 'Concatenate':
    for kcm_to_change in kcms_to_change:
        print(kcm_to_change)
        if kcm_to_change in df.columns:
            #change ~~ to +
            df[kcm_to_change] = df[kcm_to_change].apply(concetanete_skills)
            #get KC model name without prefix "KC ("
            # NOTE(review): kcm_name is assigned but never used, and kc_name
            # is only bound inside the if — a model name without the
            # "KC (...)" pattern raises NameError (or reuses a stale value)
            # on the kcm_opportunity line below.
            kcm_name = kcm_to_change
            if "KC (" in kcm_to_change and ")" in kcm_to_change:
                kc_name = kcm_to_change[len("KC ("):kcm_to_change.find(")")]
            kcm_opportunity = "Opportunity ({})".format(kc_name)
            # Recompute the opportunity count: drop the old column, then
            # number each (student, skill) pair in row order.
            if kcm_opportunity in df.columns:
                df.drop(kcm_opportunity, axis=1, inplace=True)
            df_omit_na = df[['Anon Student Id', kcm_to_change]]
            df_omit_na = df_omit_na.dropna()
            df_omit_na[kcm_opportunity] = df_omit_na.groupby(['Anon Student Id', kcm_to_change]).cumcount()+1
            df_omit_na = df_omit_na[[kcm_opportunity]]
            df = df.merge(df_omit_na, left_index=True, right_index=True, how='left')
    filename = os.path.basename(os.path.normpath(filename))
    df.to_csv('multiskill_converted_{}'.format(filename), sep='\t', index=False)
elif modification_method == 'Split to Multiple Rows':
    proc_pct = 0.1
    totalCnt = df.shape[0]
    if kcm_to_split in df.columns:
        #make a new dataframe
        split_df = pd.DataFrame(columns = df.columns)
        #loop through each rows
        cnt = 1
        for index, row in df.iterrows():
            #write to the workflow log for percentage processed
            if cnt/totalCnt > proc_pct:
                logProgressToWfl("{:.0%}".format(proc_pct))
                proc_pct = proc_pct + 0.1
            cnt = cnt + 1
            #process skills
            # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0, and appending row-by-row is O(n^2).  Collect
            # row dicts in a list and build the frame once with
            # pd.DataFrame(rows) / pd.concat at the end.
            skills = row[kcm_to_split]
            if skills is None or pd.isna(skills):
                split_df = split_df.append(row, ignore_index = True)
                continue
            skills = row[kcm_to_split].split('~~')
            for skill in skills:
                # One output row per skill; averaged columns get value/len(skills).
                row_as_dict = {}
                for column in df.columns:
                    if column == kcm_to_split:
                        row_as_dict[kcm_to_split] = skill
                    elif average_column_values == True and column in columns_value_to_be_split:
                        val_to_split = row[column]
                        if val_to_split is not None and not pd.isna(val_to_split):
                            # NOTE(review): bare except silently keeps the raw
                            # value for non-numeric cells; catching ValueError
                            # explicitly would be clearer.
                            try:
                                val = pd.to_numeric(val_to_split)
                                row_as_dict[column] = val/len(skills)
                            except:
                                row_as_dict[column] = row[column]
                        else:
                            row_as_dict[column] = row[column]
                    else:
                        row_as_dict[column] = row[column]
                split_df = split_df.append(row_as_dict, ignore_index = True)
        #redo opportunity
        # NOTE(review): same kcm_name / kc_name pattern as the Concatenate
        # branch — kc_name may be unbound if the model name lacks "KC (...)".
        kcm_name = kcm_to_split
        if "KC (" in kcm_to_split and ")" in kcm_to_split:
            kc_name = kcm_to_split[len("KC ("):kcm_to_split.find(")")]
        kcm_opportunity = "Opportunity ({})".format(kc_name)
        if kcm_opportunity in split_df.columns:
            split_df.drop(kcm_opportunity, axis=1, inplace=True)
        df_omit_na = split_df[['Anon Student Id', kcm_to_split]]
        df_omit_na = df_omit_na.dropna()
        df_omit_na[kcm_opportunity] = df_omit_na.groupby(['Anon Student Id', kcm_to_split]).cumcount()+1
        df_omit_na = df_omit_na[[kcm_opportunity]]
        split_df = split_df.merge(df_omit_na, left_index=True, right_index=True, how='left')
        filename = os.path.basename(os.path.normpath(filename))
        split_df.to_csv('multiskill_converted_{}'.format(filename), sep='\t', index=False)
# -
MultiskillConverter/program/multiskill_converter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # RMSProp算法 # # 我们在[“AdaGrad算法”](adagrad.ipynb)一节中提到,因为调整学习率时分母上的变量$\boldsymbol{s}_t$一直在累加按元素平方的小批量随机梯度,所以目标函数自变量每个元素的学习率在迭代过程中一直在降低(或不变)。因此,当学习率在迭代早期降得较快且当前解依然不佳时,AdaGrad算法在迭代后期由于学习率过小,可能较难找到一个有用的解。为了解决这一问题,RMSProp算法对AdaGrad算法做了一点小小的修改。该算法源自Coursera上的一门课程,即“机器学习的神经网络” [1]。 # # ## 算法 # # 我们在[“动量法”](momentum.ipynb)一节里介绍过指数加权移动平均。不同于AdaGrad算法里状态变量$\boldsymbol{s}_t$是截至时间步$t$所有小批量随机梯度$\boldsymbol{g}_t$按元素平方和,RMSProp算法将这些梯度按元素平方做指数加权移动平均。具体来说,给定超参数$0 \leq \gamma < 1$,RMSProp算法在时间步$t>0$计算 # # $$\boldsymbol{s}_t \leftarrow \gamma \boldsymbol{s}_{t-1} + (1 - \gamma) \boldsymbol{g}_t \odot \boldsymbol{g}_t. $$ # # 和AdaGrad算法一样,RMSProp算法将目标函数自变量中每个元素的学习率通过按元素运算重新调整,然后更新自变量 # # $$\boldsymbol{x}_t \leftarrow \boldsymbol{x}_{t-1} - \frac{\eta}{\sqrt{\boldsymbol{s}_t + \epsilon}} \odot \boldsymbol{g}_t, $$ # # 其中$\eta$是学习率,$\epsilon$是为了维持数值稳定性而添加的常数,如$10^{-6}$。因为RMSProp算法的状态变量$\boldsymbol{s}_t$是对平方项$\boldsymbol{g}_t \odot \boldsymbol{g}_t$的指数加权移动平均,所以可以看作是最近$1/(1-\gamma)$个时间步的小批量随机梯度平方项的加权平均。如此一来,自变量每个元素的学习率在迭代过程中就不再一直降低(或不变)。 # # 照例,让我们先观察RMSProp算法对目标函数$f(\boldsymbol{x})=0.1x_1^2+2x_2^2$中自变量的迭代轨迹。回忆在[“AdaGrad算法”](adagrad.ipynb)一节使用的学习率为0.4的AdaGrad算法,自变量在迭代后期的移动幅度较小。但在同样的学习率下,RMSProp算法可以更快逼近最优解。 # + attributes={"classes": [], "id": "", "n": "3"} # %matplotlib inline import d2lzh as d2l import math from mxnet import nd def rmsprop_2d(x1, x2, s1, s2): g1, g2, eps = 0.2 * x1, 4 * x2, 1e-6 s1 = gamma * s1 + (1 - gamma) * g1 ** 2 s2 = gamma * s2 + (1 - gamma) * g2 ** 2 x1 -= eta / math.sqrt(s1 + eps) * g1 x2 -= eta / math.sqrt(s2 + eps) * g2 return x1, x2, s1, s2 def f_2d(x1, x2): return 0.1 * x1 ** 2 + 2 * x2 ** 2 eta, gamma = 0.4, 0.9 d2l.show_trace_2d(f_2d, d2l.train_2d(rmsprop_2d)) # - # ## 从零开始实现 # # 接下来按照RMSProp算法中的公式实现该算法。 # + attributes={"classes": [], "id": "", "n": "22"} features, labels = d2l.get_data_ch7() def 
init_rmsprop_states(): s_w = nd.zeros((features.shape[1], 1)) s_b = nd.zeros(1) return (s_w, s_b) def rmsprop(params, states, hyperparams): gamma, eps = hyperparams['gamma'], 1e-6 for p, s in zip(params, states): s[:] = gamma * s + (1 - gamma) * p.grad.square() p[:] -= hyperparams['lr'] * p.grad / (s + eps).sqrt() # - # 我们将初始学习率设为0.01,并将超参数$\gamma$设为0.9。此时,变量$\boldsymbol{s}_t$可看作是最近$1/(1-0.9) = 10$个时间步的平方项$\boldsymbol{g}_t \odot \boldsymbol{g}_t$的加权平均。 # + attributes={"classes": [], "id": "", "n": "24"} d2l.train_ch7(rmsprop, init_rmsprop_states(), {'lr': 0.01, 'gamma': 0.9}, features, labels) # - # ## 简洁实现 # # 通过名称为“rmsprop”的`Trainer`实例,我们便可使用Gluon提供的RMSProp算法来训练模型。注意,超参数$\gamma$通过`gamma1`指定。 # + attributes={"classes": [], "id": "", "n": "29"} d2l.train_gluon_ch7('rmsprop', {'learning_rate': 0.01, 'gamma1': 0.9}, features, labels) # - # ## 小结 # # * RMSProp算法和AdaGrad算法的不同在于,RMSProp算法使用了小批量随机梯度按元素平方的指数加权移动平均来调整学习率。 # # ## 练习 # # * 把$\gamma$的值设为1,实验结果有什么变化?为什么? # * 试着使用其他的初始学习率和$\gamma$超参数的组合,观察并分析实验结果。 # # # # # ## 参考文献 # # [1] <NAME>., & <NAME>. (2012). Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural networks for machine learning, 4(2), 26-31. # # ## 扫码直达[讨论区](https://discuss.gluon.ai/t/topic/2275) # # ![](../img/qr_rmsprop.svg)
chapter_optimization/rmsprop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Synapse PySpark # name: synapse_pyspark # --- # # Getting started with Azure Cosmos DB's API for MongoDB and Synapse Link # # ## Key Information about this notebook # # * This notebook is part of the Azure Synapse Link for Azure Cosmos DB analitycal sample notebooks. For more information, click [here](../../../README.md). # # * It was build for Azure Cosmos DB API for MongoDB but you can, by yourself, customize it for Azure Cosmos DB SQL API. Please read about the analytical store inference schema differences between these 2 APIs [here](https://docs.microsoft.com/azure/cosmos-db/analytical-store-introduction#analytical-schema). # # * This is a Synapse Notebook and it was created to run in Synapse Analytics workspaces. Please make sure that you followed the pre-reqs of the [README](/README.md) file. After that, please execute the steps below in the same order that they are presented here. # # * From now on, all operations are case sentitive. Please be careful with everything you need to type. # # In this sample we will execute the following tasks: # # 1. Insert a dataset using the traditional MongoDB client. # 1. Execute aggregation queries against the Analytical Store from the transactional data we inserted. # 1. Insert another dataset, but this time using a different datatype for the timestamp property. # 1. Execute aggregation queries again, consolidating both datasets. # # ## Pre-requisites # 1. Have you created a MongoDB API account in Azure Cosmos DB? If not, go to [Create an account for Azure Cosmos DB's API for MongoDB](https://docs.microsoft.com/azure/cosmos-db/mongodb-introduction). # 1. For your Cosmos DB account, have you enabled Synapse Link? If not, go to [Enable Synapse Link for Azure Cosmos DB's API for MongoDB](https://docs.microsoft.com/azure/cosmos-db/configure-synapse-link). 
# 1. Have you created a Synapse Workspace? If not, go to [Create Synapse Workspace account](https://docs.microsoft.com/azure/synapse-analytics/synapse-link/how-to-connect-synapse-link-cosmos-db). Please don't forget to add yourself as **Storage Blob Data Contributor** to the primary ADLS G2 account that is linked to the Synapse workspace. # # ## Create a Cosmos DB collection with analytical store enabled # # Please be careful, all commands are case sensitive. # # 1. Create a database named `DemoSynapseLinkMongoDB`. # 1. Create a collection named `HTAP` with a Shard key called `item`. Make sure you set the `Analytical store` option to `On` when you create your collection. # # ## Optional - Connect your collection to Synapse # # To accelerate future work, you can connect your collection to Synapse. **We won't use this capability in this demo**, but fell free to test and use it. # # 1. Go to your Synapse Analytics workspace. # 1. Create a `Linked Data` connection for your MongoDB API account. # 1. Under the `Data` blade, select the + (plus) sign. # 1. Select the `Connect to external data` option. # 1. Now select the `Azure Cosmos DB (MongoDB API)` option. # 1. Enter all the information regarding your specific Azure Cosmos DB account either by using the dropdowns or by entering the connection string. Take note of the name you assigned to your `Linked Data` connection. # - Alternatively, you can also use the connection parameters from your account overview. # 1. Test the connection by looking for your database accounts in the `Data` blade, and under the `Linked` tab. # - There should be a list that contains all accounts and collections. # - Collections that have an `Analytical Store` enabled will have a distinctive icon. # ### Let's get the environment ready # # This environment allows you to install and use any python libraries that you want to run. 
For this sample, you need to add the following libraries to your Spark pool: # # ``` # pymongo==3.5.1 # aenum==2.2.4 # ``` # # Learn how to import libraries into your Spark pools in [this article](https://docs.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-azure-portal-add-libraries). Please use the `requirements.txt` file located in the same folder of this notebook to update your pool packages. # # You can execute the following command to make sure all the libraries are installed correctly: # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} import importlib packages = ['pymongo','aenum'] for package in packages: test = importlib.util.find_spec(package) if test: print(package, "OK") else: print(package, "PROBLEM - NOK") # - # ### Add your database account and collection details here! DATABASE_ACCOUNT_NAME = 'your-cosmos-db-mongodb-account-name' DATABASE_ACCOUNT_READWRITE_KEY = 'your-cosmos-db-mongodb-account-key' DATABASE_NAME = 'DemoSynapseLinkMongoDB' COLLECTION_NAME = 'HTAP' # ## Let's initialize the MongoDB client # # You are only going to need the following parameters from your account overview: # - Connection string. # - Primary or secondary ready/write key. # # Remember that we named our database `DemoSynapseLinkMongoDB` and our collection `HTAP`. # # The code snippet below shows how to initialize the `MongoClient` object. # + from pymongo import MongoClient from bson import ObjectId # For ObjectId to work client = MongoClient("mongodb://{account}.mongo.cosmos.azure.com:10255/?ssl=true&replicaSet=globaldb".format(account = DATABASE_ACCOUNT_NAME)) # Your own database account endpoint. db = client.DemoSynapseLinkMongoDB # Select the database db.authenticate(name=DATABASE_ACCOUNT_NAME,password=DATABASE_ACCOUNT_READWRITE_KEY) # Use your database account name and any of your read/write keys. 
# -

# ## Inserting data with the MongoClient driver
#
# The following sample will generate 500 items based on random data. Each item will contain the following fields:
# - item, string
# - price, float
# - rating, integer
# - timestamp, [epoch integer](http://unixtimestamp.50x.eu/about.php)
#
# This data will be inserted into the MongoDB store of your database. This emulates the transactional data that an application would generate.

# +
from random import randint
import time

# Handle to the transactional (OLTP) collection we created earlier.
orders = db["HTAP"]

items = ['Pizza','Sandwich','Soup', 'Salad', 'Tacos']
prices = [2.99, 3.49, 5.49, 12.99, 54.49]

# Insert 500 synthetic orders, one document per insert, with an epoch timestamp.
for _ in range(500):
    new_order = {
        'item': items[randint(0, len(items) - 1)],
        'price': prices[randint(0, len(prices) - 1)],
        'rating': randint(1, 5),
        'timestamp': time.time(),
    }
    orders.insert_one(new_order)

print('finished creating 500 orders')
# -

# ## Read data from the Analytical Store.
#
# Now that we have inserted some transactional data, let's read it from Azure Cosmos DB analytical store. Cosmos DB will automatically transform the BSON data (Binary JSON) into a columnar format, which will make it fast and easy to execute aggregation workloads on top of your transactional data, at no RUs or performance costs.
#
# The cells below will:
#
# 1. Load the data from analytical store into a DataFrame.
# 1. Check the top rows. Yes, the BSON data was converted into columnar structured format.
# 1. Check the DataFrame schema.
# 1. Run aggregations
#
#
# > If you get an "no snapshot" error, Please check if your container was created with **analytical store** enabled.
#
# If your DataFrame has no data, please wait a couple of minutes because the root cause is that the auto sync between transactional and analytical stores isn't completed yet. This process usually takes 2 minutes, but in some cases it may take up to 5 minutes. Please wait a few minutes and run the command below again.
#
# **Important: Please note that we are using random values for prices and ratings.
Don't expect the same results of the outputs below. What you can expect is the same behavior and experience.** # # + # Load the data from analytical store into a DataFrame. df = spark.read.format("cosmos.olap")\ .option("spark.cosmos.accountEndpoint", "https://{account}.documents.azure.com:443/".format(account = DATABASE_ACCOUNT_NAME))\ .option("spark.cosmos.accountKey", DATABASE_ACCOUNT_READWRITE_KEY)\ .option("spark.cosmos.database", DATABASE_NAME)\ .option("spark.cosmos.container", COLLECTION_NAME)\ .load() # Checking the data display(df) # + [markdown] nteract={"transient": {"deleting": false}} # ## First Schema Analysis # # Let's run the command below and check the schema of the `df` DataFrame that we just created and loaded. Please note that all properties of our document (item, price, rating, and timestamp) are represented in the DataFrame as a `struct` with one datatype within each one of them. This will change in the next cells, and to understand that is part of the learning objectives of this notebook. # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} df.printSchema() # + [markdown] nteract={"transient": {"deleting": false}} # ## Agregations # # Now let's run aggregations on top of the `df` DataFrame that we just created. # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} # Run aggregations df.groupBy(df.item.string).sum().show() # + [markdown] nteract={"transient": {"deleting": false}} # ## Important - Spark Syntax for Aggregations on DataFrames # # For the aggregation above, a syntax that doesn't explicity mentions the datatypes, like `df.groupBy(df['item']).sum().show()`, executes without an error. But it is **not recommended!** # # It runs because Spark automatically flattens the structure into an Array, where it takes each distinct value in the `struct` dict and applies the aggregation function. 
But you will see in the next cells that we may have more than one datatype for the same struct of a property, and the implicit conversion that Spark does can cause wrong results. # ## Schema Representation - A quick note about the MongoDB schema in analytical store # # Please note in the result above that for the `timestamp` field we have only 1 datatype: `struct<float64:double>`. # We will see that this detail will change since we will insert data with different datatype for that `timestamp` field. # # For Azure Cosmos DB API for MongoDB accounts, we make use of a **Full Fidelity Schema** as a default option. This is a representation of property names extended with their data types to provide an accurate # representation of their values and avoid ambiguity. # # This is why, when we called the fields above, we used their datatype as a suffix. Like in the example below: # # ``` # df.filter((df.item.string == "Pizza")).show(10) # ``` # # Notice how we specified the `string` type after the name of the property. Here is a map of all potential properties and their suffix representations in the Analytical Store: # # | Original Data Type &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| Suffix &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| Example &nbsp;&nbsp;&nbsp;&nbsp; | # |---------------|----------------|--------| # | Double | ".float64" | `24.99` | # | Array | ".array" | `["a", "b"]` | # | Binary | ".binary" | `0` | # | Boolean | ".bool" | `True` | # | Int32 | ".int32" | `123` | # | Int64 | ".int64" | `255486129307` | # | Null | ".null" | `null` | # | String | ".string" | `"ABC"` | # | Timestamp | ".timestamp" | `Timestamp(0, 0)` | # | DateTime | ".date" | `ISODate("2020-08-21T07:43:07.375Z")` | # | ObjectId | ".objectId" | `ObjectId("5f3f7b59330ec25c132623a2")` | # | Document | ".object" | `{"a": "a"}` | # # These types are inferred from the data that is inserted in the transactional store. 
You can see the schema by executing the following command: # ``` # df.printSchema # ``` # # > The default option for Azure Cosmos DB CORE (SQL) API, **Well defined Schema** is the default option. For more information about schemas representation, click [here](https://docs.microsoft.com/azure/cosmos-db/analytical-store-introduction#schema-representation) . # # # - # ## Let's insert more orders! # # This time we will use slightly different data. Each item will contain the following fields: # - item, string # - price, float # - rating, integer # - timestamp, [ISO String format](https://en.wikipedia.org/wiki/ISO_8601) # # Notice how the `Timestamp` field is now in a string format. This will help us understand how the different data fields can be read based on their data type. # # After that, we will load the data, check the schema, and run some queries. # + from random import randint from time import strftime orders = db["HTAP"] items = ['Pizza','Sandwich','Soup', 'Salad', 'Tacos'] prices = [2.99, 3.49, 5.49, 12.99, 54.49] for x in range(1, 501): order = { 'item' : items[randint(0, (len(items)-1))], 'price' : prices[randint(0, (len(prices)-1))], 'rating' : randint(1, 5), 'timestamp' : strftime("%Y-%m-%d %H:%M:%S") } result=orders.insert_one(order) print('finished creating 500 orders') # - # ## Let's reload the DataFrame and check the schema again! # + # Load the Analytical Store data into a dataframe # Make sure to run the cell with the secrets to get the DATABASE_ACCOUNT_NAME and the DATABASE_ACCOUNT_READWRITE_KEY variables. df = spark.read.format("cosmos.olap")\ .option("spark.cosmos.accountEndpoint", "https://{account}.documents.azure.com:443/".format(account = DATABASE_ACCOUNT_NAME))\ .option("spark.cosmos.accountKey", DATABASE_ACCOUNT_READWRITE_KEY)\ .option("spark.cosmos.database", DATABASE_NAME)\ .option("spark.cosmos.container", COLLECTION_NAME)\ .load() # Check the schema AGAIN. Try to find something different. 
df.printSchema() # + [markdown] nteract={"transient": {"deleting": false}} # ## Schema Representation - What Changed? # # Please note in the result above that now, for the `timestamp` field, we have 2 datatypes: `struct<float64:double>` and `string:string`. That happened because we added data with a different datatype. That's `Full Fidelity Schema`, when Azure Cosmos DB will do a full representation of your data, with the datatypes you used. # # > If the result doesn't show two datatypes for `timestamp`, then wait a few minutes because the backend auto-sync process has not yet occurred. # # ## Queries # # Now let's run some interesting queries, using the datypes to filter the data. # # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} # SQL!! # Let's see the data for pizzas that have a string timestamp df.createOrReplaceTempView("Pizza") sql_results = spark.sql("SELECT sum(price.float64),count(*) FROM Pizza where timestamp.string is not null and item.string = 'Pizza'") sql_results.show() # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} # SQL!! # Let's see the data for pizzas that have a string timestamp df.createOrReplaceTempView("Pizza") sql_results = spark.sql("SELECT sum(price.float64),count(*) FROM Pizza where timestamp.float64 is not null and item.string = 'Pizza'") sql_results.show() # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} # SQL!! # Let's compare both timestamp columns df.createOrReplaceTempView("Pizza") sql_results = spark.sql("SELECT max(timestamp.float64),max(timestamp.string) FROM Pizza where item.string = 'Pizza'") sql_results.show() # + [markdown] nteract={"transient": {"deleting": false}} # ## Schema Representation - Last thoughts # # Please note that the queries above return different data because of the filters on the timestamp column. 
From the user perspective, it's like there are 2 different columns, `timestamp.float64` and `timestamp.string`.
#
# ## Conclusion
#
# Now you know how to use Azure Synapse Link for the Azure Cosmos DB analytical store with the MongoDB API. You also now know how to work with DataFrames, full fidelity schema, and Spark SQL.
Notebooks/PySpark/Synapse Link for Cosmos DB samples/E-Commerce/spark-notebooks/pyspark/01-CosmosDBSynapseMongoDB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape a CCNA tutorial page and print every IPv4 address and CIDR block it mentions.

import re

# Dotted-quad IPv4 address. Octets are limited to 1-3 digits and the pattern is
# wrapped in \b word boundaries so longer digit runs (e.g. "1234.5.6.7") and
# substrings of bigger tokens are not mistaken for addresses.
IP_RE = re.compile(r"\b[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\b")
# Same dotted quad followed by a /prefix-length, e.g. "10.0.0.0/8".
CIDR_RE = re.compile(r"\b[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/[0-9]{1,2}\b")


def extract_ips(text):
    """Return (addresses, cidr_blocks) found in *text*.

    Both lists are de-duplicated while preserving first-seen order; the
    original used list(set(...)), which printed in a nondeterministic order.
    """
    addresses = list(dict.fromkeys(IP_RE.findall(text)))
    cidr_blocks = list(dict.fromkeys(CIDR_RE.findall(text)))
    return addresses, cidr_blocks


def main():
    # Imported here so extract_ips stays importable without the requests package.
    import requests

    url = "https://study-ccna.com/classes-of-ip-addresses/"
    r = requests.get(url)
    r.raise_for_status()  # fail loudly instead of silently parsing an error page

    addresses, cidr_blocks = extract_ips(r.text)
    for each in addresses:
        print(each)
    for block in cidr_blocks:
        print(block)


if __name__ == "__main__":
    main()
DAY 16 ASSIGNMENT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Inference pipeline # # Created by: <NAME> # # + # %load_ext autoreload # %autoreload 2 from pyproj import CRS import boto3 from rasterio.session import AWSSession from s3fs import S3FileSystem aws_session = AWSSession(boto3.Session(),#profile_name='default'), requester_pays=True) fs = S3FileSystem(requester_pays=True) #profile='default', import xgboost as xgb from osgeo.gdal import VSICurlClearCache import rasterio as rio import numpy as np import xarray as xr import dask import os import fsspec import geopandas as gpd import rioxarray # for the extension to load import matplotlib.pyplot as plt import utm import pandas as pd from datetime import datetime import time import json import zarr import awswrangler as wr from dask_gateway import Gateway from carbonplan_trace.v1.landsat_preprocess import access_credentials, test_credentials from carbonplan_trace.v1.inference import predict, predict_delayed from carbonplan_trace.v1 import utils # + from carbonplan_trace import version print(version) # - dask.config.set({"array.slicing.split_large_chunks": True}) # tcp is a transmission control protocol dask.config.set({"distributed.comm.timeouts.tcp": "50s"}) dask.config.set({"distributed.comm.timeouts.connect": "50s"}) # dask.config.set({"distributed.worker.resources.WORKERTOKEN": 1}) # + tags=[] kind_of_cluster = "local" # kind_of_cluster = "remote" if kind_of_cluster == "local": # spin up local cluster. 
must be on big enough machine from dask.distributed import Client client = Client(n_workers=2, threads_per_worker=15, resources={"workertoken": 1}) client else: gateway = Gateway() options = gateway.cluster_options() options.environment = { "AWS_REQUEST_PAYER": "requester", "AWS_REGION_NAME": "us-west-2", "DASK_DISTRIBUTED__WORKER__RESOURCES__WORKERTOKEN": "1", } options.worker_cores = 8 options.worker_memory = 100 options.image = "carbonplan/trace-python-notebook:latest" cluster = gateway.new_cluster(cluster_options=options) cluster.adapt(minimum=1, maximum=10) # cluster.scale(100) # - cluster # + # client = cluster.get_client() client # check this link first # possible scenario: # 1) everything is succeeding and cluster still running, no need to do anything # 2) most things are failing but cluster still running, restart, increase mem and decrease num worker, re start and run all # 3) 404 error -> cluster died -> restart and run all # - def shutdown_cluster(kind_of_cluster): if kind_of_cluster == "local": client.shutdown() elif kind_of_cluster == "remote": cluster.shutdown() access_key_id, secret_access_key = access_credentials() test_credentials(aws_session) # Then we take the list of files for a given year to average across growing season for each of the # tiles and write it out to a mapper with those specifications. 
# # + gdf = gpd.read_file( "https://prd-wret.s3-us-west-2.amazonaws.com/assets/" "palladium/production/s3fs-public/atoms/files/" "WRS2_descending_0.zip" ) bucket = "s3://carbonplan-climatetrace/v1" biomass_folder = "s3://carbonplan-climatetrace/intermediate/ecoregions_mask/" biomass_files = fs.ls(biomass_folder) lat_lon_tags = [utils.get_lat_lon_tags_from_tile_path(fp) for fp in biomass_files] bounding_boxes = [utils.parse_bounding_box_from_lat_lon_tags(lat, lon) for lat, lon in lat_lon_tags] # + from carbonplan_trace.v1.glas_allometric_eq import REALM_GROUPINGS processed_scenes = [] for year in np.arange(2014, 2021): processed_scenes.extend(fs.ls(f"{bucket}/inference/rf/{year}", recursive=True)) processed_scenes = [scene[-19:-8] for scene in processed_scenes] # - len(processed_scenes) len(processed_scenes) - 57875 len(bounding_boxes) # We'll loop through every scene and every year and calculate biomass for that scene. Will produce # table of values [x, y, (both specific to utm projection), lat, lon, biomass]. 
#

# + tags=[]
# Landsat 7 (ETM) collection-2 level-2 scene prefix: year / WRS-2 path / WRS-2 row.
landsat_bucket = "s3://usgs-landsat/collection02/level-2/standard/etm/{}/{:03d}/{:03d}/"

with rio.Env(aws_session):
    tasks = []  # futures from client.compute (was commented out -> NameError on append)
    task_ids = []  # bookkeeping per submitted task: [path, row, year, max_lat, min_lon]
    for bounding_box in bounding_boxes:
        print(bounding_box)
        min_lat, max_lat, min_lon, max_lon = bounding_box
        # WRS-2 path/row pairs whose footprint intersects this tile's bounding box.
        scenes_in_tile = gdf.cx[min_lon:max_lon, min_lat:max_lat][["PATH", "ROW"]].values
        for year in np.arange(2014, 2021):
            for [path, row] in scenes_in_tile:
                scene_stores = fs.ls(landsat_bucket.format(year, path, row))
                output_name = f"{year}/{path:03d}{row:03d}"
                if len(scene_stores) == 0:
                    # No imagery exists for this scene/year.
                    continue
                if output_name in processed_scenes:
                    # Already inferred by a previous run; skip resubmission.
                    continue
                tasks.append(
                    client.compute(
                        predict_delayed(
                            model_folder=f"{bucket}/models/",
                            path=path,
                            row=row,
                            year=year,
                            access_key_id=access_key_id,
                            secret_access_key=secret_access_key,
                            output_write_bucket=f"{bucket}/inference",
                        ),
                        # One workertoken per task caps concurrent tasks per worker.
                        resources={"workertoken": 1},
                    )
                )
                task_ids.append([path, row, year, max_lat, min_lon])
# + tags=[]
len(tasks)

# + tags=[]
results = dask.compute(tasks, retries=1, resources={"workertoken": 1})[0]
results

# + tags=[]
# Run a single scene synchronously for debugging.
path = 93
row = 11
year = 2014
print(path, row, year)
predict(
    model_folder=f"{bucket}/models/",
    path=path,
    row=row,
    year=year,
    access_key_id=access_key_id,
    secret_access_key=secret_access_key,
    output_write_bucket=f"{bucket}/inference",
)

# + tags=[]
# Print the error of every failed task (the empty list is a placeholder for
# indices to exclude from the report).
for i, task in enumerate(tasks):
    if task.status == "error" and i not in []:
        print(i)
        print(task.result())

# + tags=[]
# Collect the ids of failed tasks and persist them for a retry run.
exclude_list = []
for i, task in enumerate(tasks):
    if task.status == "error" and i not in []:
        print(i)
        try:
            print(task.result())  # re-raises the task's exception
        except Exception as e:
            print(e)
        exclude_list.append(list(task_ids[i]))  # was `task_id`, an undefined name

# task_ids rows carry five fields, so the frame needs five column names
# (the original three-column list raised a shape-mismatch ValueError).
pd.DataFrame(
    exclude_list, columns=["path", "row", "year", "max_lat", "min_lon"]
).to_csv("inference_failed_tasks.csv")

# + tags=[]
exclude_list

# + tags=[]
# Cancel anything still pending/running; report indices that refuse to cancel.
for i, task in enumerate(tasks):
    try:
        task.cancel()
    except Exception:
        print(i)
notebooks/processing/inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Goals # # # ### Learn how to change number of processors # # Table of Contents # # # ## [0. Install](#0) # # # ## [1. Load experiment with default settings](#1) # # # ## [2. Set intra epoch display as Off and Train](#2) # # # ## [3. Set inter epoch display as Off and Train](#3) # <a id='0'></a> # # Install Monk # # - git clone https://github.com/Tessellate-Imaging/monk_v1.git # # - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt # - (Select the requirements file as per OS and CUDA version) # !git clone https://github.com/Tessellate-Imaging/monk_v1.git # Select the requirements file as per OS and CUDA version # !cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt # ## Dataset - Art style type classification # - https://www.kaggle.com/thedownhill/art-images-drawings-painting-sculpture-engraving # ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1elVvUTgoX_E4QuLAUP-tUhMcYNhH117E' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1elVvUTgoX_E4QuLAUP-tUhMcYNhH117E" -O art_style_type.zip && rm -rf /tmp/cookies.txt # ! unzip -qq art_style_type.zip # # Imports # Monk import os import sys sys.path.append("monk_v1/monk/"); #Using mxnet-gluon backend from gluon_prototype import prototype # <a id='1'></a> # # Load experiment with default settings gtf = prototype(verbose=1); gtf.Prototype("project", "experiment_with_display_params"); # + gtf.Default(dataset_path="art_style_type/train", model_name="resnet18_v1", freeze_base_network=True, num_epochs=5); #Read the summary generated once you run this cell. 
# - # <a id='2'></a> # # Set intra epoch display as OFF and train # + gtf.update_display_progress_realtime(False) # Very important to Reload post update gtf.Reload(); # - # + #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # - # ## Details are displayed post epoch only # <a id='3'></a> # # Set inter epoch display as OFF and train # + gtf.update_display_progress(False) # Very important to Reload post update gtf.Reload(); # - # + #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # - # ## Details are displayed post entire training
study_roadmaps/1_getting_started_roadmap/5_update_hyperparams/3_training_params/2) Change display params.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + deletable=true editable=true # %matplotlib notebook # %load_ext autoreload # %autoreload 2 from gym_extensions.continuous.gym_navigation_2d.env_generator import EnvironmentGenerator, Environment import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.cm as cmx import matplotlib.colors as colors import numpy as np def get_cmap(N): '''Returns a function that maps each index in 0, 1, ... N-1 to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color # + deletable=true editable=true eg = EnvironmentGenerator(x_range=[0, 50], y_range=[0, 50], width_range=[1, 8], height_range=[1,8]) centers, widths, heights = eg.sample_axis_aligned_rectangles(density=0.01) obstacles = eg.merge_rectangles_into_obstacles(centers, widths, heights, epsilon=0.2) world = Environment(eg.x_range, eg.y_range, obstacles) # + deletable=true editable=true fig1 = plt.figure() ax1 = fig1.add_subplot(111, aspect='equal') for i in xrange(len(centers)): c,w,h = centers[i], widths[i], heights[i] bl = c.reshape((c.shape[0],)) - np.array([w[0]/2., h[0]/2.]) ax1.add_patch(patches.Rectangle(bl, w, h, alpha=0.5)) plt.text(bl[0], bl[1], str(i)) plt.axis('auto') cmap = get_cmap(len(obstacles)) fig2 = plt.figure() ax2 = fig2.add_subplot(111, aspect='equal') ci = -1 for i in obstacles: ci += 1 obs = obstacles[i] for c,w,h in zip(obs.rectangle_centers, obs.rectangle_widths, obs.rectangle_heights): bl = c.reshape((c.shape[0],)) - np.array([w/2., h/2.]) ax2.add_patch(patches.Rectangle(bl, w, h, alpha=0.5, label=str(i), facecolor=cmap(ci))) #fill=None)) plt.text(bl[0], bl[1], str(i)) 
plt.axis('auto') # + deletable=true editable=true from rrt import * rrt_planner = RRT(world) start_state = State(5, 5, None) end_state = State(40,50, None) tree_nodes= [] plans = [] for i in xrange(5): plan, _ = rrt_planner.plan(start_state, end_state, max_num_steps=1000, max_steering_radius=2, dest_reached_radius=5) plans.append(plan) #homology_vector = rrt_planner.world.homology_vector(plan) # + cmap = get_cmap(len(obstacles)) fig = plt.figure() ax = fig.add_subplot(111, aspect='equal') ci = -1 for i in obstacles: ci += 1 obs = obstacles[i] for c,w,h in zip(obs.rectangle_centers, obs.rectangle_widths, obs.rectangle_heights): bl = c.reshape((c.shape[0],)) - np.array([w/2., h/2.]) ax.add_patch(patches.Rectangle(bl, w, h, alpha=0.5, label=str(i), facecolor=cmap(ci))) #fill=None)) plt.text(bl[0], bl[1], str(ci)) for plan in plans: x = [s.x for s in plan] y = [s.y for s in plan] plt.plot(x, y, 'o-r', ms=4, lw=4) tx = [s.x for s in tree_nodes] ty = [s.y for s in tree_nodes] plt.plot(tx, ty, 'o', ms=3, mfc='gray') for s in tree_nodes: for sc in s.children: plt.plot([s.x, sc.x], [s.y, sc.y], '-', ms=2, c='gray') print homology_vector > 0 plt.axis('auto') # - smooth_plans = [] for plan in plans: smooth_plan = rrt_planner.smooth(plan, alpha=20, rate=0.01, max_iterations=20) smooth_plans.append(smooth_plan) # + cmap = get_cmap(len(obstacles)) fig = plt.figure() ax = fig.add_subplot(111, aspect='equal') ci = -1 for i in obstacles: ci += 1 obs = obstacles[i] for c,w,h in zip(obs.rectangle_centers, obs.rectangle_widths, obs.rectangle_heights): bl = c.reshape((c.shape[0],)) - np.array([w/2., h/2.]) ax.add_patch(patches.Rectangle(bl, w, h, alpha=0.5, label=str(i), facecolor=cmap(ci))) #fill=None)) plt.text(bl[0], bl[1], str(ci)) for smooth_plan in smooth_plans: x = [s.x for s in smooth_plan] y = [s.y for s in smooth_plan] plt.plot(x, y, 'o-r', ms=4, lw=4) tx = [s.x for s in tree_nodes] ty = [s.y for s in tree_nodes] plt.plot(tx, ty, 'o', ms=3, mfc='gray') for s in tree_nodes: 
for sc in s.children: plt.plot([s.x, sc.x], [s.y, sc.y], '-', ms=2, c='gray') plt.axis('auto') # -
examples/EnvironmentGeneration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "skip"} # # PharmSci 175/275 (UCI) # ## What is this?? # The material below is an instructional session/lecture on docking, scoring and pose prediction from Drug Discovery Computing Techniques, PharmSci 175/275 at UC Irvine. # Extensive materials for this course, as well as extensive background and related materials, are available on the course GitHub repository: [github.com/mobleylab/drug-computing](https://github.com/mobleylab/drug-computing) # # This material is a set of slides intended for presentation with RISE as detailed [in the course materials on GitHub](https://github.com/MobleyLab/drug-computing/tree/master/uci-pharmsci/lectures/energy_minimization). While it may be useful without RISE, it will also likely appear somewhat less verbose than it would if it were intended for use in written form. # + [markdown] slideshow={"slide_type": "slide"} # # Molecules, 3D structure, and shape # # Today: Docking and scoring, pose prediction, etc. # # ### Instructor: <NAME> # # ### Contributors to today's materials: # - <NAME> # + [markdown] slideshow={"slide_type": "slide"} # # Drug discovery is often partly about finding inhibitors, as these can make good drugs # # <center><img src="images/BCLXL_peptide.png" alt="GitHub" style="width: 600px;"/></center> # Anticancer target BCL-XL; green (part of larger protein) and gray bind; goal: Free up green to perform important function in apoptosis, so need something else which binds this cleft. 
# # (Fesik et al., Nature Reviews: Cancer, 5:876; Oltersdorf et al., Nature 435:677) # + [markdown] slideshow={"slide_type": "subslide"} # ## Small molecules can mimic binding partners # # # <center><img src="images/BCLXL_ligand.png" alt="GitHub" style="width: 600px;"/></center> # Here, a green compound binds in the same cleft; potential anti-cancer drug. # # (Fesik et al., Nature Reviews: Cancer, 5:876; Oltersdorf et al., Nature 435:677) # + slideshow={"slide_type": "slide"} from IPython.display import HTML # + [markdown] slideshow={"slide_type": "fragment"} # $\Delta G = -k_B T \ln \frac{ Q_{PL}}{Q_P Q_L}$ # + slideshow={"slide_type": "fragment"} # %%HTML <video autoplay width='400'> <source src="images/IL2_complex-5-104.mov" type="video/quicktime"> </video> <video autoplay width='400'> <source src="images/IL2_protein-5-106.mov" type="video/quicktime"> </video> <video autoplay width='400'> <source src="images/IL2_ligand-5-108.mov" type="video/quicktime"> </video> # + [markdown] slideshow={"slide_type": "fragment"} # Binding free energy involves a weighted sum over all the relevant configurations of the systems involved. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Docking treats binding mode and score as separate problems, requires thinking of binding modes as single orientations/placements # # Instead of $\Delta G = \Delta H - T\Delta S$, docking usually takes $\Delta G \approx \Delta H$: # - Score $\approx \Delta H$ # - Sometimes adds $-\Delta G_{solv}$ # - Uses "optimal" single orientations # # <center><img src="images/binding.png" alt="binding" style="width: 600px;"/></center> # + [markdown] slideshow={"slide_type": "slide"} # # Virtual screening provides the computational alternative to experimental high-throughput screening # # <center><img src="https://media.nature.com/lw926/nature-assets/nature/journal/v432/n7019/images/nature03197-f2.2.jpg" alt="screening" style="width: 900px;"/></center> # # (Shoichet, Nature 432:862 (2004)) # + [markdown] slideshow={"slide_type": "subslide"} # ## Virtual screening has different strengths and weaknesses # # <p float="left"> # # <img align="left" src="https://media.nature.com/lw926/nature-assets/nature/journal/v432/n7019/images/nature03197-f2.2.jpg" alt="screening" style="width: 400px;"/> # </p> # - false negatives and false positives are different, at (probably) higher rates # - Requires experimental confirmation # - Strengths: New chemistry, new compounds. # - Different hits # - Fast # - Perhaps higher hit rates? # + [markdown] slideshow={"slide_type": "subslide"} # ## Docking scores are somewhat arbitrary # - Convention: Lower score is better (like free energy/enthalpy of binding) # - Docking scores don't predict binding affinity (see e.g. work of Warren et al., J Med Chem). 
# - Usually they have a somewhat arbitrary scale, aren't really energies # - Some programs assign energy units and attempt to scale to correct range (but probably not trustworthy) # + [markdown] slideshow={"slide_type": "subslide"} # ### So it's not uncommon to see scores that are driven by choice of receptor # # <p float="right"> # <img align="right" src="images/rec1.png" alt="screening" style="width: 100px;"/> # <img align="right" src="images/rec2.png" alt="screening" style="width: 100px;"/> # </p> # + slideshow={"slide_type": "fragment"} rec1_scores = [-25, -22, -20, -17, -16, 20, 25, 26, 27, 29, 33] rec2_scores = [ 1, 2, 3, 4, 5, 6, 8, 9, 10, 25, 30 ] # %pylab inline plot(rec1_scores, 'ro') plot(rec2_scores, 'gd') ylabel('Score'), xlabel('Compound number') show() # + [markdown] slideshow={"slide_type": "subslide"} # ### So don't use docking to try and predict the receptor # - Score scales are arbitrary # - Score zeros are arbitrary # - Score range/scale depends on receptor, so don't compare scores across receptors or across mutants of a single receptor # + [markdown] slideshow={"slide_type": "slide"} # ## Several major classes of scoring function see common use # - Force field-based: Use similar FFs to molecular dynamics, perhaps with empirical modifications (e.g. D-Score, G-Score, GOLD, AutoDock, DOCK, ...) # - Knowledge-based: Use statistical potentials (PMF, SMoG, DrugScore, ...) # - Empirical: Fitted to reproduce particular properties (LUDI, F-Score, ChemScore, SCORE, Fresno, X-Score, GLIDE (?)) # - Conensus scoring: Combine multiple approaches in a "voting" type way, e.g. take top hits from several methods # (see e.g. Kitchen et al., NRDD 3:935 (2004)) # # There is no single best approach; all have strengths and weaknesses and success will depend on target. 
# # # + [markdown] slideshow={"slide_type": "subslide"} # ## Docking is best understood as a good filtering tool # # It works well for filtering out compounds that simply cannot fit in a binding site, or are likely to have very poor interactions there. # It is not a reliable predictor of potent compounds. # # So, for example, we might see: # + slideshow={"slide_type": "fragment"} rec1_scores = [-25, -22, -20, -17, -16, 20, 25, 26, 27, 29, 33] # %pylab inline plot(rec1_scores, 'ro') ylabel('Score'), xlabel('Compound number') show() # + [markdown] slideshow={"slide_type": "fragment"} # We would then likely filter out compounds with the least favorable scores (above 10) and be interested in the rest. # (In reality we would do this on far more compounds). # + [markdown] slideshow={"slide_type": "slide"} # ## Docking performs poorly at binding strength prediction: It's just not designed to do that # # - docking can't reliably calculate binding strength, or even relative binding strengths # - see Warren et al, JMC. 49:5912 (2005) (data plotted here is from SI); also Velec et al., JMC. 48:6296 (2005), Huang et al., JMC. 49:6789 (2006) # # # <p float="left"> # <img align="left" src="images/Warren_example_data.png" alt="screening" style="width: 700px;"/> # </p> # # # # + [markdown] slideshow={"slide_type": "slide"} # # (Need to continue converting slides to Jupyter notebook format here) # -
uci-pharmsci/lectures/docking_scoring_pose/docking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:.conda-pytorch] # language: python # name: conda-env-.conda-pytorch-py # --- # + # # %load_ext autoreload # # %autoreload 2 import os import pandas as pd import sys sys.path.insert(0, os.path.abspath('../lib')) import config from feat_util import get_train_test_from_df, get_test_from_df, get_signal from write_audio import write_audio_for_df from evaluate import get_results from sklearn.utils import shuffle import numpy as np # Troubleshooting and visualisation import IPython.display as ipd # - # # General use instructions # This code is complementary to the paper: HumBugDB: a large-scale acoustic mosquito dataset. The paper describes motivations for the data, a suggestion for uses cases in [], the data collection procedure in Section [], and model benchmarking in Section []. Appendix [] gives detail on the meaning of the metadata fields that are present in the `csv` file `config.data_df`, while Appendix [] describes in more detail the models used here as baselines. # # # This notebook provides the interface to partition data, extract features, train a BNN model in either PyTorch or Keras and evaluate its accuracy, precision-recall, confusion matrices and uncertainty metrics. Settings are specified in `config.py` and `config_pytorch.py` or `config_keras.py` which are located in `../lib`. Functions are imported from data and feature processing code in `../lib/feat_util.py`, model training in `../lib/runTorch.py` or `../lib/runKeras.py` and evaluation in `../lib/evaluate.py`. # # ### Data configuration `config.py` # Specify the metadata (csv) location in `data_df`, with the location of the raw wave files in `data_dir`. The desired output for the features is set by `dir_out`. Model objects will be saved to `../models/PyTorch/`. 
# # The feature extraction uses log-mel features with `librosa`, configurable in `config.py` with the sample rate `rate`, to which data is re-sampled on loading, a window size `win_size` which determines the size of a training window (in number of _feature_ windows), `step_size`, which determines the step size taken by the window, `NFFT`, and `n_hop`, which are parameters for the core STFT transform upon which log-mel feature extraction is based. Finally, `n_feat` determines the number of mel-frequency bands. # # In `librosa`, we can calculate the value of `win_size` to achieve a user's desired `duration` for a label as follows: # # `win_size` = `duration` / `frame_duration`, where `frame_duration` = `n_hop`/`rate`. Librosa uses a default `hop_length` of `NFFT/4`. # The default values in `config.py` are optimised for `rate` = 8000 with `win_size` = 30, `NFFT` = 2048, `n_hop` = `default`, to achieve a label duration of $30 \times 2048/(4\times 8000) = 1.92$ (s). A discussion on feature transformations is given in Appendix [] of the paper []. # # ### PyTorch `config_pytorch.py` # `config_pytorch.py` incldues settings to change the learning rate, `lr`, the number of maximum overrun steps for a particular training criteria `max_overrun`, the number of `epochs`, and the `batch_size`. The type of training method used can be written in `train_model.py`, which by default supports saving the best epoch models for either the training accuracy, `best_train_acc`, or validation accuracy, `best_val_acc`, if supplied to `train_model`. # # ### Keras `config_keras.py` # `tau = 1.0`,`lengthscale = 0.01`, are parameters used for $l2$ weight regularization supplied in lines 35-37 of `runKeras.py`. `dropout = 0.2` controls the dropout rate,`validation_split = 0.2`, is the fraction of data supplied as validation to the model callbacks in `model.fit`, line 105. `batch_size` controls the batch size, and `epochs`, set the number of epochs to train. 
# Note the slight difference between the two packages in the way validation data is passed to the model training.

# ## Step 1: Choose Keras or PyTorch

# +
library = 'PyTorch'

# Import the framework-specific train/load/evaluate helpers from ../lib.
if library == 'PyTorch':
    from runTorch import train_model, load_model, evaluate_model
elif library == 'Keras':
    from tensorflow import keras
    import config_keras
    from runKeras import train_model, load_model, evaluate_model
else:
    print('Library:', library, 'not supported. Please add your own code for support of that framework.')
# -

# ## Step 2: Data partitioning for feature extraction
# Feel free to adjust the training data according to any criteria available from the metadata of the labels,
# which are stored in `config.data_df`. The code here imports all data that is not in testing to the training
# set (which can then be further split into validation).
# Take extra care to make sure recordings from the same experimental group, as given in
# `df['country'], df['location_type']`, do not appear both in train and testing, resulting in an
# overestimate of performance on evaluation. The assertion below performs a check to ensure no duplicates arise.

# +
df = pd.read_csv(config.data_df)

# To be kept: please do not edit the test set: these paths select test set A, test set B as described in the paper
idx_test_A = np.logical_and(df['country'].str.contains('Tanzania'),df['location_type'].str.contains('field'))
idx_test_B = np.logical_and(df['place'].str.contains('Oxford'),df['location_type'].str.contains('culture'))
# Training set = everything that is in neither test set.
idx_train = np.logical_not(np.logical_or(idx_test_A, idx_test_B))
df_test_A = df[idx_test_A]
df_test_B = df[idx_test_B]
df_train = df[idx_train]

# Modify by addition or sub-sampling of df_train here
# df_train ...

# Assertion to check that train does NOT appear in test:
assert len(np.where(pd.concat([df_train,df_test_A, df_test_B]).duplicated())[0]) == 0, 'Train dataframe contains overlap with Test A, Test B'
# -

# ### Performance optimisations
# When creating features from data with mixed sample rates, `librosa.load` uses re-sampling (if specified).
# This is a really time-consuming process, which may be circumvented by re-sampling the data once and storing
# it in a folder before feature extraction.

# ### Feature processing or loading
# Note that these settings require at least 16 GB RAM to load into memory for ResNet-50 processing, as channels
# are replicated 3 times to match the pre-trained weights model (see more on StackExchange
# https://stackoverflow.com/questions/62971655/). To reduce the strain on memory, increase the `step_size`
# parameter in `config.py` to reduce the number of windows created by feature extraction. This reduces the
# overlap between samples.
#
# Alternatively, it is possible to use a non-pretrained architecture and change the tensor creation code in
# `build_dataloader()` from `runTorch.py` to remove `.repeat(1,3,1,1)` as there will be no need to copy over
# identical data over three channels.
#
# Note that once the tensors have been created, VRAM is not an issue due to the batching over the dataloaders
# (this code has been run on a GTX970 with 3.5GB available VRAM).
#
# If memory issues still persist, for further debugging see
# https://medium.com/@raghadalghonaim/memory-leakage-with-pytorch-23f15203faa4
# Extract log-mel feature windows for the train split and both test splits (parameters in config.py).
X_train, y_train, X_test_A, y_test_A, X_test_B, y_test_B = get_train_test_from_df(df_train, df_test_A, df_test_B, debug=True)

# Hold out 20% of the training windows as a validation set (fixed seed for reproducibility).
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

# ## Step 3: Model training
model=train_model(X_train, y_train, X_val, y_val)

# ## Step 4: Model evaluation

# Optional: load test data and model only to avoid committing X_train to memory
X_test_A, y_test_A, X_test_B, y_test_B = get_test_from_df(df_test_A, df_test_B, debug=True)

# PyTorch: loads from ../outputs/models/pytorch/*.pth, Keras: ../outputs/models/keras/*.h5
model_name = 'neurips_2021_humbugdb_resnet50_bnn.pth'
model = load_model(model_name)

# Generate BNN samples. Run with n_samples = 1 for a deterministic NN, n >= 10 for a BNN.
# Calculate the predictive entropy (PE), mutual information (MI), and log probabilities.
# Also plot the ROC curve and confusion matrix.
y_preds_all = evaluate_model(model, X_test_A, y_test_A, n_samples = 10)
PE, MI, log_prob = get_results(y_preds_all, y_test_A, filename=model_name+'_Test_A')
y_preds_all = evaluate_model(model, X_test_B, y_test_B, n_samples =10)
PE, MI, log_prob = get_results(y_preds_all,y_test_B, filename=model_name+'_Test_B')
notebooks/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Welcome to Provis! # # # This is an example file to showcase the functionalities of provis and the easiest way to run it. # # This file should be located in the root directory, as specified in https://pro-vis.readthedocs.io/en/latest/setup.html#. # # Your pdb file should be located in "root directory"/data/pdb. # All temporary files (xyzrn, mol2, pqr, face, vert, area) will be saved to "root directory"/data/tmp. # Images of the plots will be saved to "root directory"/data/img and meshes to "root directory"/data/meshes. # import pyvista from provis.src.processing.protein import Protein from provis.src.processing.residue import Residue from provis.src.plotting.dynamic_structure import DynamicStructure # # Define variables # # To keep things organized we define all input variables here. # # - name: Name of pdb file you want to visualize. If the file is located in "root directory"/data/pdb then it is enough to simply pass the name of the file. Otherwise a full path is needed. (Works with or without the ".pdb" extension, eg.: "2fd7.pdb" OR "2fd7" OR "{full path to file}/2fd7.pdb" all work) # - base_path: The path to the "root directory". As we are currently in the "root directory"/examples folder we do not have to set a base_path variable. The path will automatically be found. # - density: "Used to modify the default triangulation density (1.0 vertex/Angstrom^2). No test is done on the validity of this parameter. Typical values are 1.0 for large molecules (>1000 atoms) and 3.0 for smaller molecules." Argument passed to the msms binary. Not needed if you use the non-msms/native mesh. From the msms wiki: # - plot_solvent: Set this to True if you want to plot the solvent atoms (water around the protein). 
Otherwise only the core molecule will be taken into consideration. # - set_msms: Set this to True if you want to use the msms binary for the surface creation. You need to have the msms binary installed and EITHER the MSMS_BIN environment variable set to its path OR have the binary in the "root directory"/binaries folder. # - set_notebook: Set this to True when using a Jupyter Notebook (like) environment. Keep as is, as this file is a notebook file. # + name = "traj" #"1a3n" # "data/pdb/2fd7" # "data/pdb/1a3n" # "data/pdb/7nkd" # base_path = None density = 3.0 msms = True plot_solvent=False notebook=True # - # # Initialize the main protein class # # This class encapsulates the provis library. By initializing the class we precompute some of the necessairy information needed later for plotting. # # You can initialize the class with the name of your pdb file and you are ready to plot! If you want more control the intricacies of the variables will be explained here: # # - name: Name of pdb file you want to visualize. If the file is located in "root directory"/data/pdb then it is enough to simply pass the name of the file. Otherwise a full path is needed. (Works with or without the ".pdb" extension, eg.: "2fd7.pdb" OR "2fd7" OR "{full path to file}/2fd7.pdb" all work) # - base_path: Path to the "root directory". This directory NEEDS to have the directory structure as specified [on the documentation](https://pro-vis.readthedocs.io/en/latest/setup.html#) # - density: Argument passed to the msms binary. Not needed if you use the non-msms/native mesh. From the msms wiki: "Used to modify the default triangulation density (1.0 vertex/Angstrom^2). No test is done on the validity of this parameter. Typical values are 1.0 for large molecules (>1000 atoms) and 3.0 for smaller molecules." # - plot_solvent: If set to True solvent atoms will also be plotted. Otherwise only the core molecule will be taken into consideration by provis. 
# - msms: Set this to True if you want to use the msms binary to compute the surface. If set to False the surface mesh will be computed natively. If you do not have the binary installed leave it as False, otherwise you will have errors. # - notebook: Set this to True when using a Jupyter Notebook (like) environment to turn on integrated plotting features. prot = Protein(name, base_path=None, density=density) # # Plotting # # This class encapsulates the provis library. By initializing the class we precompute some of the necessairy information needed later for plotting. # # The DynamicStructure class is used for the plotting of dynamic structures. It is built similarly to the Plotter class. # The DynamicStructure class has its own member functions for plotting. These member functions loop through the full dynamic trajectory and plot every single model/molecule it consists of. # # All plotting functions have five of the same input variables and some have more: # - box (bool): Bounding box - If this is set to True a bounding box will appear around the plotted molecule. # - res (Residue): Residue to be selected - Pass the residues you want to have selected on the plot as a provis.src.processing.Residue object. # - outname (str): Name of output file (with path) - If you do not want the screenshot of the plot to be saved in the default location with the default name you can change this variable. # - camera (pyvista.camera) - Pass a Pyvista Camera https://docs.pyvista.org/api/core/camera.html to manually set the camera position. If nothing/None is passed then the camera position will be set to 'xy'. Default: None. # - title (str) - Title of the plot window (not relevant for jupyter notebook). # # # plot_backbone: # Plots the backbone (roughly the amide bonds) of the protein. # # plot_atoms: # Plot the atoms as spheres. Coloring: https://en.wikipedia.org/wiki/CPK_coloring # # plot_bonds: # Plot only the bonds. By default all bonds will be plotted uniformly. 
# If you want to view the difference in bonds you can set the colorful (Boolean) variable to True.
#    Single bonds: white
#    Double bonds: blue
#    Triple bonds: green
#    Amide bonds: red
#    Aromatic bonds: purple
#    Undefined/Anything else: black
#
# plot_vw:
#    Plot Van-der-Waals radius of atoms as wireframe spheres.
#
# plot_stick_point:
#    Plot stick and point model of the protein. Atoms are spheres, bonds are tubes.
#
# plot_structure:
#    This member function is called by all other member functions. Using this function you can plot any
#    combination of the results gotten from the specialized member functions. For example you could plot
#    the atoms and the backbone of the protein in the same plot.
#    It has a lot of boolean variables so you can easily choose what you want to see.
#
# plot_surface: Plot the surface of the protein. Uniform coloring (white) if no feature is specified.
#
#  - feature (str): Specify feature you want to plot. Same as calling the member function plot_{feature}().
#  - title (str): Title of the plot
#  - patch (bool): If True then coloring will be read in from "root directory"/data/tmp/{pdb_id}.pth file. Default: False.
#
# Disclaimer: color-scale explanation can be viewed by clicking the three dots on bottom left corner of the plot.
#
# plot_hydrophob: Plot hydrophobicity of the protein.
#
# plot_shape: Plot shape tension of protein.
#
# plot_charge: Plot charge of protein.

# BUGFIX: the original constructed a `DynamicPlotter`, a name that is never imported or
# defined in this notebook (NameError). The class imported at the top and documented in
# the surrounding text is DynamicStructure, so instantiate that instead.
dp = DynamicStructure(prot, msms=msms, notebook=notebook, plot_solvent=plot_solvent)

# The plot_surface() member function plots the surface of every single molecule within the trajectory.
# If the meshes were already computed before and are saved as .obj files (in "root directory"/data/meshes)
# then they will be loaded from there. Otherwise the surface meshes will be computed on the go.
#
#  - feature (str) - Pass which feature (coloring) you want to plot. Options: hydrophob, shape, charge. Default: None (uniform coloring).
# - patch (bool) - If True then coloring will be read in from "root directory"/data/tmp/{pdb_id}.pth file. Default: False. # # All the following input variables are of the default value. # # NOTICE: pyvista does not support dynamic plotting in the Jupyter notebook environment. It is however possible view the created animation file, which is returned by the plotting methods if the **notebook** variable was set to True. surface_video = dp.plot_surface(feature=None, title="Surface", patch=False, box=None, res=None, outname=None, camera=None) # Play the created animation: from IPython.display import Video print(surface_video) Video(surface_video) # We can use the plot_atoms() function to plot the dynamic atom cloud. It takes the standard input variables as described in the section above. dp.plot_atoms(box=False, res=None, outname=None, camera=None, title="Atoms") # Apart from the two methods of the DynamicStructure class shown above, all methods of the Structure and Surface classes are also valid methods of the DynamicStructure class.
examples/dynamic_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Example: training a SplitNN on MNIST, defending it with Shredder noise,
# and running a black-box model-inversion attack against it.

from attacksplitnn.splitnn import Client, Server, SplitNN
from attacksplitnn.attack import Black_Box_Model_Inversion
from attacksplitnn.utils import DataSet
from attacksplitnn.defense.shredder import Shredder

# +
import numpy as np
import pandas as pd
import random

import matplotlib.pyplot as plt

import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset

from opacus import PrivacyEngine

from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, roc_auc_score
# -

# +
config = {
    "batch_size":128
}

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.random.manual_seed(42)
print(device)
# -

# ## Load Data

# +
# Standard MNIST normalization constants.
transform = transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

trainset = torchvision.datasets.MNIST(root='./data',
                                      train=True,
                                      transform=transform,
                                      download=True)
testset = torchvision.datasets.MNIST(root='./data',
                                     train=False,
                                     transform=transform,
                                     download=True)
# -

# +
# Split the indices: 1000 samples to train the victim, 1000 for the attacker,
# and 15 held-out test images to reconstruct.
victim_idx = random.sample(range(trainset.data.shape[0]), k=2000)
victim_train_idx = victim_idx[:1000]
attack_idx = victim_idx[1000:]
victim_test_idx = random.sample(range(testset.data.shape[0]), k=15)

victim_train_dataset = Subset(trainset, victim_train_idx)
attack_dataset = Subset(trainset, attack_idx)
victim_test_dataset = Subset(testset, victim_test_idx)

victim_train_dataloader = torch.utils.data.DataLoader(victim_train_dataset,
                                                      batch_size=64,
                                                      shuffle=True)
attack_dataloader = torch.utils.data.DataLoader(attack_dataset,
                                                batch_size=64,
                                                shuffle=True)
victim_test_dataloader = torch.utils.data.DataLoader(victim_test_dataset,
                                                     batch_size=64,
                                                     shuffle=False)
# -

# ## Train SplitNN


# Client-side half of the split network (first two conv blocks).
class FirstNet(nn.Module):
    def __init__(self):
        super(FirstNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=64,
                               kernel_size=3,
                               padding=1,
                               stride=1)
        #self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        #self.bn2 = nn.BatchNorm2d(128)

    def forward(self, x):
        # 3ch > 64ch, shape 32 x 32 > 16 x 16
        x = self.conv1(x)  # [64,32,32]
        #x = self.bn1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2, 2)  # [64,16,16]

        # 64ch > 128ch, shape 16 x 16 > 8 x 8
        x = self.conv2(x)  # [128,16,16]
        #x = self.bn2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2, 2)  # [128,8,8]
        return x


# Server-side half of the split network (was: "CNNを実装する" / "implement the CNN").
class SecondNet(nn.Module):
    def __init__(self):
        super(SecondNet, self).__init__()
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = nn.Conv2d(256, 512, 3, padding=1)
        self.bn4 = nn.BatchNorm2d(512)
        self.L1 = nn.Linear(512, 10)  # 10-class classification head

    def forward(self, x):
        # 128ch > 256ch, shape 8 x 8 > 4 x 4
        x = self.conv3(x)  # [256,8,8]
        x = self.bn3(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2, 2)  # [256,4,4]

        # 256ch > 512ch, shape 4 x 4 > 2 x 2
        x = self.conv4(x)  # [512,4,4]
        x = self.bn4(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2, 2)  # [512,2,2]

        # Fully connected layer
        x = x.view(-1, 512)
        x = self.L1(x)
        #x = F.softmax(x, dim=0)
        return x


# +
model_1 = FirstNet()
model_1 = model_1.to(device)

model_2 = SecondNet()
model_2 = model_2.to(device)

opt_1 = optim.Adam(model_1.parameters(), lr=1e-3)
opt_2 = optim.Adam(model_2.parameters(), lr=1e-3)

criterion = nn.CrossEntropyLoss()
# -


def accuracy(label, output):
    # Fraction of correct top-1 predictions in this batch (output: logits).
    pred = output.argmax(dim=1, keepdim=True)
    return pred.eq(label.view_as(pred)).sum().item() / pred.shape[0]


# +
client = Client(model_1)
server = Server(model_2)

splitnn = SplitNN(client, server, opt_1, opt_2)
# -

splitnn.train()
for epoch in range(3):
    epoch_loss = 0
    epoch_outputs = []
    epoch_labels = []
    for i, data in enumerate(victim_train_dataloader):
        splitnn.zero_grads()
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)

        outputs = splitnn(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        epoch_loss += loss.item() / len(victim_train_dataloader.dataset)

        epoch_outputs.append(outputs)
        epoch_labels.append(labels)

        splitnn.backward()
        splitnn.step()

    print(epoch_loss, accuracy(torch.cat(epoch_labels), torch.cat(epoch_outputs)))

# ## Apply Shredder
# NOTE(review): (128, 7, 7) presumably matches the client-side output shape for
# 28x28 MNIST inputs (two 2x downsamplings) — confirm against the Shredder API.

sd = Shredder(splitnn, (128, 7, 7), 15, criterion, optim.Adam,
              scale=3, alpha=-1e-2, lr=1e-2)
sd.fit(victim_train_dataloader)
guarded_client = sd.update_client()
# Swap in the noise-injecting client and re-measure accuracy below.
splitnn.client = guarded_client

# +
epoch_outputs = []
epoch_labels = []
splitnn.eval()
for i, data in enumerate(victim_train_dataloader):
    inputs, labels = data
    inputs = inputs.to(device)
    labels = labels.to(device)

    outputs = splitnn(inputs)
    epoch_outputs.append(outputs)
    epoch_labels.append(labels)

print(accuracy(torch.cat(epoch_labels), torch.cat(epoch_outputs)))
# -

# ## Black Box Model Inversion Attack

# +
# Attacker network: maps intercepted intermediate activations back to 28x28 images
# (was: "CNNを実装する" / "implement the CNN").
class Attacker(nn.Module):
    def __init__(self):
        super(Attacker, self).__init__()
        self.fla = nn.Flatten()
        self.ln1 = nn.Linear(128*7*7, 1000)
        self.ln2 = nn.Linear(1000, 784)

    def forward(self, x):
        x = self.fla(x)
        x = self.ln1(x)
        x = F.relu(x)
        x = self.ln2(x)
        x = x.view(-1, 1, 28, 28)
        return x


attacker = Attacker()
attacker = attacker.to(device)
opt_3 = optim.Adam(attacker.parameters(), lr=1e-3)
# -

bbmi = Black_Box_Model_Inversion(splitnn, attacker, opt_3)
bbmi.fit(attack_dataloader, 15)
# NOTE(review): .numpy() assumes the attack result is on CPU — verify when running with CUDA.
attack_result = bbmi.attack(victim_test_dataloader)
attack_result = attack_result.detach().numpy()

# ### Reconstructed Images

for i in range(1,16):
    plt.subplot(3,5,i)
    plt.imshow(attack_result[i-1].reshape(28,28),cmap='gray_r')
    #plt.title("reconstructed data")
plt.show()
Attack_SplitNN/examples/Shredder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Binary CIFAR-10 classification, comparing training with and without
# SinGAN-generated augmentation images.

# +
import numpy as np
import torch.nn as nn
import torch
import pandas
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.utils.data as udata
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(0)
np.random.seed(0)
# -

trainpath = "cifar10-2classes-trainset"
#trainpath = "singan-train"

transformer = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset1 = datasets.ImageFolder(root=trainpath, transform=transformer)
trainset2 = datasets.ImageFolder(root="singan-train", transform=transformer)
trainset = udata.ConcatDataset([trainset1, trainset2])
# BUGFIX: ConcatDataset has neither idx_to_class nor class_to_idx (the original
# assignment was commented out, so both prints raised AttributeError). Both
# folders are expected to share the same class layout, so report the mapping of
# the first ImageFolder instead.
idx_to_class = {v: k for k, v in trainset1.class_to_idx.items()}
print(idx_to_class)
print(trainset1.class_to_idx)

testpath = "cifar10-2classes-testset"
testset = datasets.ImageFolder(root=testpath, transform=transformer)
testset.idx_to_class = {v: k for k, v in testset.class_to_idx.items()}
print(testset.idx_to_class)
print(testset.class_to_idx)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

trainloader = udata.DataLoader(trainset, batch_size=20, shuffle=True)
testloader = udata.DataLoader(testset, batch_size=20, shuffle=True)


# +
class CNN(nn.Module):
    """LeNet-style CNN for 32x32 RGB images with a 2-class output head."""

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 32 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


cnn = CNN()
cnn.to(device)
# -

criterion = nn.CrossEntropyLoss()
#optimizer = optim.SGD(cnn.parameters(), lr=0.001, momentum=0.9)
optimizer = optim.Adam(cnn.parameters(), lr=0.001)


def accuracy(dataloader):
    """Return percentage (0-100) of correctly classified samples in dataloader.

    Uses the module-level `cnn` and `device`.
    """
    correct = 0
    total = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)
            outputs = cnn(inputs)
            # Softmax is monotonic, so argmax over probabilities equals
            # argmax over logits; kept for parity with the original code.
            softmax = nn.Softmax(dim=1)
            probability = softmax(outputs.data)
            _, predicted = torch.max(probability, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100 * correct / total


# Backward-compatible alias: the function was originally (mis)named `accuarcy`,
# while later cells called `accuracy(...)` — a NameError. Keep both names valid.
accuarcy = accuracy


def train(cnn, num, epsilon=0.003, filename=None):
    """Train `cnn` for `num` epochs on the module-level trainloader/optimizer.

    epsilon is unused (kept for interface compatibility with existing calls).
    If `filename` is given, per-epoch loss/accuracy lines are appended to it.
    """
    for epoch in range(num):
        running_loss = 0.0
        total = 0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)
            optimizer.zero_grad()
            outputs = cnn(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            total += 1
        test_accuracy = accuracy(testloader)
        train_accuracy = accuracy(trainloader)
        epoch_loss = running_loss / total
        print('epoch [%d] loss: %.3f' % (epoch + 1, epoch_loss),
              train_accuracy, test_accuracy)
        if filename is not None:
            # BUGFIX: the original opened the file every epoch but closed it
            # only once after the loop, leaking handles. Append-and-close per
            # epoch via a context manager preserves the file contents exactly.
            with open(filename, 'a') as f:
                f.write('epoch {} loss: {}, train_accuracy: {}, test_accuracy: {} \n'.format(
                    epoch + 1, epoch_loss, train_accuracy, test_accuracy))
    print('Finished Training')


train(cnn, 100)

PATH = './model/cnn_20_cd_e20.pth'
torch.save(cnn.state_dict(), PATH)

PATH = './model/cnn_batch1_cd.pth'
cnn = CNN()
cnn.load_state_dict(torch.load(PATH))
cnn.to(device)
# BUGFIX: the original called accuracy(trainloader) (undefined at the time due
# to the `accuarcy` misspelling) followed by a nonexistent `accura()`.
print(accuracy(trainloader))
print(accuracy(testloader))


# +
def train_model(use_singan, dir_name):
    """Build datasets/loaders (optionally augmented with SinGAN images) and a fresh CNN.

    dir_name is unused here (kept for interface compatibility); result files
    are named by the caller.

    BUGFIX: the original created loaders as locals and dropped them, while
    train()/accuracy() read the module-level trainloader/testloader — so every
    experiment silently trained on the augmented global loader regardless of
    use_singan. Rebinding the globals makes the flag actually take effect.
    """
    global trainset, testset, trainloader, testloader

    # Load train set.
    trainpath = "cifar10-2classes-trainset"
    transformer = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    if use_singan:
        trainset1 = datasets.ImageFolder(root=trainpath, transform=transformer)
        trainset2 = datasets.ImageFolder(root="singan-train", transform=transformer)
        trainset = udata.ConcatDataset([trainset1, trainset2])
    else:
        trainset = datasets.ImageFolder(root=trainpath, transform=transformer)

    # Load test set.
    testpath = "cifar10-2classes-testset"
    testset = datasets.ImageFolder(root=testpath, transform=transformer)

    trainloader = udata.DataLoader(trainset, batch_size=20, shuffle=True)
    testloader = udata.DataLoader(testset, batch_size=20, shuffle=True)

    # Reuse the module-level CNN class instead of redefining an identical copy.
    cnn = CNN()
    cnn.to(device)
    return cnn
# -

# +
# Run the experiment 20 times; each run gets a fresh time-based random seed and
# writes its output to ./result/<timestamp>/.
import time
import os

for experiment_num in range(20):
    t = int(time.time())
    dir_name = str(t)
    print(dir_name)

    # Create result directories (exist_ok avoids errors on reruns; replaces
    # the original shell `mkdir` calls).
    os.makedirs("./result/{}".format(dir_name), exist_ok=True)

    # Baseline: train without SinGAN-generated images.
    torch.manual_seed(t)
    np.random.seed(t)
    use_singan = False
    cnn1 = train_model(use_singan, dir_name)
    criterion = nn.CrossEntropyLoss()
    # BUGFIX: the original built the optimizer over cnn.parameters() — the old
    # global model — so cnn1 was never actually updated during training.
    optimizer = optim.Adam(cnn1.parameters(), lr=0.001)
    filename = './result/{}/without_singan.txt'.format(dir_name)
    train(cnn1, 10, filename=filename)

    # Same seed, now with SinGAN-generated images added to the train set.
    torch.manual_seed(t)
    np.random.seed(t)
    use_singan = True
    cnn1 = train_model(use_singan, dir_name)
    criterion = nn.CrossEntropyLoss()
    #optimizer = optim.SGD(cnn1.parameters(), lr=0.001, momentum=0.9)
    optimizer = optim.Adam(cnn1.parameters(), lr=0.001)
    filename = './result/{}/with_singan.txt'.format(dir_name)
    train(cnn1, 10, filename=filename)
# -
Data Augumentation/cnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt from flask import Flask, jsonify import numpy as np import pandas as pd import datetime as dt # # Reflect Tables into SQLAlchemy ORM # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, inspect, func # create engine to hawaii.sqlite engine = create_engine("sqlite:///hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() Base.prepare(engine, reflect=True) Base.classes.keys() # reflect the tables # View all of the classes that automap found Measurement = Base.classes.measurement # Save references to each table Station = Base.classes.station inspector = inspect(engine) #inspector.get_table_names() columns = inspector.get_columns('Station') for c in columns: print(c['name'], c["type"]) columns = inspector.get_columns('Measurement') for c in columns: print(c['name'], c["type"]) # Create our session (link) from Python to the DB session = Session(engine) # # Exploratory Precipitation Analysis for row in session.query(Measurement, Measurement.date).limit(5).all(): print(row) # Find the most recent date in the data set. 
results = session.query(Measurement.date).\ order_by(Measurement.date.desc()).all() print(results) #Another way to find it MaxDate = session.query(func.max(Measurement.date)).all() print(MaxDate) import datetime as dt Maxi = MaxDate[0] Maxi Maxi[0] from datetime import datetime a=datetime.strptime(Maxi[0], '%Y-%m-%d').date() a Yearago = a - dt.timedelta(days=365) Yearago Yearagostr=Yearago.strftime('%Y-%m-%d') Yearagostr # + YearDates = session.query(Measurement.date,Measurement.prcp).order_by(Measurement.date.desc()).\ filter(Measurement.date >= Yearagostr).all() # - for row in YearDates: print (row) Long = len(YearDates) df = pd.DataFrame(YearDates[::], columns=['date', 'prep']) df.set_index('date', inplace=True, ) df = df.sort_index () df.head(10) len(df) Data = df.dropna() Data.index.unique() ax= Data.plot(figsize=(20, 10)) plt.show() Res = Data.prep.agg(["mean","median","var","std","sem"]) Res Howmanyst = session.query(Station).count() Howmanyst results2 = session.query(Station.station,Station.name).\ order_by(Station.station.desc()).all() print(results2) # # Exploratory Station Analysis # + Sta = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all() print(Sta) # - Counttemp = session.query(func.count(Measurement.tobs)).filter_by(station="USC00519281").all() Maxtemp = session.query(func.max(Measurement.tobs)).filter_by(station="USC00519281").all() Mintemp = session.query(func.min(Measurement.tobs)).filter_by(station="USC00519281").all() Avgtemp = session.query(func.avg(Measurement.tobs)).filter_by(station="USC00519281").all() [Counttemp[0][0]] Counttemp = Counttemp[0] Maxtemp = Maxtemp[0] Mintemp = Mintemp[0] Avgtemp = Avgtemp[0] print(f'Number of temperatures for station USC00519281: {Counttemp[0]}') print(f'Max temperature for station USC00519281: {Maxtemp[0]}') print(f'Min temperature for station USC00519281: {Mintemp[0]}') print(f'Average of temperatures for 
station USC00519281: {Avgtemp[0]}') # + Yearagostation = session.query(Measurement.date,Measurement.tobs).\ filter(Measurement.date >= Yearagostr).filter_by(station="USC00519281").all() # - len(Yearagostation) df2 = pd.DataFrame(Yearagostation[::], columns=['date','tobs']) df2.set_index('date', inplace=True, ) df2 = df2.sort_index () df2.head() len(df2) plt.hist(df2.tobs,bins=12) plt.xlabel("Temperatures") plt.ylabel("Frequency") plt.show() # # Close session # Close Session session.close()
climate_starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Exercise 10 - The Tipping Problem # - # #### Aim: # To develop a fuzzy controller for the Tipping problem using python. # #### The Tipping Problem # # # A fuzzy control system is created to model how you might choose to tip # at a restaurant. When tipping, the service and food quality are considered and rated between 0 and 10. A tip of between 0 and 25% is suggested. # #### Problem Formulation: # # * Antecednets (Inputs) # - `service` # * Universe (ie, crisp value range): How good was the service of the wait # staff, on a scale of 0 to 10? # * Fuzzy set (ie, fuzzy value range): poor, acceptable, amazing # - `food quality` # * Universe: How tasty was the food, on a scale of 0 to 10? # * Fuzzy set: bad, decent, great # * Consequents (Outputs) # - `tip` # * Universe: How much should we tip, on a scale of 0% to 25% # * Fuzzy set: low, medium, high # * Rules # - IF the *service* was good *or* the *food quality* was good, # THEN the tip will be high. # - IF the *service* was average, THEN the tip will be medium. # - IF the *service* was poor *and* the *food quality* was poor # THEN the tip will be low. # * Usage # - If I tell this controller that I rated: # * the service as 9.8, and # * the quality as 6.5, # - it would recommend I leave: # * a 20.2% tip. 
# #### Program import numpy as np import skfuzzy as fuzz from skfuzzy import control as ctrl # %matplotlib widget # New Antecedent/Consequent objects hold universe variables and membership # functions quality = ctrl.Antecedent(np.arange(0, 11, 1), 'quality') service = ctrl.Antecedent(np.arange(0, 11, 1), 'service') tip = ctrl.Consequent(np.arange(0, 26, 1), 'tip') # Auto-membership function population is possible with .automf(3, 5, or 7) quality.automf(3) service.automf(3) # Custom membership functions can be built interactively with a familiar, # Pythonic API tip['low'] = fuzz.trimf(tip.universe, [0, 0, 13]) tip['medium'] = fuzz.trimf(tip.universe, [0, 13, 25]) tip['high'] = fuzz.trimf(tip.universe, [13, 25, 25]) # You can see how these look with .view() quality.view() service.view() tip.view() rules = [ ctrl.Rule(quality['poor'] | service['poor'], tip['low']), ctrl.Rule(service['average'], tip['medium']), ctrl.Rule(service['good'] | quality['good'], tip['high']) ] rules[1].view() tipping_ctrl = ctrl.ControlSystem(rules) tipping = ctrl.ControlSystemSimulation(tipping_ctrl) # Pass inputs to the ControlSystem using Antecedent labels with Pythonic API # Note: if you like passing many inputs all at once, use .inputs(dict_of_data) tipping.input['quality'] = 6.5 tipping.input['service'] = 9.8 # Crunch the numbers tipping.compute() print(tipping.output['tip']) tip.view(sim=tipping)
notebooks/10 - The Tipping Problem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + from astropy.io.fits import getdata, getheader import astropy.units as u from nustar_lunar_pointing.tracking import get_epoch_tle from nustar_lunar_pointing.tracking import convert_nustar_time from nustar_lunar_pointing.tracking import get_moon_j2000 from astropy.time import Time import matplotlib.pyplot as plt # %matplotlib notebook # + from datetime import datetime checktime = datetime.strptime('2017-05-08', "%Y-%m-%d") tlefile = '../data/NuSTAR.tle' mindt, line1, line2 = get_epoch_tle(checktime, tlefile) print('Days between TLE entry and when you want to observe: ', mindt) # + from skyfield.api import EarthSatellite, load ts = load.timescale() planets = load('de436.bsp') moon = planets['Moon'] earth = planets['Earth'] sun = planets['Sun'] checktime = datetime.strptime('2017-05-08', "%Y-%m-%d") tlefile = '../data/NuSTAR.tle' mindt, line1, line2 = get_epoch_tle(checktime, tlefile) print('Days between TLE entry and when you want to observe: ', mindt) nustar = EarthSatellite(line1, line2) geometry = nustar +earth # + from astropy.coordinates import SkyCoord from datetime import timedelta ra = [] dec =[] times= [] base_ra = None base_dec = None ra_sky = [] dec_sky = [] step_size = timedelta(0, 10.) # 1000 second steps step_size = timedelta(0, 10.) 
# 1000 second steps checktime = datetime.strptime('2017-01-08', "%Y-%m-%d") end_check = datetime.strptime('2017-01-08T01', "%Y-%m-%dT%H") while (end_check - checktime).total_seconds() > 0: checktime += step_size utc = Time(checktime) tcheck = ts.from_astropy(utc) nustar_bary = geometry.at(tcheck) astrometric = nustar_bary.observe(sun) this_ra_sky, this_dec_sky, dist = astrometric.radec() geocentric = earth.at(tcheck).observe(sun) this_ra_geo, this_dec_geo, dist = geocentric.radec() nustar_coord = SkyCoord(this_ra_sky.to(u.deg), this_dec_sky.to(u.deg)) geocentric_coord = SkyCoord(this_ra_geo.to(u.deg), this_dec_geo.to(u.deg)) print(nustar_coord.separation(geocentric_coord)) # - dt = checktime - end_check dt.total_seconds()
notebooks/SkyfieldTest_SolarParallax.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Advanced Feature Engineering in Keras # # **Learning Objectives** # # 1. Process temporal feature columns in Keras # 2. Use Lambda layers to perform feature engineering on geolocation features # 3. Create bucketized and crossed feature columns # # # ## Introduction # # In this notebook, we use Keras to build a taxifare price prediction model and utilize feature engineering to improve the fare amount prediction for NYC taxi cab rides. # # Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [Solution Notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/feature_engineering/solutions/4_keras_adv_feat_eng.ipynb) for reference. # # ## Set up environment variables and load necessary libraries # We will start by importing the necessary libraries for this lab. # !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst # + import datetime import logging import os import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from tensorflow import feature_column as fc from tensorflow.keras import layers from tensorflow.keras import models # set TF error log verbosity logging.getLogger("tensorflow").setLevel(logging.ERROR) print(tf.version.VERSION) # - # !ls -l ../data/*.csv # !head ../data/*.csv # ## Create an input pipeline # # Typically, you will use a two step process to build the pipeline. Step one is to define the columns of data; i.e., which column we're predicting for, and the default values. Step 2 is to define two functions - a function to define the features and label you want to use and a function to load the training data. 
# Also, note that pickup_datetime is a string and we will need to handle this
# in our feature engineered model.

# Column order of the input CSVs; the model predicts fare_amount.
CSV_COLUMNS = [
    'fare_amount',
    'pickup_datetime',
    'pickup_longitude', 'pickup_latitude',
    'dropoff_longitude', 'dropoff_latitude',
    'passenger_count',
    'key',
]
LABEL_COLUMN = 'fare_amount'
NUMERIC_COLS = ['pickup_longitude', 'pickup_latitude',
                'dropoff_longitude', 'dropoff_latitude',
                'passenger_count']
# Per-column defaults used when a CSV field is missing; strings default to 'na'.
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']

# +
# A function to define features and labels
def features_and_labels(row_data):
    """Split a CSV row dict into (features, label); drops the unused 'key'."""
    for unwanted_col in ['key']:
        row_data.pop(unwanted_col)
    label = row_data.pop(LABEL_COLUMN)
    return row_data, label


# A utility method to create a tf.data dataset from a Pandas Dataframe
def load_dataset(pattern, batch_size=1, mode='eval'):
    """Create a tf.data.Dataset of (features, label) batches from CSV files.

    pattern is a file glob; mode='train' adds shuffling and infinite repeat.
    """
    dataset = tf.data.experimental.make_csv_dataset(pattern,
                                                    batch_size,
                                                    CSV_COLUMNS,
                                                    DEFAULTS)
    dataset = dataset.map(features_and_labels)  # features, label
    if mode == 'train':
        dataset = dataset.shuffle(1000).repeat()
    # take advantage of multi-threading; 1=AUTOTUNE
    dataset = dataset.prefetch(1)
    return dataset
# -

# ## Create a Baseline DNN Model in Keras
#
# Now let's build the Deep Neural Network (DNN) model in Keras using the
# functional API. Unlike the sequential API, we will need to specify the input
# and hidden layers. Note that we are creating a linear regression baseline
# model with no feature engineering. Recall that a baseline model is a
# solution to a problem without applying any machine learning techniques.
# + # Build a simple Keras DNN using its Functional API def rmse(y_true, y_pred): # Root mean square error return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true))) def build_dnn_model(): # input layer inputs = { colname: layers.Input(name=colname, shape=(), dtype='float32') for colname in NUMERIC_COLS } # feature_columns feature_columns = { colname: fc.numeric_column(colname) for colname in NUMERIC_COLS } # Constructor for DenseFeatures takes a list of numeric columns dnn_inputs = layers.DenseFeatures(feature_columns.values())(inputs) # two hidden layers of [32, 8] just in like the BQML DNN h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs) h2 = layers.Dense(8, activation='relu', name='h2')(h1) # final output is a linear activation because this is regression output = layers.Dense(1, activation='linear', name='fare')(h2) model = models.Model(inputs, output) # compile model model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse']) return model # - # We'll build our DNN model and inspect the model architecture. # + model = build_dnn_model() tf.keras.utils.plot_model(model, 'dnn_model.png', show_shapes=False, rankdir='LR') # - # ## Train the model # # To train the model, simply call [model.fit()](https://keras.io/models/model/#fit). Note that we should really use many more NUM_TRAIN_EXAMPLES (i.e. a larger dataset). We shouldn't make assumptions about the quality of the model based on training/evaluating it on a small sample of the full data. # # We start by setting up the environment variables for training, creating the input pipeline datasets, and then train our baseline DNN model. 
# Training hyperparameters; NUM_EVALS doubles as the number of epochs.
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 7332 * 5
NUM_EVALS = 5
NUM_EVAL_EXAMPLES = 1570

# +
trainds = load_dataset('../data/taxi-train*', TRAIN_BATCH_SIZE, 'train')

evalds = load_dataset('../data/taxi-valid*', 1000, 'eval').take(NUM_EVAL_EXAMPLES//1000)

steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)

history = model.fit(trainds,
                    validation_data=evalds,
                    epochs=NUM_EVALS,
                    steps_per_epoch=steps_per_epoch)
# -

# ### Visualize the model loss curve
#
# Next, we will use matplotlib to draw the model's loss curves for training
# and validation. A line plot is also created showing the mean squared error
# loss over the training epochs for both the train (blue) and test (orange) sets.

def plot_curves(history, metrics):
    """Plot side-by-side train/validation curves for each metric name."""
    nrows = 1
    ncols = 2
    fig = plt.figure(figsize=(10, 5))

    for idx, key in enumerate(metrics):
        ax = fig.add_subplot(nrows, ncols, idx+1)
        plt.plot(history.history[key])
        # Keras records validation metrics under the 'val_' prefix.
        plt.plot(history.history['val_{}'.format(key)])
        plt.title('model {}'.format(key))
        plt.ylabel(key)
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'], loc='upper left');

plot_curves(history, ['loss', 'rmse'])

# ### Predict with the model locally
#
# To predict with Keras, you simply call
# [model.predict()](https://keras.io/models/model/#predict) and pass in the
# cab ride you want to predict the fare amount for. Next we note the fare
# price at this geolocation and pickup_datetime.

model.predict({
    'pickup_longitude': tf.convert_to_tensor([-73.982683]),
    'pickup_latitude': tf.convert_to_tensor([40.742104]),
    'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
    'dropoff_latitude': tf.convert_to_tensor([40.755174]),
    'passenger_count': tf.convert_to_tensor([3.0]),
    'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)

# ## Improve Model Performance Using Feature Engineering
#
# We now improve our model's performance by creating the following feature
# engineering types: Temporal, Categorical, and Geolocation.
# ### Temporal Feature Columns # ### Geolocation/Coordinate Feature Columns # # The pick-up/drop-off longitude and latitude data are crucial to predicting the fare amount as fare amounts in NYC taxis are largely determined by the distance traveled. As such, we need to teach the model the Euclidean distance between the pick-up and drop-off points. # # Recall that latitude and longitude allows us to specify any location on Earth using a set of coordinates. In our training data set, we restricted our data points to only pickups and drop offs within NYC. New York city has an approximate longitude range of -74.05 to -73.75 and a latitude range of 40.63 to 40.85. # # #### Computing Euclidean distance # The dataset contains information regarding the pickup and drop off coordinates. However, there is no information regarding the distance between the pickup and drop off points. Therefore, we create a new feature that calculates the distance between each pair of pickup and drop off points. We can do this using the Euclidean Distance, which is the straight-line distance between any two coordinate points. def euclidean(params): lon1, lat1, lon2, lat2 = params londiff = lon2 - lon1 latdiff = lat2 - lat1 return tf.sqrt(londiff*londiff + latdiff*latdiff) # #### Scaling latitude and longitude # # It is very important for numerical variables to get scaled before they are "fed" into the neural network. Here we use min-max scaling (also called normalization) on the geolocation features. Later in our model, you will see that these values are shifted and rescaled so that they end up ranging from 0 to 1. # # First, we create a function named 'scale_longitude', where we pass in all the longitudinal values and add 78 to each value. Note that our scaling longitude ranges from -70 to -78. Thus, the value 78 is the maximum longitudinal value. The delta or difference between -70 and -78 is 8. We add 78 to each longitudinal value and then divide by 8 to return a scaled value. 
def scale_longitude(lon_column):
    """Min-max scale a longitude from the NYC range [-78, -70] into [0, 1]."""
    # Shift so -78 maps to 0, then divide by the 8-degree span.
    shifted = lon_column + 78
    return shifted / 8.

# Next, we create a function named 'scale_latitude', where we pass in all the
# latitudinal values and subtract 37 from each value. Note that our scaling
# latitude ranges from 37 to 45. Thus, the value 37 is the minimal latitudinal
# value. The delta or difference between 37 and 45 is 8. We subtract 37 from
# each latitudinal value and then divide by 8 to return a scaled value.

def scale_latitude(lat_column):
    """Min-max scale a latitude from the NYC range [37, 45] into [0, 1]."""
    # Shift so 37 maps to 0, then divide by the 8-degree span.
    offset = lat_column - 37
    return offset / 8.

# ### Putting it all together
# We will create a function called "euclidean" to initialize our geolocation
# parameters. We then create a function called transform. The transform
# function passes our numerical and string column features as inputs to the
# model, scales geolocation features, then creates the Euclidean distance as a
# transformed variable with the geolocation features. Lastly, we bucketize the
# latitude and longitude features.

# **Lab Task #2:** We will use Lambda layers to create two new "geo" functions for our model.
# **Lab Task #3:** Creating the bucketized and crossed feature columns def transform(inputs, numeric_cols, nbuckets): print("Inputs before features transformation: {}".format(inputs.keys())) # Pass-through columns transformed = inputs.copy() feature_columns = { colname: tf.feature_column.numeric_column(colname) for colname in numeric_cols } # Scaling longitude from range [-70, -78] to [0, 1] for lon_col in ['pickup_longitude', 'dropoff_longitude']: transformed[lon_col] = layers.Lambda( scale_longitude, name="scale_{}".format(lon_col))(inputs[lon_col]) # Scaling latitude from range [37, 45] to [0, 1] for lat_col in ['pickup_latitude', 'dropoff_latitude']: transformed[lat_col] = layers.Lambda( scale_latitude, name='scale_{}'.format(lat_col))(inputs[lat_col]) # add Euclidean distance transformed['euclidean'] = layers.Lambda( euclidean, name='euclidean')([inputs['pickup_longitude'], inputs['pickup_latitude'], inputs['dropoff_longitude'], inputs['dropoff_latitude']]) feature_columns['euclidean'] = fc.numeric_column('euclidean') # TODO 3a # create bucketized features latbuckets = np.linspace(0, 1, nbuckets).tolist() lonbuckets = np.linspace(0, 1, nbuckets).tolist() b_plat = fc.bucketized_column( feature_columns['pickup_latitude'], latbuckets) b_dlat = fc.bucketized_column( feature_columns['dropoff_latitude'], latbuckets) b_plon = fc.bucketized_column( feature_columns['pickup_longitude'], lonbuckets) b_dlon = fc.bucketized_column( feature_columns['dropoff_longitude'], lonbuckets) # TODO 3b # create crossed columns ploc = fc.crossed_column([b_plat, b_plon], nbuckets * nbuckets) dloc = fc.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets) pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4) # create embedding columns feature_columns['pickup_and_dropoff'] = fc.embedding_column(pd_pair, 100) print("Transformed features: {}".format(transformed.keys())) print("Feature columns: {}".format(feature_columns.keys())) return transformed, feature_columns # Next, we'll create 
# our DNN model now with the engineered features. We'll set `NBUCKETS = 10` to
# specify 10 buckets when bucketizing the latitude and longitude.

# +
NBUCKETS = 10

# DNN MODEL
def rmse(y_true, y_pred):
    # Root mean square error
    return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))


def build_dnn_model():
    """Build and compile the DNN using the engineered (transformed) features."""
    # input layer is all float except for pickup_datetime which is a string
    inputs = {
        colname: layers.Input(name=colname, shape=(), dtype='float32')
        for colname in NUMERIC_COLS
    }

    # transforms: scaling, Euclidean distance, bucketizing and crossing
    transformed, feature_columns = transform(inputs,
                                             numeric_cols=NUMERIC_COLS,
                                             nbuckets=NBUCKETS)

    dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)

    # two hidden layers of [32, 8] just like in the BQML DNN
    h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
    h2 = layers.Dense(8, activation='relu', name='h2')(h1)

    # final output is a linear activation because this is regression
    output = layers.Dense(1, activation='linear', name='fare')(h2)

    model = models.Model(inputs, output)

    # Compile model
    model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])

    return model
# -

model = build_dnn_model()

# Let's see how our model architecture has changed now.

tf.keras.utils.plot_model(model, 'dnn_model_engineered.png', show_shapes=False, rankdir='LR')

# +
trainds = load_dataset('../data/taxi-train*', TRAIN_BATCH_SIZE, 'train')

evalds = load_dataset('../data/taxi-valid*', 1000, 'eval').take(NUM_EVAL_EXAMPLES//1000)

steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)

# NOTE(review): trains for NUM_EVALS+10 epochs here (vs NUM_EVALS for the
# baseline) — presumably intentional to give the richer model more training.
history = model.fit(trainds,
                    validation_data=evalds,
                    epochs=NUM_EVALS+10,
                    steps_per_epoch=steps_per_epoch)
# -

# As before, let's visualize the DNN model layers.

plot_curves(history, ['loss', 'rmse'])

# Let's make a prediction with this new model with engineered features on the
# example we had above.
# Fare prediction for the same example ride used with the baseline model.
model.predict({
    'pickup_longitude': tf.convert_to_tensor([-73.982683]),
    'pickup_latitude': tf.convert_to_tensor([40.742104]),
    'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
    'dropoff_latitude': tf.convert_to_tensor([40.755174]),
    'passenger_count': tf.convert_to_tensor([3.0]),
    'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)

# Copyright 2020 Google Inc.

# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/keras_adv_feat_eng-lab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + import datetime import pymc import numpy as np import spacepy.plot as spp # for the style import matplotlib.pyplot as plt import spacepy.toolbox as tb import spacepy.plot as spp # %matplotlib inline datetime.datetime.now() # - # From https://stackoverflow.com/questions/17409324/solving-inverse-problems-with-pymc # # Suppose we're given a prior on **X** (e.g. X ~ Gaussian) and a forward operator **y = f(x)**. Suppose further we have observed **y** by means of an experiment and that this experiment can be repeated indefinitely. The output **Y** is assumed to be Gaussian (Y ~ Gaussian) or noise-free (Y ~ Delta(observation)). # # How to consistently update our subjective degree of knowledge about **X** given the observations? I've tried the following model with PyMC, but it seems I'm missing something: # # from pymc import * # # xtrue = 2 # this value is unknown in the real application # x = rnormal(0, 0.01, size=10000) # initial guess # # for i in range(5): # X = Normal('X', x.mean(), 1./x.var()) # Y = X*X # f(x) = x*x # OBS = Normal('OBS', Y, 0.1, value=xtrue*xtrue+rnormal(0,1), observed=True) # model = Model([X,Y,OBS]) # mcmc = MCMC(model) # mcmc.sample(10000) # # x = mcmc.trace('X')[:] # posterior samples # # The posterior is not converging to **xtrue**. 
# + xtrue = 2 # this value is unknown in the real application x = pymc.rnormal(0, 0.01, size=10000) # initial guess for i in range(5): X = pymc.Normal('X', x.mean(), 1./x.var()) Y = X*X # f(x) = x*x OBS = pymc.Normal('OBS', Y, 0.1, value=xtrue*xtrue+pymc.rnormal(0,1), observed=True) model = pymc.Model([X,Y,OBS]) mcmc = pymc.MCMC(model) mcmc.sample(10000) x = mcmc.trace('X')[:] # posterior samples # - pymc.Matplot.plot(mcmc) # There is a clear issue here that $y=x^2$ loses the negative when applied so that the result is peaks at both -2 and 2. # # 1. Try doing this again with better constraints on the model (x>=0) # + xtrue = 2 # this value is unknown in the real application x = pymc.rpoisson(1, size=10000) # initial guess for i in range(5): X = pymc.Normal('X', x.mean(), 1./x.var()) Y = X*X # f(x) = x*x OBS = pymc.Normal('OBS', Y, 0.1, value=xtrue*xtrue+pymc.rnormal(0,1), observed=True) model = pymc.Model([X,Y,OBS]) mcmc = pymc.MCMC(model) mcmc.sample(10000) x = mcmc.trace('X')[:] # posterior samples pymc.Matplot.plot(mcmc) # -
Inversion/Inversion1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Source](https://www.dataquest.io/blog/pandas-python-tutorial/) # # Python is a great language for doing data analysis, primarily because of the fantastic ecosystem of data-centric Python packages. Pandas is one of those packages, and makes importing and analyzing data much easier. Pandas builds on packages like NumPy and matplotlib to give you a single, convenient, place to do most of your data analysis and visualization work. # # In this introduction, we'll use Pandas to analyze data on video game reviews from IGN, a popular video game review site. The data was scraped by [<NAME>](https://www.kaggle.com/egrinstein), and can be found [here](https://www.kaggle.com/egrinstein/20-years-of-games). As we analyze the video game reviews, we'll learn key Pandas concepts like indexing. # # Do games like the Witcher 3 tend to get better reviews on the PS4 than the Xbox One? This dataset can help us find out. # # Importing Data with Pandas # # The first step we'll take is to read the data in. The data is stored as a comma-separated values, or csv, file, where each row is separated by a new line, and each column by a comma (,). 
Here are the first few rows of the ign.csv file: # # ``` # ,score_phrase,title,url,platform,score,genre,editors_choice,release_year,release_month,release_day # 0,Amazing,LittleBigPlanet PS Vita,/games/littlebigplanet-vita/vita-98907,PlayStation Vita,9.0,Platformer,Y,2012,9,12 # 1,Amazing,LittleBigPlanet PS Vita -- Marvel Super Hero Edition,/games/littlebigplanet-ps-vita-marvel-super-hero-edition/vita-20027059,PlayStation Vita,9.0,Platformer,Y,2012,9,12 # 2,Great,Splice: Tree of Life,/games/splice/ipad-141070,iPad,8.5,Puzzle,N,2012,9,12 # 3,Great,NHL 13,/games/nhl-13/xbox-360-128182,Xbox 360,8.5,Sports,N,2012,9,11 # ``` # # As you can see above, each row in the data represents a single game that was reviewed by IGN. The columns contain information about that game: # # * score_phrase — how IGN described the game in one word. This is linked to the score it received. # * title — the name of the game. # * url — the URL where you can see the full review. # * platform — the platform the game was reviewed on (PC, PS4, etc). # * score — the score for the game, from 1.0 to 10.0. # * genre — the genre of the game. # * editors_choice — N if the game wasn't an editor's choice, Y if it was. This is tied to score. # * release_year — the year the game was released. # * release_month — the month the game was released. # * release_day — the day the game was released. # # There's also a leading column that contains row index values. We can safely ignore this column, but we'll dive into what index values are later on. In order to be able to work with the data in Python, we'll need to read the csv file into a Pandas DataFrame. A DataFrame is a way to represent and work with tabular data. Tabular data has rows and columns, just like our csv file. # In order to read in the data, we'll need to use the pandas.read_csv function. This function will take in a csv file and return a DataFrame. The below code will: # # * Import the pandas library. We rename it to pd so it's faster to type out. 
# * Read ign.csv into a DataFrame, and assign the result to reviews. # disable warnings for lecture import warnings warnings.filterwarnings('ignore') # + import pandas as pd reviews = pd.read_csv("ign.csv") # - # Once we read in a DataFrame, Pandas gives us two methods that make it fast to print out the data. These functions are: # # * ```pandas.DataFrame.head``` -- prints the first N rows of a DataFrame. By default 5. # * ```pandas.DataFrame.tail``` -- prints the last N rows of a DataFrame. By default 5. # # We'll use the head method to see what's in reviews: reviews.head(3) # We can also access the ```pandas.DataFrame.shape``` property to see row many rows and columns are in reviews: reviews.shape # As you can see, everything has been read in properly -- we have 18625 rows and 11 columns. # # One of the big advantages of Pandas vs just using NumPy is that Pandas allows you to have columns with different data types. reviews has columns that store float values, like score, string values, like score_phrase, and integers, like release_year. # # Now that we've read the data in properly, let's work on indexing reviews to get the rows and columns that we want. # ## Indexing DataFrames with Pandas # # Earlier, we used the head method to print the first 5 rows of reviews. We could accomplish the same thing using the ```pandas.DataFrame.iloc``` method. The ```iloc``` method allows us to retrieve rows and columns by position. In order to do that, we'll need to specify the positions of the rows that we want, and the positions of the columns that we want as well. # # The below code will replicate reviews.head(): reviews.iloc[0:5,:] # As you can see above, we specified that we wanted rows 0:5. This means that we wanted the rows from position 0 up to, but not including, position 5. The first row is considered to be in position 0. This gives us the rows at positions 0, 1, 2, 3, and 4. # # If we leave off the first position value, like :5, it's assumed we mean 0. 
If we leave off the last position value, like 0:, it's assumed we mean the last row or column in the DataFrame. # # We wanted all of the columns, so we specified just a colon (:), without any positions. This gave us the columns from 0 to the last column. # # Here are some indexing examples, along with the results: # # * ```reviews.iloc[:5,:]``` — the first 5 rows, and all of the columns for those rows. # * ```reviews.iloc[:,:]``` — the entire DataFrame. # * ```reviews.iloc[5:,5:]``` — rows from position 5 onwards, and columns from position 5 onwards. # * ```reviews.iloc[:,0]``` — the first column, and all of the rows for the column. # * ```reviews.iloc[9,:]``` — the 10th row, and all of the columns for that row. # # Indexing by position is very similar to NumPy indexing. # Now that we know how to index by position, let's remove the first column, which doesn't have any useful information: reviews = reviews.iloc[:,1:] reviews.head() reviews.shape # ### Indexing Using Labels in Pandas # Now that we know how to retrieve rows and columns by position, it's worth looking into the other major way to work with DataFrames, which is to retrieve rows and columns by label. # # A major advantage of Pandas over NumPy is that each of the columns and rows has a label. Working with column positions is possible, but it can be hard to keep track of which number corresponds to which column. # # We can work with labels using the ```pandas.DataFrame.loc``` method, which allows us to index using labels instead of positions. # # We can display the first five rows of reviews using the ```loc``` method like this: reviews.loc[0:5,:] # The above doesn't actually look much different from ```reviews.iloc[0:5,:]```. This is because while row labels can take on any values, our row labels match the positions exactly. You can see the row labels on the very left of the table above (they're in bold). You can also see them by accessing the index property of a DataFrame. 
We'll display the row indexes for reviews: reviews.index # pull out the 1st twenty indexes list(reviews.index)[:20] # Indexes don't always have to match up with positions, though. In the below code cell, we'll: # # Get row 10 to row 20 of reviews, and assign the result to some_reviews. # Display the first 5 rows of some_reviews. some_reviews = reviews.iloc[10:20,] some_reviews.head() # As we mentioned earlier, column labels can make life much easier when you're working with data. We can specify column labels in the loc method to retrieve columns by label instead of by position. reviews.loc[:5, "score"] # We can also specify more than one column at a time by passing in a list: reviews.loc[:5, ["score", "release_year"]] reviews.loc[[0, 3], ["score", "release_year"]] # ## Pandas Series Objects # # We can retrieve an individual column in Pandas a few different ways. So far, we've seen two types of syntax for this: # # * ```reviews.iloc[:,1]``` — will retrieve the second column. # * ```reviews.loc[:,"score_phrase"]``` — will also retrieve the second column. # # There's a third, even easier, way to retrieve a whole column. We can just specify the column name in square brackets, like with a dictionary: reviews["score"].head() # We can also use lists of columns with this method: x = reviews[["score", "release_year"]] x.head() # When we retrieve a single column, we're actually retrieving a Pandas ```Series``` object. A ```DataFrame``` stores **tabular data**, but a ```Series``` stores **a single column or row** of data. # # We can verify that a single column is a ```Series```: type(reviews["score"]) # We can create a ```Series``` manually to better understand how it works. To create a ```Series```, we pass a list or NumPy array into the ```Series``` object when we instantiate it: s1 = pd.Series([1, 2]) s1 # A ```Series``` can contain any type of data, including mixed types. 
Here, we create a ```Series``` that contains string objects: s2 = pd.Series(["<NAME>", "<NAME>"]) s2 # # Creating A DataFrame in Pandas # We can create a ```DataFrame``` by passing multiple ```Series``` into the ```DataFrame``` class. Here, we pass in the two ```Series``` objects we just created, ```s1``` as the first row, and ```s2``` as the second row: # create a dataframe from two series x = pd.DataFrame() x['rank'] = s1 x['name'] = s2 x # create a datafrme from a dictionary y = pd.DataFrame({ 'rank': s1, 'name': s2 }) y # create a dataframe from a list pd.DataFrame([s1, s2]) # We can also accomplish the same thing with a list of lists. Each inner list is treated as a row in the resulting DataFrame: # create a dataframe from a list of lists pd.DataFrame( [ [1, "<NAME>"], [2, "<NAME>"] ] ) # We can specify the column labels when we create a DataFrame: # specify column labels pd.DataFrame( [ [1, "<NAME>"], [2, "<NAME>"] ], columns=["rank", "name"] ) # As well as the row labels (the index): # specify the index labels frame = pd.DataFrame( [ [1,2], ["<NAME>", "<NAME>"] ], index=["rank", "name"], columns=["person1", "person2"] ) frame # We're then able index the ```DataFrame``` using the labels: frame.loc["rank":"name", "person1"] # We can skip specifying the columns keyword argument if we pass a dictionary into the ```DataFrame``` constructor. This will automatically setup column names: frame = pd.DataFrame( { "person1": [1, "<NAME>"], "person2": [2, "<NAME>"] } ) frame # ## Pandas DataFrame Methods # # As we mentioned earlier, each column in a ```DataFrame``` is a ```Series``` object: type(reviews["title"]) # We can call most of the same methods on a ```Series``` object that we can on a ```DataFrame```, including head: reviews["title"].head() # Pandas ```Series``` and ```DataFrame``` also have other methods that make calculations simpler. 
For example, we can use the ```pandas.Series.mean``` method to find the mean of a ```Series```: reviews["score"].mean() # We can also call the similar ```pandas.DataFrame.mean``` method, which will find the mean of each numerical column in a ```DataFrame``` by default: x = reviews.mean() x # We can modify the ```axis``` keyword argument to mean in order to compute the mean of each row or of each column. By default, ```axis``` is equal to 0, and will compute the mean of each **column**. We can also set it to 1 to compute the mean of each **row**. Note that this will only compute the mean of the numerical values in each row: reviews.mean(axis=1).head() # There are quite a few methods on ```Series``` and ```DataFrame``` that behave like ```mean```. Here are some handy ones: # # * ```pandas.DataFrame.corr``` — finds the correlation between columns in a DataFrame. # * ```pandas.DataFrame.count``` — counts the number of non-null values in each DataFrame column. # * ```pandas.DataFrame.max``` — finds the highest value in each column. # * ```pandas.DataFrame.min``` — finds the lowest value in each column. # * ```pandas.DataFrame.median``` — finds the median of each column. # * ```pandas.DataFrame.std``` — finds the standard deviation of each column. # We can use the corr method to see if any columns correlation with score. For instance, this would tell us if games released more recently have been getting higher reviews (release_year), or if games released towards the end of the year score better (release_month): reviews.corr() # As you can see above, none of our numeric columns correlates with score, meaning that release timing doesn't linearly relate to review score. # ## DataFrame Math with Pandas # We can also perform math operations on ```Series``` or ```DataFrame``` objects. 
For example, we can divide every value in the score column by 2 to switch the scale from 0-10 to 0-5: reviews["score"] / 2 # All the common mathematical operators that work in Python, like ```+```, ```-```, ```*```, ```/```, and ```^``` will work, and will apply to each element in a ```DataFrame``` or a ```Series```. # ## Boolean Indexing in Pandas # # As we saw above, the mean of all the values in the score column of reviews is around 7. What if we wanted to find all the games that got an above average score? We could start by doing a comparison. The comparison compares each value in a ```Series``` to a specified value, then generate a ```Series``` full of ```Boolean``` values indicating the status of the comparison. For example, we can see which of the rows have a score value higher than 7: score_filter = reviews["score"] > 7 score_filter # Once we have a ```Boolean``` ```Series```, we can use it to select only rows in a ```DataFrame``` where the ```Series``` contains the value ```True```. So, we could only select rows in reviews where score is greater than 7: filtered_reviews = reviews[score_filter] filtered_reviews.head() # It's possible to use multiple conditions for filtering. Let's say we want to find games released for the Xbox One that have a score of more than 7. In the below code, we: # # * Setup a filter with two conditions: # * Check if score is greater than 7. # * Check if platform equals Xbox One # * Apply the filter to reviews to get only the rows we want. # * Use the head method to print the first 5 rows of filtered_reviews. # + # setup the boolean filtr xbox_one_filter = (reviews["score"] > 7) & (reviews["platform"] == "Xbox One") # select the data based on the filter filtered_reviews = reviews[xbox_one_filter] # display the first 5 results filtered_reviews.head() # - # When filtering with multiple conditions, it's important to put each condition in parentheses ```()```, and separate them with a single ampersand ```&```. 
# # Pandas Plotting # # Now that we know how to filter, we can create plots to observe the review distribution for the Xbox One vs the review distribution for the PlayStation 4. This will help us figure out which console has better games. We can do this via a histogram, which will plot the frequencies for different score ranges. This will tell us which console has more highly reviewed games. # # We can make a histogram for each console using the ```pandas.DataFrame.plot``` method. This method utilizes ```matplotlib```, the popular Python plotting library, under the hood to generate good-looking plots. # # The plot method defaults to drawing a line graph. We'll need to pass in the keyword argument ```kind="hist"``` to draw a histogram instead. # # In the below code, we: # # * Call ```%matplotlib inline``` to set up plotting inside a Jupyter notebook. # * Filter reviews to only have data about the Xbox One. # * Plot the score column. reviews["platform"].unique() # + # enable display of chart inline # %matplotlib inline # setup the filter platform_filter = reviews["platform"] == "Xbox One" # select and plot reviews[platform_filter]["score"].plot(kind="hist") # - # We can also do the same for the PS4: # + # setup the filter mask = reviews["platform"].isin(["Xbox One", "PlayStation 4"]) # select and plot reviews[mask]["score"].plot(kind="hist") # - # It appears from our histogram that the PlayStation 4 has many more highly rated games than the Xbox One. # plot all filtered_reviews["score"].hist()
lectures/Week 05 - Data Processing and Visualization Part 2/02.a - Pandas Data analysis Part 1.ipynb
-- ---
-- jupyter:
--   jupytext:
--     text_representation:
--       extension: .hs
--       format_name: light
--       format_version: '1.5'
--       jupytext_version: 1.14.4
--   kernelspec:
--     display_name: Haskell
--     language: haskell
--     name: haskell
-- ---

-- # Chapter 05 - Statistics
--

-- +
import Chapter05.Statistics
import Graphics.Rendering.Chart

-- Render the chapter's histogram chart.
toRenderable histogramChart

-- +
import Data.Statistics

-- Central tendency and selected quantiles of the friends data,
-- plus the extreme and second-most-extreme values.
mean friends
quantile friends 0.10
quantile friends 0.25
quantile friends 0.75
quantile friends 0.90
smallestValue
largestValue
secondSmallestValue
secondLargestValue
-- -

-- Dispersion and association measures.
mode friends
dataRange friends
variance friends
stdDev friends
interquartileRange friends
correlation friends dailyMinutes
covariance friends dailyMinutes

-- +
import Graphics.Rendering.Chart.Easy

-- Scatter plot of the full data set (the outlier included).
scatterPlot :: Renderable ()
scatterPlot = toRenderable $ do
    layout_title .= "Correlation with an Outlier"
    layout_x_axis . laxis_title .= "# of friends"
    layout_y_axis . laxis_title .= "minutes per day"
    plot $ points "" $ zip friends dailyMinutes

scatterPlot

-- +
-- correlation while ignoring the outlier
correlation (tail friends) (tail dailyMinutes)

-- Same scatter plot with the outlier (the first element) dropped via `tail`.
-- BUG FIX: this chart previously reused the title "Correlation with an
-- Outlier", copy-pasted from scatterPlot above, even though the outlier is
-- removed here.
scatterPlotNoOutlier :: Renderable ()
scatterPlotNoOutlier = toRenderable $ do
    layout_title .= "Correlation Without the Outlier"
    layout_x_axis . laxis_title .= "# of friends"
    layout_y_axis . laxis_title .= "minutes per day"
    plot $ points "" $ zip (tail friends) (tail dailyMinutes)

scatterPlotNoOutlier
notebooks/05_statistics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
from numpy import exp, sqrt  # deduplicated: `exp` and `norm` were imported twice
from hw import Jamshidian as jamsh
from hw import Henrard as henr
from hw import calibration as hw_calib
import hw.calibration as calib
from hw.const import *
from fox_toolbox.utils import xml_parser as log
from scipy.stats import norm
import scipy.integrate as integrate
from fox_toolbox.utils import volatility as vols

# %load_ext autoreload
# %autoreload 2
# -

# ### Read IRSM FORM

# +
_, irsmform = log.get_xml(INPUT_2SWO)

main_curve, sprds = log.get_curves(irsmform)
dsc_curve = main_curve
try:
    estim_curve = sprds[0]
except TypeError:
    # No spread curves in the form: estimate off the main (discount) curve.
    estim_curve = main_curve

cal_basket = list(log.get_calib_basket(irsmform))
# -


def fwd_fadj_ann(cal_basket):
    """Yield (forward swap rate, float-leg adjustments, annuity) per swaption.

    Rates/annuities are computed off the module-level `main_curve` and
    `estim_curve` read from the IRSM form above.
    """
    for swo in cal_basket:
        fwd = swo.get_swap_rate(main_curve, estim_curve)
        flt_adjs = swo.get_flt_adjustments(main_curve, estim_curve)
        calib_annuity = swo.get_annuity(main_curve)
        yield fwd, flt_adjs, calib_annuity


# BUG FIX: this was originally `swos = cal_basket`, yet every later use
# tuple-indexes it (`swos[i][0]` forward, `swos[i][2]` annuity, `swos[0][0]`),
# i.e. it expects the tuples produced by fwd_fadj_ann, not the raw
# calibration swaptions.
swos = list(fwd_fadj_ann(cal_basket))

# ### READ IRSM OUT

# +
_, irsmout = log.get_xml(OUTPUT_2SWO)

ref_swos = list(log.get_calib_basket(irsmout))
ref_mr, (buckets, hw_sigma) = log.get_hw_params(irsmout)
ref_sigmas = hw_sigma[1:-1]  # drop the padding values at both ends
# -

# ### MAIN HW (Jamshidian)

# +
# Price each calibration swaption under Hull-White with the reference sigma,
# using the Jamshidian decomposition.
calib_premiumsJ = []
for swo, ref_sigma in zip(cal_basket, ref_sigmas):
    if np.isnan(swo.strike):
        swo.strike = 0.  # ATM convention: missing strike treated as 0
    coefJ = jamsh.get_coef(swo, ref_mr, ref_sigma, main_curve, estim_curve)
    b_i = jamsh.get_b_i(swo, ref_mr)
    varx = jamsh.var_x(swo.expiry, ref_mr, ref_sigma)
    x_starJ = jamsh.get_x_star(coefJ, b_i, varx)
    jamsh_price = jamsh.hw_swo(swo, ref_mr, ref_sigma, main_curve, estim_curve)
    calib_premiumsJ.append(jamsh_price)

calib_premiumsJ
# -

# ## MAIN HW (Hernard)

# +
# Same pricing with the Henrard formulation; hw_swo also returns debug data.
calib_premiumsH = []
debug_list = []
for swo, ref_sigma in zip(cal_basket, ref_sigmas):
    start_date = swo.start_date
    hw_dates = np.insert(swo.payment_dates, 0, start_date)
    flt_adjs = swo.get_flt_adjustments(main_curve, estim_curve)
    coefH = henr.get_coef(swo.day_count_fractions, swo.strike, flt_adjs)
    P_i = henr.get_P_i(dsc_curve, start_date, swo.payment_dates)
    alpha_i = henr.get_alpha_i(ref_mr, swo.expiry, ref_sigma, hw_dates)
    x_starH = henr.get_x_star(coefH, P_i, alpha_i)
    henr_price, debug = henr.hw_swo(swo, ref_mr, ref_sigma, dsc_curve, estim_curve)
    debug_list.append(pd.DataFrame(data=debug))
    calib_premiumsH.append(henr_price)

calib_premiumsH
# -

debug_list[0]

debug_list[1]

# #### ! <font color='red'>With reference HW sigma swo 2 tgt premium is not perfectly matched </font> !

# NOTE(review): this cell reads sigma_hw_jamsh / sigma_hw_henr, which are only
# assigned in the "HW Calibration" cells below — the notebook relies on running
# those cells first (out-of-order execution).
for i in range(2):
    print(f' *-*-* ')
    print(f'swaption number {i}: ')
    swo = cal_basket[i]
    ref_swo = ref_swos[i]
    fwd = swos[i][0]            # forward swap rate from fwd_fadj_ann
    calib_annuity = swos[i][2]  # annuity from fwd_fadj_ann
    strike = swo.strike
    w = -1.0 if swo.pay_rec == 'Receiver' else 1.0
    black_price = calib_annuity * vols.BachelierPrice(fwd, strike, swo.vol.value * np.sqrt(swo.expiry), w=w)

    print('Jamsh price with <RefSigma> ', 100*calib_premiumsJ[i])
    print('Hern price with <RefSigma> ', 100*calib_premiumsH[i])
    print('<BlackPrice> ', 100*ref_swo.tgt_premium)
    print('<CalibPremium> ', 100*ref_swo.cal_premium)
    print('Jamsh price with my Jamsh sigma ', 100*jamsh.hw_swo(swo, ref_mr, sigma_hw_jamsh[i], main_curve, estim_curve))
    print('Hern price with my hern sigma ', 100*henr.hw_swo(swo, ref_mr, sigma_hw_henr[i], main_curve, estim_curve))
    print(' ')
    print('My calibrated Jamshidian sigma: ', 100*sigma_hw_jamsh[i])
    print('My calibrated Henrard sigma: ', 100*sigma_hw_henr[i])
    print('<RefSigma> (Henrard): ', 100*ref_sigmas[i])

# ## HW Calibration

sigma_hw_jamsh = hw_calib._calibrate_sigma_hw(cal_basket, ref_mr, dsc_curve, estim_curve, True)

sigma_hw_jamsh

sigma_hw_henr = hw_calib._calibrate_sigma_hw(cal_basket, ref_mr, dsc_curve, estim_curve, False)

sigma_hw_henr

swos[0][0]  # sanity peek: forward of the first swaption

sigma_hw_henr = hw_calib.calibrate_sigma_hw(cal_basket, ref_mr, dsc_curve, estim_curve, False)

100*sigma_hw_henr

100*(sigma_hw_henr - ref_sigma)/ref_sigma

# NOTE(review): the names below (ref_swo, fwd, x_starJ, jamsh_price, ...) are
# leftovers from the loops above, i.e. they refer to the LAST swaption only.
print('fwd recon :', ref_swo.fwd - fwd)
print('Annuity recon', ref_swo.annuity - calib_annuity)
print('')
print('***Jamshidian pricing info***')
print('x_star Jamshidian', x_starJ)
print('swap_value Jamshidian', jamsh.swap_value(coefJ, b_i, varx, x_starJ))
print('')
print('***Henrard pricing info***')
print('x_star Hernard', x_starH)
print('swap_value Henrard', henr.swap_value(coefH, P_i, alpha_i, x_starH))
print(' ')
print('TEST Bachelier -> Black price recon', black_price - ref_swo.tgt_premium )
print(' ')
print('***Prices %***')
print('TEST Jamsh hw_swo ', 100*jamsh_price)
print('TEST Henr hw_swo ', 100*henr_price)
print('out put price ', 100*ref_swo.tgt_premium)
print(' ')
print('***Calibration: sigma Hull White %***')
print('TEST hw_calib Jamshidian: ', 100*sigma_hw_jamsh)
print('TEST hw_calib Henrard: ', 100*sigma_hw_henr)
print('out put reference sigma: ', 100*ref_sigma)
notebooks/archive/calibration/HW_two_swaptions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# ---

# ## First, I'll compute the camera calibration using chessboard images

# +
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
# %matplotlib qt

# Prepare object points for a 9x6 inner-corner chessboard, like
# (0,0,0), (1,0,0), (2,0,0) ...., (6,5,0); z stays 0 (the board is flat).
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)

    # If found, add object points, image points
    if ret:  # idiom fix: was `ret == True`
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners for visual inspection (500 ms each)
        img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)

cv2.destroyAllWindows()
# -

# ## And so on and so forth...
examples/example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: MindSpore-1.0.1 # language: python # name: mindspore-1.0.1 # --- # # 深度概率编程 # # ## 概述 # # 深度学习模型具有强大的拟合能力,而贝叶斯理论具有很好的可解释能力。MindSpore深度概率编程(MindSpore Deep Probabilistic Programming, MDP)将深度学习和贝叶斯学习结合,通过设置网络权重为分布、引入隐空间分布等,可以对分布进行采样前向传播,由此引入了不确定性,从而增强了模型的鲁棒性和可解释性。MDP不仅包含通用、专业的概率学习编程语言,适用于“专业”用户,而且支持使用开发深度学习模型的逻辑进行概率编程,让初学者轻松上手;此外,还提供深度概率学习的工具箱,拓展贝叶斯应用功能。 # # 本章将详细介绍深度概率编程在MindSpore上的应用。在动手进行实践之前,确保,你已经正确安装了MindSpore 0.7.0-beta及其以上版本。 # # > 本例适用于GPU和Ascend环境。 # ## 环境准备 # # 设置训练模式为图模式,计算平台为GPU。 # + from mindspore import context context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") # - # ## 数据准备 # # ### 下载数据集 # 下载MNIST数据集并解压到指定位置,执行如下命令: # !wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/MNIST_Data.zip # !unzip -o MNIST_Data.zip -d ./datasets/ # !tree ./datasets/MNIST_Data/ # ### 定义数据集增强方法 # # MNIST数据集的原始训练数据集是60000张$28\times28$像素的单通道数字图片,本次训练用到的含贝叶斯层的LeNet5网络接收到训练数据的张量为`(32,1,32,32)`,通过自定义create_dataset函数将原始数据集增强为适应训练要求的数据,具体的增强操作解释可参考官网快速入门[实现一个图片分类应用](https://www.mindspore.cn/tutorial/training/zh-CN/master/quick_start/quick_start.html)。 # + import mindspore.dataset.vision.c_transforms as CV import mindspore.dataset.transforms.c_transforms as C from mindspore.dataset.vision import Inter from mindspore import dataset as ds def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1): # define dataset mnist_ds = ds.MnistDataset(data_path) # define some parameters needed for data enhancement and rough justification resize_height, resize_width = 32, 32 rescale = 1.0 / 255.0 shift = 0.0 rescale_nml = 1 / 0.3081 shift_nml = -1 * 0.1307 / 0.3081 # according to the parameters, generate the corresponding data enhancement method c_trans = [ CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR), 
CV.Rescale(rescale_nml, shift_nml), CV.Rescale(rescale, shift), CV.HWC2CHW() ] type_cast_op = C.TypeCast(mstype.int32) # using map to apply operations to a dataset mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers) mnist_ds = mnist_ds.map(operations=c_trans, input_columns="image", num_parallel_workers=num_parallel_workers) # process the generated dataset buffer_size = 10000 mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) mnist_ds = mnist_ds.repeat(repeat_size) return mnist_ds # - # ## 定义深度神经网络 # # 在经典LeNet5网络中,数据经过如下计算过程:卷积1->激活->池化->卷积2->激活->池化->降维->全连接1->全连接2->全连接3。 # 本例中将引入概率编程方法,将卷积1和全连接1两个计算层改造成贝叶斯层,构造成含贝叶斯层的LeNet5网络。 # + from mindspore.common.initializer import Normal import mindspore.nn as nn from mindspore.nn.probability import bnn_layers import mindspore.ops as ops from mindspore import dtype as mstype class BNNLeNet5(nn.Cell): def __init__(self, num_class=10): super(BNNLeNet5, self).__init__() self.num_class = num_class self.conv1 = bnn_layers.ConvReparam(1, 6, 5, stride=1, padding=0, has_bias=False, pad_mode="valid") self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid') self.fc1 = bnn_layers.DenseReparam(16 * 5 * 5, 120) self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02)) self.fc3 = nn.Dense(84, self.num_class) self.relu = nn.ReLU() self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) self.flatten = nn.Flatten() def construct(self, x): x = self.max_pool2d(self.relu(self.conv1(x))) x = self.max_pool2d(self.relu(self.conv2(x))) x = self.flatten(x) x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) x = self.fc3(x) return x # - # 本例中将卷积层1和全连接1两个计算层换成了贝叶斯卷积层`bnn_layers.ConvReparam`和贝叶斯全连接层`bnn_layers.DenseReparam`。 # ### 定义训练网络 # # 定义训练网络并进行训练。 # + from mindspore.nn import TrainOneStepCell from mindspore import Tensor, Model from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor from 
mindspore.nn.metrics import Accuracy from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits import os lr = 0.01 momentum = 0.9 model_path = "./models/ckpt/probability_bnnlenet5/" # clean old run files os.system("rm -f {0}*.meta {0}*.ckpt".format(model_path)) network = BNNLeNet5() criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") optimizer = nn.Momentum(network.trainable_params(), lr, momentum) model = Model(network, criterion, optimizer, metrics={"Accuracy": Accuracy()} ) config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=16) ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", directory=model_path, config=config_ck) ds_train_path = "./datasets/MNIST_Data/train/" train_set = create_dataset(ds_train_path, 32, 1) model.train(1, train_set, callbacks=[ckpoint_cb, LossMonitor()]) # - # 训练完成后会在对应的路径上生成`.ckpt`为后缀的权重参数文件和`.meta`为后缀的计算图文件。 # 其路径结构为: # !tree $model_path # ## 验证模型精度 # # 载入验证数据集,并验证含有贝叶斯层的LeNet5网络模型的精度。 ds_eval_path = "./datasets/MNIST_Data/test/" test_set = create_dataset(ds_eval_path, 32, 1) acc = model.eval(test_set) print(acc) # 模型精度大于0.95,证明模型效果良好。 # ## 总结 # # 本例使用了深度概率编程在经典LeNet5深度神经网络中应用,含有贝叶斯层的LeNet5网络和原本的LeNet5网络的训练体验过程极其相似,有心的用户可以对比两者在训练收敛效率,稳定性等方面的不同,是否体现了概述中深度概率编程的优点。 # 当然深度概率编程近年来最激动人心的是在CVAE以及GAN等生成网络中的应用,这使我们在拥有了以假乱真的数据生成能力,接下来一篇就以CVAE网络体验介绍深度概率编程的另一种应用。
tutorials/notebook/apply_deep_probability_programming/apply_deep_probability_programming_bnnlenet5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import jax import jax.numpy as jnp import matplotlib.pyplot as plt import numpy as np from jax import lax from jax import scipy as jscipy import numpyro import numpyro.distributions as dist import numpyro.infer.kernels as kernels from numpyro.distributions import constraints from numpyro.infer import ELBO, Stein from numpyro.infer.autoguide import AutoDelta from numpyro.infer.initialization import init_to_value, init_with_noise from numpyro.callbacks import Progbar from numpyro.optim import Adagrad # - rng_key = jax.random.PRNGKey(42) # # Sine Model class Sine(dist.Distribution): support = constraints.real def __init__(self, alpha=jnp.array(1.), sigma1=jnp.array(3e-3), sigma2=jnp.array(1.0), validate_args=None): batch_shape = lax.broadcast_shapes(jnp.shape(alpha), jnp.shape(sigma1), jnp.shape(sigma2)) self.alpha = jnp.broadcast_to(alpha, batch_shape) self.sigma1 = jnp.broadcast_to(sigma1, batch_shape) self.sigma2 = jnp.broadcast_to(sigma2, batch_shape) super(Sine, self).__init__(batch_shape=batch_shape, validate_args=validate_args) def log_prob(self, value): return jnp.where(jnp.logical_and(jnp.all(-1 <= value, axis=-1), jnp.all(value <= 1, axis=-1)), - (value[..., 1] + jnp.sin(self.alpha * value[..., 0])) ** 2 / (2 * self.sigma1) - (value[..., 0] ** 2 + value[..., 1] ** 2) / (2 * self.sigma2), -10e3) def sample(self, key, sample_shape=()): xs = jnp.array(np.linspace(-1, 1, num=100)) ys = jnp.array(np.linspace(-1, 1, num=100)) zs = jnp.stack(jnp.meshgrid(xs, ys), axis=-1) logits = jnp.expand_dims(jnp.ravel(self.log_prob(zs)), axis=0) cs = dist.Categorical(logits=logits).sample(key, sample_shape) res = jnp.concatenate(jnp.divmod(cs, zs.shape[0]), axis=-1).astype('float') /\ jnp.array([jnp.max(xs) - jnp.min(xs), jnp.max(ys) - jnp.min(ys)]) + 
jnp.array([jnp.min(xs), jnp.min(ys)]) return res sine_xs = np.linspace(-1, 1, num=100) sine_ys = np.linspace(-1, 1, num=100) sine_zs = np.stack(np.meshgrid(sine_xs, sine_ys), axis=-1) sine_lps = np.exp(Sine().log_prob(sine_zs)) plt.imshow(sine_lps, origin='lower', interpolation='bicubic', extent=[np.min(sine_xs), np.max(sine_xs), np.min(sine_ys), np.max(sine_ys)]) plt.show() num_iterations = 30 def model(): numpyro.sample('x', Sine()) guide = AutoDelta(model, init_strategy=init_with_noise(init_to_value(values={'x': jnp.array([0., 0.])}), noise_scale=1.0)) svgd = Stein(model, guide, Adagrad(step_size=0.1), ELBO(), kernels.RBFKernel(), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(sine_lps, origin='lower', interpolation='bicubic', extent=[np.min(sine_xs), np.max(sine_xs), np.min(sine_ys), np.max(sine_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(sine_xs), np.max(sine_xs))) plt.ylim((np.min(sine_ys), np.max(sine_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=0.1), ELBO(), kernels.PrecondMatrixKernel(kernels.HessianPrecondMatrix(), kernels.RBFKernel(mode='matrix'), precond_mode='const'), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(sine_lps, origin='lower', interpolation='bicubic', extent=[np.min(sine_xs), np.max(sine_xs), np.min(sine_ys), np.max(sine_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(sine_xs), np.max(sine_xs))) plt.ylim((np.min(sine_ys), np.max(sine_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=0.5), ELBO(), kernels.PrecondMatrixKernel(kernels.HessianPrecondMatrix(), kernels.RBFKernel(mode='matrix'), precond_mode='anchor_points'), num_particles=50) # + tags=[] svgd_state, loss = 
svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(sine_lps, origin='lower', interpolation='bicubic', extent=[np.min(sine_xs), np.max(sine_xs), np.min(sine_ys), np.max(sine_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(sine_xs), np.max(sine_xs))) plt.ylim((np.min(sine_ys), np.max(sine_ys))) plt.show() # - # # Double Banana Model class DoubleBanana(dist.Distribution): support = constraints.real def __init__(self, y=jnp.log(30.), sigma1=jnp.array(1.0), sigma2=jnp.array(9e-2), validate_args=None): batch_shape = lax.broadcast_shapes(jnp.shape(y), jnp.shape(sigma1), jnp.shape(sigma2)) self.y = jnp.broadcast_to(y, batch_shape) self.sigma1 = jnp.broadcast_to(sigma1, batch_shape) self.sigma2 = jnp.broadcast_to(sigma2, batch_shape) super(DoubleBanana, self).__init__(batch_shape=batch_shape, validate_args=validate_args) def log_prob(self, value): fx = jnp.log((1 - value[..., 0]) ** 2.0 + 100 * (value[..., 1] - value[..., 0] ** 2.0) ** 2.0) return - jnp.sqrt(value[..., 0] ** 2.0 + value[..., 1] ** 2.0) ** 2.0 / (2.0 * self.sigma1) -\ (self.y - fx) ** 2.0 / (2.0 * self.sigma2) def sample(self, key, sample_shape=()): xs = jnp.array(np.linspace(-1.5, 1.5, num=100)) ys = jnp.array(np.linspace(-1, 2, num=100)) zs = jnp.stack(jnp.meshgrid(xs, ys), axis=-1) logits = jnp.expand_dims(jnp.ravel(self.log_prob(zs)), axis=0) cs = dist.Categorical(logits=logits).sample(key, sample_shape) res = jnp.concatenate(jnp.divmod(cs, zs.shape[0]), axis=-1).astype('float') /\ jnp.array([jnp.max(xs) - jnp.min(xs), jnp.max(ys) - jnp.min(ys)]) + jnp.array([jnp.min(xs), jnp.min(ys)]) return res db_xs = np.linspace(-1.5, 1.5, num=100) db_ys = np.linspace(-1, 2, num=100) db_zs = np.stack(np.meshgrid(db_xs, db_ys), axis=-1) db_lps = np.exp(DoubleBanana().log_prob(db_zs)) plt.imshow(db_lps, origin='lower', interpolation='bicubic', extent=[np.min(db_xs), np.max(db_xs), np.min(db_ys), np.max(db_ys)]) 
plt.show() num_iterations = 100 def model(): numpyro.sample('x', DoubleBanana()) guide = AutoDelta(model, init_strategy=init_with_noise(init_to_value(values={'x': jnp.array([0., 0.])}), noise_scale=3.0)) svgd = Stein(model, guide, Adagrad(step_size=1.0), ELBO(), kernels.RBFKernel(), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(db_lps, origin='lower', interpolation='bicubic', extent=[np.min(db_xs), np.max(db_xs), np.min(db_ys), np.max(db_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(db_xs), np.max(db_xs))) plt.ylim((np.min(db_ys), np.max(db_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=1.0), ELBO(), kernels.PrecondMatrixKernel(kernels.HessianPrecondMatrix(), kernels.RBFKernel(mode='matrix'), precond_mode='const'), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.imshow(db_lps, origin='lower', interpolation='bicubic', extent=[np.min(db_xs), np.max(db_xs), np.min(db_ys), np.max(db_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(db_xs), np.max(db_xs))) plt.ylim((np.min(db_ys), np.max(db_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=.8), ELBO(), kernels.PrecondMatrixKernel(kernels.HessianPrecondMatrix(), kernels.RBFKernel(mode='matrix'), precond_mode='anchor_points'), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.imshow(db_lps, origin='lower', interpolation='bicubic', extent=[np.min(db_xs), np.max(db_xs), np.min(db_ys), np.max(db_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(db_xs), np.max(db_xs))) plt.ylim((np.min(db_ys), np.max(db_ys))) plt.show() # - # # Star Model class 
Star(dist.Distribution): support = constraints.real def __init__(self, mu0=jnp.array([0., 1.5]), cov0=jnp.diag(jnp.array([1e-2, 0.25])), n_comp=5, validate_args=None): batch_shape = lax.broadcast_shapes(jnp.shape(mu0)[:-1], jnp.shape(cov0)[:-2]) mu0 = jnp.broadcast_to(mu0, batch_shape + jnp.shape(mu0)[-1:]) cov0 = jnp.broadcast_to(cov0, batch_shape + jnp.shape(cov0)[-2:]) self.n_comp = n_comp mus = [mu0] covs = [cov0] theta = 2 * jnp.pi / n_comp rot = jnp.array([[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]]) for i in range(n_comp - 1): mui = rot @ mus[-1] covi = rot @ covs[-1] @ rot.transpose() mus.append(mui) covs.append(covi) self.mus = jnp.stack(mus) self.covs = jnp.stack(covs) super(Star, self).__init__(batch_shape=batch_shape, validate_args=validate_args) def log_prob(self, value): lps = [] for i in range(self.n_comp): lps.append(dist.MultivariateNormal(self.mus[i], self.covs[i]).log_prob(value)) return jscipy.special.logsumexp(jnp.stack(lps, axis=0), axis=0) / self.n_comp def sample(self, key, sample_shape=()): zs = dist.Categorical(probs=jnp.array([1/self.n_comp] * self.n_comp)).sample(key, sample_shape) xs = jnp.stack([dist.MultivariateNormal(self.mus[i], self.covs[i]).sample(key, sample_shape) for i in range(self.n_comp)], axis=0) return jnp.take_along_axis(xs, jnp.expand_dims(jnp.expand_dims(zs, axis=-1), axis=-1), axis=0) star_xs = np.linspace(-3, 3, num=100) star_ys = np.linspace(-3, 3, num=100) star_zs = np.stack(np.meshgrid(star_xs, star_ys), axis=-1) star_lps = np.exp(Star().log_prob(star_zs)) plt.imshow(star_lps, origin='lower', interpolation='bicubic', extent=[np.min(star_xs), np.max(star_xs), np.min(star_ys), np.max(star_ys)]) plt.show() num_iterations = 60 def model(): numpyro.sample('x', Star()) guide = AutoDelta(model, init_strategy=init_with_noise(init_to_value(values={'x': np.array([[0., 0.]])}), noise_scale=3.0)) svgd = Stein(model, guide, Adagrad(step_size=1.0), ELBO(), kernels.RBFKernel(), num_particles=50) # + 
tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(star_lps, origin='lower', interpolation='bicubic', extent=[np.min(star_xs), np.max(star_xs), np.min(star_ys), np.max(star_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(star_xs), np.max(star_xs))) plt.ylim((np.min(star_ys), np.max(star_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=0.5), ELBO(), kernels.PrecondMatrixKernel(kernels.HessianPrecondMatrix(), kernels.RBFKernel(mode='matrix'), precond_mode='const'), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(star_lps, origin='lower', interpolation='bicubic', extent=[np.min(star_xs), np.max(star_xs), np.min(star_ys), np.max(star_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(star_xs), np.max(star_xs))) plt.ylim((np.min(star_ys), np.max(star_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=0.8), ELBO(), kernels.PrecondMatrixKernel(kernels.HessianPrecondMatrix(), kernels.RBFKernel(mode='matrix'), precond_mode='anchor_points'), num_particles=50) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(star_lps, origin='lower', interpolation='bicubic', extent=[np.min(star_xs), np.max(star_xs), np.min(star_ys), np.max(star_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(star_xs), np.max(star_xs))) plt.ylim((np.min(star_ys), np.max(star_ys))) plt.show() # - svgd = Stein(model, guide, Adagrad(step_size=0.8), ELBO(), kernels.RBFKernel(), num_particles=50, num_mcmc_particles=5, sp_mode='global', num_mcmc_updates=100) # + tags=[] svgd_state, loss = svgd.train(rng_key, num_iterations, callbacks=[Progbar()]) res = 
svgd.get_params(svgd_state)['auto_x'] plt.clf() plt.imshow(star_lps, origin='lower', interpolation='bicubic', extent=[np.min(star_xs), np.max(star_xs), np.min(star_ys), np.max(star_ys)]) plt.scatter(res[..., 0], res[..., 1], c='orange', marker='x') plt.xlim((np.min(star_xs), np.max(star_xs))) plt.ylim((np.min(star_ys), np.max(star_ys))) plt.show() # -
examples/stein_vi/matrix_toy_examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
import numpy as np
import os
import glob
import xarray as xr
import utils
from LoopTimer import LoopTimer
import met_utils as mu
from importlib import reload
import pickle
from scipy.stats import linregress
import inversion_heights as inv
import datetime as dt
from subprocess import check_call
import CSET_data_classes as cd
from geopy import distance
from lagrangian_case import TrajectoryCase, AircraftCase, CombinedCase, all_cases
import unified_traj_data

# %load_ext autoreload
# %autoreload 2
# -


# +
def do_the_whole_damn_case(case_num):
    """Create the forcing output directory for one Lagrangian case and build
    the per-trajectory netCDF file paths.

    Parameters
    ----------
    case_num : int
        Index into ``all_cases`` identifying the case; used both to name the
        save directory (``case_LXX``) and to look up flight/trajectory
        metadata from the case's ``TLC_name``.
    """
    savedir = r'/home/disk/eos4/jkcm/Data/CSET/model_forcings/case_L{:02d}'.format(case_num)
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    case = all_cases[case_num]
    # TLC_name encodes the flight id and a dash-separated list of trajectory
    # numbers, e.g. "..._rf06_1-2-3" -- TODO confirm exact format upstream.
    flight = case['TLC_name'].split("_")[1][:4].lower()
    traj_list = case['TLC_name'].split('_')[2].split('-')
    for dirn in ['forward', 'backward']:
        # BUG FIX: the original referenced `traj` here without ever defining
        # it, so calling this function raised NameError. Iterate the case's
        # trajectory list instead.
        for traj in traj_list:
            # NOTE(review): the filename hard-codes "backward" and ignores
            # `dirn`; presumably the direction should be substituted in --
            # confirm against the trajectory files on disk.
            nc_name = os.path.join(utils.trajectory_netcdf_dir,
                                   "{}_48h_backward_{}.nc".format(flight, traj))
# -

do_the_whole_damn_case(4)

# +
# Scratch cell: regenerate the 48-hour backward trajectory files for each
# trajectory of the current case.
# NOTE(review): relies on `traj_list`, `force_override`, and
# `make_trajectory` being defined in the interactive session -- none of them
# exist at module scope here; verify before running non-interactively.
for traj in traj_list:
    # name = os.path.join(utils.trajectory_netcdf_dir, "{}_all_traj_{}.nc".format(flight, traj))
    name = os.path.join(utils.trajectory_netcdf_dir, "{}_48h_backward_{}.nc".format(flight, traj))
    print("working on {}...".format(os.path.basename(name)))
    if os.path.exists(name):
        print("already exists!")
        if not force_override:
            continue
        else:
            print('overriding')
            # os.rename(name, os.path.join(utils.trajectory_netcdf_dir, 'old', "{}_all_traj_{}.nc".format(flight, traj)))
            os.rename(name, os.path.join(utils.trajectory_netcdf_dir, 'old',
                                         "{}_48h_backward_{}.nc".format(flight, traj)))
    # ds = make_trajectory(rfnum=flight, trajnum=float(traj), save=name);
    ds = make_trajectory(rfnum=flight, trajnum=float(traj), save=name, trajectory_type='500m_-48');
# -

# Planned pipeline (not yet implemented):
# load a case
# locate all profiles (including overflow)
# locate all trajectories
# get forward ERA/GOES
# get back ERA
# make a folder
# save:
#   profiles
#   map
#   fwd trajectories with ERA
#   back trajectories with ERA
make_model_forcings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt

# Simulate 500 independent 100-step random walks over building floors,
# driven by a six-sided die:
#   1-2 -> one floor down (never below the ground floor)
#   3-5 -> one floor up
#   6   -> up by a fresh die roll (1-6)
# After every step there is a 0.1% chance of falling back to the ground floor.
partidas = []
for jogo in range(500):
    andar_atual = 0  # np.random.randint(0, 100)
    andares = [andar_atual]
    for passo in range(100):
        dado = np.random.randint(1, 7)
        if dado <= 2:
            # 1 or 2: step down, clamped at the ground floor
            if andar_atual > 0:
                andar_atual -= 1
        elif dado <= 5:
            # 3, 4 or 5: step up one floor
            andar_atual += 1
        else:
            # 6: jump up by another die roll
            andar_atual += np.random.randint(1, 7)
        # Rare catastrophic reset to the ground floor
        if np.random.rand(1) <= 0.001:
            andar_atual = 0
        andares.append(andar_atual)
    partidas.append(andares)

total = np.array(partidas)
# NOTE(review): `total` has shape (games, steps); matplotlib plots one line
# per column here -- confirm whether `total.T` (one line per game) was meant.
plt.plot(total)
plt.show()
# -
atividadeElevadores.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Task: Explore the datasets and segregate the forwards from other players based on position

# ### Import the libraries

import pandas as pd
import numpy as np
from pandas import DataFrame
from pymongo import MongoClient
import matplotlib.pyplot as plt

# Render every bare expression in a cell, not just the last one
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

# ### Set up the MongoDB connection in Python

client = MongoClient('localhost', 27017)
db = client['Wyscout']

# ### Select only forwards from player_advance_stats and write to a DataFrame

# +
# Position codes that count as "forward"
forward_codes = ["lwf", "rwf", "rw", "lw", "ss", "cf"]

forward_details = db.player_advance_stats.aggregate([
    {'$unwind': "$positions"},
    {'$project': {"_id": 0, "playerId": 1, "competitionId": 1, "seasonId": 1,
                  "positions.position.code": 1, "positions.position.name": 1}},
    # Keep a record if any of its positions matches a forward code
    {'$match': {'$or': [{"positions.position.code": code} for code in forward_codes]}},
])
forward_details_df = pd.DataFrame(forward_details)
# -

forward_details_df.head(5)

# ### Keep only the required columns

forward_details_df1 = forward_details_df[['playerId', 'competitionId', 'seasonId']]
len(forward_details_df1)

# ### Remove duplicate records and compare the record counts

forward_details_df1 = forward_details_df1.drop_duplicates()
len(forward_details_df1)

# #### Merging raised "trying to merge on object and int64 columns"; casting the
# #### relevant columns to 'int32' did not help, so round-trip through CSV instead.
# #### Reference: https://stackoverflow.com/questions/50649853/trying-to-merge-2-dataframes-but-get-valueerror

forward_details_df1.to_csv('forward_details_df1.csv')
forward_details_df2 = pd.read_csv('forward_details_df1.csv', index_col=0)

# +
# forward_details_df1.playerId.astype(int)
# -

# #### Read 'matches_events' into a DataFrame and, as above, round-trip it through CSV

event_details = db.matches_events_scaling_v02.find({}, {"_id": 0})
event_details_df = pd.DataFrame(event_details)

event_details_df.head(5)

event_details_df.to_csv('event_details_df.csv')
event_details_df = pd.read_csv('event_details_df.csv', index_col=0)

event_details_df.head(2)

# ### Merge both DataFrames on 'playerId': final result

ForwardEventDetails = forward_details_df2.merge(event_details_df, on='playerId', how='left')

pd.set_option('display.max_columns', 24)  # all the 24 columns will be displayed

ForwardEventDetails.head(3)

ForwardEventDetails.isnull().values.any()  # There are null values

ForwardEventDetails.isnull().sum().sum()

ForwardEventDetails = ForwardEventDetails.replace(np.nan, 0)  # replace NaN with 0

ForwardEventDetails.head(5)
Filtering_playersByPosition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd import matplotlib.pyplot as plt import numpy as np table = pd.read_csv('UP.csv', index_col='Sl. No.') table.columns = ['year', 'area', 'population', 'crops_area', 'crops_value', 'houses', 'houses_value', 'cattle','life_lost', 'public_utilities', 'total_damage'] table t_table = pd.DataFrame(table.loc[1.0:64.0]) for column in t_table: t_table[column] = pd.to_numeric(t_table[column], errors='coerce') t_table = t_table.replace(np.nan, 0.0, regex=True) t_table years = t_table['year'].values # # Start Plotting # ## All Data Visualization # %matplotlib inline plt.rcParams.update({'font.size': 16}) # + fig1 = plt.figure(figsize=(20,15)) fig1.suptitle('Uttar Pradesh: Complete Data Visualization', fontsize = 24) ax1 = fig1.add_subplot(2, 2, 1) ax2 = fig1.add_subplot(2, 2, 2) ax3 = fig1.add_subplot(2, 2, 3) ax4 = fig1.add_subplot(2, 2, 4) ax1.set_title('Areas Affected') ax1.set_xlabel('Year') ax1.set_ylabel('Area (m. 
ha.)') ax1.plot(years, t_table['area'].values, label="Total Land Area", c='green'); ax1.plot(years, t_table['crops_area'].values, label="Crop Area", c='red'); ax1.legend(loc='upper left'); ax2.set_title('Monetary Damage') ax2.set_xlabel('Year') ax2.set_ylabel('Damage Value (Crores)') ax2.plot(years, t_table['crops_value'].values, linestyle="dotted", label="Crop Damage", c='green'); ax2.plot(years, t_table['houses_value'].values, linestyle="dotted", label="Houses Damage", c='red'); ax2.plot(years, t_table['public_utilities'].values, linestyle="dotted", label="Public Utilities Damage", c='blue'); ax2.plot(years, t_table['total_damage'].values, label="Total Damage", c='black', linewidth=1); ax2.legend(loc='upper left'); ax3.set_title('Population Affected') ax3.set_xlabel('Year') ax3.set_ylabel('Population (Scale in Legend)') ax3.plot(years, t_table['population'].values, label="Population Affected (millions)", c='green'); ax3.plot(years, t_table['life_lost'].div(100).values, label="Human Life Lost (hundreds)", c='red'); ax3.legend(loc='upper left'); ax4.set_title('Houses & Cattle Damage') ax4.set_xlabel('Year') ax4.set_ylabel('Number (Scale in Legend)') ax4.plot(years, t_table['houses'].div(1000).values, label="No. of Houses Damaged (Thousands)", c='green'); ax4.plot(years, t_table['cattle'].div(10).values, label="No. 
of Cattle Lost (Tens)", c='red'); ax4.legend(loc='upper left'); plt.savefig("UP_1.png") plt.show() # - # ## Monetary Damage Analysis # + monetary_labels = 'crops', 'houses', 'public utilities' total_damages = table[table['year'] == 'Total'].iloc[0] total_monetary_damages = [float(total_damages['crops_value']), float(total_damages['houses_value']), float(total_damages['public_utilities'])] average_damages = table[table['year'] == 'Average'].iloc[0] average_monetary_damages = [float(average_damages['crops_value']), float(average_damages['houses_value']), float(average_damages['public_utilities'])] # + fig2 = plt.figure(figsize=(15,10)) fig2.suptitle('Uttar Pradesh: Monetary Damage Analysis', fontsize = 24) explode = (0.01, 0.01, 0.01) ax1_2 = fig2.add_subplot(2, 3, 1) ax2_2 = fig2.add_subplot(2, 3, 3) ax1_2.set_title('Total Monetary Damage') ax1_2.pie(total_monetary_damages, explode=explode, labels = monetary_labels, autopct='%1.1f%%', textprops={'fontsize': 16}, startangle=90); ax1_2.axis('equal'); ax2_2.set_title('Average Monetary Damage') ax2_2.pie(average_monetary_damages, explode=explode, labels = monetary_labels, autopct='%1.1f%%', textprops={'fontsize': 16}, startangle=90); ax2_2.axis('equal'); plt.savefig("UP_2.png") plt.show() # - # ## Area Damage Trends # + area_affected = t_table['area'].values changes = [] for i in range(1, area_affected.size): changes.append(area_affected[i] - area_affected[i-1]) # + crop_area = t_table['crops_area'].values percentages = [] for i in range(0, crop_area.size): if(area_affected[i] == 0.0): percentages.append(0) else: perc = round((crop_area[i]/area_affected[i])*100, 2) if(perc > 100): perc = 100 percentages.append(perc) # + fig3 = plt.figure(figsize=(20,10)) fig3.suptitle('Uttar Pradesh: Area Damage Trends', fontsize = 24) ax1_3 = fig3.add_subplot(2, 2, 1) ax2_3 = fig3.add_subplot(2, 2, 2) ax1_3.set_title('Change in Area Affected Per Year') ax1_3.set_xlabel('Year') ax1_3.set_ylabel('Change in Area (m. 
ha.)') ax1_3.plot(years[1:], changes, label="Total Land Area", c='green'); ax2_3.set_title('Crop Damage Area as % of Total Area Affected') ax2_3.set_xlabel('Year') ax2_3.set_ylabel('Percentage (%)') ax2_3.plot(years, percentages, label="Total Land Area", c='green'); plt.savefig("UP_3.png") plt.show()
UP/UP_1953_2016.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# URL: http://bokeh.pydata.org/en/latest/docs/gallery/step_chart.html
#
# Most examples work across multiple plotting backends; this example is also
# available for:
#
# * [Bokeh - step_chart example](../bokeh/step_chart.ipynb)

# +
import holoviews as hv
from holoviews import opts

hv.extension('matplotlib')
# -

# ## Declare data

# +
# Two series measuring the same quantity: US postage rates per ounce, 1999-2015
group = "U.S. Postage Rates (1999-2015)"
stamp_rates = [0.33, 0.33, 0.34, 0.37, 0.37, 0.37, 0.37, 0.39, 0.41,
               0.42, 0.44, 0.44, 0.44, 0.45, 0.46, 0.49, 0.49]
postcard_rates = [0.20, 0.20, 0.21, 0.23, 0.23, 0.23, 0.23, 0.24, 0.26,
                  0.27, 0.28, 0.28, 0.29, 0.32, 0.33, 0.34, 0.35]

stamp = hv.Curve(stamp_rates, vdims='Rate per ounce', label='stamp', group=group)
postcard = hv.Curve(postcard_rates, vdims='Rate per ounce', label='postcard', group=group)
postage = stamp * postcard
# -

# ## Plot

postage.opts(
    opts.Curve(interpolation='steps-mid', linestyle=hv.Cycle(values=['--', '-'])),
    opts.Overlay(legend_position='top_left'))
examples/gallery/demos/matplotlib/step_chart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import matplotlib.pyplot as plt df = pd.read_csv('out.csv') df_y = df.truncate(after = 999) df_ = df.drop(columns=['Day1_Hs', 'Day1_D','Day2_Hs', 'Day2_D','Day3_Hs', 'Day3_D']) df_train = df_.truncate(after = 999) # df_train # df_train.head() X_train = df_train.to_numpy()[:-200] X_test = df_train.to_numpy()[-200:] print(len(df_train.to_numpy())) print(len(X_train)) print(len(X_test)) X_test[0] # + # Data-Labeling Procedure def sleep_score(a): if a < 8: return -1 if a == 8: return 1 if a == 9: return 2 if a == 10: return 1 if a > 10: return -1 def delay_score(a): if a > 2: return -2 if a == 2: return -1 if a == 1 or a == 0 or a == -1 : return 0 if a == -2: return -1 if a < -2: return -2 sc_weight = 2 ev_weight = 1 sl_weight = 3 social_r = -0.05 edu_r = 0.22 game_r = -0.2 max_score = 6 min_score = -9 df_label = pd.DataFrame(columns = ["Day1_Hs","Day1_D","Day2_Hs","Day2_D","Day3_Hs","Day3_D", "Score"] ,index=[0]) for idx, row in df_y.iterrows(): # print(row['Sc_Game']) # print(period_score(row['Sc_Game'])) score = [] score.append(sleep_score(row['Day1_Hs'])) score.append(delay_score(row['Day1_D'])) score.append(sleep_score(row['Day2_Hs'])) score.append(delay_score(row['Day2_D'])) score.append(sleep_score(row['Day3_Hs'])) score.append(delay_score(row['Day3_D'])) wellbeing_score = ((sum(score) - min_score)*10)/(max_score - min_score) score.append(wellbeing_score) df_label.loc[df_label.index.max()+1] = score df_label = df_label.drop([0]) # Y = df_label['Score'].to_numpy() Y_train = df_label['Score'].to_numpy()[:-200] Y_test = df_label['Score'].to_numpy()[-200:] print(len(df_label['Score'].to_numpy())) print(len(Y_train)) print(len(Y_test)) Y_test # - x = df_label['Score'] x.plot.hist(grid=True, bins=7, 
rwidth=0.9, color='#607c8e') plt.title('Histogram of Score') plt.xlabel('Score') plt.ylabel('Num of records') plt.grid(axis='y', alpha=0.75) # + import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation import matplotlib.pyplot as plt from tensorflow.keras.callbacks import EarlyStopping # %matplotlib inline from tensorflow.keras import backend as K def r_square(y_true, y_pred): SS_res = K.sum(K.square( y_true-y_pred )) SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) ) return ( 1 - SS_res/(SS_tot + K.epsilon()) ) model = tf.keras.Sequential() model.add(Dense(9, input_shape=(9,), activation = tf.math.square)) model.add(Dense(1)) model.compile(loss='mean_squared_error', metrics=['mean_squared_error',tf.keras.metrics.RootMeanSquaredError(name='rmse'),'mean_absolute_error', r_square], optimizer='adam') earlystopping=EarlyStopping(monitor="mean_squared_error", patience=40, verbose=1, mode='auto') # print(model.summary()) # optimizer = tf.keras.optimizers.RMSprop(0.0099) # model.fit(X,Y,epochs=1000, batch_size=200) history = model.fit(X_train, Y_train, validation_split= 0.2, epochs=200, batch_size=20, callbacks=[earlystopping] ) # + plt.plot(history.history['rmse']) plt.plot(history.history['val_rmse']) plt.title('Root mean square error') plt.ylabel('RMSE') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() # + plt.plot(history.history['mean_absolute_error']) plt.plot(history.history['val_mean_absolute_error']) plt.title('mean_absolute_error') plt.ylabel('mean_absolute_error') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() # - plt.plot(history.history['r_square']) plt.title('Model $R^2$') plt.ylabel('$R^2$') plt.xlabel('epoch') plt.legend(['val'], loc='upper left') plt.show() print("Evaluate on test data") results = model.evaluate(X_test, Y_test, batch_size=10) print("test loss, test acc:", results) 
i = 999 j = 0 while i < len(df.index): j += 1 _df_train = df_.truncate(before = i+1, after = i+100) i += 100 _X = _df_train.to_numpy() _Y = model.predict(_X) X_train = np.concatenate((X_train, _X), axis=0) Y_train = np.append(Y_train,_Y) model.fit(X_train,Y_train, epochs=200, batch_size=20, callbacks=[earlystopping]) if j%3 == 0: print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@") results = model.evaluate(X_test, Y_test, batch_size=20) print(results) print("Evaluate on test data") results = model.evaluate(X_test, Y_test, batch_size=10) print("test loss, test acc:", results) model.get_weights() a = np.array([[-2.36087590e-02, -5.20601034e-01, 3.79112303e-01, 6.85166866e-02, -2.57370751e-02, -1.94074169e-01, -8.70853197e-03, 3.57228220e-01, 3.50797363e-02], [ 4.46248800e-02, 1.11133903e-01, -5.28463066e-01, -7.41788000e-02, 7.61075467e-02, -3.07507068e-01, -2.08253805e-02, 1.19817443e-01, -7.43552744e-02], [-2.23592725e-02, -2.75830179e-01, -2.02181607e-01, -3.18101980e-02, -4.65674043e-01, -1.54785335e-01, -7.97070004e-03, -7.04302788e-01, -3.54508519e-01], [-2.07305329e-05, -6.59517109e-01, -1.88920185e-01, 1.94061212e-02, 2.11506918e-01, -4.03130263e-01, 6.62487606e-03, 3.16875786e-01, 5.79200447e-01], [-2.57243309e-02, 3.01611751e-01, 4.58977133e-01, 2.39977330e-01, -1.31074879e-02, -4.59236115e-01, -1.35173211e-02, -7.41272330e-01, 4.29468714e-02], [-5.93085289e-02, 3.11634302e-01, 2.74691582e-01, 4.55129683e-01, -3.57747078e-01, -3.33878756e-01, -9.00138356e-03, -5.25740862e-01, 3.66750538e-01], [ 1.16910450e-02, -9.27691981e-02, 2.00206220e-01, -2.68541705e-02, 1.44670844e-01, -2.10818231e-01, -2.22847168e-03, -6.71090186e-02, -1.27368644e-01], [ 8.93315766e-03, -3.26891579e-02, -1.48632705e-01, -1.29274294e-01, 3.26632857e-01, -1.01819344e-01, 1.71716306e-02, 
-1.72078133e-01, -1.70253590e-01], [-2.37425696e-02, 2.90112440e-02, 7.23632649e-02, 4.08127874e-01, -1.93478480e-01, -2.93932587e-01, 2.12160610e-02, 1.25051379e-01, -4.96561453e-02]]) a.T
TrainModel/.ipynb_checkpoints/train-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd csv = 'driving_log.csv' data = pd.read_csv(csv) # - # ## Loss # + import numpy as np history_pilot = np.load('train-pilot/history_object.npy').item() history_ours = np.load('train-ours/history_object.npy').item() # - # 平滑曲线 def smooth(points, smoothing=0.8): last = points[0] smoothed = [] for point in points: smoothed_val = last * smoothing + (1 - smoothing) * point smoothed.append(smoothed_val) last = smoothed_val return smoothed # + import matplotlib.pyplot as plt # %matplotlib inline plt.plot(smooth(history_pilot['loss'])) plt.plot(smooth(history_ours['loss'])) plt.title('mse') plt.ylabel('mean squared error loss') plt.xlabel('epoch') plt.legend(['training loss(pilotNet)', 'training loss(ours)'], loc='upper right') plt.ylim([0, 0.1]) plt.show() # + import matplotlib.pyplot as plt # %matplotlib inline plt.plot(smooth(history_pilot['val_loss'])) plt.plot(smooth(history_ours['val_loss'])) plt.title('mse') plt.ylabel('mean squared error loss') plt.xlabel('epoch') plt.legend(['validation loss(pilotNet)', 'validation loss(ours)'], loc='upper right') plt.ylim([0, 0.1]) plt.show() # - # ## 评估 # # 1. 实际与预测偏差不超过0.05 # 2. 实际与预测偏差不超过0.10 # 3. 实际与预测偏差不超过0.15 # 4. 实际与预测偏差不超过0.20 # 5. 
实际与预测偏差不超过0.25 # + from keras.models import load_model model_pilot = load_model('train-pilot/model/model.h5') model_ours = load_model('train-ours/model/model.h5') # + import numpy as np import cv2 X_test = [] y_test = [] for index in range(len(data)): image = data['center'][index].strip() image = cv2.imread(image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) steer = data['steering'][index] X_test.append(image) y_test.append(steer) # - len(X_test) num1 = [] num2 = [] num3 = [] num4 = [] num5 = [] y_preds = [] # ## train-pilotNet # + n1 = 0 n2 = 0 n3 = 0 n4 = 0 n5 = 0 y_pred = [] for X, y in zip(X_test, y_test): pred = model_pilot.predict(np.expand_dims(X, axis=0))[0][0] y_pred.append(pred) if np.abs(y - pred) <= 0.05: n1 += 1 if np.abs(y - pred) <= 0.10: n2 += 1 if np.abs(y - pred) <= 0.15: n3 += 1 if np.abs(y - pred) <= 0.20: n4 += 1 if np.abs(y - pred) <= 0.25: n5 += 1 num1.append(n1 / len(X_test)) num2.append(n2 / len(X_test)) num3.append(n3 / len(X_test)) num4.append(n4 / len(X_test)) num5.append(n5 / len(X_test)) y_preds.append(y_pred) # - print('0.05: %f' % num1[-1]) print('0.10: %f' % num2[-1]) print('0.15: %f' % num3[-1]) print('0.20: %f' % num4[-1]) print('0.25: %f' % num5[-1]) # ## train-ours # + n1 = 0 n2 = 0 n3 = 0 n4 = 0 n5 = 0 y_pred = [] for X, y in zip(X_test, y_test): pred = model_ours.predict(np.expand_dims(X, axis=0))[0][0] y_pred.append(pred) if np.abs(y - pred) <= 0.05: n1 += 1 if np.abs(y - pred) <= 0.10: n2 += 1 if np.abs(y - pred) <= 0.15: n3 += 1 if np.abs(y - pred) <= 0.20: n4 += 1 if np.abs(y - pred) <= 0.25: n5 += 1 num1.append(n1 / len(X_test)) num2.append(n2 / len(X_test)) num3.append(n3 / len(X_test)) num4.append(n4 / len(X_test)) num5.append(n5 / len(X_test)) y_preds.append(y_pred) # - print('0.05: %f' % num1[-1]) print('0.10: %f' % num2[-1]) print('0.15: %f' % num3[-1]) print('0.20: %f' % num4[-1]) print('0.25: %f' % num5[-1]) # ## Evaluate # + plt.figure(figsize=(16, 4)) plt.subplot(1, 3, 1) plt.title('origin') plt.xlabel('steer') 
plt.ylabel('num') plt.hist(np.array(y_test), bins=50, range=(-1, 1)) plt.subplot(1, 3, 2) plt.title('train-pilot') plt.xlabel('steer') plt.ylabel('num') plt.hist(np.array(y_preds[0]), bins=50, range=(-1, 1)) plt.subplot(1, 3, 3) plt.title('train-ours') plt.xlabel('steer') plt.ylabel('num') plt.hist(np.array(y_preds[1]), bins=50, range=(-1, 1)) plt.show() # + name_list = ['0.05 (9°)', '0.10 (18°)', '0.15 (27°)', '0.20 (36°)', '0.25 (45°)'] list0 = [num1[0], num2[0], num3[0], num4[0], num5[0]] list1 = [num1[1], num2[1], num3[1], num4[1], num5[1]] # 设置柱状图 bar_width = 0.1 index = np.arange(len(name_list)) bar0 = plt.bar(index + bar_width * 0, list0, bar_width, color='g', label='pilot') bar1 = plt.bar(index + bar_width * 1, list1, bar_width, color='r', label='ours') plt.xlabel('steer') plt.ylabel('acc') plt.xticks(index + bar_width, name_list) plt.ylim(0, 1) plt.legend() plt.tight_layout() plt.show()
evaluate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns

data = pd.read_csv("fifa20.csv")
data

data.groupby("body_type").count()

data.describe()

sns.set_style("whitegrid")

# ### Inspect correlations between the numeric columns

numeric_types = ['float64', 'int64']
corr = data.select_dtypes(include=numeric_types).iloc[:, :].corr()
plt.figure(figsize=(10, 10))
ax = sns.heatmap(corr, vmax=1, square=True)
# ax.set_xticklabels(rotation=30)
plt.xticks(rotation=45)
plt.yticks(rotation=45)

corr

# Slice out a few columns whose correlations are of particular interest
data2 = pd.DataFrame(data, columns=['value_eur', 'age', 'wage_eur', 'overall', 'potential'])
corr2 = data2.select_dtypes(include=numeric_types).iloc[:, :].corr()
corr2

# ## Slicing the data for the downstream tasks

# <h4>Data for <b>Body Type</b> <i>classification</i> from <b>height</b> and <b>weight</b></h4>

csv_clas = pd.DataFrame(data, columns=['height_cm', 'weight_kg', 'body_type'])
csv_clas

# ##### Save the classification dataset

csv_clas.to_csv("Classification/classification-datasets.csv", index=False)

# <h4>Data for <i>clustering</i> on the <b>overall</b> and <b>potential</b> ratings</h4>

csv_clus = pd.DataFrame(data, columns=['overall', 'potential'])
csv_clus

# ##### Save the clustering dataset

csv_clus.to_csv("Clustering/clustering-datasets.csv", index=False)

csv_clus2 = pd.DataFrame(data, columns=['overall', 'value_eur'])
csv_clus2

csv_clus2.to_csv("Clustering/clustering-datasets2.csv", index=False)

# <b>After all data is exported to csv, <span style="color : red">move the dataset to every task folder</span></b>
.ipynb_checkpoints/Data Preparation-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: weixiang # language: python # name: weixiang # --- import pandas as pd import pickle import numpy as np from matplotlib import pyplot as plt # # Load Data # + # Bit = 8 # PPO with open('./Test Reward Plot/test_rewards_ppo_bitflipping8', 'rb') as fp1: test_rewards8 = pickle.load(fp1) with open('./Loss Plot/mean_actor_loss_ppo_bitflipping8', 'rb') as fp2: mean_actor_loss8 = pickle.load(fp2) with open('./Loss Plot/mean_critic_loss_ppo_bitflipping8', 'rb') as fp3: mean_critic_loss8 = pickle.load(fp3) #HPPO with open('./Test Reward Plot/test_rewards_hppo_bitflipping8', 'rb') as fp1: test_rewards_hppo8 = pickle.load(fp1) with open('./Loss Plot/mean_actor_loss_hppo_bitflipping8', 'rb') as fp2: mean_actor_loss_hppo8 = pickle.load(fp2) with open('./Loss Plot/mean_critic_loss_hppo_bitflipping8', 'rb') as fp3: mean_critic_loss_hppo8 = pickle.load(fp3) # + # ppo reward_plot = np.array(test_rewards8) rewarddata = pd.Series(reward_plot) plt.figure(figsize=(12,4)) rewarddata.plot(alpha=0.1,color='b',label='reward') rewarddata.rolling(window=1).mean().plot(style='r',alpha=.9,label='mean_reward') plt.ylim(-8,-3) plt.legend(loc='lower right') plt.grid() plt.title('Mean reward over episodes with window 1 (PPO)') # plt.savefig('meanrewardbitflipping8') # hppo reward_plot_hppo = np.array(test_rewards_hppo8[:28000]) rewarddatahppo = pd.Series(reward_plot_hppo) plt.figure(figsize=(12,4)) rewarddatahppo.plot(alpha=0.1,color='b',label='reward') rewarddatahppo.rolling(window=1).mean().plot(style='b',alpha=.9,label='mean_reward') plt.legend(loc='lower right') plt.ylim(-8,-3) plt.grid() plt.title('Mean reward over episodes with window 1 (HPPO)') # plt.savefig('meanrewardbitflipping8') # + print (reward_plot) reward_plot = reward_plot[:28000] reward_plot_hppo = reward_plot_hppo[:28000] reward_plot[reward_plot == -8.0] = 
np.nan reward_plot_hppo[reward_plot_hppo == -8.0] = np.nan # unique1, counts1 = np.unique(reward_plot, return_counts=True) # unique2, counts2 = np.unique(reward_plot_hppo, return_counts=True) # print ('counts1: ', counts1) # print ('counts2: ', counts2) # dict1 = dict(zip(unique1, counts1)) # dict2 = dict(zip(unique2, counts2)) # min_number = min(dict1[-8.0], dict2[-8.0]) # print (min_number) # index1 = np.where(reward_plot != -8.0)[0] # index2 = np.where(reward_plot_hppo != -8.0)[0] # print (index1,index2) # # new_reward_plot_hppo = # # print (len(new_reward_plot), len(new_reward_plot_hppo)) plt.figure(figsize=(12,4)) plt.scatter(range(28000),reward_plot,marker = '.') plt.scatter(range(28000),reward_plot_hppo, marker = '.') # - import scipy.io scipy.io.savemat('test_reward_bitflipping_ppo_full.mat', mdict={'returns': reward_plot}) scipy.io.savemat('test_reward_bitflipping_hppo_full.mat', mdict={'returns': reward_plot_hppo}) # Bit = 4 with open('./Test Reward Plot/test_rewards_ppo_bitflipping4', 'rb') as fp1: test_rewards4 = pickle.load(fp1) with open('./Loss Plot/mean_actor_loss_ppo_bitflipping4', 'rb') as fp2: mean_actor_loss4 = pickle.load(fp2) with open('./Loss Plot/mean_critic_loss_ppo_bitflipping4', 'rb') as fp3: mean_critic_loss4 = pickle.load(fp3) # + reward_plot = np.array(test_rewards4) rewarddata = pd.Series(reward_plot) plt.figure(figsize=(12,4)) rewarddata.plot(alpha=0.1,color='b',label='reward') rewarddata.rolling(window=50).mean().plot(style='b',alpha=.9,label='mean_reward') plt.legend(loc='lower right') plt.grid() plt.title('Mean reward over episodes with window 50') # plt.savefig('meanrewardbitflipping8') # -
BitFlipping/Plot Analysis BitFlipping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Softmax 回归的简洁实现 # # 我们在[“线性回归的简洁实现”](linear-regression-gluon.md)一节中已经了解了使用 Gluon 实现模型的便利。下面,让我们再次使用 Gluon 来实现一个 softmax 回归模型。首先导入本节实现所需的包或模块。 # + attributes={"classes": [], "id": "", "n": "1"} # %matplotlib inline import d2lzh as d2l from mxnet import gluon, init from mxnet.gluon import loss as gloss, nn # - # ## 获取和读取数据 # # 我们仍然使用 Fashion-MNIST 数据集和上一节中设置的批量大小。 # + attributes={"classes": [], "id": "", "n": "2"} batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) # - # ## 定义和初始化模型 # # 在[“Softmax 回归”](softmax-regression.md)一节中,我们提到 softmax 回归的输出层是一个全连接层。因此,我们添加一个输出个数为 10 的全连接层。我们使用均值为 0 标准差为 0.01 的正态分布随机初始化模型的权重参数。 # + attributes={"classes": [], "id": "", "n": "3"} net = nn.Sequential() net.add(nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) # - # ## Softmax 和交叉熵损失函数 # # 如果你做了上一节的练习,那么你可能意识到了分开定义 softmax 运算和交叉熵损失函数可能会造成数值不稳定。因此,Gluon 提供了一个包括 softmax 运算和交叉熵损失计算的函数。它的数值稳定性更好。 # + attributes={"classes": [], "id": "", "n": "4"} loss = gloss.SoftmaxCrossEntropyLoss() # - # ## 定义优化算法 # # 我们使用学习率为 0.1 的小批量随机梯度下降作为优化算法。 # + attributes={"classes": [], "id": "", "n": "5"} trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) # - # ## 训练模型 # # 接下来,我们使用上一节中定义的训练函数来训练模型。 # + attributes={"classes": [], "id": "", "n": "6"} num_epochs = 5 d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, trainer) # - # ## 小结 # # * Gluon 提供的函数往往具有更好的数值稳定性。 # * 我们可以使用 Gluon 更简洁地实现 softmax 回归。 # # ## 练习 # # * 尝试调一调超参数,例如批量大小、迭代周期和学习率,看看结果会怎样。 # # ## 扫码直达[讨论区](https://discuss.gluon.ai/t/topic/740) # # ![](../img/qr_softmax-regression-gluon.svg)
chapter_deep-learning-basics/softmax-regression-gluon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (rtvc) # language: python # name: rtvc # --- # + from feedback_synthesizer.inference import Synthesizer from IPython.display import Audio import tensorflow as tf import os import numpy as np from pathlib import Path import librosa import random from tqdm import tqdm from vocoder.inference import load_model, infer_waveform os.environ['CUDA_VISIBLE_DEVICES']='1' # - wavrnn = load_model('vocoder/saved_models/vctk/vctk.pt') # + Synthesizer.hparams.tacotron_num_gpus = 1 Synthesizer.hparams.postnet_num_layers = 5 Synthesizer.hparams.outputs_per_step=2 checkpoints_dir = 'feedback_synthesizer/saved_models/logs-vctk/taco_pretrained/' checkpoints_dir = Path(checkpoints_dir) synthesizer=Synthesizer(checkpoints_dir,low_mem=False) # + texts=['There were many editions of these works still being used in the nineteenth century.'] # validation set: p260_006 p260_004 # test set: p340_006 p231_010 name = 'p340_006' embed=np.load('datasets/vctk/synthesizer/embeds/embed-' + name + '.npy') embeds = np.stack([embed] * len(texts)) specs = synthesizer.synthesize_spectrograms(texts, embeds) breaks = [spec.shape[1] for spec in specs] spec = np.concatenate(specs, axis=1) assert spec is not None wav = Synthesizer.griffin_lim(spec) # - b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size) b_starts = np.concatenate(([0], b_ends[:-1])) wavs = [wav[start:end] for start, end, in zip(b_starts, b_ends)] breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks) wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)]) wav = wav / np.abs(wav).max() * 0.97 Audio(wav,rate=Synthesizer.sample_rate) neural_wav = infer_waveform(spec) Audio(wav,rate=Synthesizer.sample_rate) ori_wav = np.load('datasets/vctk/synthesizer/audio/audio-' + name + '.npy') Audio(ori_wav, rate=16000)
feedback_syn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # **Technical Note**: # - Install <code>deap</code> in ALMA cluster without root user<br> # <code>pip install --user deap</code><br> # Then it is automatically included in CASA environment<br> # # # ## First Run # # I use parameters as follow: # # Function setting # - Objective function is to minimize the RMS # - using 10 MS from B6 # - visweight of the first MS set to 1, the other (9) as free parameter that we want to find. We can try to set all of the visweight as free parameter. # - visweight is generated as uniform random variable from 0 to 4 # - CLEAN parameter: # - niter = 500 # - threshold = "0.035mJy" # - psfmode = 'clark' # - interactive = False # - mask = "circle[[65.8158364deg, -1.3425182deg], 5arcsec]" # - imsize = 1250 # - cell = "0.04arcsec" # - phasecenter = "J2000 04:23:15.800730 -01.20.33.065501" # - weighting = 'briggs' # - robust = 0.5 # - pbcor = False # - Region to calculate RMS with imstat: # - regionrms = "annulus[[65.8158364deg, -1.3425182deg], [5arcsec, 15arcsec]]" # # GA setting: # - Simple GA steps: selection, crossover, and mutation # - Population size: 10 # - Insert 3 best guess at first generation (weight based on 1/rms^2 and all 1 (no weight scale)) # - GA iteration/generation: 20 # - crossover probability: 0.5 # - probability of individu get a mutation: 0.1 # - DNA mutation probability: 0.8 # - # # Finished < 6 hours # # **Final result**: # # - Best of the best: [1 (fixed), 0.0, 1.071, 1, 1, 0.8, 0.0, 0.08, 0.05, 0.05] # # with fitness (rms) = 3.1720439437776804e-05 # # - total evaluation "concat+clean process" : 164 (if the population always change (mutate) it should be 10 x 20=200) # # # **Comments**: # # 1. 
compare to 1/rms^2 scale: [1.0 (fixed), 0.55, 0.82, 0.34, 0.28, 0.80, 0.37, 0.08, 0.05, 0.05] # # (with rms ~ 3.4uJy after 1000 iterations) # # the final result (best of the best) parameter above is a bit weird: we can achieve a comparable (lower) rms without including the 2nd MS and 7th MS (0.0 in weight)? # # 2. something weird on the 2nd MS (0th in plot below): the weight-value can change from 0 to 3 without changing much of the rms. -> divergent case, # # 3. we can check the data of the 2nd MS, my guess is maybe because it is redundant with the 1st MS that we set to 1, they are from the same EB, 1st is as bandpass calibrator, 2nd as phase calib, or vice versa (I don't remember ;) ). # # 4. From the best result, it seems that CLEAN can achieve the threshold (35uJy) before iter_max is reached (500 iterations). So, for the next testing we should just set the threshold to 0.0, so all CLEANing processes have the same number of iterations (stopped because of iter_max) # # # # # This plot shows statistics of the population as a function of generation, x-axis is generation/iteration, y-axis is fitness (rms), and the statistics shown are minimum (best), average, and maximum. # <img src="stat_fitness.png" alt="stat" style="width: 600px;"/> # --- # # # We can plot the parameters of the "best individual" in the population as a function of generation. # 0 - 8 means weight of the 2nd MS to 10th MS. x-axis is generation/iteration, y-axis is weight of each MS.
# <img src="best_weight_on_iteration.png" alt="stat" style="width: 600px;"/> # # # # # # --- # ## Second Run # # for the second run, we # - remove the threshold -> set it to '0mJy' (so it really stop due to niter) # - change the mutation probability # - increase the population also generations (iteration) # # Parameter: # + population: 20 # + generation: 50 # + indpb: 0.2 # + mutpb: 0.5 # # After running ~ 1 day # # Total evalution of "concat+clean": 734 # # Weird/unexpected result: # # + best weight = [1 (fixed), 0.0, 0.0, 2.6, 0.0, 0.8, 0.0, 0.08, 0.05, 0.05] # # + fitness (rms) = 3.1307943572755903e-05 # # Many 0's ! # # Statistic of population as function of generation # <img src="stat2.png" alt="stat" style="width: 600px;"/> # # --- # # --- # # Best weight of each MS as function of generation # <img src="bestweight2.png" alt="stat" style="width: 600px;"/> # **Comments**: # # - We can try to see the connection (I add intent, because my hypothesis after the first run) # # <table style="width:100%"> # <tr> # <th>Projects</th> # <th>MS</th> # <th>Individual RMS</th> # <th>Weight</th> # <th>Intent</th> # </tr> # <tr> # <td>2013.1.01241.S</td> # <td>A002_X9646fb_Xccd.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>5.8e-05</td> # <td>1 (fixed)</td> # <td>Bandpass</td> # </tr> # <tr> # <td></td> # <td>A002_X9646fb_Xccd.ms.split.cal.clb.field_1.J0423-0120.ms.self3.substracted.ms</td> # <td>7.8e-05</td> # <td>0</td> # <td>Flux</td> # </tr> # <tr> # <td></td> # <td>A002_X97db9c_X159b.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>6.4e-05</td> # <td>0</td> # <td>Bandpass</td> # </tr> # <tr> # <td>2012.1.00304.S</td> # <td>A002_X95b353_X6f7.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>1.0e-04</td> # <td>2.6</td> # <td>Bandpass</td> # </tr> # <tr> # <td></td> # <td>A002_X9630c0_X81b.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>1.1e-04</td> # <td>0</td> # <td>Bandpass</td> # 
</tr> # <tr> # <td>2013.1.00916.S</td> # <td>A002_Xa916fc_X6d6b.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>6.5e-05</td> # <td>0.8</td> # <td>Bandpass and Phase</td> # </tr> # <tr> # <td></td> # <td>A002_Xa916fc_X6d6b.ms.split.cal.clb.field_1.J0423-0120.ms.self3.substracted.ms</td> # <td>9.5e-05</td> # <td>0</td> # <td>Flux</td> # </tr> # <tr> # <td></td> # <td>A002_Xaaa05f_X15bb.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>2e-04</td> # <td>0.08</td> # <td>Bandpass and Delay</td> # </tr> # <tr> # <td></td> # <td>A002_Xaaa05f_X1af7.ms.split.cal.clb.field_0.J0423-0120.ms.self3.substracted.ms</td> # <td>2.6e-04</td> # <td>0.05</td> # <td>Bandpass and Delay</td> # </tr> # <tr> # <td></td> # <td>A002_Xaaa05f_X1af7.ms.split.cal.clb.field_1.J0423-0120.ms.self3.substracted.ms</td> # <td>2.6e-04</td> # <td>0.05</td> # <td>Flux</td> # </tr> # </table> # # From the table above: # - 1st MS of each project get "highest" weight, and the other can be "ignored". hmmm... # - from the second plot, MS that can be "ignored" (0 in weight) actually have divergent value ( in this case from 0 to 1). # - the final rms itself actually does not change much 3.5e-05 to 3.1e-05 # - maybe we need further test, e.g. # + "free" all the visweightscale # + shuffle the MS or insert new MS from different project # + change set of MS # ### Third Run # # Only change the mutation probability. 
# # Best individual is: [1.0 (fixed), 0.0, 0.0, 1.455593086305221, 3.381977817494705, 0.8, 0.0, 0.08, 0.05, 0.05] # # with fitness (rms) : 3.129861579509452e-05 # # Total evaluation of objective function: 595 # # Running time: 25.8437290611 hour # # # Similar trend, the only different is on 5th MS that has high value, but from the previous run, we see that this MS is "divergent" (high or low will not change the rms) # # Statistic of population as function of generation # <img src="stat3.png" alt="stat" style="width: 600px;"/> # # --- # # --- # # Best weight of each MS as function of generation # <img src="bestweight3.png" alt="stat" style="width: 600px;"/> # ### Fourth Run # # **For Band3** (23MS) # # Population=30 # Generation=25 # Mutation prob=0.2 # # Change the clean mask, and annulus to calc rms accordingly. # # --- # # Best individual is: [1.0 (fixed), 1.0, 3.928086429715624, 3.134443963345863, 0.0, 0.0, 0.990328281303368, 0.7313379069052468, 0.0, 0.9992785039072531, 0.0, 0.0, 1.0, 1.6488508173642726, 0.4370299009063334, 0.0, 0.0, 3.7061401281437623, 3.8371338322924218, 0.0, 0.027060796771145146, 3.351606446959614, 1.0] # # with fitness: 0.00072437291964888573 (still high, maybe because niter is not enough) # # Total evaluation of objective function: 435 # # Running time: 33.4 hour # # Statistic of population as function of generation # <img src="stat4.png" alt="stat" style="width: 600px;"/> # # --- # # --- # # Best weight of each MS as function of generation # <img src="bestweight4.png" alt="stat" style="width: 600px;"/> # # # We need to be careful because there are some correlation between 2 or more MS-weight in the plot. Same fitness (rms) can be produced when one MS weight increase and the other decrease at the same time (on one individual). This effect can be 'eliminated' if we increase the number of generation (GA iteration).
weighting/GA_deap_02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Machine Learning Engineer Nanodegree # ## Unsupervised Learning # ## Project: Creating Customer Segments # Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully! # # In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. # # >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. # + active="" # ## Getting Started # # In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in *monetary units*) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. 
Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer. # # The dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers. # # Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported. # + # Import libraries necessary for this project import numpy as np import pandas as pd from IPython.display import display # Allows the use of display() for DataFrames # Import supplementary visualizations code visuals.py import visuals as vs # Pretty display for notebooks # %matplotlib inline # Load the wholesale customers dataset try: data = pd.read_csv("customers.csv") data.drop(['Region', 'Channel'], axis = 1, inplace = True) print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape) except: print "Dataset could not be loaded. Is the dataset missing?" # - # ## Data Exploration # In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project. # # Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. 
Consider what each category represents in terms of products you could purchase. # Display a description of the dataset display(data.describe()) # ### Implementation: Selecting Samples # To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another. # + # TODO: Select three indices of your choice you wish to sample from the dataset indices = [65, 101, 125] # Create a DataFrame of the chosen samples samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True) print "Chosen samples of wholesale customers dataset:" display(samples) # - # ### Question 1 # Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers. # *What kind of establishment (customer) could each of the three samples you've chosen represent?* # **Hint:** Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying *"McDonalds"* when describing a sample customer as a restaurant. 
# + import matplotlib.pyplot as plt import seaborn as sns # visualize percentiles with heatmap # ref: http://seaborn.pydata.org/generated/seaborn.heatmap.html sns.heatmap(100.0 * data.rank(axis=0, pct=True).iloc[indices].round(decimals=3), annot=True, linewidth=.1, vmax=99, fmt='.1f', cmap='YlOrRd', square=True, cbar=False) plt.yticks([2.5,1.5,.5], ['Sample '+str(x) for x in range(0,3)], rotation='horizontal') plt.xticks(rotation=45, ha='center') plt.title('Percentile ranks') # - # **Answer:** # # My guesses for the types of establishment based on the visualization provided by the Percentile Rank Heatmap above: # # Shades of Red and Orange indicate higher percentile values. Hence, we can make the following guesses about the class: # # - Sample 0: Cafe - The purchases of Milk, Grocery and Detergents_Paper is usually high in cafes. # - Sample 1: Restaurant - The purchases of Milk, Grocery and Detergents is high usually at retailers. # - Sample 2: Market - Fresh Foods, Groceries, Frozen foods are usually high in markets. # ### Implementation: Feature Relevance # One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature. # # In the code block below, you will need to implement the following: # - Assign `new_data` a copy of the data by removing a feature of your choice using the `DataFrame.drop` function. # - Use `sklearn.cross_validation.train_test_split` to split the dataset into training and testing sets. # - Use the removed feature as your target label. 
# Set a `test_size` of `0.25` and set a `random_state`.
# - Import a decision tree regressor, set a `random_state`, and fit the learner to the training data.
# - Report the prediction score of the testing set using the regressor's `score` function.

# +
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
from sklearn.cross_validation import train_test_split
# Hoisted out of the loop: the original re-imported DecisionTreeRegressor on
# every one of the N * len(features) iterations.
from sklearn.tree import DecisionTreeRegressor

features = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicatessen']
# Running total of R^2 scores, one entry per candidate target feature.
score_sum = {feature: 0.0 for feature in features}

# set N = 100 for averaging over random_state as suggested by reviewer 1
N = 100
for r in range(N):
    for dropped_feature in features:
        # The removed feature becomes the regression target.
        y_target = data[dropped_feature]
        new_data = data.drop([dropped_feature], axis=1, inplace=False)

        # TODO: Split the data into training and testing sets using the given feature as the target
        # NOTE(review): the split seed is fixed at 0, so only the tree's
        # random_state varies over the N repetitions -- confirm that is intended.
        X_train, X_test, y_train, y_test = train_test_split(
            new_data, y_target, test_size=0.25, random_state=0)

        # TODO: Create a decision tree regressor and fit it to the training set
        regressor = DecisionTreeRegressor(random_state=r)
        regressor.fit(X_train, y_train)

        # TODO: Report the score of the prediction using the testing set
        score_sum[dropped_feature] += regressor.score(X_test, y_test)

# +
print('Feat.\tMean Score')
mean_scores = [(score_sum[i] / N, i) for i in features]
for j, i in mean_scores:
    print(str(i)[:6] + '.\t' + str(j))
# -

# ### Question 2
# *Which feature did you attempt to predict? What was the reported prediction score? Is this feature is necessary for identifying customers' spending habits?*
# **Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data.

max(mean_scores)

# **Answer:**
#
# I looped over all the features to get the R^2 score for when that feature was dropped.
#
# `R^2`, is scored between 0 and 1, with 1 being a perfect fit.
If a feature having weak usefulness is removed, R^2 will still be closer to 1. That means that the feature is strongly predictable by all other categroies. In other words, we can say that the dropped feature will have strong correlation with the other categories and will not be necessary. # # As reported above, I found that dropping Detergents_Paper seems to have the least impact on lowering the R^2 score and hence, I think it is not necessary for identifying customers' spending habits. # ### Visualize Feature Distributions # To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix. # Produce a scatter matrix for each pair of features in the data pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde'); # ### Question 3 # *Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed?* # **Hint:** Is the data normally distributed? Where do most of the data points lie? # **Answer:** # # The most visible correlation is between Grocery and Detergents_Paper. In the previous answer, I also found them to be most predictable when dropped. # # Correlation between Grocery and Milk as well Milk and Detergents_Paper can also been seen. # # No, the data does not appear to be normally distributed and most of the datapoints lie around low values. 
Due to this skew, we must apply some kind of normalisation to make the features normally distributed since the algorithms we use work under the assumption that the data features are roughly normally distributed # ## Data Preprocessing # In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful. # ### Implementation: Feature Scaling # If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm. # # In the code block below, you will need to implement the following: # - Assign a copy of the data to `log_data` after applying logarithmic scaling. Use the `np.log` function for this. # - Assign a copy of the sample data to `log_samples` after applying logarithmic scaling. Again, use `np.log`. 
# + # TODO: Scale the data using the natural logarithm log_data = np.log(data) # TODO: Scale the sample data using the natural logarithm log_samples = pd.DataFrame(log_data.loc[indices], columns = data.keys()).reset_index(drop = True) # Produce a scatter matrix for each pair of newly-transformed features pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde'); # - # ### Observation # After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before). # # Run the code below to see how the sample data has changed after having the natural logarithm applied to it. # Display the log-transformed sample data display(log_samples) # ### Implementation: Outlier Detection # Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identfying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal. # # In the code block below, you will need to implement the following: # - Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this. # - Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`. # - Assign the calculation of an outlier step for the given feature to `step`. 
# - Optionally remove data points from the dataset by adding indices to the `outliers` list.
#
# **NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points!
# Once you have performed this implementation, the dataset will be stored in the variable `good_data`.

# +
from collections import Counter

# OPTIONAL: Select the indices for data points you wish to remove
outliers = []

# For each feature find the data points with extreme high or low values
for feature in log_data.keys():

    # TODO: Calculate Q1 (25th percentile of the data) for the given feature
    Q1 = np.percentile(log_data[feature], 25)

    # TODO: Calculate Q3 (75th percentile of the data) for the given feature
    Q3 = np.percentile(log_data[feature], 75)

    # TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
    step = 1.5 * (Q3 - Q1)

    # Display the outliers (Tukey's rule: values outside [Q1 - step, Q3 + step])
    print("Data points considered outliers for the feature '{}':".format(feature))
    _outliers = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
    display(_outliers)
    outliers += [_outliers.index.tolist()]

# Flatten the per-feature outlier index lists once and reuse the result: the
# original rebuilt this same comprehension twice (for the unique set and for
# the Counter).
all_outlier_indices = [j for i in outliers for j in i]
unique_outliers = list(set(all_outlier_indices))

# Count how many features flag each data point; keep (count, index) pairs for
# points that are outliers in more than one feature.
outlier_count = dict(Counter(all_outlier_indices))
outlier_for_multiple = [(value, key) for key, value in outlier_count.items() if value > 1]
# print([i for j, i in outlier_for_multiple])

# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[unique_outliers]).reset_index(drop = True)
# -

# ### Question 4
# *Are there any data points considered outliers for more than one feature based on the definition above? Should these data points be removed from the dataset?
If any data points were added to the `outliers` list to be removed, explain why.* # + # outlier analysis print "Unique Outlier Count = " + str(len(outlier_count)) print "Datapoints having more than one outlying feature with counts:" print [(k,v) for (v,k) in sorted(outlier_for_multiple)] # - # **Answer:** # # We can see that, 154 has the most outlying features (count = 3). Other datapoints like 65, 66, 75, 128 also have two outlying features. # # These datapoints along with the other 42 are also removed in accordance to Tukey's method rather than choosing only some of the datapoints to remove since these points are well out of 1.5 times the IQR and will skew our results. # ## Feature Transformation # In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers. # ### Implementation: PCA # # Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the `good_data` to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the *explained variance ratio* of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data. # # In the code block below, you will need to implement the following: # - Import `sklearn.decomposition.PCA` and assign the results of fitting PCA in six dimensions with `good_data` to `pca`. # - Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`. 
# + from sklearn.decomposition import PCA # TODO: Apply PCA by fitting the good data with the same number of dimensions as features pca = PCA(n_components=6) pca.fit(good_data) # TODO: Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # Generate PCA results plot pca_results = vs.pca_results(good_data, pca) # - # ### Question 5 # *How much variance in the data is explained* ***in total*** *by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending.* # **Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the indivdual feature weights. # Calculate Cumulative Sum print pca_results['Explained Variance'].cumsum() # **Answer:** # # Using first 2 components: # - 1st PC: 0.499 # - 2nd PC: 0.226 # - Total: ~72% # # Using first 4 components: # - 1st PC: 0.499 # - 2nd PC: 0.226 # - 3rd PC: 0.105 # - 4th PC: 0.098 # - Total: ~93% # # Using the visualization we can see the linear coefficients of each feature for the components. The value indicates the contribution of the feature, where lower values imply low contribution. We can thereby describe the components as given below: # # We can think of these components to be describing customers with high spending on the features having high weights and low spending on other features with lower weights. And each component desribes a certain spending pattern, i.e. 49% of the datapoints have a pattern similar to the one described by component 1. # # - *Component 1* : Here, detergent_paper has a very high weight along with high weights for milk and grocery. # # - *Component 2* : Here, fresh, frozen and delicatessen have higher weight. 
# # - *Component 3* : It is interesting note that this component describes a spending pattern where there is a lot of purchase of Delicatessen, frozen and milk but large avoidance in terms of purchase of fresh and detergents_paper. # # - *Component 4* : This component describes a pattern where delicatessen and fresh are purchased but frozen and detergents_paper are avoided. # ### Observation # Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points. # Display sample log-data after having a PCA transformation applied display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values)) # ### Implementation: Dimensionality Reduction # When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the *cumulative explained variance ratio* is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a signifiant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards. # # In the code block below, you will need to implement the following: # - Assign the results of fitting PCA in two dimensions with `good_data` to `pca`. # - Apply a PCA transformation of `good_data` using `pca.transform`, and assign the results to `reduced_data`. # - Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`. 
# + # TODO: Apply PCA by fitting the good data with only two dimensions pca = PCA(n_components=2) pca.fit(good_data) # TODO: Transform the good data using the PCA fit above reduced_data = pca.transform(good_data) # TODO: Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # Create a DataFrame for the reduced data reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2']) vs.pca_results(good_data, pca) # - # ### Observation # Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions. # Display sample log-data after applying PCA transformation in two dimensions display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2'])) # ## Visualizing a Biplot # A biplot is a scatterplot where each data point is represented by its scores along the principal components. The axes are the principal components (in this case `Dimension 1` and `Dimension 2`). In addition, the biplot shows the projection of the original features along the components. A biplot can help us interpret the reduced dimensions of the data, and discover relationships between the principal components and original features. # # Run the code cell below to produce a biplot of the reduced-dimension data. # Create a biplot vs.biplot(good_data, reduced_data, pca) # ### Observation # # Once we have the original feature projections (in red), it is easier to interpret the relative position of each data point in the scatterplot. For instance, a point the lower right corner of the figure will likely correspond to a customer that spends a lot on `'Milk'`, `'Grocery'` and `'Detergents_Paper'`, but not so much on the other product categories. 
# # From the biplot, which of the original features are most strongly correlated with the first component? What about those that are associated with the second component? Do these observations agree with the pca_results plot you obtained earlier?

# ## Clustering
#
# In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale.

# ### Question 6
# *What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?*

# **Answer:**
#
# K-Means clustering algorithm is simple to understand, fast and more scalable. It is much faster due to the lower number of parameters and is well suited towards situations with lots of data, and where clusters are clearly separable and non-uniform. The algorithm means data points rigidly belong to one cluster or another.
#
# Gaussian Mixture Models have many more parameters than K-Means, and GMM is a soft clustering method. Using Gaussian distributions and probabilities, data points do not necessarily have to be assigned rigidly, and ones with lower probability can be assigned to multiple clusters at once. Also, GMMs are able to assign non-spherical clusters. Moreover, it can be used to predict probabilities of events rather than rigid features.
#
# Since the scatter plot appears to be quite uniform and a lot of the data points will clearly belong to one or more clusters, it would be appropriate to use a GMM in our case.

# ### Implementation: Creating Clusters
# Depending on the problem, the number of clusters that you expect to be in the data may already be known.
When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering. # # In the code block below, you will need to implement the following: # - Fit a clustering algorithm to the `reduced_data` and assign it to `clusterer`. # - Predict the cluster for each data point in `reduced_data` using `clusterer.predict` and assign them to `preds`. # - Find the cluster centers using the algorithm's respective attribute and assign them to `centers`. # - Predict the cluster for each sample data point in `pca_samples` and assign them `sample_preds`. # - Import `sklearn.metrics.silhouette_score` and calculate the silhouette score of `reduced_data` against `preds`. # - Assign the silhouette score to `score` and print the result. 
# +
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score

scores = []
for k in range(2, 16):
    # TODO: Apply your clustering algorithm of choice to the reduced data
    clusterer = GaussianMixture(n_components=k, random_state=0)
    clusterer.fit(reduced_data)

    # TODO: Predict the cluster for each data point
    preds = clusterer.predict(reduced_data)

    # TODO: Find the cluster centers
    centers = clusterer.means_

    # TODO: Predict the cluster for each transformed sample data point
    sample_preds = clusterer.predict(pca_samples)

    # TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
    score = silhouette_score(reduced_data, preds)

    # Append to scores list
    scores += [(score, k)]
# -

# ### Question 7
# *Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score?*

# **Answer:**

# +
# Display Scores
print('#\tSilhouette Score')
for s, n in scores:
    print(str(n) + '\t' + str(s))

# max() on (score, k) tuples picks the highest silhouette score
score, clusters = max(scores)
# BUG FIX: this was a Python 2 print statement (no parentheses), which is a
# SyntaxError under the Python 3 kernel this notebook declares.
print('\nGaussian Mixture Model with ' + str(clusters) +
      ' clusters has the best silhouette score of ' + str(score) + '.')
# -

# ### Cluster Visualization
# Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters.
# +
# Choosing k = 2
k = 2
# Fit the chosen model; GaussianMixture.fit returns the fitted estimator
clusterer = GaussianMixture(n_components=k, random_state=0).fit(reduced_data)

# Predict the cluster for each data point
preds = clusterer.predict(reduced_data)

# The cluster centers (means of the fitted Gaussians)
centers = clusterer.means_

# Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)

# Mean silhouette coefficient for the number of clusters chosen
score = silhouette_score(reduced_data, preds)

# Display the results of the clustering from implementation
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# -

# ### Implementation: Data Recovery
# Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the *averages* of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to *the average customer of that segment*. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.
#
# In the code block below, you will need to implement the following:
#  - Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the new centers to `log_centers`.
#  - Apply the inverse function of `np.log` to `log_centers` using `np.exp` and assign the true centers to `true_centers`.
#

# +
# TODO: Inverse transform the centers (back from PCA space to log space)
log_centers = pca.inverse_transform(centers)

# TODO: Exponentiate the centers (undo the earlier np.log scaling)
true_centers = np.exp(log_centers)

# Display the true centers, one labelled row per cluster
segments = ['Segment ' + str(i) for i in range(len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns=data.keys())
true_centers.index = segments
display(true_centers)
# -

# ### Question 8
# Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. *What set of establishments could each of the customer segments represent?*
# **Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`.

# +
display(np.exp(good_data).describe())

# One bar chart of average spending per category for each segment
for seg_idx in range(2):
    plt.figure()
    plt.axes().set_title("Segment " + str(seg_idx))
    sns.barplot(x=true_centers.columns.values, y=true_centers.iloc[seg_idx].values)
# -

# **Answer:**
#
# - *Segment 0*: I think this segment represents customers purchasing large quantities of fresh and frozen products, i.e. more than 50th percentile of the data. These are customers who are purchasing in bulk from markets to reduce their expenses.
# - *Segment 1*: This segment represents customers with large purchases of milk, grocery and detergents_paper, i.e. more than 75th percentile of the data. I think this represents cafe/restaurants.

# ### Question 9
# *For each sample point, which customer segment from* ***Question 8*** *best represents it? Are the predictions for each sample point consistent with this?*
#
# Run the code block below to find which cluster each sample point is predicted to be.
# + print "Samples:" display(samples) print "Predictions for samples:" # Display the predictions for i, pred in enumerate(sample_preds): print "Sample point", i, "predicted to be in Cluster", pred # - # **Answer:** # # My predictions were: # 0. Cafe - The purchases of Milk, Grocery and Detergents_Paper is high usually in cafes. # 1. Restaurant - The purchases of Milk, Grocery and Detergents is high usually at retailers. # 2. Market - Fresh Foods, Groceries, Frozen foods are usually high in markets. # # Yes, these predictions are indeed consistent with my initial intuitive guesses. # ## Conclusion # In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the ***customer segments***, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which *segment* that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the ***customer segments*** to a hidden variable present in the data, to see whether the clustering identified certain relationships. # ### Question 10 # Companies will often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively. *How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?* # **Hint:** Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most? 
# **Answer:**
#
# The model has clustered into two segments - *Markets* and *Cafe/Restaurant*. Customers that fall into the Cafe/Restaurant segment would prefer a delivery service with 5 days since they serve fresh food, whereas the other segment would be more flexible since they purchase non-perishable goods as well.
#
# The company can therefore run A/B tests on a subset from each cluster and evaluate their feedback and establish whether changing the delivery service would have an impact on customer satisfaction or not. This allows the distributor to make better decisions along with extending the benefits of these tailor-made decisions to the customer as well.

# ### Question 11
# Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a ***customer segment*** it best identifies with (depending on the clustering algorithm applied), we can consider *'customer segment'* as an **engineered feature** for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a ***customer segment*** to determine the most appropriate delivery service.
# *How can the wholesale distributor label the new customers using only their estimated product spending and the* ***customer segment*** *data?*
# **Hint:** A supervised learner could be used to train on the original customers. What would be the target variable?

# **Answer:**
#
# We can train a supervised learner, such as an SVM, on the original customers' annual spending data, using the GMM-derived 'Customer Segment' feature as the target variable. Then, using the spending estimates, we can predict the customer segment for each new customer.
# ### Visualizing Underlying Distributions
#
# At the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset.
#
# Run the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` in the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling.

# Display the clustering results based on 'Channel' data
vs.channel_results(reduced_data, unique_outliers, pca_samples)

# ### Question 12
# *How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers? Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution? Would you consider these classifications as consistent with your previous definition of the customer segments?*

# **Answer:**
#
# The actual data seems to correlate with our predicted clusters. The number of clusters we decided using the silhouette score is the same as the number of actual classes in the dataset. However, the distribution of retailers and HoReCa is more mixed than predicted using GMM. It seems to be consistent with my guesses of the class names in Question 1.
#
#
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
# **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
customer_segments/customer_segments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from scrapy import Selector import scrapy import requests r = requests.get('https://www.datacamp.com/courses/all') r.text[:300] r.url request = scrapy.Request('https://www.datacamp.com/courses/all') response = scrapy.http.Response('https://www.datacamp.com/courses/all') sel = Selector(text=r.text) # Create a SelectorList of the course titles crs_title_els = sel.css( 'h4::text' ) # + # Extract the course titles crs_titles = crs_title_els.extract() # Print out the course titles for i in crs_titles: print( ">>", i ) if i == 'Text Mining: Bag of Words': break # -
curso/5.2 Web Scraping/Scrap_basis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Are comments using profanity doomed to a high toxicity score? # **Disclaimer:** This data analysis examines profanity within a Data Frame. # **1**. First, I import pandas and with it the CSV data set. # + import pandas as pd import numpy as np df = pd.read_csv('labeled_and_scored_comments.csv') # - # **2**. Then, I plotted the csv just to verify the file is correct df.head() # **3**. I imported the Google API Client, and included my own API key. # + from googleapiclient.discovery import build import json def get_toxicity_score(comment): API_KEY = 'KEY' client = build( "commentanalyzer", "v1alpha1", developerKey=API_KEY, discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1", static_discovery=False, ) analyze_request = { 'comment': { 'text': comment }, 'requestedAttributes': {'TOXICITY': {}} } response = client.comments().analyze(body=analyze_request).execute() toxicity_score = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"] return toxicity_score # - # **4**. I decided to see what happens if I perform a command to score all the comments in the data set using the df function to isolate the text under the 'comment_text' attribute. # + comment_list = df['comment_text'] for comment in comment_list: score = get_toxicity_score(comment) print(comment, score) # - # Note: This worked once, but produced an error message the second time. From the comments it did score I decided to determine a hypothesis. # **5**. I sorted the dataset in terms of 'score' value to see the comments rated highest and lowest. 
df.sort_values(['score']) # **My hypothesis, seeing the scores for the comments as well as the sorted values for the table, is that: the API will not rate a comment that uses profanity lower than a 0.5, even if profanity is not used in a negative context.** # Method: To test my hypothesis, I will attempt to extract the comments using profanity and describe them to see what the lowest/highest scores are. I will also formulate original comments to test how the API responds to positive comments containing profanity. # **6**. I began the test by using the describe command to provide data on the comments containing the word(s) that was most prevanlent in the highest rated comments. df.loc[df['comment_text'].str.contains("fuck|fucking|fucker|fucked", case=False)].describe() # *Note*: The mean score is about 0.92, which confirms that most comments containing these words are rated highly. However, there is a score minimum of about 0.15, which is an outlier. # **7**. Next, I searched for the outlier in the DataFrame. df[df['score'] == 0.989706] # **8**. I decided to look for the comment separately on the CSV file because the code was not revealing the comment (I suspect because the command does not work in floating numbers). # Through this I discovered that the comment was a biography of a web author/musician. This revealed that the word was part of the title of a song, which means that it was not used within the commenters contribution. This explains why there was an outlier. # # Fragment from comment: # # "Destiny Lativia Flowers(born October 19,1999), known by her stage name Cash Lady... # # Music with PHG # # Fuck you (2014) # # I love you (Remix of End of Time by <NAME>)(2014) # # Trust and Believe (Cover)(2013) # # Cash Lady" # # **9**. Just to make sure profanity follows a similar pattern to the first case observed, I analyzed the data around more words. 
df.loc[df['comment_text'].str.contains("damn|cunt|bastard|bitch|dick|shit", case=False)].describe() # The result of extracting other profanity is similar, with the mean reflecting a high number and the data showing a minimum score that is an outlier. # # When investigating the outlier using the CSV file (like in the previous example), it was found that there was a mistake in the identification of the word and that none of the comments containing the keywords matched the minimum score (0.060533). # **10**. Finally, I analyzed original comments to test whether profanity used in a positive context would reflect a lower score (compared to the mean). # + comment_list = ['I fucking love you!', 'You are the shit!', 'I love you bitch!'] for comment in comment_list: score = get_toxicity_score(comment) print(comment, score) # - # Results: # The scores for the last two comments reveal what is expected from the data. So we observe no change when those two words are used positively. However, the first example has a significantly lower score compared to the mean of the comments using the same word. This may reveal that it is a common enough phrase that the algorithm recognizes a positive intent. The score is still above a 0.5, which can lead us to conclude that profanity definately carries significant weight when determining scores.
Analysis of a Data Set using API.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Problem statement: predicting turbine energy yield (TEY) using ambient variables as features.
#
# Attribute Information:
#
# The explanations of sensor measurements and their brief statistics are given below.
#
# Variable (Abbr.) Unit Min Max Mean
# Ambient temperature (AT) C –6.23 37.10 17.71
# Ambient pressure (AP) mbar 985.85 1036.56 1013.07
# Ambient humidity (AH) (%) 24.08 100.20 77.87
# Air filter difference pressure (AFDP) mbar 2.09 7.61 3.93
# Gas turbine exhaust pressure (GTEP) mbar 17.70 40.72 25.56
# Turbine inlet temperature (TIT) C 1000.85 1100.89 1081.43
# Turbine after temperature (TAT) C 511.04 550.61 546.16
# Compressor discharge pressure (CDP) mbar 9.85 15.16 12.06
# Turbine energy yield (TEY) MWH 100.02 179.50 133.51
# Carbon monoxide (CO) mg/m3 0.00 44.10 2.37
# Nitrogen oxides (NOx) mg/m3 25.90 119.91 65.29

import pandas as pd
import numpy as np
from keras.layers import Dense
from keras.models import Sequential

gas = pd.read_csv("G:/data sceince/Assignments/Neural Networks/gas_turbines.csv")
gas.head()

gas.shape

gas.info()

gas.corr()

# +
# standardizing the data
from sklearn.preprocessing import StandardScaler
# -

# Features are all columns except the target TEY
x = gas.drop(['TEY'], axis=1)
y = gas['TEY']

x.head()

y.head()

# Named `scaler` (was `std`) so the grid-search result variables below cannot
# shadow the fitted StandardScaler instance.
scaler = StandardScaler()
x_std = scaler.fit_transform(x)
x_std

x.describe()

# #### Hyper tuning of parameters

from sklearn.model_selection import GridSearchCV, KFold
from keras.wrappers.scikit_learn import KerasRegressor  # As the task is Regression
from tensorflow.keras.optimizers import Adam
import warnings
warnings.filterwarnings("ignore")


# #### Creating a framework

def create_model():
    """Build a 10-input regression ANN: Dense(8) -> Dense(5) -> Dense(1, linear)."""
    model = Sequential()
    model.add(Dense(8, input_dim=10, kernel_initializer='uniform', activation='relu'))
    model.add(Dense(5, kernel_initializer='uniform', activation='relu'))
    # BUG FIX: the output layer used a sigmoid activation, which bounds
    # predictions to (0, 1) while TEY ranges from ~100 to ~180 MWH.
    # A regression output must be linear (the Dense default).
    model.add(Dense(1, kernel_initializer='uniform'))
    adam = Adam(lr=0.01)
    # BUG FIX: pass the configured optimizer object; optimizer='adam' (a string)
    # silently discarded the lr set above. 'accuracy' is meaningless for
    # regression, so the metrics argument is dropped.
    model.compile(loss='mean_squared_error', optimizer=adam)
    return model


# ##### Creating the model
# ##### Tuning for batch size and epochs

# +
model = KerasRegressor(build_fn=create_model, verbose=0)

batch_size = [60, 80, 100, 200]
epochs = [20, 40, 50, 70]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(), verbose=10)
grid.fit(x_std, y)

# +
print('Best: {} with: {}'.format(grid.best_score_, grid.best_params_))
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
params = grid.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print('{} {} with : {}'.format(mean, stdev, param))
# -

# ##### Tuning for dropout rate and learning rate

# +
from keras.layers import Dropout


def create_model(learning_rate, dropout_rate):
    """Build the regression ANN with tunable dropout rate and learning rate."""
    model = Sequential()
    model.add(Dense(8, input_dim=10, kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(5, kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, kernel_initializer='uniform'))
    # BUG FIX: actually use the learning_rate being tuned; previously
    # optimizer='adam' ignored it, making this grid search a no-op.
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=learning_rate))
    return model


model = KerasRegressor(build_fn=create_model, batch_size=80, epochs=70, verbose=0)

dropout_rate = [0.1, 0.2, 0.5]
learning_rate = [0.1, 0.01, 0.001]
param_grid = dict(dropout_rate=dropout_rate, learning_rate=learning_rate)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=3), verbose=10)
grid.fit(x_std, y)

# +
print('Best: {} with: {}'.format(grid.best_score_, grid.best_params_))
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
params = grid.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print('{} {} with : {}'.format(mean, stdev, param))
# -

# ##### Tuning for Activation Function and kernel initializer

# +
def create_model(activation_function, init):
    """Build the regression ANN with tunable hidden activation and initializer."""
    model = Sequential()
    model.add(Dense(8, input_dim=10, kernel_initializer=init, activation=activation_function))
    model.add(Dropout(0.1))
    model.add(Dense(5, kernel_initializer=init, activation=activation_function))
    model.add(Dropout(0.1))
    model.add(Dense(1, kernel_initializer='uniform'))
    # BUG FIX: pass the Adam(lr=0.001) object instead of the 'adam' string.
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.001))
    return model


model = KerasRegressor(build_fn=create_model, batch_size=80, epochs=70, verbose=0)

# BUG FIX: 'maxout' is not a built-in Keras activation and would raise a
# ValueError during the search, so it is removed from the candidate list.
activation_function = ['relu', 'elu', 'tanh', 'sigmoid']
init = ['uniform', 'normal', 'zero']
param_grid = dict(activation_function=activation_function, init=init)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=3), verbose=10)
grid.fit(x_std, y)

# +
print('Best: {} with: {}'.format(grid.best_score_, grid.best_params_))
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
params = grid.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print('{} {} with : {}'.format(mean, stdev, param))
# -

# ##### Tuning for number of neurons

# +
def create_model(neuron1, neuron2):
    """Build the regression ANN with tunable hidden-layer sizes."""
    model = Sequential()
    # BUG FIX: the layer sizes were hard-coded to 8 and 5, so the grid search
    # over neuron1/neuron2 had no effect; the tuned values are now used.
    model.add(Dense(neuron1, input_dim=10, kernel_initializer='normal', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(neuron2, kernel_initializer='normal', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.001))
    return model


model = KerasRegressor(build_fn=create_model, batch_size=80, epochs=70, verbose=0)

neuron1 = [5, 8, 10]
neuron2 = [2, 3, 5]
param_grid = dict(neuron1=neuron1, neuron2=neuron2)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(), verbose=10)
grid.fit(x_std, y)

# +
print('Best: {} with: {}'.format(grid.best_score_, grid.best_params_))
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
params = grid.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print('{} {} with : {}'.format(mean, stdev, param))
# -

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=101)

std1 = StandardScaler()
x_train_std = std1.fit_transform(x_train)
# BUG FIX: the test set must be transformed with the statistics learned on the
# training set; fit_transform on x_test leaked test-set information and scaled
# train and test with different parameters.
x_test_std = std1.transform(x_test)

# ##### Building the final model with optimum values

# +
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(Dense(5, input_dim=10, kernel_initializer='uniform', activation='relu'))
model.add(Dense(3, kernel_initializer='uniform', activation='relu'))
# Linear output for the unbounded regression target.
model.add(Dense(1, kernel_initializer='uniform'))
adam = Adam(lr=0.001)
# BUG FIX: pass the Adam(lr=0.001) object so the chosen learning rate is used.
model.compile(optimizer=adam, loss='mse')
model.fit(x_train_std, y_train, epochs=70, batch_size=80)
# -

# Test-set mean squared error
score = model.evaluate(x_test_std, y_test)
print("Score : %.2f" % score)
Assign_ANN_gas_turbines.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/tyoc213/fastai_xla_extensions/blob/ProxyToGetAttr/explore_nbs/PickableOpt%2BBasic_lenet_exploration_MultiTPU.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="BmnUX_l8lQ6B" # # Install fastai2 from github # + id="B4E9PKCjqpg3" outputId="a8c90508-1d7b-4f5f-8788-c1c93537f9d6" colab={"base_uri": "https://localhost:8080/"} # !python --version # + id="Q5DZXcBNJoy1" outputId="6d89728c-b355-48c0-be24-9a4241823df8" colab={"base_uri": "https://localhost:8080/"} # #!pip install -U pandas --upgrade # #!pip install -U fastcore --upgrade # !pip install -U fastai --upgrade # !pip install -Uqq git+https://github.com/tyoc213/fastai_xla_extensions@ProxyToGetAttr # + id="BxoA3fJusV17" outputId="60b0c971-a11f-479c-992f-ef7845dc9ab8" colab={"base_uri": "https://localhost:8080/"} VERSION = "20201001" #"20200515" @param ["1.5" , "20200325", "nightly"] # !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py # !echo "-------------------------------------------------------------------------" # #!TORCH_SHOW_CPP_STACKTRACES=1 python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev # !python pytorch-xla-env-setup.py --version $VERSION --apt-packages libomp5 libopenblas-dev # #!python pytorch-xla-env-setup.py --version $VERSION # + id="aJMhjxPPaPo8" outputId="6605fde2-3ca4-4752-bd77-f11b3a7793b1" colab={"base_uri": "https://localhost:8080/"} # !pip freeze | grep torch # !echo "------------------------------------------" # !pip freeze | grep fast # + id="O5vVQw3JM7yA" 
outputId="661f41ee-4e5e-4c83-fbb6-cdfea864c1c2" colab={"base_uri": "https://localhost:8080/"} import fastai_xla_extensions.core from fastai.vision.all import * defaults.callbacks # + id="rHE_YF1_wHeO" outputId="a3644252-f67a-4041-a5e1-367aa127d01e" colab={"base_uri": "https://localhost:8080/"} default_device() # + id="OD7QTq_ulNZK" outputId="087f3219-e1bb-4ef6-f12a-b7e964489488" colab={"base_uri": "https://localhost:8080/", "height": 34} path = untar_data(URLs.MNIST_SAMPLE) Path.BASE_PATH = path; path.ls() (path/'train').ls() # + id="UV7A3hzlyYMC" outputId="1ed93492-4471-4c8f-9903-fe73a5de9d0b" colab={"base_uri": "https://localhost:8080/"} # Configures training (and evaluation) parameters import torchvision from torchvision import datasets import torchvision.transforms as transforms import torch_xla.distributed.xla_multiprocessing as xmp import torch_xla.distributed.parallel_loader as pl import torch_xla.core.xla_model as xm from fastai.vision.all import * import time from fastai.test_utils import * print(f'torch version {torch.__version__}') path = untar_data(URLs.MNIST_SAMPLE) Path.BASE_PATH = path; path.ls() # + [markdown] id="Obb_HBYU4wM0" # # multi TPU # + id="FwqEIE9aA9ZT" outputId="195f75be-70c6-4f7e-e35e-1230b113c2f6" colab={"base_uri": "https://localhost:8080/", "height": 1000} import pdb def debug_on(*exceptions): if not exceptions: exceptions = (AssertionError, ) def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except exceptions: pdb.post_mortem(sys.exc_info()[2]) return wrapper return decorator class Lenet2(nn.Module): def __init__(self): super(Lenet2, self).__init__() self.conv1 = nn.Conv2d(3, 6, 3) self.conv2 = nn.Conv2d(6, 16, 3) self.fc1 = nn.Linear(400, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 2) # Only 2 outputs instead of 10 @debug_on(KeyError) def forward(self, x): # Max pooling over a (2, 2) window x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you 
can only specify a single number x = F.max_pool2d(F.relu(self.conv2(x)), 2) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x @debug_on(KeyError) def num_flat_features(self, x): size = x.size()[1:] # all dimensions except the batch dimension num_features = 1 for s in size: num_features *= s return num_features def map_fn(index, flags): # from fastai.callback.all import * dede = xm.xla_device() print(f'index is {index} and flags are {flags}') #xm.rendezvous('init') if not xm.is_master_ordinal(): print(f"this is {dede}:{index} entering download once") xm.rendezvous('download_only_once') dblock = DataBlock( splitter = GrandparentSplitter(), item_tfms = Resize(28), blocks = (ImageBlock, CategoryBlock), get_items = get_image_files, get_y = parent_label, batch_tfms = [] ) if xm.is_master_ordinal(): xm.master_print(f'this is {dede} exiting download once') xm.rendezvous('download_only_once') xm.master_print('creating lenet_tpu') lenet_tpu = Lenet2() xm.master_print('lenet created, goiing for dls_tpu') dls_tpu = dblock.dataloaders(path, device=dede) xm.master_print(f'creating learner!!! 
for {dede}') tpu_learner = Learner(dls_tpu, lenet_tpu, metrics=accuracy, loss_func=F.cross_entropy, cbs=[]) print(f"################ fit for {dede}") xm.master_print(f'***** fit for {dede}') # tpu_learner.fit(1, cbs=[fastai_xla_extensions.core.XLAOptCallback()]) tpu_learner.fit(1) xm.master_print(f'***** end fit for {dede}') t = torch.randn((2, 2), device=dede) print("################Process", index ,"is using", xm.xla_real_devices([str(dede)])[0]) # https://stackoverflow.com/a/9929970/682603 # excepthook # import traceback import logging import os, sys def my_excepthook(excType, excValue, traceback, logger): print("=== *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% === *** @@@ ### %%% Logging an uncaught exception", exc_info=(excType, excValue, traceback)) sys.excepthook = my_excepthook sys.unraisablehook = my_excepthook ##############threading.excepthook #https://docs.python.org/3/library/sys.html#sys.excepthook print('launching n procs') flags={} flags['batch_size'] = 32 flags['num_workers'] = 8 flags['num_epochs'] = 1 flags['seed'] = 1234 xmp.spawn(map_fn, args=(flags,), nprocs=8, start_method='fork') print('end of launch') # + id="P0E0dthTvO-9" outputId="0a3f1be6-fd1c-4e61-9497-00c86c764274" colab={"base_uri": "https://localhost:8080/"} # !python --version # + id="fnhxamwfqYeV"
explore_nbs/PickableOpt+Basic_lenet_exploration_MultiTPU.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook is intended to demonstrate the basic features of the Python API for constructing input files and running OpenMC. In it, we will show how to create a basic reflective pin-cell model that is equivalent to modeling an infinite array of fuel pins. If you have never used OpenMC, this can serve as a good starting point to learn the Python API. We highly recommend having a copy of the [Python API reference documentation](https://docs.openmc.org/en/stable/pythonapi/index.html) open in another browser tab that you can refer to. # %matplotlib inline import openmc # ## Defining Materials # # Materials in OpenMC are defined as a set of nuclides with specified atom/weight fractions. To begin, we will create a material by making an instance of the `Material` class. In OpenMC, many objects, including materials, are identified by a "unique ID" that is simply just a positive integer. These IDs are used when exporting XML files that the solver reads in. They also appear in the output and can be used for identification. Since an integer ID is not very useful by itself, you can also give a material a `name` as well. uo2 = openmc.Material(1, "uo2") print(uo2) # On the XML side, you have no choice but to supply an ID. However, in the Python API, if you don't give an ID, one will be automatically generated for you: mat = openmc.Material() print(mat) # We see that an ID of 2 was automatically assigned. Let's now move on to adding nuclides to our `uo2` material. The `Material` object has a method `add_nuclide()` whose first argument is the name of the nuclide and second argument is the atom or weight fraction. help(uo2.add_nuclide) # We see that by default it assumes we want an atom fraction. 
# Add nuclides to uo2 uo2.add_nuclide('U235', 0.03) uo2.add_nuclide('U238', 0.97) uo2.add_nuclide('O16', 2.0) # Now we need to assign a total density to the material. We'll use the `set_density` for this. uo2.set_density('g/cm3', 10.0) # You may sometimes be given a material specification where all the nuclide densities are in units of atom/b-cm. In this case, you just want the density to be the sum of the constituents. In that case, you can simply run `mat.set_density('sum')`. # # With UO2 finished, let's now create materials for the clad and coolant. Note the use of `add_element()` for zirconium. # + zirconium = openmc.Material(name="zirconium") zirconium.add_element('Zr', 1.0) zirconium.set_density('g/cm3', 6.6) water = openmc.Material(name="h2o") water.add_nuclide('H1', 2.0) water.add_nuclide('O16', 1.0) water.set_density('g/cm3', 1.0) # - # An astute observer might now point out that this water material we just created will only use free-atom cross sections. We need to tell it to use an $S(\alpha,\beta)$ table so that the bound atom cross section is used at thermal energies. To do this, there's an `add_s_alpha_beta()` method. Note the use of the GND-style name "c_H_in_H2O". water.add_s_alpha_beta('c_H_in_H2O') # When we go to run the transport solver in OpenMC, it is going to look for a `materials.xml` file. Thus far, we have only created objects in memory. To actually create a `materials.xml` file, we need to instantiate a `Materials` collection and export it to XML. materials = openmc.Materials([uo2, zirconium, water]) # Note that `Materials` is actually a subclass of Python's built-in `list`, so we can use methods like `append()`, `insert()`, `pop()`, etc. materials = openmc.Materials() materials.append(uo2) materials += [zirconium, water] isinstance(materials, list) # Finally, we can create the XML file with the `export_to_xml()` method. 
In a Jupyter notebook, we can run a shell command by putting `!` before it, so in this case we are going to display the `materials.xml` file that we created. materials.export_to_xml() # !cat materials.xml # ### Element Expansion # # Did you notice something really cool that happened to our Zr element? OpenMC automatically turned it into a list of nuclides when it exported it! The way this feature works is as follows: # # - First, it checks whether `Materials.cross_sections` has been set, indicating the path to a `cross_sections.xml` file. # - If `Materials.cross_sections` isn't set, it looks for the `OPENMC_CROSS_SECTIONS` environment variable. # - If either of these are found, it scans the file to see what nuclides are actually available and will expand elements accordingly. # # Let's see what happens if we change O16 in water to elemental O. # + water.remove_nuclide('O16') water.add_element('O', 1.0) materials.export_to_xml() # !cat materials.xml # - # We see that now O16 and O17 were automatically added. O18 is missing because our cross sections file (which is based on ENDF/B-VII.1) doesn't have O18. If OpenMC didn't know about the cross sections file, it would have assumed that all isotopes exist. # ### The `cross_sections.xml` file # # The `cross_sections.xml` tells OpenMC where it can find nuclide cross sections and $S(\alpha,\beta)$ tables. It serves the same purpose as MCNP's `xsdir` file and Serpent's `xsdata` file. As we mentioned, this can be set either by the `OPENMC_CROSS_SECTIONS` environment variable or the `Materials.cross_sections` attribute. # # Let's have a look at what's inside this file: # !cat $OPENMC_CROSS_SECTIONS | head -n 10 print(' ...') # !cat $OPENMC_CROSS_SECTIONS | tail -n 10 # ### Enrichment # # Note that the `add_element()` method has a special argument `enrichment` that can be used for Uranium. 
For example, if we know that we want to create 3% enriched UO2, the following would work: uo2_three = openmc.Material() uo2_three.add_element('U', 1.0, enrichment=3.0) uo2_three.add_element('O', 2.0) uo2_three.set_density('g/cc', 10.0) # ### Mixtures # # In OpenMC it is also possible to define materials by mixing existing materials. For example, if we wanted to create MOX fuel out of a mixture of UO2 (97 wt%) and PuO2 (3 wt%) we could do the following: # + # Create PuO2 material puo2 = openmc.Material() puo2.add_nuclide('Pu239', 0.94) puo2.add_nuclide('Pu240', 0.06) puo2.add_nuclide('O16', 2.0) puo2.set_density('g/cm3', 11.5) # Create the mixture mox = openmc.Material.mix_materials([uo2, puo2], [0.97, 0.03], 'wo') # - # The 'wo' argument in the `mix_materials()` method specifies that the fractions are weight fractions. Materials can also be mixed by atomic and volume fractions with 'ao' and 'vo', respectively. For 'ao' and 'wo' the fractions must sum to one. For 'vo', if fractions do not sum to one, the remaining fraction is set as void. # ## Defining Geometry # # At this point, we have three materials defined, exported to XML, and ready to be used in our model. To finish our model, we need to define the geometric arrangement of materials. OpenMC represents physical volumes using constructive solid geometry (CSG), also known as combinatorial geometry. The object that allows us to assign a material to a region of space is called a `Cell` (same concept in MCNP, for those familiar). In order to define a region that we can assign to a cell, we must first define surfaces which bound the region. A *surface* is a locus of zeros of a function of Cartesian coordinates $x$, $y$, and $z$, e.g. 
# # - A plane perpendicular to the x axis: $x - x_0 = 0$ # - A cylinder parallel to the z axis: $(x - x_0)^2 + (y - y_0)^2 - R^2 = 0$ # - A sphere: $(x - x_0)^2 + (y - y_0)^2 + (z - z_0)^2 - R^2 = 0$ # # Between those three classes of surfaces (planes, cylinders, spheres), one can construct a wide variety of models. It is also possible to define cones and general second-order surfaces (tori are not currently supported). # # Note that defining a surface is not sufficient to specify a volume -- in order to define an actual volume, one must reference the half-space of a surface. A surface *half-space* is the region whose points satisfy a positive or negative inequality of the surface equation. For example, for a sphere of radius one centered at the origin, the surface equation is $f(x,y,z) = x^2 + y^2 + z^2 - 1 = 0$. Thus, we say that the negative half-space of the sphere, is defined as the collection of points satisfying $f(x,y,z) < 0$, which one can reason is the inside of the sphere. Conversely, the positive half-space of the sphere would correspond to all points outside of the sphere. # # Let's go ahead and create a sphere and confirm that what we've told you is true. sphere = openmc.Sphere(r=1.0) # Note that by default the sphere is centered at the origin so we didn't have to supply `x0`, `y0`, or `z0` arguments. Strictly speaking, we could have omitted `R` as well since it defaults to one. To get the negative or positive half-space, we simply need to apply the `-` or `+` unary operators, respectively. # # (NOTE: Those unary operators are defined by special methods: `__pos__` and `__neg__` in this case). inside_sphere = -sphere outside_sphere = +sphere # Now let's see if `inside_sphere` actually contains points inside the sphere: print((0,0,0) in inside_sphere, (0,0,2) in inside_sphere) print((0,0,0) in outside_sphere, (0,0,2) in outside_sphere) # Everything works as expected! 
Now that we understand how to create half-spaces, we can create more complex volumes by combining half-spaces using Boolean operators: `&` (intersection), `|` (union), and `~` (complement). For example, let's say we want to define a region that is the top part of the sphere (all points inside the sphere that have $z > 0$. z_plane = openmc.ZPlane(z0=0) northern_hemisphere = -sphere & +z_plane # For many regions, OpenMC can automatically determine a bounding box. To get the bounding box, we use the `bounding_box` property of a region, which returns a tuple of the lower-left and upper-right Cartesian coordinates for the bounding box: northern_hemisphere.bounding_box # Now that we see how to create volumes, we can use them to create a cell. # + cell = openmc.Cell() cell.region = northern_hemisphere # or... cell = openmc.Cell(region=northern_hemisphere) # - # By default, the cell is not filled by any material (void). In order to assign a material, we set the `fill` property of a `Cell`. cell.fill = water # ### Universes and in-line plotting # A collection of cells is known as a universe (again, this will be familiar to MCNP/Serpent users) and can be used as a repeatable unit when creating a model. Although we don't need it yet, the benefit of creating a universe is that we can visualize our geometry while we're creating it. # + universe = openmc.Universe() universe.add_cell(cell) # this also works universe = openmc.Universe(cells=[cell]) # - # The `Universe` object has a `plot` method that will display our the universe as current constructed: universe.plot(width=(2.0, 2.0)) # By default, the plot will appear in the $x$-$y$ plane. We can change that with the `basis` argument. universe.plot(width=(2.0, 2.0), basis='xz') # If we have particular fondness for, say, fuchsia, we can tell the `plot()` method to make our cell that color. 
universe.plot(width=(2.0, 2.0), basis='xz', colors={cell: 'fuchsia'}) # ### Pin cell geometry # # We now have enough knowledge to create our pin-cell. We need three surfaces to define the fuel and clad: # # 1. The outer surface of the fuel -- a cylinder parallel to the z axis # 2. The inner surface of the clad -- same as above # 3. The outer surface of the clad -- same as above # # These three surfaces will all be instances of `openmc.ZCylinder`, each with a different radius according to the specification. fuel_outer_radius = openmc.ZCylinder(r=0.39) clad_inner_radius = openmc.ZCylinder(r=0.40) clad_outer_radius = openmc.ZCylinder(r=0.46) # With the surfaces created, we can now take advantage of the built-in operators on surfaces to create regions for the fuel, the gap, and the clad: fuel_region = -fuel_outer_radius gap_region = +fuel_outer_radius & -clad_inner_radius clad_region = +clad_inner_radius & -clad_outer_radius # Now we can create corresponding cells that assign materials to these regions. As with materials, cells have unique IDs that are assigned either manually or automatically. Note that the gap cell doesn't have any material assigned (it is void by default). # + fuel = openmc.Cell(name='fuel') fuel.fill = uo2 fuel.region = fuel_region gap = openmc.Cell(name='air gap') gap.region = gap_region clad = openmc.Cell(name='clad') clad.fill = zirconium clad.region = clad_region # - # Finally, we need to handle the coolant outside of our fuel pin. To do this, we create x- and y-planes that bound the geometry. pitch = 1.26 left = openmc.XPlane(x0=-pitch/2, boundary_type='reflective') right = openmc.XPlane(x0=pitch/2, boundary_type='reflective') bottom = openmc.YPlane(y0=-pitch/2, boundary_type='reflective') top = openmc.YPlane(y0=pitch/2, boundary_type='reflective') # The water region is going to be everything outside of the clad outer radius and within the box formed as the intersection of four half-spaces. 
# + water_region = +left & -right & +bottom & -top & +clad_outer_radius moderator = openmc.Cell(name='moderator') moderator.fill = water moderator.region = water_region # - # OpenMC also includes a factory function that generates a rectangular prism that could have made our lives easier. box = openmc.rectangular_prism(width=pitch, height=pitch, boundary_type='reflective') type(box) # Pay attention here -- the object that was returned is NOT a surface. It is actually the intersection of four surface half-spaces, just like we created manually before. Thus, we don't need to apply the unary operator (`-box`). Instead, we can directly combine it with `+clad_or`. water_region = box & +clad_outer_radius # The final step is to assign the cells we created to a universe and tell OpenMC that this universe is the "root" universe in our geometry. The `Geometry` is the final object that is actually exported to XML. # + root_universe = openmc.Universe(cells=(fuel, gap, clad, moderator)) geometry = openmc.Geometry() geometry.root_universe = root_universe # or... geometry = openmc.Geometry(root_universe) geometry.export_to_xml() # !cat geometry.xml # - # ## Starting source and settings # # The Python API has a module ``openmc.stats`` with various univariate and multivariate probability distributions. We can use these distributions to create a starting source using the ``openmc.Source`` object. # Create a point source point = openmc.stats.Point((0, 0, 0)) source = openmc.Source(space=point) # Now let's create a `Settings` object and give it the source we created along with specifying how many batches and particles we want to run. settings = openmc.Settings() settings.source = source settings.batches = 100 settings.inactive = 10 settings.particles = 1000 settings.export_to_xml() # !cat settings.xml # ## User-defined tallies # # We actually have all the *required* files needed to run a simulation. Before we do that though, let's give a quick example of how to create tallies. 
We will show how one would tally the total, fission, absorption, and (n,$\gamma$) reaction rates for $^{235}$U in the cell containing fuel. Recall that filters allow us to specify *where* in phase-space we want events to be tallied and scores tell us *what* we want to tally: # # $$X = \underbrace{\int d\mathbf{r} \int d\mathbf{\Omega} \int dE}_{\text{filters}} \; \underbrace{f(\mathbf{r},\mathbf{\Omega},E)}_{\text{scores}} \psi (\mathbf{r},\mathbf{\Omega},E)$$ # # In this case, the *where* is "the fuel cell". So, we will create a cell filter specifying the fuel cell. # + cell_filter = openmc.CellFilter(fuel) tally = openmc.Tally(1) tally.filters = [cell_filter] # - # The *what* is the total, fission, absorption, and (n,$\gamma$) reaction rates in $^{235}$U. By default, if we only specify what reactions, it will gives us tallies over all nuclides. We can use the `nuclides` attribute to name specific nuclides we're interested in. tally.nuclides = ['U235'] tally.scores = ['total', 'fission', 'absorption', '(n,gamma)'] # Similar to the other files, we need to create a `Tallies` collection and export it to XML. tallies = openmc.Tallies([tally]) tallies.export_to_xml() # !cat tallies.xml # ## Running OpenMC # # Running OpenMC from Python can be done using the `openmc.run()` function. This function allows you to set the number of MPI processes and OpenMP threads, if need be. openmc.run() # Great! OpenMC already told us our k-effective. It also spit out a file called `tallies.out` that shows our tallies. This is a very basic method to look at tally data; for more sophisticated methods, see other example notebooks. # !cat tallies.out # ## Geometry plotting # # We saw before that we could call the `Universe.plot()` method to show a universe while we were creating our geometry. There is also a built-in plotter in the codebase that is much faster than the Python plotter and has more options. The interface looks somewhat similar to the `Universe.plot()` method. 
Instead though, we create `Plot` instances, assign them to a `Plots` collection, export it to XML, and then run OpenMC in geometry plotting mode. As an example, let's specify that we want the plot to be colored by material (rather than by cell) and we assign yellow to fuel and blue to water. plot = openmc.Plot() plot.filename = 'pinplot' plot.width = (pitch, pitch) plot.pixels = (200, 200) plot.color_by = 'material' plot.colors = {uo2: 'yellow', water: 'blue'} # With our plot created, we need to add it to a `Plots` collection which can be exported to XML. plots = openmc.Plots([plot]) plots.export_to_xml() # !cat plots.xml # Now we can run OpenMC in plotting mode by calling the `plot_geometry()` function. Under the hood this is calling `openmc --plot`. openmc.plot_geometry() # OpenMC writes out a peculiar image with a `.ppm` extension. If you have ImageMagick installed, this can be converted into a more normal `.png` file. # !convert pinplot.ppm pinplot.png # We can use functionality from IPython to display the image inline in our notebook: from IPython.display import Image Image("pinplot.png") # That was a little bit cumbersome. Thankfully, OpenMC provides us with a method on the `Plot` class that does all that "boilerplate" work. plot.to_ipython_image()
examples/jupyter/pincell.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example of creating an SVG diagram

# The ProgramAnalysis module creates an SVG diagram with links to demonstrate a function.
# The left-hand side has boxes that represent the input and have links to the input in
# text form. The central box represents the action or function being done and has a link
# to the code of the function. The rightmost boxes represent the output and have a link
# to the text version of the output. The output can have an optional transformation
# applied to it before display (see `output_transformation_function` below).

from pyMez.Code.DataHandlers.Translations import S2PV1_to_XmlDataTable
from pyMez.Code.Analysis.ProgramAnalysis import *

# +
# to display this diagram it is good to have an html form first
new_svg = create_svg_black_box_diagram(
    inputs={"s2p": S2PV1(os.path.join(TESTS_DIRECTORY, "thru.s2p"))},
    outputs=["S2P_as_xml"], function=S2PV1_to_XmlDataTable,
    output_transformation_function=lambda x: x.to_HTML(
        os.path.join(TESTS_DIRECTORY, '../XSL/S2P_DB_STYLE.xsl')),
    output_mime_type="text/html", box_text_ratio=15)
new_html = HTMLBase()
new_html.add_head()
new_html.add_body()
new_html.append_to_body({"tag": "br"})
new_html.append_to_body(new_svg)
new_html.append_to_head({"tag": "style", "text": "rect:hover {stroke-width:8;stroke:blue;}"})

# +
# from IPython.display import SVG,HTML
# HTML(str(new_html))
# -

# this shows the html file in your browser
new_html.show()

# this works for any function
black_box = create_svg_black_box_diagram(
    inputs={"list_of_strings": ["This is one string", "This is number two"]},
    function=string_list_collapse, outputs=["Collapsed String"])
new_html.append_to_body(black_box)
new_html.show()
Documentation/Examples/html/.ipynb_checkpoints/ProgramAnalysis_Example-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Code used to train the RapidEye regular models # # The models were trained in Google Colaboratory Virtual Environment, thus, to work properly, this notebook should be loaded in google drive. # # * [32x32 models](#32-x-32-models) # # * [64x64 models](#64-x-64-models) # # * [128x128 models](#128-x-128-models) # + # Import libraries import tensorflow as tf from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime from packaging import version # %tensorflow_version 2.x from tensorflow import keras from tensorflow.keras.models import * from tensorflow.keras.layers import * from tensorflow.keras.optimizers import * import numpy as np import pandas as pd import matplotlib.pyplot as plt print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." 
# - # Install segmetation_models library (https://github.com/qubvel/segmentation_models) pip install segmentation_models # Load segmentation)models library import segmentation_models as sm # #### 32 x 32 models # + # Load training data 32x32 - regular - the Strings are the directions to the .npy files in google drive X_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/non_augmented/32_32/regular/arrays/X_train_32_regular.npy") Y_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/non_augmented/32_32/regular/arrays/Y_train_32_regular.npy") # Load test data - Area 1 X_test_area_1 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_1/arrays/X_test_test_area_1.npy") Y_test_area_1 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_1/arrays/Y_test_test_area_1.npy") # Load test data - Area 2 X_test_area_2 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_2/arrays/X_test_test_area_2.npy") Y_test_area_2 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_2/arrays/Y_test_test_area_2.npy") # - # Evaluate data dimensions print(f"X_train shape: {X_train.shape}, Y_train shape: {Y_train.shape}\nX_test_area_1 shape: {X_test_area_1.shape}, Y_test_area_1 shape: {Y_test_area_1.shape},\nX_test_area_2 shape: {X_test_area_2.shape}, Y_test_area_2 shape: {Y_test_area_2.shape}") # Evaluation Metrics - Precision, Recall, FScore, IoU metrics = [sm.metrics.Precision(threshold=0.5),sm.metrics.Recall(threshold=0.5),sm.metrics.FScore(threshold=0.5,beta=1),sm.metrics.IOUScore(threshold=0.5)] # Unet Architecture def Unet_Original(lr,filtersFirstLayer, pretrained_weights = None,input_size = (32,32,5)): inputs = Input(input_size) conv1 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(inputs) conv1 = 
Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv1) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool1) conv2 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv2) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool2) conv3 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv3) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool3) conv4 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) conv5 = Conv2D(filtersFirstLayer*16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool4) conv5 = Conv2D(filtersFirstLayer*16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv5) up6 = Conv2D(filtersFirstLayer*8, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv5)) merge6 = concatenate([conv4,up6], axis = 3) conv6 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge6) conv6 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv6) up7 = Conv2D(filtersFirstLayer*4, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv6)) merge7 = concatenate([conv3,up7], axis = 3) conv7 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', 
kernel_initializer = 'glorot_normal')(merge7) conv7 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv7) up8 = Conv2D(filtersFirstLayer*2, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv7)) merge8 = concatenate([conv2,up8], axis = 3) conv8 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge8) conv8 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv8) up9 = Conv2D(filtersFirstLayer, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv8)) merge9 = concatenate([conv1,up9], axis = 3) conv9 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge9) conv9 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv9) conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv9) conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9) model = Model(inputs, conv10) model.compile(optimizer = Adam(lr = lr), loss = 'binary_crossentropy', metrics = metrics) model.summary() if(pretrained_weights): model.load_weights(pretrained_weights) return model # + # Model training - Results are saved in a .csv file # size of the tiles size = 32 # Sampling method sampling = "regular" # number of filters filters = [16,32,64] # lr = 0.001 lr = [10e-4] # batch sizes batch_size = [16,32,64,128] # dictionary that will save the results dic = {} # Hyperparameters dic["model"] = [] dic["batch_size"] = [] dic["learning_rate"] = [] dic["filters"] = [] # test area 1 dic["precision_area_1"] = [] dic["recall_area_1"] = [] dic["f1_score_area_1"] = [] dic["iou_score_area_1"] = [] # test area 2 dic["precision_area_2"] = [] dic["recall_area_2"] = [] 
dic["f1_score_area_2"] = [] dic["iou_score_area_2"] = [] # loop over all the filters in the filter list for fiilter in filters: # loop over the learning rates (used to evalute 0.01 and 0.0001 without good results) for learning_rate in lr: # loop over all batch sizes in batch_size list for batch in batch_size: # load the model model = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate) # Save the models only when validation loss decrease model_checkpoint = tf.keras.callbacks.ModelCheckpoint(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5', monitor='val_loss', mode='min',verbose=1, save_best_only=True,save_weights_only = True) # Stop after 20 epochs without decreasing the validation loss early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) print(fiilter, learning_rate,batch) # fit the model 30% of the dataset was used as validation history = model.fit(X_train,Y_train,batch_size = batch,epochs=200,validation_split=0.3,callbacks=[model_checkpoint, early_stopping]) # summarize history for iou score plt.plot(history.history['iou_score']) plt.plot(history.history['val_iou_score']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') # save plots plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_iou_score.png") plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(f"/content/drive/My 
Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_val_loss.png") plt.show() # load unet to evaluate the test data unet_original = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate,input_size=(1024,1024,5)) # load the last saved weight from the training unet_original.load_weights(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5") # Evaluate test area 1 res_1 = unet_original.evaluate(X_test_area_1,Y_test_area_1) # Evaluate test area 2 res_2 = unet_original.evaluate(X_test_area_2,Y_test_area_2) # Data to plot the predicted output preds_train_1 = unet_original.predict(X_test_area_1, verbose=1) preds_train_t1 = (preds_train_1 > 0.5).astype(np.uint8) preds_train_2 = unet_original.predict(X_test_area_2, verbose=1) preds_train_t2 = (preds_train_2 > 0.5).astype(np.uint8) # save results on the dictionary dic["model"].append("Unet") dic["batch_size"].append(batch) dic["learning_rate"].append(learning_rate) dic["filters"].append(fiilter) dic["precision_area_1"].append(res_1[1]) dic["recall_area_1"].append(res_1[2]) dic["f1_score_area_1"].append(res_1[3]) dic["iou_score_area_1"].append(res_1[4]) dic["precision_area_2"].append(res_2[1]) dic["recall_area_2"].append(res_2[2]) dic["f1_score_area_2"].append(res_2[3]) dic["iou_score_area_2"].append(res_2[4]) # Plot the results and save the plots f, axarr = plt.subplots(2,3,figsize=(10,10)) axarr[0,0].imshow(X_test_area_1[0][:,:,:3]) axarr[0,1].imshow(np.squeeze(preds_train_t1[0])) axarr[0,2].imshow(np.squeeze(Y_test_area_1[0])) axarr[1,0].imshow(X_test_area_2[0][:,:,:3]) axarr[1,1].imshow(np.squeeze(preds_train_t2[0])) axarr[1,2].imshow(np.squeeze(Y_test_area_2[0])) f.savefig(f"/content/drive/My 
Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/images/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_result.png") # Convert results to a dataframe results = pd.DataFrame(dic) # Export as csv results.to_csv(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/result_table/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}.csv', index = False) # - # #### 64 x 64 models # Load training data 32x32 - regular - the Strings are the directions to the .npy files in google drive X_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/non_augmented/64_64/regular/arrays/X_train_64_regular.npy") Y_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/non_augmented/64_64/regular/arrays/Y_train_64_regular.npy") # Evaluate data dimensions print(f"X_train shape: {X_train.shape}, Y_train shape: {Y_train.shape}\nX_test_area_1 shape: {X_test_area_1.shape}, Y_test_area_1 shape: {Y_test_area_1.shape},\nX_test_area_2 shape: {X_test_area_2.shape}, Y_test_area_2 shape: {Y_test_area_2.shape}") # + # Model training - Results are saved in a .csv file # size of the tiles size = 64 # Sampling method sampling = "regular" # number of filters filters = [16,32,64] # lr = 0.001 lr = [10e-4] # batch sizes batch_size = [16,32,64,128] # dictionary that will save the results dic = {} # Hyperparameters dic["model"] = [] dic["batch_size"] = [] dic["learning_rate"] = [] dic["filters"] = [] # test area 1 dic["precision_area_1"] = [] dic["recall_area_1"] = [] dic["f1_score_area_1"] = [] dic["iou_score_area_1"] = [] # test area 2 dic["precision_area_2"] = [] dic["recall_area_2"] = [] dic["f1_score_area_2"] = [] dic["iou_score_area_2"] = [] # loop over all the filters in the filter list for fiilter in filters: # loop over the learning rates (used to evalute 0.01 and 0.0001 without good results) for learning_rate in lr: # loop 
over all batch sizes in batch_size list for batch in batch_size: # load the model model = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate, input_size = (64,64,5)) # Save the models only when validation loss decrease model_checkpoint = tf.keras.callbacks.ModelCheckpoint(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5', monitor='val_loss', mode='min',verbose=1, save_best_only=True,save_weights_only = True) # Stop after 20 epochs without decreasing the validation loss early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) print(fiilter, learning_rate,batch) # fit the model 30% of the dataset was used as validation history = model.fit(X_train,Y_train,batch_size = batch,epochs=200,validation_split=0.3,callbacks=[model_checkpoint, early_stopping]) # summarize history for iou score plt.plot(history.history['iou_score']) plt.plot(history.history['val_iou_score']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') # save plots plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_iou_score.png") plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_val_loss.png") plt.show() # load unet to evaluate the test data unet_original = Unet_Original(filtersFirstLayer= fiilter, lr = 
learning_rate,input_size=(1024,1024,5)) # load the last saved weight from the training unet_original.load_weights(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5") # Evaluate test area 1 res_1 = unet_original.evaluate(X_test_area_1,Y_test_area_1) # Evaluate test area 2 res_2 = unet_original.evaluate(X_test_area_2,Y_test_area_2) # Data to plot the predicted output preds_train_1 = unet_original.predict(X_test_area_1, verbose=1) preds_train_t1 = (preds_train_1 > 0.5).astype(np.uint8) preds_train_2 = unet_original.predict(X_test_area_2, verbose=1) preds_train_t2 = (preds_train_2 > 0.5).astype(np.uint8) # save results on the dictionary dic["model"].append("Unet") dic["batch_size"].append(batch) dic["learning_rate"].append(learning_rate) dic["filters"].append(fiilter) dic["precision_area_1"].append(res_1[1]) dic["recall_area_1"].append(res_1[2]) dic["f1_score_area_1"].append(res_1[3]) dic["iou_score_area_1"].append(res_1[4]) dic["precision_area_2"].append(res_2[1]) dic["recall_area_2"].append(res_2[2]) dic["f1_score_area_2"].append(res_2[3]) dic["iou_score_area_2"].append(res_2[4]) # Plot the results and save the plots f, axarr = plt.subplots(2,3,figsize=(10,10)) axarr[0,0].imshow(X_test_area_1[0][:,:,:3]) axarr[0,1].imshow(np.squeeze(preds_train_t1[0])) axarr[0,2].imshow(np.squeeze(Y_test_area_1[0])) axarr[1,0].imshow(X_test_area_2[0][:,:,:3]) axarr[1,1].imshow(np.squeeze(preds_train_t2[0])) axarr[1,2].imshow(np.squeeze(Y_test_area_2[0])) f.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/images/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_result.png") # Convert results to a dataframe results = pd.DataFrame(dic) # Export as csv results.to_csv(f'/content/drive/My 
Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/result_table/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}.csv', index = False) # - # #### 128 x 128 models # Load training data 32x32 - regular - the Strings are the directions to the .npy files in google drive X_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/non_augmented/128_128/regular/arrays/X_train_128_regular.npy") Y_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/non_augmented/128_128/regular/arrays/Y_train_128_regular.npy") # Evaluate data dimensions print(f"X_train shape: {X_train.shape}, Y_train shape: {Y_train.shape}\nX_test_area_1 shape: {X_test_area_1.shape}, Y_test_area_1 shape: {Y_test_area_1.shape},\nX_test_area_2 shape: {X_test_area_2.shape}, Y_test_area_2 shape: {Y_test_area_2.shape}") # + # Model training - Results are saved in a .csv file # size of the tiles size = 128 # Sampling method sampling = "regular" # number of filters filters = [16,32,64] # lr = 0.001 lr = [10e-4] # batch sizes batch_size = [16,32,64,128] # dictionary that will save the results dic = {} # Hyperparameters dic["model"] = [] dic["batch_size"] = [] dic["learning_rate"] = [] dic["filters"] = [] # test area 1 dic["precision_area_1"] = [] dic["recall_area_1"] = [] dic["f1_score_area_1"] = [] dic["iou_score_area_1"] = [] # test area 2 dic["precision_area_2"] = [] dic["recall_area_2"] = [] dic["f1_score_area_2"] = [] dic["iou_score_area_2"] = [] # loop over all the filters in the filter list for fiilter in filters: # loop over the learning rates (used to evalute 0.01 and 0.0001 without good results) for learning_rate in lr: # loop over all batch sizes in batch_size list for batch in batch_size: # load the model model = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate, input_size = (128,128,5)) # Save the models only when validation loss decrease model_checkpoint = 
tf.keras.callbacks.ModelCheckpoint(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5', monitor='val_loss', mode='min',verbose=1, save_best_only=True,save_weights_only = True) # Stop after 20 epochs without decreasing the validation loss early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) print(fiilter, learning_rate,batch) # fit the model 30% of the dataset was used as validation history = model.fit(X_train,Y_train,batch_size = batch,epochs=200,validation_split=0.3,callbacks=[model_checkpoint, early_stopping]) # summarize history for iou score plt.plot(history.history['iou_score']) plt.plot(history.history['val_iou_score']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') # save plots plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_iou_score.png") plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_val_loss.png") plt.show() # load unet to evaluate the test data unet_original = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate,input_size=(1024,1024,5)) # load the last saved weight from the training unet_original.load_weights(f"/content/drive/My 
Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5") # Evaluate test area 1 res_1 = unet_original.evaluate(X_test_area_1,Y_test_area_1) # Evaluate test area 2 res_2 = unet_original.evaluate(X_test_area_2,Y_test_area_2) # Data to plot the predicted output preds_train_1 = unet_original.predict(X_test_area_1, verbose=1) preds_train_t1 = (preds_train_1 > 0.5).astype(np.uint8) preds_train_2 = unet_original.predict(X_test_area_2, verbose=1) preds_train_t2 = (preds_train_2 > 0.5).astype(np.uint8) # save results on the dictionary dic["model"].append("Unet") dic["batch_size"].append(batch) dic["learning_rate"].append(learning_rate) dic["filters"].append(fiilter) dic["precision_area_1"].append(res_1[1]) dic["recall_area_1"].append(res_1[2]) dic["f1_score_area_1"].append(res_1[3]) dic["iou_score_area_1"].append(res_1[4]) dic["precision_area_2"].append(res_2[1]) dic["recall_area_2"].append(res_2[2]) dic["f1_score_area_2"].append(res_2[3]) dic["iou_score_area_2"].append(res_2[4]) # Plot the results and save the plots f, axarr = plt.subplots(2,3,figsize=(10,10)) axarr[0,0].imshow(X_test_area_1[0][:,:,:3]) axarr[0,1].imshow(np.squeeze(preds_train_t1[0])) axarr[0,2].imshow(np.squeeze(Y_test_area_1[0])) axarr[1,0].imshow(X_test_area_2[0][:,:,:3]) axarr[1,1].imshow(np.squeeze(preds_train_t2[0])) axarr[1,2].imshow(np.squeeze(Y_test_area_2[0])) f.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/images/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_result.png") # Convert results to a dataframe results = pd.DataFrame(dic) # Export as csv results.to_csv(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/non_augmented/{size}_{size}/{sampling}/result_table/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}.csv', index = False)
Landslide_Segmentation_with_U-Net_Landslide_Segmentation_with_U-Net:_Evaluating_Different_Sampling_Methods_and_Patch_Sizes/notebooks/Training_regular_RapidEye.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Author: <NAME>
# ### Reg No: 20MAI0044
# ### Deep Learning CSE6037-Lab2

# ### Activity 2 - Write a code to filter the given image using the following size of the filters

# ***1: filter size(3,3) and (5,5)\
# 2: filter size(3,3) and (5,5),stride=2\
# 3: filter size(3,3) and (5,5),stride=1,zero-padding\
# 4: Calculate the metric: entropy***

# Importing Necessary Libraries
import cv2
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image

# +
# 1: filter size (3,3) and (5,5)
img = Image.open("image.jpg")
img = np.array(img)

kernel1 = np.ones((3,3),np.float32)/9
dst1 = cv2.filter2D(img,-1,kernel1)

kernel2 = np.ones((5,5),np.float32)/25
dst2 = cv2.filter2D(img,-1,kernel2)

plt.figure(figsize=(20,10))
plt.subplot(221),plt.imshow(img),plt.title('Original')
plt.subplot(222),plt.imshow(dst1),plt.title('Averaging - 3*3')
plt.subplot(223),plt.imshow(dst2),plt.title('Averaging - 5*5')
plt.show()
# -

# ## Note: the larger the averaging filter we apply, the more averaging occurs and the more blurred the image becomes

# ***Knowing the math behind the convolution***

def convolve2D(image, kernel, padding=0, strides=1):
    """Convolve `image` with `kernel` and return a single-channel output.

    The kernel is flipped first, so this is a true convolution rather than a
    cross-correlation.  `padding` adds a zero border of that width on every
    side; `strides` subsamples the output.  For multi-channel input the 2-D
    kernel broadcasts over the channel axis and `.sum()` collapses it, as in
    the original code.

    Fixes over the original tutorial implementation:
      * output positions are enumerated directly, so strided and padded calls
        fill every output cell (the original indexed ``output[x, y]`` with
        unstrided coordinates and silently truncated on IndexError through a
        bare ``except: break``);
      * zero-padding now also works for multi-channel images;
      * the stray debug ``print(imagePadded)`` is gone.
    """
    # Cross-correlation with a flipped kernel == convolution
    kernel = np.flipud(np.fliplr(kernel))

    # Gather shapes of kernel and (possibly padded) image
    xKernShape, yKernShape = kernel.shape[0], kernel.shape[1]
    if padding > 0:
        # Apply equal zero-padding to all sides; channel axes are untouched
        padded_shape = (image.shape[0] + 2 * padding,
                        image.shape[1] + 2 * padding) + image.shape[2:]
        imagePadded = np.zeros(padded_shape)
        imagePadded[padding:-padding, padding:-padding] = image
    else:
        imagePadded = image

    # Shape of the output convolution
    xOutput = (imagePadded.shape[0] - xKernShape) // strides + 1
    yOutput = (imagePadded.shape[1] - yKernShape) // strides + 1
    output = np.zeros((xOutput, yOutput))

    # Iterate directly over output coordinates
    for j in range(yOutput):
        y = j * strides
        for i in range(xOutput):
            x = i * strides
            output[i, j] = (kernel * imagePadded[x:x + xKernShape, y:y + yKernShape]).sum()
    return output

# +
# 2: filter size (3,3) and (5,5), stride=2
kernel1 = np.ones((3,3),np.float32)/9
kernel2 = np.ones((5,5),np.float32)/25

dst3 = convolve2D(img, kernel1, strides=2)
dst4 = convolve2D(img, kernel2, strides=2)

plt.figure(figsize=(20,10))
plt.subplot(121),plt.imshow(dst3),plt.title('Averaging - 3*3')
plt.subplot(122),plt.imshow(dst4),plt.title('Averaging - 5*5')
plt.show()
# -

# +
# 3: filter size (3,3) and (5,5), stride=1, zero-padding
# (cv2, numpy, matplotlib and PIL are already imported above)
img = Image.open("image.jpg")
img = np.array(img)
img = np.pad(img, pad_width=[(6, 6),(6, 6),(0, 0)], mode='constant')

kernel1 = np.ones((3,3),np.float32)/9
dst5 = cv2.filter2D(img,-1,kernel1)

kernel2 = np.ones((5,5),np.float32)/25
dst6 = cv2.filter2D(img,-1,kernel2)

plt.figure(figsize=(20,10))
plt.subplot(121),plt.imshow(dst5),plt.title('Averaging - 3*3')
plt.subplot(122),plt.imshow(dst6),plt.title('Averaging - 5*5')
plt.show()
# -

# +
# Additional --- edge detection with the hand-written convolution
def processImage(image):
    """Read `image` from disk and return it as a grayscale array."""
    image = cv2.imread(image)
    image = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2GRAY)
    return image

image = processImage('image.jpg')

# Edge Detection Kernel
kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
#kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
#kernel = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])

# Convolve and Save Output
output = convolve2D(image, kernel, padding=2)
plt.imshow(output)
# -

# +
# 4:
# Calculating image entropies
from numpy import unique
from scipy.stats import entropy as scipy_entropy


def entropy(image, base=2):
    """Shannon entropy (default base 2) of the distribution of pixel values in `image`."""
    _, pixel_counts = unique(image, return_counts=True)
    return scipy_entropy(pixel_counts, base=base)


print("Entropy of original image: ", entropy(img))
print("Entropy of dst1: ", entropy(dst1))
print("Entropy of dst2: ", entropy(dst2))
print("Entropy of dst3: ", entropy(dst3))
print("Entropy of dst4: ", entropy(dst4))
print("Entropy of dst5: ", entropy(dst5))
print("Entropy of dst6: ", entropy(dst6))
print("Entropy of output: ", entropy(output))
# -

# ## Here we can see that the entropy of the 4th processed image is 0.
Lab_Assignment_2/Assignment2_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.0
#     language: julia
#     name: julia-1.6
# ---

# # Basic linear algebra in Julia
# Author: <NAME> (MIT & JuliaComputing) (https://twitter.com/anoackjensen?lang=en)
# (with edits from <NAME>)

# First let's define a random matrix
A = rand(1:4,3,3)

# Define a vector of ones
x = fill(1.0, (3,)) # = fill(1.0, 3)

# Notice that $A$ has type Array{Int64,2} but $x$ has type Array{Float64,1}.
# Julia defines the aliases Vector{Type}=Array{Type,1} and Matrix{Type}=Array{Type,2}.
#
# Many of the basic operations are the same as in other languages

# #### Multiplication
b = A*x

# #### Transposition
# As in other languages `A'` is the conjugate transpose, or adjoint
A'

# and we can get the transpose with
transpose(A)

# #### Transposed multiplication
# Julia allows us to write this without *
A'A

# #### Solving linear systems
# The problem $Ax=b$ for ***square*** $A$ is solved by the \ function.
A\b

# `A\b` gives us the *least squares solution* if we have an overdetermined linear system (a "tall" matrix)
Atall = rand(3, 2)
Atall\b

# and the *minimum norm least squares solution* if we have a rank-deficient least squares problem
v = rand(3)
rankdef = hcat(v, v)
rankdef\b

# Julia also gives us the minimum norm solution when we have an underdetermined solution (a "short" matrix)
bshort = rand(2)
Ashort = rand(2, 3)
Ashort\bshort

# # The LinearAlgebra library
#
# While much of linear algebra is available in Julia by default (as shown above), there's a
# standard library named `LinearAlgebra` that brings in many more relevant names and functions.
# In particular, it provides factorizations and some structured matrix types. As with all
# packages, you can bring these additional features into your session with a `using LinearAlgebra`.

# ### Exercises
#
# #### 10.1
# Take the inner product (or "dot" product) of a vector `v` with itself and assign it to variable `dot_v`.
#
# v = [1,2,3]

# + deletable=false editable=false hide_input=true nbgrader={"checksum": "b93dad361f66498eb2460d708f674220", "grade": true, "grade_id": "cell-913fef9b0d19cd52", "locked": true, "points": 1, "schema_version": 1, "solution": false}
@assert dot_v == 14
# -

# #### 10.2
# Take the outer product of a vector v with itself and assign it to variable `outer_v`

@assert outer_v == [1 2 3
                    2 4 6
                    3 6 9]

# + deletable=false editable=false hide_input=true nbgrader={"checksum": "01642581e27c0ac19752cd90d11ac2ae", "grade": true, "grade_id": "cell-e6b6970ffe104df5", "locked": true, "points": 1, "schema_version": 1, "solution": false}
@assert cross_v == [0, 0, 0]
# -

# Please click on `Validate` on the top, once you are done with the exercises.
10 - Basic linear algebra.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# imports
# NOTE(fix): sys, os, numpy and the astropy fits/Table names were used below
# but never imported, which made this notebook fail with NameError.
import os
import sys

import numpy as np
from astropy.io import fits
from astropy.table import Table
from scipy.io import readsav
from scipy import interpolate

sys.path.append(os.path.abspath('/Users/xavier/local/Python/PYPIT/src/'))

from linetools import utils as ltu
from linetools.spectra import xspectrum1d

import armsgs
import ardebug
debug = ardebug.init()
last_updated = "2 May 2016"
version = '0.1'
msgs = armsgs.get_logger((None, debug, last_updated, version, 1))

import arwave as arwv
import arutils

try:
    from xastropy.xutils import xdebug as debugger
except:
    import pdb as debugger
# -

# # Load 1D spectrum

spec1d_fil = '/Users/xavier/PYPIT/Tiffany/lris_red/Science/spec1d_OFF_J1044p6306_LRISr_2016Feb16T112439.fits'
hdu = fits.open(spec1d_fil)
hdu.info()

h2_obj = Table(hdu[5].data)

# +
#debugger.xplot(h2_obj['box_sky'])
# -

# ## Run Algorithm

# Build a minimal PYPIT "self" object carrying only the flags the flexure
# code reads.
slf = arutils.dummy_self(pypitdir=os.getenv('PYPIT'))
slf._pixcen = np.zeros(10)
slf._lordloc = 0
slf._rordloc = 0
slf._argflag['arc'] = {}
slf._argflag['arc']['calibrate'] = {}
slf._argflag['arc']['calibrate']['detection'] = 5.
slf._argflag['arc']['calibrate']['nfitpix'] = 7.
#
slf._argflag['reduce'] = {}
slf._argflag['reduce']['flexure'] = {}
slf._argflag['reduce']['flexure']['spectrum'] = None
slf._argflag['reduce']['flexure']['maxshift'] = 20.
#
# NOTE(review): 'run' is not initialised above — presumably dummy_self
# provides it; confirm, else this raises KeyError.
slf._argflag['run']['spectrograph'] = 'LRISr'
#
msgs._debug['flexure'] = True

# Load archive
reload(arwv)  # Python-2 builtin; under Python 3 use importlib.reload
arx_file, arx_spec = arwv.flexure_archive(slf, 1)

obj_sky = xspectrum1d.XSpectrum1D.from_tuple((h2_obj['box_wave'], h2_obj['box_sky']))

reload(arwv)
flex_dict = arwv.flex_shift(slf, 1, obj_sky, arx_spec)

# ## Shift wavelengths

# Interpolate the wavelength solution on a normalised pixel grid and apply
# the measured flexure shift (in pixels).
x = np.linspace(0., 1., obj_sky.npix)
f = interpolate.interp1d(x, obj_sky.wavelength.value,
                         bounds_error=False, fill_value="extrapolate")
twave = f(x + flex_dict['shift'] / (obj_sky.npix - 1))
new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, obj_sky.flux))

# ## Compare to Paranal

pfile = '/Users/xavier/local/Python/PYPIT/data/sky_spec/paranal_sky.fits'
phdu = fits.open(pfile)
pwave = phdu[0].data
pflux = phdu[1].data

debugger.xplot(new_sky.wavelength, h2_obj['box_sky'], xtwo=pwave, ytwo=pflux*8)
doc/nb/Flexure_Tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Adaptive PDE discretizations on cartesian grids # ## Volume : Non-divergence form PDEs # ## Part : Monotone numerical schemes # ## Chapter : PDEs with a second order non-linearity # # This notebook illustrates the use of monotone finite difference schemes to compute viscosity solutions of non-linear PDEs, in two space dimensions. # We consider Pucci's operator # $$ # \Lambda u(x) := \alpha(x) \lambda_{\max}(\nabla^2 u(x)) + \lambda_{\min}(\nabla^2 u(x)) # $$ # in the PDE # $$ # {-} \Lambda u(x) + \beta(x) = 0, # $$ # with Dirichlet boundary conditions. The PDE parameters are a positive function $\alpha$, and an arbitrary function $\beta$. # We denote by $\lambda_{\max}(M)$ and $\lambda_{\min}(M)$ the largest and smallest eigenvalue of a positive definite tensor $M$. More details on this problem below. # # We design two monotone numerical schemes: # * The first sheme, based on a discretization of the space of controls, is simple to implement. However it is quite costly numerically, and it induces a consistency defect. # * The second scheme is second order consistent and possibly cheaper numerically. However, implementation details are more subtle. # # The two schemes involves adaptive stencils, built using techniques from lattice geometry. The techniques developed are fairly general, and can be applied to a wide range of non-linear PDEs. Numerical implementation is kept simple thanks to the use of automatic differentiation. # ## Discretization of the PDE # # ### Reformulation as an extremal operator # # Assume without loss of generality that $\alpha \leq 1$. 
Then for any positive definite matrix $M$ one has # $$ # \alpha \lambda_{\max}(M) + \lambda_{\min}(M) = \min_{0 \leq \theta \leq \pi} \mathrm{Tr}(D_\alpha(\theta) M), # $$ # where we denoted, with $e(\theta) := (\cos \theta, \sin \theta)$ # $$ # D_\alpha(\theta) = \alpha\, e(\theta) e(\theta)^T + e(\theta)^\perp (e(\theta)^\perp)^T, # $$ # the symmetric matrix whose eigenvalues are $\alpha$ and $1$, the former associated with the eigenvector $e(\theta)$. # # **Remark on the range of the variable $\theta$.** # For any $\theta\in \mathbb R$, one has $e(\theta+\pi) = -e(\theta)$, and therefore $D_\alpha(\theta+\pi) = D_\alpha(\theta)$. By periodicity, we may therefore limit our attention to the interval $[0,\pi]$. # # **Remark on the case $\alpha\geq 1$.** # This second case is handled by replacing the minimum over $\theta\in [0,\pi]$ with a maximum. This does not induce any additional difficulty from the theoretical or numerical standpoints. However, for the sake of simplicity, we make the assumption that $\alpha\leq 1$ in the following. # # ### A monotone discretization strategy : sampling the control space # # Let $K$ be a positive integer, and let $\theta_1 \leq \cdots \leq \theta_K$ be a sampling of the interval $[0,\pi]$. Then we may consider the approximate operator # $$ # \Lambda_K u(x) := \min_{1 \leq k \leq K} \mathrm{Tr} (D_\alpha(\theta_k) \nabla^2 u(x)). # $$ # Introduce decompositions of the tensors, obtained e.g. by Selling's method, # $$ # D_\alpha(\theta_k) = \sum_{1 \leq i \leq n} \mu_{ki} e_{ki} e_{ki}^T, # $$ # where $\mu_{ki} \geq 0$ and $e_{ki}$ has integer coordinates. Then we obtain the monotone numerical scheme # $$ # \min_{1 \leq k \leq K} \sum_{1 \leq i \leq n} \mu_{ki} \frac{ u(x+h e_{ki}) - 2 u(x) +u(x-h e_{ki})} {h^2}. # $$ # A consistency defect remains, which can be estimated in terms of the width of the sampling $\theta_1,\cdots,\theta_K$ of the control space $[0,\pi]$. 
# # An additional problem is that the numerical scheme cost increases as $K$ increases. # This issue becomes more acute in the case of a multi-dimensional control space. # # ### Another monotone and consistent discretization # # In order to introduce this discretization, we need to recall some elements from lattice geometry. # Selling's decomposition of a tensor $D$ involves a geometrical object, referred to as a *$D$-obtuse superbase* and here denoted # $$ # \mathrm{osb}(D). # $$ # The obtuse superbase $s=\mathrm{osb}(D)$ dictates the support $(e_{si})_{i=1}^n$ of Selling's decomposition of $D$, hence the stencil of the numerical scheme. We can take advantage of this fact to rewrite the operator as # $$ # \Lambda u(x) = \min_{s \in S} \Lambda_s u(x) # $$ # where # $$ # \Lambda_s u(x) := \min_{\theta, \mathrm{osb}(D_\alpha(\theta)) = s} \mathrm{Tr} (D_\alpha(\theta) \nabla^2 u). # $$ # Each operator $\Lambda_s$ admits the consistent discretization # $$ # \Lambda_s u(x) \approx \min_{\theta, \mathrm{osb}(D_\alpha(\theta)) = s} \sum_{1 \leq i \leq n} # \mu_{si}(\theta) \frac{u(x+h e_{si}) - 2 u(x) + u(x-e_{si})} {h^2}, # $$ # and a closed form can be obtained for the r.h.s. by examining a simple optimization problem. # [**Summary**](Summary.ipynb) of volume Non-Divergence form PDEs, this series of notebooks. # # [**Main summary**](../Summary.ipynb) of the Adaptive Grid Discretizations # book of notebooks, including the other volumes. # # # Table of contents # * [1. Non-Monotone discretization (purposedly fails)](#1.-Non-Monotone-discretization-(purposedly-fails)) # * [2. Monotone discretization by sampling of the control space](#2.-Monotone-discretization-by-sampling-of-the-control-space) # * [2.1 Limit case : $\alpha=1$.](#2.1-Limit-case-:-$\alpha=1$.) # * [2.2 Limit case : $\alpha \to 0$](#2.2-Limit-case-:-$\alpha-\to-0$) # * [2.3 Optimization opportunities](#2.3-Optimization-opportunities) # * [3. 
# Monotone and consistent discretization](#3.-Monotone-and-consistent-discretization)
#   * [3.1 Angular sectors and obtuse superbases](#3.1-Angular-sectors-and-obtuse-superbases)
#   * [3.2 Optimization over an angular sector](#3.2-Optimization-over-an-angular-sector)
#   * [3.3 Optimized implementation](#3.3-Optimized-implementation)
#   * [3.4 Non-square domains](#3.4-Non-square-domains)
#  * [4 Validation](#4-Validation)
#   * [4.1 Comparaison of the two schemes](#4.1-Comparaison-of-the-two-schemes)
#   * [4.2 Comparison with automatic differentiation](#4.2-Comparison-with-automatic-differentiation)
#
#
#
# **Acknowledgement.** The experiments presented in these notebooks are part of ongoing research,
# some of it with PhD student <NAME>, in co-direction with <NAME>,
# and PhD student <NAME>, in co-direction with <NAME>.
#
# Copyright <NAME>, University Paris-Sud, CNRS, University Paris-Saclay

# ## 0. Importing the required libraries

import sys; sys.path.insert(0,"..") # Allow imports from parent directory
#from Miscellaneous import TocTools; print(TocTools.displayTOC('NonlinearMonotoneSecond2D','NonDiv'))

# + tags=["ExportCode"]
from agd import Selling
from agd import LinearParallel as lp
from agd import AutomaticDifferentiation as ad
from agd import Domain
from agd.Plotting import savefig; #savefig.dirName = "Figures/NonlinearMonotoneSecond2D"

# + tags=["ExportCode"]
import numpy as np
import matplotlib.pyplot as plt
# -

# Some utility functions

# +
# Short aliases into the agd automatic-differentiation toolbox.
newton_root = ad.Optimization.newton_root
norm = ad.Optimization.norm

def BoundaryNeighborhood(interior, width=1):
    """Boolean mask of cells within `width` steps of the boundary of `interior`.

    Parameters
    ----------
    interior : 2-D boolean array marking the interior of the domain.
    width : number of 4-neighborhood dilations applied to the frontier.

    The outermost frame of the grid is first cleared, then frontier cells
    (cells whose 5-point stencil contains both an interior and a non-interior
    cell) are found, then dilated `width` times.
    """
    bd = interior.copy()
    # The outermost frame of the grid never counts as interior.
    bd[0,:]=False; bd[-1,:]=False; bd[:,0]=False; bd[:,-1]=False
    directions = ( (0,0),(0,1),(0,-1),(1,0),(-1,0) )
    # A frontier cell has both an interior and a non-interior cell in its stencil.
    neigh = np.stack(tuple(np.roll(bd,e,axis=(0,1)) for e in directions),axis=0)
    neigh = np.logical_and(neigh.any(axis=0),np.logical_not(neigh).any(axis=0))
    # Dilate the frontier `width` times with the same stencil.
    for _ in range(width):
        neigh = np.stack(tuple(np.roll(neigh,e,axis=(0,1)) for e in directions),axis=0).any(axis=0)
    return neigh
# -

# ##
# 1. Non-Monotone discretization (purposedly fails)
#
# We present a naive, non-monotone discretization of the addressed problem. This scheme can be
# used to check that a numerical solution (produced by other means) is correct, or to construct
# synthetic examples. However, using it to solve the PDE is usually bound to fail.
#
# The naive scheme is based on a reconstruction of the Hessian matrix of the form
# $$
# \begin{pmatrix}
# D^h_{00} u(x) & D^h_{01} u(x)\\
# D^h_{01} u(x) & D^h_{11} u(x)
# \end{pmatrix},
# $$
# where $D_{00}$, $D_{01}$ and $D_{11}$ are finite-difference operators. Namely
# $$
# D^h_{00} u(x) := \frac{u(x_0+h,x_1)-2 u(x_0,x_1) + u(x_0-h,x_1)}{h^2},
# $$
# likewise for $D^h_{11} u(x)$, and finally
# $$
# D^h_{01} u(x) := \frac{u(x_0+h,x_1+h)-u(x_0-h,x_1+h)-u(x_0+h,x_1-h)+u(x_0-h,x_1-h)}{4 h^2}.
# $$

# + tags=["ExportCode"]
def SchemeNonMonotone(u, α, β, bc, sqrt_relax=1e-6):
    """Residue of the naive (non-monotone) discretization of $-\\Lambda u + \\beta = 0$.

    u          : current solution values on the grid
    α, β       : PDE coefficient fields (see the notebook introduction)
    bc         : Dirichlet boundary-condition object (Diff2, interior, grid_values)
    sqrt_relax : floor under the discriminant, taming the non-differentiability
                 of the square root at zero.
    """
    # Finite-difference reconstruction of the Hessian of u
    uxx = bc.Diff2(u, (1, 0))
    uyy = bc.Diff2(u, (0, 1))
    uxy = 0.25 * (bc.Diff2(u, (1, 1)) - bc.Diff2(u, (1, -1)))

    # Eigenvalues via the trace and the (relaxed) discriminant of the
    # characteristic polynomial
    half_trace = (uxx + uyy) / 2.
    discriminant = ((uxx - uyy) / 2.)**2 + uxy**2
    sqrt_disc = np.sqrt(np.maximum(discriminant, sqrt_relax))
    λ_max = half_trace + sqrt_disc
    λ_min = half_trace - sqrt_disc

    # Scheme residue in the interior, Dirichlet condition elsewhere
    residue = β - α*λ_max - λ_min
    return np.where(bc.interior, residue, u - bc.grid_values)
# -

# Our next step is to define the parameters of our specific problem.
# Regarding the boundary conditions, we set $u=0$ on the square boundary, and $u=-1$ on some
# interior diamond. For well posedness, $\alpha$ must be positive over the domain.
# +
# Create the domain
aX0, dx = np.linspace(-1, 1, 100, retstep=True); aX1 = aX0
X = np.array(np.meshgrid(aX0, aX1, indexing='ij'))

# Set the boundary conditions: NaN marks interior points, -1 the diamond
bc_grid_values = np.full(X.shape[1:], np.nan)
bc_grid_values[ad.Optimization.norm(X, ord=1, axis=0) < 0.4] = -1
bc = Domain.MockDirichlet(bc_grid_values, dx, padding=0.)
# -

plt.contourf(*X, bc_grid_values)

# Choose the PDE parameters
α = 0.01
β = 1

# The naive and non-monotone discretization scheme is consistent, but lacks any other sort of
# theoretical guarantees. It is pure luck that the Newton method does converge in this simple
# instance, and that the result looks reasonable.

params = (α, β, bc); guess = np.zeros(bc.shape);
solution = ad.Optimization.newton_root(SchemeNonMonotone, guess, params)

plt.title("Solution obtained with non-monotone scheme")
plt.contourf(*X, solution); plt.axis('equal');

# ## 2. Monotone discretization by sampling of the control space
#
# We present a numerical scheme based on sampling the control space, which is quite simple and
# generic. Given fields of positive definite diffusion tensors $D_k(x)$, $1 \leq k \leq K$,
# without any specific assumption on their origin, we compute the decompositions
# $$
# D_k(x) = \sum_{1 \leq i \leq n} \mu_{ki}(x) e_{ki}(x) e_{ki}(x)^T.
# $$
# We then implement the scheme
# $$
# \beta(x) - \min_{1\leq k \leq K} \sum_{1 \leq i \leq n}
# \mu_{ki}(x) \frac{u(x+h e_{ki}(x))-2 u(x) +u(x-h e_{ki}(x))}{h^2},
# $$
# a monotone discretization of
# $$
# \beta(x) - \min_{1 \leq k \leq K} {\mathrm Tr}(D_k(x) \nabla^2 u(x)).
# $$

# *Question 5*
# ===
# Implement the scheme above.

def SchemeSampling(u, diffs, β, bc):
    """Residue of $\\beta - \\min_k Tr(D_k \\nabla^2 u)$ via Selling decompositions of `diffs`."""
    # Decompose each tensor as sum_i μ_i e_i e_i^T (μ_i >= 0, e_i integer offsets)
    μ, e = Selling.Decomposition(diffs)
    μ = bc.as_field(μ)  # broadcast the weights over the grid

    # Second differences (u(x+he)-2u(x)+u(x-he))/h^2 along each offset e
    d2u = bc.Diff2(u, e)
    # Tr(D_k ∇²u) for every k, then the pointwise minimum over k
    operator_k = np.sum(μ * d2u, axis=0)
    residue = β - np.min(operator_k, axis=0)

    # Dirichlet boundary conditions outside the interior
    return np.where(bc.interior, residue, u - bc.grid_values)

# The tensors involved in our PDE take the following form.
def Diff(α,θ):
    """Diffusion tensor of the Pucci operator: α e e^T + f f^T.

    Here e = (cos θ, sin θ) and f = (-sin θ, cos θ), so the returned symmetric
    2x2 matrix has eigenvalue α along e and eigenvalue 1 along f.

    Parameters:
    - α : anisotropy ratio (scalar).
    - θ : orientation angle; may be a scalar or an array, in which case the
      result has shape (2, 2) + θ.shape.

    Returns the tensor(s) as a numpy array with the two leading axes indexing
    the matrix entries.
    """
    c = np.cos(θ); s = np.sin(θ)
    # Off-diagonal entry α c s - c s simplifies to (α-1) c s.
    return np.array([
        [α*c**2 + s**2,  (α-1)*c*s],
        [(α-1)*c*s,      α*s**2 + c**2]
    ])
# + params = (Diff(1.,θs), β,bc) solution = newton_root(SchemeSampling,guess,params) plt.axis('equal'); plt.title('Solution to λ_max + λ_min = 1') plt.contourf(*X,solution); # - print("Cross residue in special case α=1 :", norm(SchemeNonMonotone(solution,1.,β,bc),ord=np.inf) ) Selling.Decomposition(np.array([[1.,0],[0,1]])) # ### 2.2 Limit case : $\alpha \to 0$ # # In contrast, if one chooses a very small value of $\alpha$, then the PDE becomes more and more non-linear, which raises numerical difficulties discussed below. # If in addition $\beta = 0$, then we recover PDE characterization of the convex envelope: # $$ # -\lambda_{\min}(\nabla^2 u) = 0 # $$ # # **Numerical challenges.** # As $\alpha\to 0$, the condition number of the tensors $D_\alpha(\theta)$ increase. A finer sampling of the interval $[0,\pi]$ is required, which increases the numerical cost of the method. # In addition, the width of the discretization stencil increases, and therefore the effective discretization scale is reduced. # # **Note on computing the convex envelope.** # The computation of convex envelopes is one of the most central problems in algorithmic geometry. For instance, Voronoi diagrams are deduced from a convex envelope computation in higher dimension. # Extremely efficient software packages are available for this problem, and PDE methods are *not* the recommended way to go. α_small = 0.01 nθ_small = 50 θs_small = np.linspace(0,np.pi,nθ_small,endpoint=False) β_cvx_env = 0. # The above parameters turn the solution into the convex envelope of the boundary conditions. Recall that we imposed: # * $u=0$ on the (exterior) square boundary. # * $u=-1$ on the (interior) diamond boundary. params = (Diff(α_small,θs_small), β_cvx_env,bc) solution = newton_root(SchemeSampling,guess,params) plt.axis('equal'); plt.title('Convex envelope of the boundary conditions.') plt.contourf(*X,solution); # The solution is piecewise affine, and its gradient is piecewise constant. 
# * Second, run the scheme with AD variables, and an *oracle* providing the active tensors.
# # **Important : other optimization opportunities.** # The "optimization" presented in this subsection only serves to illustrate the envelope theorem mechanism. It is *not* effective in terms of computation time, because the optimized part is not dominant. There are other optimization opportunities here, the most obvious one being to avoid recomputing the tensor decompositions at each call of the iterative solver. The choice of linear solver may also be of importance. # + tags=["ExportCode"] def SchemeSampling_OptInner(u,diffs,bc,oracle=None): # Select the active tensors, if they are known if not(oracle is None): diffs = np.take_along_axis(diffs, np.broadcast_to(oracle,diffs.shape[:2]+(1,)+oracle.shape),axis=2) print("Has AD information :", ad.is_ad(u), ". Number active tensors per point :", diffs.shape[2]) # Tensor decomposition coefs,offsets = Selling.Decomposition(diffs) # Return the minimal value, and the minimizing index return ad.min_argmin( lp.dot_VV(coefs,bc.Diff2(u,offsets)), axis=0) def SchemeSampling_Opt(u,diffs,β,bc): # Evaluate the operator using the envelope theorem result,_ = ad.apply(SchemeSampling_OptInner, u,bc.as_field(diffs),bc, envelope=True) # Boundary conditions return np.where(bc.interior, β-result, u-bc.grid_values) # - params = (Diff(α,θs), β,bc) solution = newton_root(SchemeSampling_Opt,guess,params) # ## 3. Monotone and consistent discretization # # Setting up a monotone and consistent discretization requires a bit more work, but is worthwhile in the end if performance and accuracy are a target. Let us recall that the diffusion tensors take the form, # $$ # D_\alpha(\theta) = \alpha\, e(\theta) e(\theta)^T + e(\theta)^\perp (e(\theta)^\perp)^T, # $$ # where $0< \alpha \leq 1$ is a fixed parameter in the following, and $\theta \in [0,\pi]$. 
# ### 3.1 Angular sectors and obtuse superbases # The first, and main, difficulty is to construct a sequence of angles $0 = \theta_0 \leq \cdots \leq \theta_N = \pi$ and of superbases $s_0,\cdots, s_{N-1}$ such that # $$ # s_k \text{ is } D_\theta(\theta) \text{-obtuse, for all } \theta \in [\theta_k, \theta_{k+1}]. # $$ # For that purpose, we remark that # $$ # D_\alpha(\theta) = D_0 + D_1 \cos(2 \theta) + D_2 \sin(2 \theta), # $$ # where (omitting the dependency on $\alpha$ for readability) # $$ # D_0 = \frac{\alpha+1} 2 # \begin{pmatrix} # 1 & 0\\ # 0 & 1 # \end{pmatrix}, # \quad # D_1 = \frac{\alpha-1} 2 # \begin{pmatrix} # 1 & 0\\ # 0 &-1 # \end{pmatrix}, # \quad # D_2 = \frac{\alpha-1} 2 # \begin{pmatrix} # 0 & 1\\ # 1 & 0 # \end{pmatrix}. # $$ # Then, for given $u,v \in R^2$, one has # $$ # <u,D_\alpha(\theta) v> = a_0 + a_1 \cos(2 \theta) + a_2 \sin(2 \theta) # = r (\cos(2\theta-\phi) - c). # $$ # where $a_i = <u,D_i v>$. Then $r e(\phi) = (a_1,a_2)$, and $c=-a_0/r$. We assume that $r$ is positive. # Eventually, the above scalar product is # * always negative if $c>1$. # * always positive if $c<-1$. # * otherwise, positive iff $|2 \theta-\phi| \leq \arccos(c)$. 
# + tags=["ExportCode"] def MakeD(α): return np.moveaxis(0.5*np.array([ (α+1)*np.array([[1,0],[0,1]]), (α-1)*np.array([[1,0],[0,-1]]), (α-1)*np.array([[0,1],[1,0]]) ]), 0,-1) def NextAngleAndSuperbase(θ,sb,D): pairs = np.stack([(1,2), (2,0), (0,1)],axis=1) scals = lp.dot_VAV(np.expand_dims(sb[:,pairs[0]],axis=1), np.expand_dims(D,axis=-1), np.expand_dims(sb[:,pairs[1]],axis=1)) ϕ = np.arctan2(scals[2],scals[1]) cst = -scals[0]/np.sqrt(scals[1]**2+scals[2]**2) θ_max = np.pi*np.ones(3) mask = cst<1 θ_max[mask] = (ϕ[mask]-np.arccos(cst[mask]))/2 θ_max[θ_max<=0] += np.pi θ_max[θ_max<=θ] = np.pi k = np.argmin(θ_max) i,j = (k+1)%3,(k+2)%3 return (θ_max[k],np.stack([sb[:,i],-sb[:,j],sb[:,j]-sb[:,i]],axis=1)) def AnglesAndSuperbases(D,maxiter=200): sb = Selling.CanonicalSuperbase(np.eye(2)).astype(int) θs=[] superbases=[] θ=0 for i in range(maxiter): θs.append(θ) if(θ>=np.pi): break superbases.append(sb) θ,sb = NextAngleAndSuperbase(θ,sb,D) return np.array(θs), np.stack(superbases,axis=2) # - # The above code is a bit intricate, but its purpose is simple : split the interval $[0,\pi]$ into sub-intervals on which the support of Selling's decomposition of the tensors is fixed and known. α=0.1 θs,superbases = AnglesAndSuperbases(MakeD(α)) θs superbases # + θs_sampled = np.linspace(0,np.pi,200) decomp = Selling.GatherByOffset(θs_sampled,*Selling.Decomposition(Diff(α,θs_sampled))) fig = plt.figure(figsize=(20,10)) # Paper:(10,5) plt.title(f"Decomposition of a rotated matrix of eigenvalues {α} and {1}") plt.xlabel("θ"); plt.ylabel("Coefficient") for offset,(angle,coef) in decomp.items(): plt.plot(angle,coef) plt.legend(decomp.keys()); for θ in θs: # Show a vertical line for each angle theta where the stencil changes plt.axvline(x=θ) savefig(fig,"DecompositionCoefficients.png") # - # A value of $\alpha$ closer to $1$ yields a smaller number of superbases. 
# Whereas a smaller value yields more superbases and increases the numerical scheme complexity.
# # For that purpose, we rely on the expression $D_\alpha(\theta) = D_0 + D_1 \cos(2 \theta) + D_2 \sin(2 \theta)$, and on the explicit solution # $$ # \min_{\phi \in [2\theta_0,2\theta_1]} d_0 + d_1 \cos \phi + d_2 \sin \phi = d_0 - \sqrt{d_1^2+d_2^2} # $$ # if $(\cos \phi,\sin \phi)$ is proportional to $-(d_1,d_2)$ for some $\phi \in [2\theta_0,2\theta_1]$. Otherwise the minimum is attained at $2\theta_0$ or $2 \theta_1$. # # **Theoretical issue for the Newton method** The lack of differentiability of the term $\sqrt{d_1^2+d_2^2}$ is a theoretical issue for the Newton method. It does not raise any difficulty from a practical standpoint, although numpy does raise a warning on the matter. # + tags=["ExportCode"] def MinimizeTrace(u,α,bc,sqrt_relax=1e-16): # Compute the tensor decompositions D=MakeD(α) θ,sb = AnglesAndSuperbases(D) θ = np.array([θ[:-1],θ[1:]]) # Compute the second order differences in the direction orthogonal to the superbase sb_rotated = np.array([-sb[1],sb[0]]) d2u = bc.Diff2(u,sb_rotated) d2u[...,bc.not_interior]=0. # Placeholder values to silent NaNs # Compute the coefficients of the tensor decompositions sb1,sb2 = np.roll(sb,1,axis=1), np.roll(sb,2,axis=1) sb1,sb2 = (e.reshape( (2,3,1)+sb.shape[2:]) for e in (sb1,sb2)) D = D.reshape((2,2,1,3,1)+D.shape[3:]) # Axes of D are space,space,index of superbase element, index of D, index of superbase, and possibly shape of u scals = lp.dot_VAV(sb1,D,sb2) # Compute the coefficients of the trigonometric polynomial scals,θ = (bc.as_field(e) for e in (scals,θ)) coefs = -lp.dot_VV(scals, np.expand_dims(d2u,axis=1)) # Optimality condition for the trigonometric polynomial in the interior value = coefs[0] - np.sqrt(np.maximum(coefs[1]**2+coefs[2]**2,sqrt_relax)) coefs_ = ad.remove_ad(coefs) # removed AD information angle = np.arctan2(-coefs_[2],-coefs_[1])/2. 
angle[angle<0]+=np.pi # Boundary conditions for the trigonometric polynomial minimization mask = np.logical_not(np.logical_and(θ[0]<=angle,angle<=θ[1])) t,c = θ[:,mask],coefs[:,mask] value[mask],amin_t = ad.min_argmin(c[0]+c[1]*np.cos(2*t)+c[2]*np.sin(2*t),axis=0) # Minimize over superbases value,amin_sb = ad.min_argmin(value,axis=0) # Record the optimal angles for future use angle[mask]=np.take_along_axis(t,np.expand_dims(amin_t,axis=0),axis=0).squeeze(axis=0) # Min over bc angle = np.take_along_axis(angle,np.expand_dims(amin_sb,axis=0),axis=0) # Min over superbases return value,angle def SchemeConsistent(u,α,β,bc): value,_ = MinimizeTrace(u,α,bc) residue = β - value return np.where(bc.interior,residue,u-bc.grid_values) # - # The scheme is efficiently solved by the Newton method. The warning (possibly) raised is related with the lack of differentiability of sqrt, as mentioned above. # %%time params = (α,β,bc) guess2 = 0.5*(X[0]**2 +2.*X[1]**2) solution = newton_root(SchemeConsistent,guess2,params) fig = plt.figure(figsize=(4,4)); plt.axis('equal') plt.title("Solution from the monotone and consistent scheme.") plt.contourf(*X,solution); savefig(fig,"SolutionMonotoneConsistent.png") # For validation, we compute the residue of the naive scheme. It is small, as expected, except on the boundary of the obstacle. fig = plt.figure(figsize=(6,4)); plt.axis('equal') plt.title("Residue from the monotone and consistent scheme.") res = SchemeNonMonotone(solution,*params) res[BoundaryNeighborhood(bc.interior,width=5)] = 0. plt.contourf(res); plt.colorbar(); savefig(fig,"ResidueMonotoneConsistent.png") # ### 3.3 Optimized implementation # # Similarly to the sampling based scheme, we propose an enhanced implementation taking advantage of the envelope theorem, which we recall lets one differentiate functions of the form # $$ # F(x) = \min_{\alpha \in A} F_\alpha(x). 
# $$ # However, there is one important distinction, relative to the nature of the optimization parameter $\alpha \in A$. Indeed, it is: # * *Discrete* in the case of the sampling scheme. Namely the (index of) the optimal angle # * *Continuous* in the case of the consistent scheme. Namely the optimal angle itself. # + tags=["ExportCode"] def MinimizeTrace_Opt(u,α,bc,oracle=None): if oracle is None: return MinimizeTrace(u,α,bc) # The oracle contains the optimal angles diffs=Diff(α,oracle.squeeze(axis=0)) coefs,sb = Selling.Decomposition(diffs) value = lp.dot_VV(coefs,bc.Diff2(u,sb)) return value,oracle def SchemeConsistent_Opt(u,α,β,bc): value,_ = ad.apply(MinimizeTrace_Opt,u,α,bc,envelope=True) residue = β - value return np.where(bc.interior,residue,u-bc.grid_values) # - # %%time params = (α,β,bc) guess2 = 0.5*(X[0]**2 +2.*X[1]**2) solution = newton_root(SchemeConsistent_Opt,guess2,params) # ### 3.4 Non-square domains # # We illustrate (first order) accurate (Dirichlet) boundary conditions on a general domain. # This is in contrast with the numerical experiments presented in the above subsections, which rely on a rather crude implementation of the boundary conditions. # # The chosen domain is ring shaped, with a non-smooth boundary including reentrant corners. # The chosen boundary condition is $0$ on the inner boundary, and $1$ on the outer boundary. 
# + outer = Domain.Union(Domain.Ball(),Domain.Box([[0,1],[-1,1]]) ) inner = Domain.AffineTransform(outer,0.4*lp.rotation(np.pi/3)) domain_ring = Domain.Complement(outer,inner) def bc_value_ring(x): """0 on inner boundary, 1 on outer boundary.""" return outer.level(x)+inner.level(x) > 0 bc_ring = Domain.Dirichlet(domain_ring,bc_value_ring,X) # - plt.contourf(*X,domain_ring.contains(X)); plt.axis('equal'); # + # %%time params = (0.5**2,0.,bc_ring) guess2 = 0.5*(X[0]**2 +2.*X[1]**2) solution_05 = newton_root(SchemeConsistent_Opt,guess2,params) fig = plt.figure(figsize=[4,4]); plt.axis('equal') plt.title(r"$\lambda_{min}(\nabla^2u)+\alpha\lambda_{\max}(\nabla^2u)=0$, $\alpha=1/4$.") plt.contourf(*X,np.where(bc_ring.interior,solution_05,np.nan)); savefig(fig,"Consistent_Ring_05.png") # + grad = np.array(np.gradient(solution_05,bc_ring.gridscale)) grad[:,np.logical_not(bc_ring.domain.contains_ball(X,1.5*bc.gridscale))]=0. fig = plt.figure(figsize=[4,4]); plt.axis('equal') plt.title(r"Norm of the solution gradient, $\alpha=1/4$") plt.pcolormesh(*X,norm(grad,ord=2,axis=0)); #plt.colorbar(); savefig(fig,"Consistent_Ring_05_GradNorm.png") # - fig = plt.figure(figsize=[4,4]); plt.axis('equal') plt.title(r"Norm of the solution gradient, $\alpha=1/4$") plt.contourf(*X,norm(grad,ord=2,axis=0)); #plt.colorbar(); savefig(fig,"Consistent_Ring_05_GradNorm_Levels.png") # + # %%time params = (0.05**2,0.,bc_ring) guess2 = 0.5*(X[0]**2 +2.*X[1]**2) solution_005 = newton_root(SchemeConsistent_Opt,guess2,params) fig = plt.figure(figsize=[4,4]); plt.axis('equal') plt.title(r"$\lambda_{min}(\nabla^2u)+\alpha\lambda_{\max}(\nabla^2u)=0$, $\alpha=1/400$.") #Paper : plt.title(r"$\lambda_{min}(\nabla^2u)+\mu\lambda_{\max}(\nabla^2u)=0$, $\mu=1/400$.") plt.contourf(*X,np.where(bc_ring.interior,solution_005,np.nan)) savefig(fig,"Consistent_Ring_005.png") # + grad = np.array(np.gradient(solution_005,bc.gridscale)) grad[:,np.logical_not(bc_ring.domain.contains_ball(X,1.5*bc.gridscale))]=np.nan fig 
# ### 4.1 Comparison of the two schemes
# + tags=["ExportCode"]
def Pucci_ad(u,α,x):
    """
    Computes alpha*lambda_max(D^2 u) + lambda_min(D^2 u), at the given set of points,
    by automatic differentiation.

    u is a callable accepting dense second-order AD variables; x is the array
    of evaluation points (2 leading components). The Hessian eigenvalues are
    obtained in closed form from its trace and discriminant.
    """
    x_ad = ad.Dense2.identity(constant=x,shape_free=(2,))
    hessian = u(x_ad).hessian()
    # Closed-form 2x2 symmetric eigenvalues: mean ± sqrt(discriminant).
    Δ = ((hessian[0,0]-hessian[1,1])/2.)**2 + hessian[0,1]**2
    sΔ = np.sqrt(Δ)
    mean = (hessian[0,0]+hessian[1,1])/2.
    λMin,λMax = mean-sΔ,mean+sΔ
    return λMin+α*λMax
# -

def Residue_ad(u,α,dom,X):
    """Residue of SchemeConsistent on the synthetic problem whose exact
    solution is the analytic function u: the right-hand side is computed by
    automatic differentiation (Pucci_ad), and the residue is zeroed outside
    the interior so only discretization error remains."""
    bc = Domain.Dirichlet(dom,u,X)
    rhs = Pucci_ad(u,α,X)
    residue = SchemeConsistent(u(X),α,rhs,bc)
    residue[bc.not_interior]=0
    return residue

# +
# Analytic test functions: a quadratic (reproduced exactly by the scheme)
# and a non-quadratic polynomial (discretization error expected).
def test_quadratic(x):
    return x[0]**2+x[1]**2

def test_polynomial(x):
    return 0.5*(x[0]**2+x[1]**2)**2
# -
# + aX_25 = np.linspace(-1,1,25) X_25 = np.array(np.meshgrid(aX_25,aX_25,indexing='ij')) aX_50 = np.linspace(-1,1,50) X_50 = np.array(np.meshgrid(aX_50,aX_50,indexing='ij')) X_100=X print("Domain (line): convex, ball, square.") print("Resolution (column) : 25,50,100.\n") print("Mean residue of exact polynomial solution.") print(np.array([[ norm(Residue_ad(test_polynomial,0.5,dom,X),ord=1,averaged=True) for X in (X_25,X_50,X_100)] for dom in (dom_convex,dom_ball,dom_square)])) print("Residue of exact polynomial solution in the L^Infinity norm.") print(np.array([[ norm(Residue_ad(test_polynomial,0.5,dom,X),ord=np.inf) for X in (X_25,X_50,X_100)] for dom in (dom_convex,dom_ball,dom_square)])) # -
TP7/NonlinearMonotoneSecond2D_Exo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: chineseocr # language: python # name: chineseocr # --- # ## 加载模型 # import os GPUID='0'##调用GPU序号 os.environ["CUDA_VISIBLE_DEVICES"] = GPUID import torch from apphelper.image import xy_rotate_box,box_rotate,solve import model # + import cv2 import numpy as np import cv2 def plot_box(img,boxes): blue = (0, 0, 0) #18 tmp = np.copy(img) for box in boxes: cv2.rectangle(tmp, (int(box[0]),int(box[1])), (int(box[2]), int(box[3])), blue, 1) #19 return Image.fromarray(tmp) def plot_boxes(img,angle, result,color=(0,0,0)): tmp = np.array(img) c = color w,h = img.size thick = int((h + w) / 300) i = 0 if angle in [90,270]: imgW,imgH = img.size[::-1] else: imgW,imgH = img.size for line in result: cx =line['cx'] cy = line['cy'] degree =line['degree'] w = line['w'] h = line['h'] x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi) x1,y1,x2,y2,x3,y3,x4,y4 = box_rotate([x1,y1,x2,y2,x3,y3,x4,y4],angle=(360-angle)%360,imgH=imgH,imgW=imgW) cx =np.mean([x1,x2,x3,x4]) cy = np.mean([y1,y2,y3,y4]) cv2.line(tmp,(int(x1),int(y1)),(int(x2),int(y2)),c,1) cv2.line(tmp,(int(x2),int(y2)),(int(x3),int(y3)),c,1) cv2.line(tmp,(int(x3),int(y3)),(int(x4),int(y4)),c,1) cv2.line(tmp,(int(x4),int(y4)),(int(x1),int(y1)),c,1) mess=str(i) cv2.putText(tmp, mess, (int(cx), int(cy)),0, 1e-3 * h, c, thick // 2) i+=1 return Image.fromarray(tmp) # + import time from PIL import Image p = './test/train.jpg' img = Image.open(p).convert("RGB") w,h = img.size timeTake = time.time() _,result,angle= model.model(img, detectAngle=True,##是否进行文字方向检测 config=dict(MAX_HORIZONTAL_GAP=100,##字符之间的最大间隔,用于文本行的合并 MIN_V_OVERLAPS=0.7, MIN_SIZE_SIM=0.7, TEXT_PROPOSALS_MIN_SCORE=0.1, TEXT_PROPOSALS_NMS_THRESH=0.3, TEXT_LINE_NMS_THRESH = 0.99,##文本行之间测iou值 MIN_RATIO=1.0, LINE_MIN_SCORE=0.1, TEXT_PROPOSALS_WIDTH=0, MIN_NUM_PROPOSALS=0, ), 
leftAdjust=True,##对检测的文本行进行向左延伸 rightAdjust=True,##对检测的文本行进行向右延伸 alph=0.0,##对检测的文本行进行向右、左延伸的倍数 ifadjustDegree=True ) timeTake = time.time()-timeTake print('It take:{}s'.format(timeTake)) for line in result: print(line['text']) plot_boxes(img,angle, result,color=(0,0,0))
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Rolling Windows # # ## Pandas.DataFrame.rolling # # # You've just learned about rolling windows. Let's see how we can use rolling function in pandas to create the rolling windows # # First, let's create a simple dataframe! # # + import numpy as np import pandas as pd from datetime import datetime dates = pd.date_range(datetime.strptime('10/10/2018', '%m/%d/%Y'), periods=11, freq='D') close_prices = np.arange(len(dates)) close = pd.Series(close_prices, dates) close # - # Here, we will introduce rolling function from pandas. The rolling function helps to provide rolling windows that can be customized through different parameters. # # You can learn more about [rolling function here](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.rolling.html) # # Let's take a look at a quick sample. close.rolling(window = 3) # This returns a Rolling object. Just like what you've seen before, it's an intermediate object similar to the GroupBy object which breaks the original data into groups. That means, we'll have to apply an operation to these groups. Let's try with sum function. close.rolling(window = 3).sum() # The window parameter defines the size of the moving window. This is the number of observations used for calculating the statistics which is the "sum" in our case. # # For example, the output for 2018-10-12 is 3, which equals to the sum of the previous 3 data points, 0 + 1 + 2. # Another example is 2018-10-20 is 27, which equals to 8+ 9 + 10 # # Not just for summation, we can also apply other functions that we've learned in the previous lessons, such as max, min or even more. # # Let's have a look at another quick example close.rolling(window = 3).min() # Now, the output returns the minimum of the past three data points. 
def calculate_simple_moving_average(rolling_window, close):
    """
    Compute the simple moving average.

    Parameters
    ----------
    rolling_window: int
        Rolling window length
    close : DataFrame
        Close prices for each ticker and date

    Returns
    -------
    simple_moving_average : DataFrame
        Simple moving average for each ticker and date
    """
    # Average each trailing window of length `rolling_window`; the first
    # rolling_window - 1 rows lack enough history and come back as NaN.
    windows = close.rolling(window=rolling_window)
    return windows.mean()
lesson-14/rolling_windows.ipynb
def SIR_model(SIR,time,beta, gamma):
    """Right-hand side of the SIR ODE system, for scipy.integrate.odeint.

    SIR = (S, I, R) current compartment sizes; `time` is required by odeint's
    calling convention but unused (the system is autonomous). beta is the
    infection rate, gamma the recovery rate. Relies on the global population
    size N0. NOTE(review): this shadows the earlier SIR_model definition that
    had no `time` argument — confirm the earlier cells are not re-run after this.
    """
    S,I,R = SIR
    dS_dt = -beta*S*I/N0
    dI_dt = beta*S*I/N0-gamma*I
    dR_dt = gamma*I
    return dS_dt,dI_dt,dR_dt

def fit_odeint(x, beta, gamma):
    """Model function for scipy.optimize.curve_fit: integrate the SIR system
    and return the infected compartment I(t).

    The `x` argument is ignored; the integration grid is the global `time`,
    and the initial state (S0, I0, R0) is also taken from globals — so this
    closure-over-globals must be kept in sync with the cells above.
    """
    return integrate.odeint(SIR_model, (S0,I0,R0), time, args=(beta,gamma))[:,1]
t_phases[2], facecolor='b', alpha=0.5, label="Holding Measures") ax1.axvspan(t_phases[2], t_phases[3], facecolor='b', alpha=0.7, label="Relaxed Measures") ax1.axvspan(t_phases[3], len(propagation_rates.Infected),facecolor='b', alpha=0.6, label="Hard Measures Again") ax1.set_ylim(10,1.5*max(propagation_rates.Infected)) #ax1.set_xlim(0,100) ax1.set_yscale('log') ax1.set_title('SIR Simulation', size= 16) ax1.set_xlabel('Number of days', size=16) ax1.legend(loc='best', prop={'size':16}) # -
notebooks/SIR_modelling.ipynb
# ### Logistic discrimination

# Load the PCA-transformed training data and the data-preparation objects.
# NOTE(review): pca_train/pca_test_s/golub_test_r are assumed to come from
# PCA.rda / DP.rda — confirm against the companion preprocessing notebook.
load("PCA.rda")
load("DP.rda")
suppressMessages(library(caret))
set.seed(201703)       # Reproducibility of the caret resampling
options(warn=-1)       # Silence glm convergence warnings

# Logistic Discrimination: fit a binomial GLM on the PCA features,
# estimating accuracy by leave-one-out cross-validation.
pca_ld_s = train(response~., data = pca_train, method = "glm", family = "binomial", trControl = trainControl(method = "LOOCV"))
# Predict on the held-out test set and compute test accuracy.
pca_ld_te = predict(pca_ld_s, data.frame(pca_test_s))
pca_ld_ac = mean(pca_ld_te == golub_test_r)
# Report LOOCV and test accuracy side by side.
pca_ld_re = c(LOOCV = pca_ld_s$results$Accuracy, Test = pca_ld_ac)
pca_ld_re
ReproducingMLpipelines/Paper6/.ipynb_checkpoints/ModelLogitPCA-checkpoint.ipynb