code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.3 64-bit (''base'': conda)' # metadata: # interpreter: # hash: bdff06bd94e17c36ce62dbd42a532c4255a44c9d38880a633082aa091992cee7 # name: 'Python 3.8.3 64-bit (''base'': conda)' # --- # # Author : <NAME> # # ## Task 1 : Prediction using Supervised Machine Learning # ## GRIP @ The Sparks Foundation # In this regression task I tried to predict the percentage of marks that a student is expected to score based upon the number of hours they studied. # # This is a simple linear regression task as it involves just two variables. # &nbsp; # # ## Technical Stack : Scikit Learn, Numpy Array, Pandas, Matplotlib # Importing the required libraries from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt import pandas as pd import numpy as np # ## Step 1 - Reading the data from source # + # Reading data from remote link url = r"https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv" s_data = pd.read_csv(url) print("Data import successful") s_data.head(10) # - # ## Step 2 - Input data Visualization # Plotting the distribution of scores s_data.plot(x='Hours', y='Scores', style='o') plt.title('Hours vs Percentage') plt.xlabel('Hours Studied') plt.ylabel('Percentage Score') plt.show() # From the graph we can safely assume a positive linear relation between the number of hours studied and percentage of score. # ## Step 3 - Data Preprocessing # This step involved division of data into "attributes" (inputs) and "labels" (outputs). X = s_data.iloc[:, :-1].values y = s_data.iloc[:, 1].values # ## Step 4 - Model Training # Splitting the data into training and testing sets, and training the algorithm. 
# + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) regressor = LinearRegression() regressor.fit(X_train.reshape(-1,1), y_train) print("Training complete.") # - # ## Step 5 - Plotting the Line of regression # # Now since our model is trained now, its the time to visualize the best-fit line of regression. # + # Plotting the regression line line = regressor.coef_*X+regressor.intercept_ # Plotting for the test data plt.scatter(X, y) plt.plot(X, line,color='red'); plt.show() # - # ## Step 6 - Making Predictions # Now that we have trained our algorithm, it's time to test the model by making some predictions. # # For this we will use our test-set data # Testing data print(X_test) # Model Prediction y_pred = regressor.predict(X_test) # ## Step 7 - Comparing Actual result to the Predicted Model result # # Comparing Actual vs Predicted df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}) df #Estimating training and test score print("Training Score:",regressor.score(X_train,y_train)) print("Test Score:",regressor.score(X_test,y_test)) # + # Plotting the Bar graph to depict the difference between the actual and predicted value df.plot(kind='bar',figsize=(5,5)) plt.grid(which='major', linewidth='0.5', color='red') plt.grid(which='minor', linewidth='0.5', color='blue') plt.show() # - # Testing the model with our own data hours = 9.25 test = np.array([hours]) test = test.reshape(-1, 1) own_pred = regressor.predict(test) print("No of Hours = {}".format(hours)) print("Predicted Score = {}".format(own_pred[0])) # ## Step 8 - Evaluating the model # The final step is to evaluate the performance of algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. Here different errors have been calculated to compare the model performance and predict the accuracy. 
from sklearn import metrics print('Mean Absolute Error:',metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('R-2:', metrics.r2_score(y_test, y_pred)) # R-2 gives the score of model fit and in this case we have R-2 = 0.9454906892105355 which is actually a great score for this model. # # ## Conclusion # # ### I was successfully able to carry-out Prediction using Supervised ML task and was able to evaluate the model's performance on various parameters. # # Thank You #
Task-1_Linear_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import matplotlib import pandas as pd import seaborn as sns file = "../code/data/next_gen_exps.times" df = pd.read_csv(file) print(df) # # all times are in us (seconds * 1e-6) # times to construct res net construct_df = df[df.type=='construct'] for inst, data in construct_df.groupby('inst'): plt.bar(data=data, x='alg', height='time') print(inst) plt.show() for inst, inst_data in df.groupby('inst'): print(inst) for typ in ['low','high','gh']: fig,ax = plt.subplots(figsize=(16,4)) xoff = 0 for alg, data in inst_data[inst_data.type==typ].groupby('alg'): yoff = [0]*(data.iter.max()+1) for stage in pd.unique(data.stage): ys = data[data.stage==stage].time xs = [i*5+xoff-1.5 for i in range(len(ys))] ax.bar(xs, ys, label=alg+' '+stage, bottom=yoff) i = 0 for val in ys: yoff[i] += val i += 1 xoff += 1 ax.set_xticks([i*5+2.5 for i in range(9)], minor=True) ax.grid(which='minor', axis='x') ax.set_title(typ) plt.legend() plt.show()
plots/paper_runtime2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Learning from BEL # # This notebook is about learning from BEL graphs # + import getpass import os import sys import time import numpy as np import pykeen from pykeen.pipeline import pipeline from pykeen.triples.leakage import Sealant from pykeen.triples import TriplesFactory # - print(sys.version) print(time.asctime()) print(getpass.getuser()) print(pykeen.get_version(with_git_hash=True)) ras_triples_path = 'ras_machine_triples.tsv' missing_ras_triples = not os.path.exists(ras_triples_path) missing_ras_triples if missing_ras_triples: import pybel print(pybel.get_version(with_git_hash=True)) graph = pybel.from_emmaa('rasmachine') graph.summarize() triples = pybel.to_triples(graph) np.savetxt(ras_triples_path, triples, fmt='%s', delimiter='\t') else: triples = np.loadtxt(ras_triples_path, dtype=str, delimiter='\t') tf = TriplesFactory(triples=triples) training, testing = tf.split(random_state=1234) results = pipeline( training_triples_factory=training, testing_triples_factory=testing, model='RotatE', training_kwargs=dict(num_epochs=100), random_seed=1235, device='cpu', ) results.metric_results.to_df()
notebooks/learn_from_bel/Learning from BEL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import PyMongo import pymongo pymongo.version from pymongo import MongoClient # ## Connect database client=MongoClient('172.17.0.2') db=client.packt testCollection=db.testCollection # ## Insert some documents testCollection.insert_one({'name':'Alice', 'salary':50000}) testCollection.insert_one({'name':'Bob', 'salary':40000}) testCollection.insert_one({'name':'Charlie', 'salary':60000}) # ## Set an Individual Document testCollection.update_one({'name': 'Alice'}, {'$set':{'salary':55000}}) testCollection.find_one({'name':'Alice'}) # ## Remove a Field from a Document testCollection.update_one({'name':'Alice'}, {'$unset': {'salary':''}}) testCollection.find_one({'name':'Alice'}) # ## Calculate total and mean salary pipeline=[] pipeline.append({'$match':{'salary':{'$exists':'True'}}}) pipeline.append({'$group':{'_id':None, 'avSalary':{'$avg':'$salary'},'totalSalary':{'$sum':'$salary'}}}) cur=testCollection.aggregate(pipeline=pipeline) cur.next() for d in testCollection.find(): print(d) # ## Calculate total and mean salary over groups testCollection.drop() testCollection.insert_one({'name':'Alice', 'salary':50000, 'unit':'legal'}) testCollection.insert_one({'name':'Bob', 'salary':40000, 'unit':'marketing'}) testCollection.insert_one({'name':'Charlie', 'salary':60000, 'unit':'communications'}) testCollection.insert_one({'name':'David', 'salary':70000, 'unit':'legal'}) testCollection.insert_one({'name':'Edwina', 'salary':90000, 'unit':'communications'}) pipeline=[{'$group':{'_id':'$unit', 'avgSalary':{'$avg':'$salary'}, 'totalSalary':{'$sum':'$salary'}}}] cur=testCollection.aggregate(pipeline=pipeline) for d in cur: print(d)
bigdata-with-python/pyMongo/oper-update-aggre.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Predicting Car Prices Based on Features # # The goal of this project is to utilize the K Nearest Neighbors algorithm to see if we can predict car prices using data about car features vs. known car prices. # # The data comes from the UC Irvine Machine Learning Repository and can be found at the following location: # # __[Automobile Data Set](https://archive.ics.uci.edu/ml/datasets/automobile)__ # # # ### Feature list from the data source site # # 1. symboling: -3, -2, -1, 0, 1, 2, 3. # 2. normalized-losses: continuous from 65 to 256. # 3. make: # alfa-romero, audi, bmw, chevrolet, dodge, honda, # isuzu, jaguar, mazda, mercedes-benz, mercury, # mitsubishi, nissan, peugot, plymouth, porsche, # renault, saab, subaru, toyota, volkswagen, volvo # # 4. fuel-type: diesel, gas. # 5. aspiration: std, turbo. # 6. num-of-doors: four, two. # 7. body-style: hardtop, wagon, sedan, hatchback, convertible. # 8. drive-wheels: 4wd, fwd, rwd. # 9. engine-location: front, rear. # 10. wheel-base: continuous from 86.6 120.9. # 11. length: continuous from 141.1 to 208.1. # 12. width: continuous from 60.3 to 72.3. # 13. height: continuous from 47.8 to 59.8. # 14. curb-weight: continuous from 1488 to 4066. # 15. engine-type: dohc, dohcv, l, ohc, ohcf, ohcv, rotor. # 16. num-of-cylinders: eight, five, four, six, three, twelve, two. # 17. engine-size: continuous from 61 to 326. # 18. fuel-system: 1bbl, 2bbl, 4bbl, idi, mfi, mpfi, spdi, spfi. # 19. bore: continuous from 2.54 to 3.94. # 20. stroke: continuous from 2.07 to 4.17. # 21. compression-ratio: continuous from 7 to 23. # 22. horsepower: continuous from 48 to 288. # 23. peak-rpm: continuous from 4150 to 6600. # 24. city-mpg: continuous from 13 to 49. # 25. highway-mpg: continuous from 16 to 54. # 26. 
price: continuous from 5118 to 45400. # # # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import Imputer, normalize from sklearn.neighbors import KNeighborsRegressor from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score, KFold, train_test_split # %matplotlib inline # Setting some default graph styles sns.set_style("white", {'axes.edgecolor': '#b2b2b2'}) sns.set_context("notebook", font_scale=1.25) # - # # Data Analysis and Cleaning cars_original = pd.read_csv('imports-85.data') cars_original.head() # Looks like the data has no header row. The column names pandas chose are actually data from the first entry. Let's create a list of the real column names. columns = ['symboling', 'normalized_losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'drive-wheels', 'engine-location', 'wheel-base', 'length', 'width', 'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size', 'fuel-system', 'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price'] cars_original = pd.read_csv('imports-85.data', names=columns, header=None) # We want to see all of the columns for examination when we display the dataframe pd.set_option('display.max_columns', 26) cars_original.head() # The target prediction column is: **price** # # The following columns appear to be both numeric (or can be converted to numeric) and ordinal. These will be our features. 
# # - normalized_losses # - wheel-base # - length # - width # - height # - curb-weight # - num-of-cylinders # - engine-size # - bore # - stroke # - compression-ratio # - horsepower # - peak-rpm # - city-mpg # - highway-mpg keep_columns = ['normalized_losses', 'wheel-base', 'length', 'width', 'height', 'curb-weight', 'num-of-cylinders', 'engine-size', 'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price'] # Creating a new dataframe using only the columns we want to use as features cars = cars_original[keep_columns] cars.head() # The **normalized_losses** column has question marks instead of null values. Let's replace question marks with NaN throughout the data frame. cars = cars.replace('?', np.nan) # Now lets see where things are regarding the data types for the columns: cars.info() # The **num-of-cylinders** column contains words for numbers rather than numbers. Let's see what words are used: cars['num-of-cylinders'].value_counts() # Not that many distinct values. Let's whip up a quick dictionary for substitution. num_dict = {'four': 4, 'six': 6, 'five': 5, 'eight': 8, 'two': 2, 'twelve': 12, 'three': 3} cars['num-of-cylinders'] = cars['num-of-cylinders'].apply(lambda x: num_dict.get(x, np.nan)) cars['num-of-cylinders'].head() # All other columns apper to be number-ish. Let's try a mass conversion of the entire data frame to float. cars = cars.astype('float') cars.head() # Now let's take a look at how many missing values we have. A little bar chart gives a nice visual representation: cars.isnull().sum().apply(lambda x: x / cars.shape[0] * 100).plot(kind='barh', figsize=(10,6)); plt.title('Percentage of Missing Values by Feature') sns.despine() # The **normalized_losses** column indicates an average yearly insurance loss for that particular car. This should not factor into the initial price of the car, and it is missing quite a bit of data. Let's drop it. 
cars.drop('normalized_losses', inplace=True, axis=1) # For the remaining missing data, let's use a simple mean imputation to fill in the values. imp = Imputer(strategy='mean') cars['peak-rpm'] = imp.fit_transform(cars[['peak-rpm']]) cars['horsepower'] = imp.fit_transform(cars[['horsepower']]) cars['stroke'] = imp.fit_transform(cars[['stroke']]) cars['bore'] = imp.fit_transform(cars[['bore']]) # There are still 4 rows missing price data. Let's drop those rows from the dataframe. cars.dropna(inplace=True) cars.isnull().sum() # No more empty values. # # Now we need to normalize the data so that one particular column doesn't skew the prediction. **price** should be excluded as it is the target of our prediction. for column in cars.columns: if column != 'price': cars[column] = normalize(cars[[column]], axis=0) cars.head() # # Univariate KNN Prediction # Let's set up a function to split the data into training and testing sets, and utilize the __[KNeighborsRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html)__ model to predict the price of the cars in the test set. The function will return the ***Root Mean Squared Error*** as an indicator of how successful the prediction was. In this case the **RMSE** will indicate how many dollars off we were between the actual price and the predicted price. def knn_train_test(train_cols, target_col, df, neighbors=None): ''' Splits a dataframe into training and testing data sets and uses KNN for prediction. 
''' X = df[train_cols] y = df[target_col] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101) if neighbors: knn = KNeighborsRegressor(n_neighbors=neighbors) else: knn = KNeighborsRegressor() knn.fit(X_train, y_train) prediction = knn.predict(X_test) rmse = mean_squared_error(y_test, prediction) ** .5 return rmse, X_test, y_test, prediction # + # Getting a list of columns to cycle through the model excluding 'price' test_cols = list(cars.columns) test_cols.remove('price') # + # Creating an empty list to hold the RMSE values returned by our function rmse_list = [] # + # Running each feature column through the model function for col in test_cols: rmse, _, _, _ = knn_train_test([col], 'price', cars) rmse_list.append(rmse) # + # For easier graphing and data manipulation, creating a DataFrame out of the results univ_rmse = pd.DataFrame({'Feature': test_cols, 'RMSE': rmse_list}) univ_rmse.sort_values('RMSE', inplace=True) univ_rmse # - plt.figure(figsize=(10,5)) sns.barplot(x='RMSE', y='Feature', data=univ_rmse, palette=sns.color_palette("PuBu_r", univ_rmse.shape[0]) ).set_title('Univariate RMSE by Feature'); sns.despine() # This charts shows which individual features performed best when used alone. # # Next let's try some different values for the ***n_neighbors*** hyperparameter. 
# # Trying the following values; # # 1, 3, 5, 7, 9 neighbors = list(range(1,11,2)) neighbors_test = [test_cols] # + # For each individual feature, run it through KNN using the 5 values for n_neighbors for val in neighbors: rmse_list = [] for col in test_cols: rmse, _, _, _ = knn_train_test([col], 'price', cars, val) rmse_list.append(rmse) neighbors_test.append(rmse_list) # + #Putting the results into a dataframe for eaiser analysis neighbors_df = pd.DataFrame({'Feature': neighbors_test[0], 'n1': neighbors_test[1], 'n3': neighbors_test[2], 'n5': neighbors_test[3], 'n7': neighbors_test[4], 'n9': neighbors_test[5]}) neighbors_df.head() # + # Using the dataframe mean function to generate a column containing # the mean RMSE for each feature across all 5 n_neighbors values neighbors_df.set_index('Feature', inplace=True) neighbors_df['Avg. RMSE'] = neighbors_df.mean(axis=1) neighbors_df.reset_index(inplace=True) neighbors_df.sort_values('Avg. RMSE', inplace=True) neighbors_df.head() # - plt.figure(figsize=(10,5)) sns.barplot(x='Avg. RMSE', y='Feature', data=neighbors_df, palette=sns.color_palette("YlGn_r", univ_rmse.shape[0]) ).set_title('Univariate RMSE Averaged Across n_neighbors Values'); sns.despine() # # Multivariate KNN Prediction # Now let's use the 5 best features in different combinations to see how it affects our prediction. For each combination, we will try n_neighbors values of 1 - 25 as well to find the optimum k value for KNN. 
top_5_features = neighbors_df['Feature'][0:5] best_multi = {'Top 2': [], 'Top 3': [], 'Top 4': [], 'Top 5': [] } # + # Looping over combinations of the top 2 - top 5 features across 1-25 neighbors for k in range(1,26): for f in range(2,6): rmse, _, _, _ = knn_train_test(top_5_features[0:f], 'price', cars, k) best_multi['Top ' + str(f)].append(rmse) # - multi_rmse = pd.DataFrame(best_multi) multi_rmse['k Nearest Neighbors'] = list(range(1,26)) # + plt.figure(figsize=(10,6)) for key, value in best_multi.items(): plt.plot(multi_rmse['k Nearest Neighbors'], multi_rmse[key], label=key) plt.legend() plt.title('Multivariate RMSE Across 1-25 Neighbors') plt.xlabel('k Nearest Neighbors') plt.ylabel('RMSE') sns.despine() # + # Grabbing the minimum RMSE for each feature combination and its associated k value multi_rmse_min = pd.DataFrame() for i in range(2,6): min_index = multi_rmse['Top ' + str(i)].idxmin multi_rmse_min = multi_rmse_min.append({'Features': 'Top ' + str(i), 'RMSE': multi_rmse.iloc[min_index]['Top ' + str(i)], 'k Nearest Neighbors': multi_rmse.iloc[min_index]['k Nearest Neighbors']}, ignore_index=True ) # - multi_rmse_min.sort_values('RMSE') # Based on the results, it seems that the best prediction occurs when we use all 5 top Features with a k neighbors setting of 1. # # To reiterate, the top 5 features were: # # * engine-size # * horsepower # * highway-mpg # * city-mpg # * width # # # With those settings we can theoretically predict the car price within +/- $2376.24 of the actual price. 
# Let's try K-Fold Cross-Validation to see if it improves our prediction def knn_fold_train_test(train_cols, target_col, df, folds, k_val=None): ''' Uses K-Fold Cross-Validation along with KNN to predict values ''' kf = KFold(folds, shuffle=True, random_state=1) if k_val: model = KNeighborsRegressor(n_neighbors=k_val) else: model = KNeighborsRegressor() mses = cross_val_score(model, df[train_cols], df[target_col], scoring="neg_mean_squared_error", cv=kf) rmses = np.sqrt(np.absolute(mses)) return rmses # + # Trying with 5 Folds knn_fold_train_test(top_5_features, 'price', cars, 5, 1).mean() # + # Trying with 10 Folds knn_fold_train_test(top_5_features, 'price', cars, 10, 1).mean() # - # Using K-Fold Cross-Validation does not seem to improve our prediction. # # Results # # Now that we've tuned our parameters and feature selection, let's take a look at some results based on the original data. We'll compare the results from the test set vs. the known test values to get an idea of how accurate the model is. # + # Running KNN with the features and parameters discovered above (rmse, X_test, y_test, prediction) = knn_train_test(top_5_features, 'price', cars, 1) # + # Joining the results with original data for a more human readable output results = cars_original.iloc[X_test.index][top_5_features] results['Actual Price'] = y_test results['Predicted Price'] = prediction results['Difference'] = results['Actual Price'] - results['Predicted Price'] # Taking 20 random samples from the results for viewing results.sample(20) # - # Scanning down the ***Difference*** column, we can see that the algorithm did a pretty good job of predicting car prices. Given the limitations of KNN and the small dataset, the results are acceptable.
car_prices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## PyTorch Tutorial # # IFT6135 – Representation Learning # # A Deep Learning Course, January 2019 # # By <NAME> # # (Adapted from <NAME>'s MILA tutorial) # ## An introduction to the PyTorch neural network library # # ### `torch.nn` & `torch.optim` import numpy as np from __future__ import print_function import torch import torch.nn as nn import torch.optim as optim import torch.nn.init as init import torch.nn.functional as F # ### torch.nn # # Neural networks can be constructed using the `torch.nn` package. # # Provides pretty much all neural network related functionalities such as : # # 1. Linear layers - `nn.Linear`, `nn.Bilinear` # 2. Convolution Layers - `nn.Conv1d`, `nn.Conv2d`, `nn.Conv3d`, `nn.ConvTranspose2d` # 3. Nonlinearities - `nn.Sigmoid`, `nn.Tanh`, `nn.ReLU`, `nn.LeakyReLU` # 4. Pooling Layers - `nn.MaxPool1d`, `nn.AveragePool2d` # 4. Recurrent Networks - `nn.LSTM`, `nn.GRU` # 5. Normalization - `nn.BatchNorm2d` # 6. Dropout - `nn.Dropout`, `nn.Dropout2d` # 7. Embedding - `nn.Embedding` # 8. Loss Functions - `nn.MSELoss`, `nn.CrossEntropyLoss`, `nn.NLLLoss` # # Instances of these classes will have an `__call__` function built-in that can be used to run an input through the layer. 
# ### Linear, Bilinear & Nonlinearities # + x = torch.randn(32, 10) y = torch.randn(32, 30) sigmoid = nn.Sigmoid() linear = nn.Linear(in_features=10, out_features=20, bias=True) output_linear = linear(x) print('Linear output size : ', output_linear.size()) bilinear = nn.Bilinear(in1_features=10, in2_features=30, out_features=50, bias=True) output_bilinear = bilinear(x, y) print('Bilinear output size : ', output_bilinear.size()) # - # ### Convolution, BatchNorm & Pooling Layers # + x = torch.randn(10, 3, 28, 28) conv = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), stride=1, padding=1, bias=True) bn = nn.BatchNorm2d(num_features=32) pool = nn.MaxPool2d(kernel_size=(2, 2), stride=2) output_conv = bn(conv(x)) outpout_pool = pool(conv(x)) print('Conv output size : ', output_conv.size()) print('Pool output size : ', outpout_pool.size()) # - # ### Recurrent, Embedding & Dropout Layers # + inputs = [[1, 2, 3], [1, 0, 4], [1, 2, 4], [1, 4, 0], [1, 3, 3]] x = torch.LongTensor(inputs) embedding = nn.Embedding(num_embeddings=5, embedding_dim=20, padding_idx=1) # padding_idx: If given, pads the output with the embedding # . vector at padding_idx (initialized to zeros) # . whenever it encounters the index. drop = nn.Dropout(p=0.5) rnn = nn.RNN(input_size=20, hidden_size=50, num_layers=2, batch_first=True, bidirectional=True, dropout=0.3) # batch_first=True -> x: batch_size x sequence_length x embedding_dim emb = drop(embedding(x)) rnn_h, rnn_h_t = rnn(emb) print('Embedding size : ', emb.size()) print('GRU hidden states size : ', rnn_h.size()) print('GRU last hidden state size : ', rnn_h_t.size()) print emb[1,0] # - # ### torch.nn.functional # # Using the above classes requires defining an instance of the class and then running inputs through the instance. # # The functional API provides users a way to use these classes in a `functional` way. Such as # # `import torch.nn.functional as F` # # 1. Linear layers - `F.linear(input=x, weight=W, bias=b)` # 2. 
Convolution Layers - `F.conv2d(input=x, weight=W, bias=b, stride=1, padding=0, dilation=1, groups=1)` # 3. Nonlinearities - `F.sigmoid(x), F.tanh(x), F.relu(x), F.softmax(x)` # 4. Dropout - `F.dropout(x, p=0.5, training=True)` # ### A few examples of the functional API # + x = torch.randn(10, 3, 28, 28) filters = torch.randn(32, 3, 3, 3) conv_out = F.relu(F.dropout(F.conv2d(input=x, weight=filters, padding=1), p=0.5, training=True)) print('Conv output size : ', conv_out.size()) # - # ### torch.nn.init # # Provides a set of functions for standard weight initialization techniques # # `import torch.nn.init as init` # # 1. Calculate the gain of a layer based on the activation function - `init.calculate_gain('sigmoid')` # 2. Uniform init - `init.uniform(tensor, low, high)` # 3. Xavier uniform - `init.xavier_uniform(tensor, gain=init.calculate_gain('sigmoid'))` # 4. Xavier normal - `init.xavier_normal(tensor, gain=init.calculate_gain('tanh'))` # 5. Orthogonal - `init.orthogonal(tensor, gain=init.calculate_gain('tanh'))` # 6. Kaiming normal - `init.kaiming_normal(tensor, mode='fan_in')` # ### Initializing convolution kernels conv_layer = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), padding=1) for k,v in conv_layer.named_parameters(): print k if k == 'weight': init.kaiming_normal_(v) # ### torch.optim # # Provides implementations of standard stochastic optimization techniques # # `import torch.optim as optim` # # W1 = Variable(torch.randn(10, 20), requires_grad=True) # W2 = Variable(torch.randn(10, 20), requires_grad=True) # # 1. SGD - `optim.SGD([W1, W2], lr=0.01, momentum=0.9, dampening=0, weight_decay=1e-2, nesterov=True)` # 2. Adam - `optim.Adam([W1, W2], lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)` # # #### Learning Rate Scheduling # # `optim.lr_scheduler` # # 1. `optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)` # 2. 
`optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True, threshold=1e-04, threshold_mode='rel', min_lr=1e-05, eps=1e-08)` # ### We'll look at how to use `torch.optim` in the following tutorial
pytorch/3. Introduction to the Torch Neural Network Library.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yukinaga/minnano_rl/blob/main/section_4/02_deep_reinforcement_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="vxam5J6s8Pw9"
# # Implementing deep reinforcement learning
# Controls an object flying under gravity by deep reinforcement learning.
# In deep reinforcement learning a neural network is used in place of the Q-table.
# PyTorch is the framework used to implement the neural network.

# + [markdown] id="A2iCWKom81u5"
# ## Importing the libraries
# NumPy for numerical computation, matplotlib for plotting and animation, and
# PyTorch as the framework for the neural network.

# + id="zfOUkvmVAmmY"
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim

# + [markdown] id="Mjp80sVK1kue"
# ## Net class
# The Q-network, implemented as a class inheriting from `nn.Module`.

# + id="EpiskMvFmhAq"
class Net(nn.Module):
    """Three-layer fully connected network: state in, one Q-value per action out."""

    def __init__(self, n_state, n_mid, n_action):
        super().__init__()
        self.fc1 = nn.Linear(n_state, n_mid)  # fully connected layer
        self.fc2 = nn.Linear(n_mid, n_mid)
        self.fc3 = nn.Linear(n_mid, n_action)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no activation on the output layer: raw Q-values
        return x

# + [markdown] id="FAGWByiE9Q1M"
# ## Brain class
# The class that acts as the agent's brain. It builds on the neural network that
# outputs Q-values and trains it so those Q-values approach their targets.
# The Q-learning update rule is:
#
# $$ Q(s_t,a_t) \leftarrow Q(s_t,a_t) + \eta\left(R_{t+1}+\gamma \max_{a}Q(s_{t+1}, a) - Q(s_{t}, a_{t})\right) $$
#
# where $a_{t}$ is the action, $s_t$ the state, $Q(s_t,a_t)$ the Q-value,
# $\eta$ the learning coefficient, $R_{t+1}$ the reward and $\gamma$ the discount rate.
# The part of the expression used as the network's training target is:
#
# $$R_{t+1}+\gamma \max_{a}Q(s_{t+1}, a_{t})$$
#
# The `train` method of the `Brain` class below uses that target.
# `get_action`, which decides the action for a given state, selects actions
# with the ε-greedy method.

# + id="x7mzk3ujGwjY"
class Brain:
    """The agent's brain: trains the Q-network and chooses actions ε-greedily."""

    def __init__(self, n_state, n_action, net, loss_fnc, optimizer, is_gpu, gamma=0.9, r=0.99, lr=0.01):
        self.n_state = n_state  # number of state variables
        self.n_action = n_action  # number of actions
        self.net = net  # neural network model
        self.loss_fnc = loss_fnc  # loss function
        self.optimizer = optimizer  # optimization algorithm
        self.is_gpu = is_gpu  # whether to use the GPU
        if self.is_gpu:
            self.net.cuda()  # move the model to the GPU
        self.eps = 1.0  # ε (exploration rate), decayed after every action choice
        self.gamma = gamma  # discount rate
        self.r = r  # decay rate of ε
        self.lr = lr  # learning coefficient
        # NOTE(review): self.lr is stored but never read here — the effective learning
        # rate is the one the supplied optimizer was constructed with. Confirm intended.

    def train(self, states, next_states, action, reward, terminal):
        # Train the neural network on a single (s, a, r, s') transition.
        states = torch.from_numpy(states).float()
        next_states = torch.from_numpy(next_states).float()
        if self.is_gpu:
            states, next_states = states.cuda(), next_states.cuda()  # GPU support
        self.net.eval()  # evaluation mode while computing the bootstrap target
        next_q = self.net.forward(next_states)
        self.net.train()  # back to training mode for the forward pass we backprop through
        q = self.net.forward(states)
        # Target equals the prediction everywhere except the taken action's entry,
        # so only that Q-value contributes to the loss gradient.
        t = q.clone().detach()
        if terminal:
            t[:, action] = reward  # at episode end the target is the reward alone
        else:
            t[:, action] = reward + self.gamma*np.max(next_q.detach().cpu().numpy(), axis=1)[0]
        loss = self.loss_fnc(q, t)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def get_action(self, states):
        # Choose an action for the given state (ε-greedy).
        states = torch.from_numpy(states).float()
        if self.is_gpu:
            states = states.cuda()  # GPU support
        if np.random.rand() < self.eps:
            # random action (exploration)
            action = np.random.randint(self.n_action)
        else:
            # pick the action with the highest Q-value (exploitation)
            q = self.net.forward(states)
            action = np.argmax(q.detach().cpu().numpy(), axis=1)[0]
        if self.eps > 0.1:  # lower bound of ε
            self.eps *= self.r
        return action

# + [markdown] id="a9t6fuQvDPOf"
# ## Agent class
# The agent, implemented as a class.
# The agent flies in the square region with x from -1 to 1 and y from -1 to 1;
# its initial position is the middle of the left edge.
# When it reaches the right edge it receives a reward of 1 and the episode ends.
# When it reaches the top or bottom edge it receives a reward of -1 and the episode ends.
#
# It moves at a constant velocity along the x axis.
# There are two actions: free fall (the gravitational acceleration is added to the
# y velocity) and jump (the y velocity is set to a preset value).

# + id="DP4AaA5bHTCQ"
class Agent:
    """The flying object: applies actions and feeds transitions to the brain for training."""

    def __init__(self, v_x, v_y_sigma, v_jump, brain):
        self.v_x = v_x  # x velocity (constant)
        self.v_y_sigma = v_y_sigma  # standard deviation of the initial y velocity
        self.v_jump = v_jump  # jump velocity
        self.brain = brain
        self.reset()

    def reset(self):
        # Return to the start: middle of the left edge with a random vertical speed.
        self.x = -1  # initial x coordinate
        self.y = 0  # initial y coordinate
        self.v_y = self.v_y_sigma * np.random.randn()  # initial y velocity

    def step(self, g):
        # Advance time by one tick. g: gravitational acceleration.
        states = np.array([[self.y, self.v_y]])
        self.x += self.v_x
        self.y += self.v_y
        reward = 0  # reward
        terminal = False  # episode-end flag
        if self.x>1.0:
            reward = 1  # reached the right edge: success
            terminal = True
        elif self.y<-1.0 or self.y>1.0:
            reward = -1  # hit the top or bottom edge: failure
            terminal = True
        action = self.brain.get_action(states)
        if action == 0:
            self.v_y -= g  # free fall
        else:
            self.v_y = self.v_jump  # jump
        next_states = np.array([[self.y, self.v_y]])
        self.brain.train(states, next_states, action, reward, terminal)
        if terminal:
            self.reset()

# + [markdown] id="fr-bkx6REXCx"
# ## Environment class
# The environment, implemented as a class.
# Its only roles are to hold the gravitational acceleration and to advance time.

# + id="KqCxzW2RHYrl"
class Environment:
    """Holds the gravity constant and steps the agent forward in time."""

    def __init__(self, agent, g):
        self.agent = agent
        self.g = g  # gravitational acceleration

    def step(self):
        # Advance one tick and report the agent's current position.
        self.agent.step(self.g)
        return (self.agent.x, self.agent.y)

# + [markdown] id="Hot23xovE-fj"
# ## Animation
# The agent's flight is animated with matplotlib, using the
# `FuncAnimation` function from `matplotlib.animation`.

# + id="yuEBjtK-JNbz"
def animate(environment, interval, frames):
    """Return a FuncAnimation that steps `environment` once per frame.

    interval: delay between frames in milliseconds; frames: number of frames.
    """
    fig, ax = plt.subplots()
    plt.close()  # suppress the empty static figure; only the animation is shown
    ax.set_xlim(( -1, 1))
    ax.set_ylim((-1, 1))
    sc = ax.scatter([], [])

    def plot(data):
        # One animation frame = one environment step (which also trains the brain).
        x, y = environment.step()
        sc.set_offsets(np.array([[x, y]]))
        return (sc,)

    return animation.FuncAnimation(fig, plot, interval=interval, frames=frames, blit=True)

# + [markdown] id="-XhwfFz1GKOl"
# ## Running deep reinforcement learning
# Configure the neural network, the Brain, the agent and the environment,
# then run deep reinforcement learning.

# + colab={"background_save": true} id="jOryPVWuJTme"
n_state = 2
n_mid = 32
n_action = 2
net = Net(n_state, n_mid, n_action)
loss_fnc = nn.MSELoss()  # loss function
optimizer = optim.RMSprop(net.parameters(), lr=0.01)  # optimization algorithm
is_gpu = True  # NOTE(review): requires a CUDA device; set to False on CPU-only machines
brain = Brain(n_state, n_action, net, loss_fnc, optimizer, is_gpu)

v_x = 0.05
v_y_sigma = 0.1
v_jump = 0.2
agent = Agent(v_x, v_y_sigma, v_jump, brain)

g = 0.2
environment = Environment(agent, g)

anim = animate(environment, 50, 1024)
rc("animation", html="jshtml")
anim

# + [markdown] id="dVn-iSjcGoco"
# As learning progresses, the agent becomes able to choose appropriate
# actions depending on the state.
section_4/02_deep_reinforcement_learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="TA21Jo5d9SVq"
#
#
# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/healthcare/NER_LAB.ipynb)
#
#

# + [markdown] id="CzIdjHkAW8TB"
# # **Detect lab results**

# + [markdown] id="6uDmeHEFW7_h"
# To run this yourself, you will need to upload your license keys to the notebook. Just Run The Cell Below in order to do that. Also You can open the file explorer on the left side of the screen and upload `license_keys.json` to the folder that opens.
# Otherwise, you can look at the example outputs at the bottom of the notebook.
#
#

# + [markdown] id="wIeCOiJNW-88"
# ## 1. Colab Setup

# + [markdown] id="HMIDv74CYN0d"
# Import license keys

# + colab={"base_uri": "https://localhost:8080/", "height": 113, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": "OK"}}} id="ttHPIV2JXbIM" outputId="6272b4a4-1dc0-49b8-c013-ca045f21f0b8"
import json
import os

from google.colab import files

# Prompt the user to upload license_keys.json, then parse the uploaded file.
license_keys = files.upload()

with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)

# Defining license key-value pairs as local variables
license_keys = license_keys if isinstance(license_keys, dict) else license_keys  # type guard no-op? (kept as-is below)
tutorials/streamlit_notebooks/healthcare/NER_LAB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Dashboard data preparation: pulls tweet and news sentiment rows out of MySQL,
# computes a mean sentiment score per topic for each source, and renders the
# comparison charts with plotly.
import pandas as pd
import sqlalchemy
import pymysql
from sqlalchemy import create_engine
# %matplotlib inline
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
from datetime import timedelta
import plotly.express as px

# Accumulates one row per topic: mean sentiment score from Twitter and from news.
df_cat = pd.DataFrame(columns=['Category', 'Twitter_Score', 'News_Score'])

# +
# NOTE(review): database credentials are hard-coded; consider env vars/config.
engine = create_engine('mysql+pymysql://root:zipcoder@localhost/twitter')
twitter_df = pd.read_sql("sentiments", con = engine)
twitter_df = twitter_df.reset_index()
twitter_df
# -

# Deduplicate tweets: keep the first row seen for each tweet_id.
twitter_df = twitter_df.groupby("tweet_id").agg({"name":"first", "text":"first", "time_stamp":"first", "state":"first", "sentiment":"first","score":"first"})

# Keep only rows whose "state" field looks like a real location name (> 3 chars).
twitter_locations = twitter_df[twitter_df["state"].str.len() > 3]
twitter_locations

engine2 = create_engine('mysql+pymysql://root:zipcoder@localhost/News')
news_df = pd.read_sql("news", con = engine2)
# Deduplicate articles by (author, title), keeping the first row of each.
news_df = news_df.groupby(["author","title"]).agg({"Content":"first","date":"first","sentiment":"first","score":"first"})
news_df

# Convert the millisecond epoch timestamp to an ISO date string (YYYY-MM-DD).
# (A previous stray `twitter_df.astype({'time_stamp': 'int'})` whose result was
# discarded has been dropped — the cast below performs the conversion.)
twitter_df['Date'] = twitter_df['time_stamp'].astype(int)/(1000)
twitter_df['Date'] = twitter_df['Date'].map(lambda a: datetime.fromtimestamp(a).isoformat()[0:10])
twitter_df


def topic_mean_score(df, text_col, keyword):
    """Return the mean sentiment score of the rows whose `text_col` mentions `keyword`.

    Replaces ten copy-pasted filter cells; in the original, the quarantine news
    score was accidentally computed over the UNFILTERED frame (the filtered frame
    was assigned to a misspelled variable) — routing every topic through this
    helper fixes that.
    """
    matches = df[df[text_col].str.contains(keyword)]
    return matches["score"].astype(float).mean()


# Per-topic Twitter scores.
prez_twitter = topic_mean_score(twitter_df, "text", "Trump")
prez_twitter

twitter_stock = topic_mean_score(twitter_df, "text", "economy")
twitter_stock

jobs_twitter = topic_mean_score(twitter_df, "text", "jobs")
jobs_twitter

quarantine_twitter = topic_mean_score(twitter_df, "text", "quarantine")
quarantine_twitter

vaccine_twitter = topic_mean_score(twitter_df, "text", "vaccine")
vaccine_twitter

# Per-topic news scores.
prez_news = topic_mean_score(news_df, "Content", "Trump")
prez_news

news_stock = topic_mean_score(news_df, "Content", "economy")
news_stock

jobs_news = topic_mean_score(news_df, "Content", "jobs")
jobs_news

# Bug fix: this is now genuinely filtered to quarantine-related articles.
quarantine_news = topic_mean_score(news_df, "Content", "quarantine")
quarantine_news

vaccine_news = topic_mean_score(news_df, "Content", "vaccine")
vaccine_news

# Mean Twitter sentiment per day.
twitter_df_date = twitter_df
twitter_df_date['Date'] = twitter_df_date['time_stamp'].astype(int)/(1000)
twitter_df_date['Date'] = twitter_df_date['Date'].map(lambda a: datetime.fromtimestamp(a).isoformat()[0:10])
twitter_df_avgSent = twitter_df_date.astype({'score': 'float'}).groupby("Date").agg({"score":"mean"})
twitter_df_avgSent.index

# Refresh cell: re-reads the raw (ungrouped) twitter table so the daily pie chart
# below can count individual tweets per sentiment.
import pandas as pd
import sqlalchemy
import pymysql
from sqlalchemy import create_engine

engine = create_engine('mysql+pymysql://root:zipcoder@localhost/twitter')
twitter_df = pd.read_sql("sentiments", con = engine)
twitter_df_date = twitter_df
twitter_df_date['Date'] = twitter_df_date['time_stamp'].astype(int)/(1000)
twitter_df_date['Date'] = twitter_df_date['Date'].map(lambda a: datetime.fromtimestamp(a).isoformat()[0:10])
twitter_df_avgSent = twitter_df_date.astype({'score': 'float'}).groupby("Date").agg({"score":"mean"})
twitter_df_avgSent.index

# Bar chart: average Twitter sentiment per day.
import plotly.graph_objects as go
y = twitter_df_avgSent["score"]
x = twitter_df_avgSent.index
fig = go.Figure(data=go.Bar(y=y,x=x))
fig.show()

# Pie chart: today's tweet counts broken down by sentiment label.
today = datetime.now().isoformat()[0:10]
twitter_df_today = twitter_df_date[twitter_df_date.Date == str(today)]
count = len(twitter_df_today.index)
tweet_sentiment_today = twitter_df_today.groupby("sentiment").agg({"tweet_id":"count"})
names = tweet_sentiment_today.index
values = tweet_sentiment_today["tweet_id"]
fig = px.pie(values=values, names=names)
fig.show()

# Mean news sentiment per day (news `date` is also a millisecond epoch).
news_df_date = news_df
news_df_date['Date'] = news_df_date['date'].astype(int)/(1000)
news_df_date['Date'] = news_df_date['Date'].map(lambda a: datetime.fromtimestamp(a).isoformat()[0:10])
news_df_avgSent = news_df_date.astype({'score': 'float'}).groupby("Date").agg({"score":"mean"})
news_df_avgSent.index

# Bar chart: average news sentiment per day.
y = news_df_avgSent["score"]
x = news_df_avgSent.index
fig = go.Figure(data=go.Bar(y=y,x=x))
fig.show()

# Pie chart: yesterday's article counts broken down by sentiment label
# (news lags a day, hence the timedelta).
import plotly.express as px
today = (datetime.now()-timedelta(1)).isoformat()[0:10]
news_df_today = news_df_date[news_df_date.Date == str(today)]
news_df_today
count = len(news_df_today.index)
news_sentiment_today = news_df_today.groupby("sentiment").agg({"Content":"count"})
news_sentiment_today
names = news_sentiment_today.index
values = news_sentiment_today["Content"]
fig = px.pie(values=values, names=names)
fig.show()

# Daily Twitter sentiment bars, colored by the same day's news sentiment.
import plotly.express as px
y = twitter_df_avgSent["score"]
x = twitter_df_avgSent.index
news_sentiment = news_df_avgSent["score"]
fig = px.bar(x=x, y=y, color=news_sentiment, labels={'color':'news sentiment', "y":"twitter senitiment", "x":"year", "hover_data_0":"twitter_sentiment"})
fig.show()

# +
# Assemble the category comparison table in one shot.
# (pd.concat replaces the removed-in-pandas-2.x DataFrame.append.)
category_rows = pd.DataFrame([
    {'Category': "Economy",    'Twitter_Score': twitter_stock,      'News_Score': news_stock},
    {'Category': "Jobs",       'Twitter_Score': jobs_twitter,       'News_Score': jobs_news},
    {'Category': "President",  'Twitter_Score': prez_twitter,       'News_Score': prez_news},
    {'Category': "Quarantine", 'Twitter_Score': quarantine_twitter, 'News_Score': quarantine_news},
    {'Category': "Vaccine",    'Twitter_Score': vaccine_twitter,    'News_Score': vaccine_news},
])
df_cat = pd.concat([df_cat, category_rows], ignore_index=True)
# -

df_cat

# +
# Grouped bar chart: Twitter vs news score per category.
import plotly.graph_objects as go

Category=df_cat["Category"]
fig = go.Figure(data=[
    go.Bar(name="Twitter", x=Category, y=df_cat["Twitter_Score"]),
    go.Bar(name='News', x=Category, y=df_cat["News_Score"])
])
# Change the bar mode
fig.update_layout(barmode='group')
fig.show()
# -
Final_Project/Dashboard/getting_df.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from IPython.display import display, Math, Latex
import pandas as pd
import numpy as np
import numpy_financial as npf
import yfinance as yf
import matplotlib.pyplot as plt
from datetime import datetime
import concurrent.futures
# -

# ## Group Assignment
# ### Team Number: 2
# ### Team Member Names: <NAME>, <NAME>, <NAME>
# ### Team Strategy Chosen: RISKY

# Extract Tickers.csv into a dataframe
ticker_file = pd.read_csv('Tickers.csv')

# +
# Change ticker_file into a list of tickers; the csv header cell can itself be a
# stock symbol, so it is prepended before duplicates are removed.
ticker_list = list(ticker_file[ticker_file.columns[0]])
ticker_list.insert(0, ticker_file.columns[0])

# Removes the potential duplicates in ticker_list
ticker_list = list(pd.unique(ticker_list))


# +
def clean(tic):
    """Return `tic` if it exists and is traded on the US market, otherwise None.

    The ticker metadata is fetched once and reused: every access to
    `yf.Ticker(tic).info` triggers a network request, and the original version
    paid for two round-trips per ticker.
    """
    info = yf.Ticker(tic).info
    if info['regularMarketPrice'] is None:  # no market price => ticker does not exist
        return None
    if info['market'] == 'us_market':
        return tic
    return None


# Run the function concurrently in multiple threads to improve efficiency
# (the work is network-bound, so threads overlap the waits).
with concurrent.futures.ThreadPoolExecutor() as executor:
    temp_ticker_list = executor.map(clean, ticker_list)

# Drop the tickers that failed validation (mapped to None).
clean_ticker_list = [t for t in temp_ticker_list if t is not None]

# +
# Initiate start and end dates of stock data to check for volume requirements
vol_start_date = '2021-07-02'
vol_end_date = '2021-10-22'


def clean_vol(tick):
    """Return `tick` if its average daily volume in the window is >= 10000, else None."""
    if yf.Ticker(tick).history(start=vol_start_date, end=vol_end_date)['Volume'].mean() >= 10000:
        return tick
    return None


# Run the function concurrently in multiple threads to improve efficiency.
with concurrent.futures.ThreadPoolExecutor() as executor:
    temp_ticker_list = executor.map(clean_vol, clean_ticker_list)

# Drop the tickers that failed the volume requirement.
final_ticker_list = [t for t in temp_ticker_list if t is not None]

# + tags=[]
# Initiate start and end dates of stock data that is relevant to us
start_date = '2021-07-02'
end_date = '2021-11-27'


# Function hist collects the closing price for each stock in the given time interval
# and generates a list containing the stock's ticker and a dataframe of its closing prices.
def hist(ticker):
    return [ticker, pd.DataFrame(yf.Ticker(ticker).history(start = start_date, end = end_date)['Close'])]


# Function daily_return generates a dataframe that contains the daily return of a stock.
def daily_return(df):
    daily = df.pct_change()
    daily.columns = ['daily_return']
    return daily


# Function std_return generates a list containing the stock's ticker and the
# standard deviation of its daily return.
def std_return(ticker):
    return [ticker, daily_return(hist(ticker)[1])['daily_return'].std()]


# Function highest_std_return generates a list containing the ticker of the stock with
# highest std, its std value, a name list that ranks the stds of the stocks in
# descending order, and a value list that ranks the stds of the stocks in descending order.
def highest_std_return(df):
    # Consumes a one-row dataframe of per-ticker stds and returns
    # [riskiest ticker, its std, tickers ranked by std descending, stds ranked descending].
    riskiest_return = []
    riskiest_return_name = []
    for i in df.columns:
        # Seed the ranking with the first column seen.
        # NOTE(review): there is no `else` before the inner loop, so the loop also
        # runs for the seeding column and appends it a second time — the ranked
        # lists therefore contain one duplicate entry. Element 0 is still the
        # maximum, so downstream use is unaffected; confirm this is intended.
        if len(riskiest_return) == 0:
            riskiest_return_name.append(i)
            riskiest_return.append(df[i][0])
        # Insertion sort, descending: insert before the first entry this std
        # exceeds; if it never exceeds one, append at the end.
        for k in range(len(riskiest_return)+1):
            if k == len(riskiest_return):
                riskiest_return_name.append(i)
                riskiest_return.append(df[i][0])
                break
            elif df[i][0] > riskiest_return[k]:
                riskiest_return_name.insert(k, i)
                riskiest_return.insert(k, df[i][0])
                break
            else:
                continue
    return [riskiest_return_name[0], riskiest_return[0], riskiest_return_name, riskiest_return]


# Function daily_return_with_ticker generates a list that contains the stock's ticker
# and the daily returns of that stock (the first NaN row is dropped).
def daily_return_with_ticker(ticker):
    return [ticker, daily_return(hist(ticker)[1])[1:]]


# Function compare_corr ranks (ascending) the correlation of every other stock against
# column 0 of `df` — the riskiest stock. Correlation values go into lst1 and the matching
# tickers into lst2; both lists are mutated in place and also returned.
def compare_corr(df, lst1, lst2):
    for i in range(1, len(df.columns)):  # row 0 is the self-correlation; skip it
        if len(lst1) == 0:
            lst1.append(df.iloc[i, 0])
            lst2.append(df.columns[i])
        else:
            # Insertion sort, ascending: skip entries smaller than this value,
            # insert before the first larger one, or append at the end.
            for k in range(len(lst1)+1):
                if k == len(lst1):
                    lst1.append(df.iloc[i, 0])
                    lst2.append(df.columns[i])
                    break
                elif df.iloc[i, 0] > lst1[k]:
                    continue
                elif df.iloc[i, 0] < lst1[k]:
                    lst1.insert(k, df.iloc[i, 0])
                    lst2.insert(k, df.columns[i])
                    break
                else:
                    continue
    return lst1, lst2


# Function portfolio_value_std consumes the name of the riskiest candidate stock, its
# std, the cleaned ticker list, the std of the best portfolio found so far, the daily
# prices of all stocks, and their daily returns (pct_change).
# It generates a list that contains:
#   - the ticker list with the riskiest candidate removed,
#   - a portfolio of 10 stocks selected by the algorithm (riskiest + 9 most correlated),
#   - the std of that portfolio's daily value over the window,
# or None once no remaining candidate can beat `std_value`.
def portfolio_value_std(riskiest_stock, ticker_std, final_ticker_list, std_value, daily_price_df, daily_return_df):
    # Base case: a portfolio's std cannot exceed the std of its riskiest member, so if
    # this candidate's own std is already below the best portfolio std, stop searching.
    if ticker_std < std_value:
        return None
    # Accumulate the daily returns of all stocks, seeded with the riskiest stock's returns.
    corr_df = daily_return_df[riskiest_stock]
    # Formatting
    corr_df.columns = [riskiest_stock]
    # Accumulates the per-ticker return frames in the same order as corr_df's columns.
    corr_ticker_list = [riskiest_stock]
    # Remaining candidates once the riskiest stock is taken out.
    new_final_ticker_list = final_ticker_list.copy()
    new_final_ticker_list.remove(riskiest_stock)
    # Run the function concurrently in multiple threads to improve efficiency.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        temp_ticker_list2 = executor.map(daily_return_with_ticker, new_final_ticker_list)
    # Record the daily returns of each stock in corr_df, one column per ticker.
    for i in temp_ticker_list2:
        i[1].columns = [i[0]]
        corr_ticker_list.append(i[1])
        corr_df = pd.concat([corr_df, i[1]], join='outer', axis=1)
    # Drop last row (the concat can leave a trailing NaN row).
    corr_df.drop(corr_df.tail(1).index,inplace=True)
    # Correlation matrix; column 0 holds each stock's correlation with the riskiest stock.
    corr = corr_df.corr()
    # high_corr ranks (ascending) the correlations with the riskiest stock;
    # high_corr_name holds the tickers in the same order.
    high_corr = []
    high_corr_name = []
    compare_corr(corr, high_corr, high_corr_name)
    # The nine most-correlated tickers (descending), with the riskiest ticker first.
    final_10_tickers = list(reversed(high_corr_name[-9:]))
    final_10_tickers.insert(0,riskiest_stock)
    # Weighting scheme: 35% riskiest, 25% most correlated, 5% each for the rest.
    f_portfolio_dict = {
        'Ticker': final_10_tickers,
        'Weight': [0.35, 0.25, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
    }
    # Change that dictionary to a dataframe for further manipulation.
    portfolio = pd.DataFrame(f_portfolio_dict)
    portfolio['Shares'] = ""
    portfolio['Price'] = ""
    portfolio['Value'] = 100000 * portfolio['Weight']
    # Resets portfolio index (the 1..10 index is applied only for final output elsewhere).
    portfolio.reset_index(inplace=True)
    # Price each position at the closing price on start_date and derive share counts,
    # for comparison purposes.
    for i in range(len(final_10_tickers)):
        portfolio.loc[i, 'Price'] = daily_price_df[final_10_tickers[i]][0]
        portfolio.loc[i, 'Shares'] = portfolio.loc[i, 'Value'] / portfolio.loc[i, 'Price']
    # Track the value of the portfolio over time so standard deviations can be compared.
    portfolio_value = pd.DataFrame()
    for i in range(len(final_10_tickers)):
        ticker_value = pd.DataFrame()
        ticker_value = pd.DataFrame(daily_price_df[final_10_tickers[i]])
        ticker_value.columns = ['Close']
        ticker_value['Shares'] = portfolio['Shares'][i]
        ticker_value['Portfolio_Value'] = ticker_value['Shares'] * ticker_value['Close']
        portfolio_value = pd.concat([portfolio_value, pd.DataFrame(ticker_value['Portfolio_Value'])], axis = 1)
    # One column per ticker, plus a summed total value per day for overall risk.
    portfolio_value.columns = final_10_tickers
    portfolio_value['Value'] = portfolio_value.sum(axis=1)
    # Drop the last row (yahoo finance can return a final all-NaN row).
    portfolio_value = portfolio_value[:-1]
    # Formatting
    FinalPortfolio = portfolio[['Ticker', 'Price', 'Shares', 'Value', 'Weight']]
    return [new_final_ticker_list, FinalPortfolio, portfolio_value['Value'].pct_change().std()]


# Function FINAL_FUNC generates a list containing the portfolio with the highest std
# and its std value, i.e. [portfolio dataframe, std].
def FINAL_FUNC(lst):
    # Tickers remaining after removing the current riskiest candidate.
    # NOTE(review): constant_dec_lst is updated throughout but never read afterwards.
    constant_dec_lst = lst.copy()
    # Accumulates the [portfolio, std] results returned by portfolio_value_std.
    portfolio_std_lst = []
    # Maps each ticker to the std of its daily returns over the window.
    std_dict = {}
    # Run std_return concurrently in multiple threads to improve efficiency.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        temp_ticker_list1 = executor.map(std_return, lst)
    # Store each ticker and its std value in std_dict.
    for i in temp_ticker_list1:
        std_dict[i[0]] = i[1]
    # One-row dataframe holding every ticker's std.
    std_df = pd.DataFrame(std_dict, index=[0])
    # Stds of all stocks in descending order, and the tickers in the same order.
    # NOTE(review): highest_std_return is called twice on the same input; one call
    # whose result is reused would halve the work.
    riskiest_stock_list = highest_std_return(std_df)[-1]
    riskiest_stock_name_list = highest_std_return(std_df)[-2]
    # Create the daily closing price dataframe.
    price_list = []
    daily_price_df = pd.DataFrame()
    # NOTE(review): this maps over the module-level final_ticker_list rather than
    # the `lst` parameter; equivalent today because FINAL_FUNC is called with
    # final_ticker_list, but it would break for any other argument — confirm.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        temp_ticker_list2 = executor.map(hist, final_ticker_list)
    # Store the closing prices of each stock in price_list.
    for i in temp_ticker_list2:
        i[1].columns = [i[0]]
        price_list.append(i[1])
    # Combine all closing prices into one dataframe named daily_price_df.
    for i in range(len(price_list)):
        daily_price_df = pd.concat([price_list[i], daily_price_df], join='outer', axis=1)
    # Delete the last row (pd.concat can leave a trailing NaN row).
    daily_price_df = daily_price_df[:-1]
    # Create the daily return dataframe (first NaN row dropped).
    daily_return_df = daily_price_df.pct_change()[1:]
    # Try candidates in decreasing order of individual std; stop as soon as the next
    # candidate's std cannot beat the best portfolio found so far.
    for i in range(len(lst)):
        # Base case: if portfolio_std_lst is empty, add the first portfolio with its
        # std value (hurdle 0) and update constant_dec_lst.
        if len(portfolio_std_lst) == 0:
            temp_portfolio_std = portfolio_value_std(riskiest_stock_name_list[i], riskiest_stock_list[i], final_ticker_list, 0, daily_price_df, daily_return_df)
            # If portfolio_value_std returns None the search is done; return the last value.
            if temp_portfolio_std == None:
                return portfolio_std_lst[-1]
            else:
                portfolio_std_lst.append(temp_portfolio_std[1:])
                constant_dec_lst = temp_portfolio_std[0]
        else:
            # Otherwise: if the new portfolio's std beats the best so far, append it;
            # either way update constant_dec_lst.
            temp_portfolio_std = portfolio_value_std(riskiest_stock_name_list[i], riskiest_stock_list[i], final_ticker_list, portfolio_std_lst[-1][-1], daily_price_df, daily_return_df)
            # If portfolio_value_std returns None the search is done; return the last value.
            if temp_portfolio_std == None:
                return portfolio_std_lst[-1]
            elif temp_portfolio_std[-1] > portfolio_std_lst[-1][-1]:
                portfolio_std_lst.append(temp_portfolio_std[1:])
                constant_dec_lst = temp_portfolio_std[0]
            else:
                portfolio_std_lst = portfolio_std_lst
                constant_dec_lst = temp_portfolio_std[0]
    # Return the last (highest-std) portfolio recorded.
    return portfolio_std_lst[-1]


# Store the result of FINAL_FUNC with the cleaned list in a variable named p.
p = FINAL_FUNC(final_ticker_list)
p

# Make a copy of p[0] (the chosen-portfolio dataframe) and store it in `portfolio`.
portfolio = p[0].copy()
portfolio

# +
# Create the FinalPortfolio dataframe: calculate the amount of money to invest in
# each stock. The window is re-declared so prices as of 2021-11-26 can be extracted.
start_date = '2021-07-02'
end_date = '2021-11-27'

# Store the ticker choices in final_10_tickers.
final_10_tickers = portfolio['Ticker']

# Empty columns for the last closing price and the share count it buys.
portfolio['Price'] = ""
portfolio['Shares'] = ""
# Dollar amount to invest into each stock out of $100,000.
portfolio['Value'] = 100000 * portfolio['Weight']

# Add the latest closing price and the number of shares purchased to the dataframe.
for i in range(len(final_10_tickers)):
    portfolio.loc[i, 'Price'] = hist(portfolio.iloc[i,0])[1].iloc[[-1]].iloc[0,0]
    portfolio.loc[i, 'Shares'] = (portfolio.loc[i, 'Value'])/(portfolio.loc[i, 'Price'])

# Formatting: 1-based index for presentation.
portfolio.index = np.arange(1, len(portfolio)+1)
FinalPortfolio = portfolio[['Ticker', 'Price', 'Shares', 'Value', 'Weight']]

# Create Stocks dataframe
Stocks = FinalPortfolio[['Ticker', 'Shares']]
Stocks
# -

# Outputs Stocks to a CSV file
Stocks.to_csv('Stocks_Group_2.csv')

# ## Our Strategy
# First, we will discuss our strategy in choosing the stocks that form the riskiest portfolio.
# The first few lines of code are pretty self-explanatory: they import the csv file provided, convert it to a list of potential stocks, and remove any stocks that do not exist or do not meet the requirement of a minimum 10000 average daily volume in the given timeframe.
# ##### Algorithm for finding a portfolio
# We analyzed each stock's history in the time between July 2nd and November 26th of 2021. The starting date was chosen to be July 2nd because we are interested in the short term volatility of the stocks as the portfolios are only tracked for a week for the competition. Thus, we needed a timeframe short enough to capture current information about the stocks (ie. maybe there is some bad press/new legislation that disturbed the stock price recently but the stock is normally quite stable - we are still interested in this stock since the portfolio is only tracked for a week) but long enough to contain sufficient information for a proper analysis. We thought that July 2nd, which was also the timeframe we started tracking the volume of the stocks, was a healthy medium that fulfilled our needs.
November 26th is a much more self-explanatory choice: we want to get the latest data prior to generating the portfolio. # In our analysis of each stock, we chose to find the most volatile stock by finding the stock with the highest standard deviation, meaning that the stock will be more likely to move further away from its average rate of returns. This brings our portfolio the most possible individual risk a stock in the list can bring, which sets a foundation for our "riskiest portfolio". # Since we want the riskiest portfolio possible, then we want to preserve the risk of the riskiest stock as much as possible since the addition of any "non-1" correlated stock (which is every stock other than itself) will always diversified away some (may be very small or large) risk from the portfolio. Therefore, we will only choose 10 stocks (the minimum required) to form our portfolio so the riskiest stock's risk will be prioritized and preserved. In choosing the other 9 stocks, we decided to find the stocks that had the highest correlation with the riskiest stock since those stocks will most resemble the shape of the riskiest stock and thus preserve the risk of the riskiest stock. # As mentioned before, our strategy depends on maximizing the singular risk of the riskiest stock and minimizing the effect of diversification from any other stock that is added to the portfolio since we believe that will produce the highest risk possible for our portfolio. Then naturally, we will weigh the stocks in a way that follows that principle, so our weighing is as follows. We will use 35% (the maximum for a single stock) of our portfolio on purchasing the riskiest stock, 25% (the maximum while allowing the rest of the stocks to have an minimum amount invested) on purchasing the stock that correlated the highest with the riskiest stock, and 5% on each of the other stocks. 
# This allows us to maximize the risk of the portfolio by preserving the risk of the riskiest stock (since the stock that is most correlated to the riskiest will preserve that risk the best) and minimizing the impact of diversification from the other stocks on our portfolio.

# #### Recursion of algorithm to find the final portfolio

# Notice in the algorithm discussed above that there is a chance of failure of that algorithm: if the stock with the highest standard deviation is poorly correlated with all of the other stocks, then there might exist another stock that still has a very high standard deviation and also is highly correlated with many other stocks, which can form a portfolio of 10 stocks that might beat the portfolio generated from the original algorithm in terms of risk. This is because a low correlation with the rest of the stocks in a portfolio can result in heavy diversification of risk, which, in turn, heavily lowers the standard deviation of the portfolio.

# Our solution to this is to test all of the possible portfolios that could potentially beat the first one we generated. However, doing that could require a lot of computing resources and time, so we did it in a way that eliminates the need to go through every single possibility.

# Since we know that the maximum standard deviation of a portfolio is the standard deviation of the riskiest stock in that portfolio, and our previous algorithm involves finding a stock that has a high standard deviation and building a portfolio around that stock, we can safely assume that any stock with a standard deviation lower than the standard deviation of the first portfolio we found cannot be used to generate a portfolio with a higher standard deviation.
# Also, note that the case of having a higher standard deviation stock among the 9 correlated stocks — which could bring the standard deviation of the newly generated portfolio higher than the stock it is built around — is handled in the recursion: any stock that has a higher standard deviation and a high correlation with this stock is already considered before this stock.

# Using this logic, we can recurse the algorithm. First, we find a portfolio using the algorithm. Then, we find the next riskiest (highest standard deviation) stock and find a portfolio using the algorithm on that stock. We then compare the standard deviations of both of these portfolios and keep whichever one is higher. Then we run this same loop with the highest standard deviation portfolio out of the two and the next riskiest stock in the list until there exists no stock with a higher standard deviation than the highest standard deviation portfolio we have found. We stop here and output that portfolio.

# ## Graphs and Proofs

# To visualize and prove that our strategy works, we will compare the ten stocks we chose with ten random stocks by graphing and calculating the standard deviation of the two portfolios. Note that we will use the same weighing strategy for consistency purposes.
# ### Random Portfolio vs Our Portfolio # #### Our Portfolio Code # Resets portfolio since portfolio index is set to 1 to 10 for output (delete if not needed) portfolio.reset_index(inplace=True) # Resets portfolio shares according to our weighing but for the closing price on start_date for comparison purposes for i in range(len(final_10_tickers)): portfolio.loc[i, 'Shares'] = portfolio.loc[i, 'Value'] / yf.Ticker(final_10_tickers[i]).history(start = start_date, end = end_date)['Close'][0] # + # Initiate a dataframe to see the value of the portfolio over time for comparison purposes portfolio_value = pd.DataFrame() # Calculate the value of the portfolio over time so we can compare standard deviations for i in range(len(final_10_tickers)): ticker_value = pd.DataFrame() ticker_value = pd.DataFrame(yf.Ticker(final_10_tickers[i]).history(start = start_date, end = end_date)['Close']) ticker_value.columns = ['Close'] ticker_value['Shares'] = portfolio['Shares'][i] ticker_value['Portfolio_Value'] = ticker_value['Shares'] * ticker_value['Close'] portfolio_value = pd.concat([portfolio_value, pd.DataFrame(ticker_value['Portfolio_Value'])], axis = 1) # Rename the columns and add a new column that sums the value of the portfolio over time so we can see the over risk of the portfolio portfolio_value.columns = final_10_tickers portfolio_value['Value'] = portfolio_value.sum(axis=1) # Drop the last column since yahoo finance has a bug of making the last row all NaN portfolio_value = portfolio_value[:-1] portfolio_value # - # #### Random Portfolio Code # + # Get the 10 random stocks we will be comparing random_10_tickers = np.random.choice(final_ticker_list, 10, replace=False) # Set a dictionary to weigh the 10 random stocks random_portfolio_dict = { 'Ticker': random_10_tickers, 'Weight': [0.35, 0.25, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05] } # Change that dictionary to a dataframe for further manipulation random_portfolio = pd.DataFrame(random_portfolio_dict) # Calculate 
the amount of money to invest into each stock random_portfolio['Value'] = 100000 * random_portfolio['Weight'] # - # Sets random portfolio shares according to our weighing but for the closing price on start_date for comparison purposes random_portfolio['Shares'] = 0 for i in range(len(random_10_tickers)): random_portfolio.loc[i, 'Shares'] = random_portfolio.loc[i, 'Value'] / yf.Ticker(random_10_tickers[i]).history(start = start_date, end = end_date)['Close'][0] # + # Initiate a dataframe to see the value of the portfolio over time for comparison purposes random_portfolio_value = pd.DataFrame() # Calculate the value of the portfolio over time so we can compare standard deviations for i in range(len(random_10_tickers)): ticker_value = pd.DataFrame() ticker_value = pd.DataFrame(yf.Ticker(random_10_tickers[i]).history(start = start_date, end = end_date)['Close']) ticker_value.columns = ['Close'] ticker_value['Shares'] = random_portfolio['Shares'][i] ticker_value['Portfolio_Value'] = ticker_value['Shares'] * ticker_value['Close'] random_portfolio_value = pd.concat([random_portfolio_value, pd.DataFrame(ticker_value['Portfolio_Value'])], axis = 1) # Rename the columns and add a new column that sums the value of the portfolio over time so we can see the over risk of the portfolio random_portfolio_value.columns = random_10_tickers random_portfolio_value['Value'] = random_portfolio_value.sum(axis=1) # Drop the last column since yahoo finance has a bug of making the last row all NaN random_portfolio_value = random_portfolio_value[:-1] random_portfolio_value # - # #### Graph and Analysis # + # Plot the data to visualize how risky each portfolio is plt.plot(portfolio_value.index, portfolio_value['Value'].pct_change(), label='Our') plt.plot(random_portfolio_value.index, random_portfolio_value['Value'].pct_change(), label='Random') # Define labels plt.title('Our Portfolio vs Random Portfolio in Percent Change') plt.xlabel('Dates') plt.xticks(rotation=70) plt.ylabel('Changes in 
Price (%)') # Create legend plt.legend(loc='best') plt.show() # + print('The Standard Deviation for our Portfolio is:') print(portfolio_value['Value'].pct_change().std()) print('') print('The Standard Deviation for the Randomly Generated Portfolio is:') print(random_portfolio_value['Value'].pct_change().std()) # - # Since our portfolio has a higher standard deviation - and we are confident that it will for any randomly generated portfolio - then our strategy is able to provide us with a riskier portfolio than just randomly picking ten stocks. This is also visualized by the graph, where we can clearly see that our portfolio fluctuates in percent change much more than the random portfolio, once again proving that our portfolio is more risky. # ### Evenly Weighted vs Our Weighing # #### Evenly Weighted Portfolio Code # We will also compare the weighing of our ten stocks according to our strategy versus an even split in weighing amongst the stocks # + # Set a dictionary to weigh the 10 stocks evenly even_portfolio_dict = { 'Ticker': final_10_tickers, 'Weight': [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] } # Change that dictionary to a dataframe for further manipulation even_portfolio = pd.DataFrame(even_portfolio_dict) # Calculate the amount of money to invest into each stock even_portfolio['Value'] = 100000 * even_portfolio['Weight'] # - # Sets random portfolio shares according to our weighing but for the closing price on start_date for comparison purposes even_portfolio['Shares'] = 0 for i in range(len(final_10_tickers)): even_portfolio.loc[i, 'Shares'] = even_portfolio.loc[i, 'Value'] / yf.Ticker(final_10_tickers[i]).history(start = start_date, end = end_date)['Close'][0] # + # Initiate a dataframe to see the value of the portfolio over time for comparison purposes even_portfolio_value = pd.DataFrame() # Calculate the value of the portfolio over time so we can compare standard deviations for i in range(len(final_10_tickers)): ticker_value = pd.DataFrame() 
ticker_value = pd.DataFrame(yf.Ticker(final_10_tickers[i]).history(start = start_date, end = end_date)['Close']) ticker_value.columns = ['Close'] ticker_value['Shares'] = even_portfolio['Shares'][i] ticker_value['Portfolio_Value'] = ticker_value['Shares'] * ticker_value['Close'] even_portfolio_value = pd.concat([even_portfolio_value, pd.DataFrame(ticker_value['Portfolio_Value'])], axis = 1) # Rename the columns and add a new column that sums the value of the portfolio over time so we can see the over risk of the portfolio even_portfolio_value.columns = final_10_tickers even_portfolio_value['Value'] = even_portfolio_value.sum(axis=1) # Drop the last column since yahoo finance has a bug of making the last row all NaN even_portfolio_value = even_portfolio_value[:-1] even_portfolio_value # - # #### Graph and Analysis # + # Plot the data to visualize how risky each portfolio is plt.plot(portfolio_value.index, portfolio_value['Value'].pct_change(), label='Our') plt.plot(even_portfolio_value.index, even_portfolio_value['Value'].pct_change(), label='Even') # Define labels plt.title('Our Portfolio vs Even Portfolio in Percent Change') plt.xlabel('Dates') plt.xticks(rotation=70) plt.ylabel('Changes in Price (%)') # Create legend plt.legend(loc='best') plt.show() # + print('The Standard Deviation for our Portfolio is:') print(portfolio_value['Value'].pct_change().std()) print('') print('The Standard Deviation for the Evenly Weighted Portfolio is:') print(even_portfolio_value['Value'].pct_change().std()) # - # As we can see from both the graph and the calculation, our weighing produces a portfolio with a higher standard deviation than one that is weighted evenly - and we are confident that it will always be the highest one out of any weighing combinations - which means our weighing strategy successfully gives a higher standard deviation. # ## Contribution Declaration # # The following team members made a meaningful contribution to this assignment: # # <NAME>, <NAME>, <NAME>
Team_2_Group_Assignment_Risky (2).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.0 64-bit (''my_env64'': venv)'
#     name: python38064bitmyenv64venv0776e80e1d964a309141464fb4ff9d0d
# ---

# ## Support Vector Machines for Classification

# ### Hard Margin (SVM without regularization)
#
# ### Soft Margin (SVM with regularization)
#
# ### Simplified Sequential Minimal Optimization (SMO)
#

# %matplotlib inline
import numpy as np
import sklearn.preprocessing
import sklearn.datasets
import pandas as pd
import sklearn.model_selection
import numpy.random
import math
import sklearn.metrics

# + tags=[]
X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, random_state=42)
standard = sklearn.preprocessing.StandardScaler()
X_train = standard.fit_transform(X_train)
# Every feature is continuous, so standardisation alone is enough (no one-hot encoding).
training_data = pd.DataFrame(np.c_[X_train, y_train])
# Relabel the targets to {-1, +1}: the SVM formulation below requires signed labels.
y_train = np.where(y_train == 1, 1, -1)
X_test = standard.transform(X_test)
y_test = np.where(y_test == 1, 1, -1)
test_data = pd.DataFrame(np.c_[X_test, y_test])
print(training_data.shape)
print(test_data.shape)
#training_data.iloc[10]
# -

class SVM2CLASS(object):
    """Two-class kernel SVM trained with the simplified SMO algorithm.

    Labels must be in {-1, +1}.  ``C`` is the soft-margin penalty, ``tol``
    the KKT-violation tolerance, and ``max_passes`` the number of consecutive
    full sweeps without any alpha update required before training stops.
    """

    def __init__(self, X_train, y_train, C = 10, tol = 0.001, max_passes = 5, passes = 0):
        self.X_train = X_train
        self.y_train = y_train
        self.C = C
        self.tol = tol
        self.max_passes = max_passes
        self.passes = passes
        self.b = 0
        # One Lagrange multiplier per training sample; must be of size n x 1.
        self.alphas = np.zeros((self.X_train.shape[0], 1))
        self.coefficients = ()
        self.kernel_type = {"poly": self.polynomial_kernel, "gauss": self.gaussian_kernel}
        self.kernel_parameters = []
        self.kernel_choice = ""

    def polynomial_kernel(self, x_i, x_j, a):
        """Polynomial kernel (x_i . x_j + a[0]) ** a[1]."""
        return np.power(np.dot(x_i.T, x_j) + a[0], a[1])

    def gaussian_kernel(self, x, x_star, sigma):
        """Gaussian (RBF) kernel exp(-||x - x_star||^2 / (2 * sigma^2))."""
        return np.exp(np.divide(-1 * (np.linalg.norm(x - x_star) ** 2), 2 * sigma ** 2))

    def SMO(self, kernel_choice, parameters):
        """Run simplified SMO and return (support_vectors, support_alphas, support_target).

        ``kernel_choice`` is "poly" or "gauss"; ``parameters`` configures the
        chosen kernel.  Returns -1 on an unknown kernel name.
        """
        choices = np.arange(0, self.y_train.shape[0])
        count = 0
        self.kernel_choice = kernel_choice
        n = self.X_train.shape[0]
        # Precompute the full Gram matrix once.  Convenient for small data sets,
        # but O(n^2) memory, so this implementation will not scale to e.g. MNIST.
        if kernel_choice == "poly":
            exponent = parameters[0]
            intercept = parameters[1]
            # Stored as [intercept, exponent] to match the kernel's (a[0]=shift,
            # a[1]=power) convention, i.e. callers pass [exponent, intercept].
            self.kernel_parameters = [intercept, exponent]
            K = np.zeros((n, n))
            for r in range(0, n):
                for c in range(0, n):
                    K[r, c] = self.kernel_type[self.kernel_choice](self.X_train[r, :], self.X_train[c, :], self.kernel_parameters)
            # Sanity check: no zero eigenvalue, i.e. the Gram matrix is non-singular.
            # NOTE(review): this tests invertibility, not positive semi-definiteness.
            assert not np.any(np.linalg.eig(K)[0] == 0)
        elif kernel_choice == "gauss":
            self.kernel_parameters = 2  # fixed sigma for the RBF kernel
            K = np.zeros((n, n))
            for r in range(0, n):
                for c in range(0, n):
                    K[r, c] = self.kernel_type[self.kernel_choice](self.X_train[r, :], self.X_train[c, :], self.kernel_parameters)
            assert np.linalg.det(K) != 0
        else:
            print("Wrong entry")
            return -1
        # Sweep until max_passes consecutive passes make no alpha change.
        while self.passes <= self.max_passes:
            num_changed_alphas = 0
            for i in range(0, n):
                # f(x_i) = sum_k alpha_k * y_k * K(x_k, x_i), computed from the
                # precomputed Gram column instead of re-evaluating the kernel.
                f_x_i = np.sum(self.alphas.reshape(-1, 1) * (self.y_train.reshape(-1, 1) * K[:, i].reshape(-1, 1)).reshape(-1, 1))
                E_i = f_x_i + self.b - self.y_train[i]
                # Only optimise i when it violates its KKT condition beyond tol.
                if (((self.y_train[i] * E_i) < -self.tol) and (self.alphas[i] < self.C)) or (((self.y_train[i] * E_i) > self.tol) and (self.alphas[i] > 0)):
                    # Draw a random partner index j != i.
                    j = np.random.choice([c for c in choices if c != i])
                    assert i != j
                    f_x_j = np.sum(self.alphas.reshape(-1, 1) * (self.y_train.reshape(-1, 1) * K[:, j].reshape(-1, 1)).reshape(-1, 1))
                    E_j = f_x_j + self.b - self.y_train[j]
                    # Copy, otherwise "old" and "new" would alias the same array cell.
                    alpha_i_old = self.alphas[i].copy()
                    alpha_j_old = self.alphas[j].copy()
                    # Box constraints [L, H] for alpha_j along the constraint line.
                    if self.y_train[i] != self.y_train[j]:
                        L = max(0, self.alphas[j] - self.alphas[i])
                        H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
                    else:
                        L = max(0, self.alphas[j] + self.alphas[i] - self.C)
                        H = min(self.C, self.alphas[j] + self.alphas[i])
                    if L == H:
                        continue  # alpha_j cannot move at all
                    # Second derivative of the dual objective along the constraint line.
                    eta = 2 * K[i, j] - K[i, i] - K[j, j]
                    # eta >= 0 means x_i and x_j are too similar for this pair to
                    # yield a guaranteed descent step, so skip it.
                    if eta >= 0:
                        continue
                    # Unclipped optimum for alpha_j, then clip into [L, H].
                    alpha_j_clip = alpha_j_old - (1 / eta) * self.y_train[j] * (E_i - E_j)
                    if alpha_j_clip > H:
                        self.alphas[j] = H
                    elif alpha_j_clip < L:
                        self.alphas[j] = L
                    else:
                        self.alphas[j] = alpha_j_clip
                    if abs(self.alphas[j] - alpha_j_old) < 1e-3:
                        continue  # change too small to be worth propagating
                    # Move alpha_i the opposite way so sum(alpha * y) stays constant.
                    self.alphas[i] = alpha_i_old + self.y_train[i] * self.y_train[j] * (alpha_j_old - self.alphas[j])
                    # Bias candidates derived from the two updated multipliers.
                    b1 = self.b - E_i - self.y_train[i] * (self.alphas[i] - alpha_i_old) * K[i, i] - self.y_train[j] * (self.alphas[j] - alpha_j_old) * K[i, j]
                    b2 = self.b - E_j - self.y_train[i] * (self.alphas[i] - alpha_i_old) * K[i, j] - self.y_train[j] * (self.alphas[j] - alpha_j_old) * K[j, j]
                    if (self.alphas[j] > 0) and (self.alphas[j] < self.C):
                        self.b = b2
                    elif (self.alphas[i] > 0) and (self.alphas[i] < self.C):
                        self.b = b1
                    else:
                        self.b = (b1 + b2) / 2
                    num_changed_alphas = num_changed_alphas + 1
            print(f"count:{count}, passes:{self.passes}, max_passes:{self.max_passes}, b:{self.b}")
            count += 1
            if num_changed_alphas == 0:
                self.passes = self.passes + 1
            else:
                self.passes = 0
        # Keep only the support vectors (alpha != 0) in memory.
        support_indices = np.argwhere(self.alphas != 0)[:, 0]
        support_vectors = self.X_train[support_indices, :]
        support_alphas = self.alphas[support_indices]
        support_target = self.y_train[support_indices]
        self.importantParameters = (support_vectors, support_alphas, support_target)
        return self.importantParameters

    def prediction_dataset(self, X):
        """Predict a label in {-1, +1} for every row of X."""
        return [self.prediction(x) for x in X]

    def prediction(self, x):
        """Sign of sum_k alpha_k * y_k * K(x, sv_k) + b over the support vectors."""
        support_vectors, support_alphas, support_target = self.importantParameters
        t1 = np.sum([y1 * alpha * self.kernel_type[self.kernel_choice](x, x1, self.kernel_parameters)
                     for x1, alpha, y1 in zip(support_vectors, support_alphas, support_target)])
        pred = t1 + self.b
        if pred >= 0:
            return 1
        return -1

# + tags=["outputPrepend"]
svm_model = SVM2CLASS(X_train, y_train, C = 10, tol = 0.001, max_passes = 5, passes = 0)
support_vectors, support_alphas, support_target = svm_model.SMO("poly", [1, 1])
pred = svm_model.prediction_dataset(X_train)
print("Performance on the training set")
print(sklearn.metrics.confusion_matrix(y_train, pred))

# + tags=[]
pred = svm_model.prediction_dataset(X_train)
print("Performance on the training set")
print(sklearn.metrics.confusion_matrix(y_train, pred))

# + tags=[]
pred = svm_model.prediction_dataset(X_test)
print("Performance on the test set")
print(sklearn.metrics.confusion_matrix(y_test, pred))

# + tags=[]
# Reference run: sklearn SVC with the same precomputed kernel matrix.
# NOTE(review): this passes [0, 1] (no intercept) while the SMO run above used
# [1, 1]; confirm whether the two runs are meant to use identical kernels.
import sklearn.svm

def polynomial_kernel(x_i, x_j, a):
    return np.power(np.dot(x_i.T, x_j) + a[0], a[1])

K = np.zeros((X_train.shape[0], X_train.shape[0]))
for i in range(0, X_train.shape[0]):
    for j in range(0, X_train.shape[0]):
        K[i, j] = polynomial_kernel(X_train[i, :], X_train[j, :], [0, 1])
assert not np.any(np.linalg.eig(K)[0] == 0)
model = sklearn.svm.SVC(kernel="precomputed", C=10)
model.fit(K, y_train)
print("Performance on the training set")
print(sklearn.metrics.confusion_matrix(y_train, model.predict(K)))
print(model.support_)
print(model.intercept_)
model.dual_coef_
# -

# ### References
# * Chapter 1, chapter 6 and Chapter 7 from Bishop, C. (2006). Pattern Recognition and Machine Learning. Cambridge: Springer.
# * <NAME>, Lec 6: (https://www.youtube.com/watch?v=qyyJKd-zXRE)
# * <NAME>, Lec 7: (https://www.youtube.com/watch?v=s8B4A5ubw6c)
# * <NAME>, Lec 8: (https://www.youtube.com/watch?v=bUv9bfMPMb4)
# * Simplified Sequential Minimal Optimization: (https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&cad=rja&uact=8&ved=2ahUKEwiRlObmw5_qAhW7ShUIHSjJAbYQFjAAegQIAhAB&url=http%3A%2F%2Fcs229.stanford.edu%2Fmaterials%2Fsmo.pdf&usg=AOvVaw201bQxVZY0MmUn_gGAu5O8)
#
Pattern_Recognition_and_ML_Folder/Chapter_7_Sparse_Kernel_Machines/proj7_SVM_2Classes_SMO.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Statewise COVID-19 snapshot for India: fetch the JSON feed, chart confirmed
# counts with seaborn and plotly, and draw a folium bubble map.

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import requests
import pandas as pd

# Fetch the statewise payload; .json() raises if the body is not valid JSON.
inddata = requests.get('https://raw.githubusercontent.com/CarmineCrown/COVID-19/master/covid_19_datasets/covid19_india/india_statewise.json').json()
inddata

# Flatten the nested records into one row per state.
# Fix: pd.io.json.json_normalize was deprecated in pandas 1.0 and later removed;
# pd.json_normalize is the supported, behaviourally identical spelling.
dfind = pd.json_normalize(inddata['data']['statewise'])
dfind

dfind.set_index('state', inplace = True)
dfind

# Bar chart of confirmed cases per state.
sns.set_style('darkgrid')
plt.figure(figsize=(15,10))
sns.barplot(x = dfind.index, y = dfind['confirmed'])
plt.title('Confirmed Cases in India Statewise')
plt.xticks(rotation = 90)
plt.show()

# Using Plotly:
import plotly.express as px

figure = px.bar(dfind, x = dfind.index, height = 800, width = 1000, y = 'confirmed', color='confirmed')
figure.show()

# Hand-maintained [lat, long] per state/UT.  Keys must match dfind.index exactly,
# otherwise the lookup loop below raises KeyError.
location = {
    "Sikkim": [27.5330, 88.5122],
    "Maharashtra": [19.7515, 75.7139],
    "West Bengal": [22.9868, 87.8550],
    "Chandigarh": [30.7333, 76.7794],
    "Karnataka": [15.3173, 75.7139],
    "Telangana": [18.1124, 79.0193],
    "Uttar Pradesh": [26.8467, 80.9462],
    "Gujarat": [22.2587, 71.1924],
    "Odisha": [20.9517, 85.0985],
    "Delhi": [28.7041, 77.1025],
    "Tamil Nadu": [11.1271, 78.6569],
    "Haryana": [29.0588, 76.0856],
    "Madhya Pradesh": [22.9734, 78.6569],
    "Kerala": [10.8505, 76.2711],
    "Rajasthan": [27.0238, 74.2179],
    "Jammu and Kashmir": [33.7782, 76.5762],
    "Ladakh": [34.1526, 77.5770],
    "Andhra Pradesh": [15.9129, 79.7400],
    "Bihar": [25.0961, 85.3131],
    "Chhattisgarh": [21.2787, 81.8661],
    "Uttarakhand": [30.0668, 79.0193],
    "Himachal Pradesh": [31.1048, 77.1734],
    "Goa": [15.2993, 74.1240],
    "Tripura": [23.9408, 91.9882],
    "Andaman and Nicobar Islands": [11.7401, 92.6586],
    "Puducherry": [11.9416, 79.8083],
    "Manipur": [24.6637, 93.9063],
    "Mizoram": [23.1645, 92.9376],
    "Assam": [26.2006, 92.9376],
    "Meghalaya": [25.4670, 91.3662],
    "Arunachal Pradesh": [28.2180, 94.7278],
    "Jharkhand": [23.6102, 85.2799],
    "Nagaland": [26.1584, 94.5624],
    "Punjab": [31.1471, 75.3412],
    "Dadra and Nagar Haveli": [20.1809, 73.0169],
    "Lakshadweep": [10.5667, 72.6417],
    "Daman and Diu": [20.4283, 72.8397]
}

# Attach coordinates to each row (columns start as empty strings, then get floats).
dfind['Lat'] = ""
dfind['Long'] = ""
for index in dfind.index:
    dfind.loc[dfind.index == index, "Lat"] = location[index][0]
    dfind.loc[dfind.index == index, "Long"] = location[index][1]

import folium

# Dark-tiled map of India; one circle per state, radius proportional to
# log2(confirmed).  The +1.00001 guards log2(0) for states with zero cases.
imap = folium.Map(location= [20, 80], zoom_start=4.5, max_zoom=8, height=1000, width='100%', tiles='CartoDB dark_matter')
for i in range(0, len(dfind)):
    folium.Circle(location= [dfind.iloc[i]['Lat'], dfind.iloc[i]['Long']],
                  radius=(int(np.log2(dfind.iloc[i]['confirmed']+1.00001)))*13000,
                  # NOTE(review): the tooltip closes a </ul> that is never opened;
                  # browsers tolerate it, so the string is kept byte-identical.
                  tooltip= "<h5 style='text-align:center;font-weight: bold'>"+ dfind.iloc[i].name +"</h5>"+
                           "<li>Confirmed "+str(dfind.iloc[i]['confirmed'])+"</li>"+
                           "<li>Deaths "+str(dfind.iloc[i]['deaths'])+"</li>"+
                           "<li>Active "+str(dfind.iloc[i]['active'])+"</li>"+
                           "</ul>",
                  color = 'red',
                  fill = True).add_to(imap)
imap
170520AIworkshop05 COVID19 IND json.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.6 64-bit
#     metadata:
#       interpreter:
#         hash: 1ee38ef4a5a9feb55287fd749643f13d043cb0a7addaab2a9c224cbe137c0062
#     name: python3
# ---

# # generate feature list
# ```
# python3 gen_feat.py --inf_list toy_imgs/img.list --feat_list toy_imgs/feat.list --resume magface_epoch_00025.pth
# ```

import numpy as np
import cv2
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(style="white")
# %matplotlib inline

# # Visualize magnitudes (qualities)

def imshow(img):
    """Display a BGR (OpenCV) image with matplotlib, which expects RGB."""
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

def show(idx_):
    """Display image idx_ and print its feature magnitude and path."""
    imgname = imgnames[idx_]
    img = cv2.imread(imgname)
    imshow(img)
    print(img_2_mag[imgname], imgname)

# Parse the feature list: each line is "<image path> <feat_0> <feat_1> ...".
# For each image keep the L2-normalized feature and its magnitude (MagFace
# uses the magnitude as a quality proxy).
with open('toy_imgs/feat.list', 'r') as f:
    lines = f.readlines()
img_2_feats = {}
img_2_mag = {}
for line in lines:
    parts = line.strip().split(' ')
    imgname = parts[0]
    # Fix: the original divided a Python list by a float (feats/mag), which
    # raises TypeError; convert to an ndarray so the division broadcasts.
    feats = np.asarray([float(e) for e in parts[1:]])
    mag = np.linalg.norm(feats)
    img_2_feats[imgname] = feats/mag
    img_2_mag[imgname] = mag
imgnames = list(img_2_mag.keys())
mags = [img_2_mag[imgname] for imgname in imgnames]
# Indices of images sorted by ascending magnitude (lowest quality first).
sort_idx = np.argsort(mags)

# +
# Tile the images left-to-right in ascending-magnitude order on one canvas.
H, W = 112, 112
NH, NW = 1, 10
canvas = np.zeros((NH * H, NW * W, 3), np.uint8)
for i, ele in enumerate(sort_idx):
    # Keep only the last two path components so the path is relative to CWD.
    imgname = '/'.join(imgnames[ele].split('/')[-2:])
    img = cv2.imread(imgname)
    canvas[int(i / NW) * H: (int(i / NW) + 1) * H, (i % NW) * W: ((i % NW) + 1) * W, :] = img
# -

plt.figure(figsize=(20, 20))
print([float('{0:.2f}'.format(mags[idx_])) for idx_ in sort_idx])
imshow(canvas)

# # visualize recognition

# Cosine-similarity matrix of the unit-norm features, in magnitude order.
feats = np.array([img_2_feats[imgnames[ele]] for ele in sort_idx])
sim_mat = np.dot(feats, feats.T)
fig, ax = plt.subplots(figsize=(8, 6))
ax = sns.heatmap(sim_mat, cmap="PuRd", annot=True)
inference/examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Setup

# Load the packages we need: requests for HTTP, BeautifulSoup for HTML parsing.
import requests
from bs4 import BeautifulSoup

base_site = "https://en.wikipedia.org/wiki/List_of_national_capitals_by_population"

# Connect to the webpage and confirm the request succeeded.
r = requests.get(base_site)
r.status_code

# Raw HTML bytes of the page.
html = r.content

# # Extracting tables with Beautiful Soup

# +
# First, the manual route: extract the table with plain Beautiful Soup calls.
# -

# Parse the HTML with the lxml backend.
soup = BeautifulSoup(html, 'lxml')

# HTML tables are marked with the 'table' tag.
soup.find_all("table")

# The main table is the second one on the page.
table = soup.find_all("table")[1]
table

# +
# Reminder of the relevant tags:
# 'th' marks a column heading
# 'tr' marks a table row
# 'td' marks a table cell (inside a row)
# -

# All rows of the table; the first row holds the headings.
table.find_all('tr')

# Inspect what the first row contains.
table.find_all('tr')[0].contents

# The 'Capital' heading sits at position 5 of the row's children.
table.find_all('tr')[0].contents[5]

# Pull the 'Capital' column out of every row (heading included).
capitals = [row.contents[5].text for row in table.find_all('tr')]
capitals

# +
# That was a single column; each remaining column needs the same treatment.
# -

# +
# Drawbacks of the manual approach:
# we had to inspect the markup by hand to know which child to grab,
# we must repeat the extraction for every column,
# and the tabular structure is lost and must be rebuilt manually.
# -

# +
# There should be a better way.
# -

# # Using Pandas to extract tables

import pandas as pd

# +
# pandas ships a one-call table extractor that runs Beautiful Soup
# under the hood and performs all of the steps above for us.
# -

# pandas.read_html() accepts either raw HTML or a URL and returns
# every table on the page as a list of DataFrames.
tables = pd.read_html(base_site)

type(tables)

type(tables[0])

# The page contains four tables.
len(tables)

# The full main table, ready to use.
tables[1]

# +
# pandas extracts all columns and headings and even fills in missing
# values (see the Iraq row's 'Year' column).
# -

# Column headings of the main table.
tables[1].columns

# +
# HTML quirks can still leak through, so some cleaning may be needed.
# -

# Tag attributes can be passed to narrow the search, just like in BeautifulSoup.
filtered_tables = pd.read_html(base_site, attrs = {"class": "navbox"})
filtered_tables

# Still a list, even when only one matching table exists.
len(filtered_tables)

# The raw HTML we downloaded earlier works just as well as the URL.
pd.read_html(html, attrs = {"class": "wikitable sortable"})
Web scraping/Scraping wikepedia Tables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/TangJiahui/6.036_Machine_Learning/blob/main/MIT_6_036_HW11_Recurrent_Neural_Networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="sgwpVmLOgw8J" # # RNN for Name Generation # # In the following problem, we will look at using an RNN to generate names in different languages. Much of the code is below is given to you, as our goal is more to expose you to more PyTorch code which you'd likely see in practical ML implementations than expecting you to code PyTorch proficiently for this class. # + id="0WJm253BCFWM" outputId="5bc785e3-5d79-4e21-de2f-cbd8f375a102" colab={"base_uri": "https://localhost:8080/"} # !rm -rf code_for_hw11* __MACOSX data .DS_Store # !wget --no-check-certificate --quiet https://introml.odl.mit.edu/cat-soop/_static/6.036/homework/hw11/char_rnn_data.zip # !unzip char_rnn_data.zip # !wget --no-check-certificate --quiet https://introml.odl.mit.edu/cat-soop/_static/6.036/homework/hw11/char_rnn_utils.zip # !unzip char_rnn_utils.zip from char_rnn_utils import timeSince, all_letters, n_letters, get_data, categoryTensor, inputTensor, randomTrainingExample import math import random import time import matplotlib.pyplot as plt import numpy as np import torch import torch.nn as nn # + id="uFZ-y4jw7Clf" outputId="2f802d30-2179-4af9-eb4d-4530008648ca" colab={"base_uri": "https://localhost:8080/"} # all_letters contains all possible letters in names print('All letters:', all_letters) # number of letters + 1 for the end-of-string (EOS) token print('Number of letters and EOS token:', n_letters) # categoryTensor, inputTensor convert categories and letters (not including EOS) to 
one-hot vectors # + id="wslUNnQHwOoM" # Get the data all_categories, category_lines = get_data() n_categories = len(all_categories) # + id="_gu2xm50xUnA" outputId="ef1e0c3c-9b5c-47b3-8958-12a7f5f6ce3b" colab={"base_uri": "https://localhost:8080/"} n_categories # + id="xj4OYqDK18bo" outputId="d930e0cb-8d8b-4eb2-a50c-6254f01ec487" colab={"base_uri": "https://localhost:8080/"} n_letters # + [markdown] id="SmmHD1IG619G" # ## Model # # Below, we define the RNN architecture that we'll be exploring. The RNN we use will take in the previous character and hidden state to calculate the new hidden state and predict the next character in the string. This architecture is a bit different from the basic version we've discussed: while the RNN maintains a hidden state, it will also compute a second `output` vector of the output size via the `input2output` fully-connected layer and then concatenate it with the hidden vector and use another fully-connected layer to get the actual prediction. We've written most of the details of the model, but we would like you figure out the dimensions for each of the fully-connected layers. Also, the RNN combines its three inputs into one vector by concatenation before passing it through its fully-connected layers `input2hidden` and `input2output`. 
# + id="JOv4wsqCJRA_" class RNN(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNN, self).__init__() self.hidden_size = hidden_size # Replace d1-4 with expressions in terms of the variables # n_categories, input_size, hidden_size, and output_size d1 = input_size + hidden_size + n_categories d2 = hidden_size d3 = output_size d4 = output_size+hidden_size self.input2hidden = nn.Linear(d1, d2) self.input2output = nn.Linear(d1, d3) self.output2output = nn.Linear(d4, d3) self.dropout = nn.Dropout(0.1) self.softmax = nn.LogSoftmax(dim=1) # Runs the forward pass of the model with the given inputs def forward(self, category, input, hidden): input_combined = torch.cat((category, input, hidden), 1) hidden = self.input2hidden(input_combined) output = self.input2output(input_combined) output_combined = torch.cat((hidden, output), 1) output = self.output2output(output_combined) output = self.dropout(output) output = self.softmax(output) return output, hidden def initHidden(self): return torch.zeros(1, self.hidden_size) # + [markdown] id="kviTY_3pzYPN" # Now let's train the model! This will take a bit of time (about 30 seconds when we ran it). # + id="J5zhSWNymG2q" criterion = nn.NLLLoss() def train(category_tensor, input_line_tensor, target_line_tensor, learning_rate): target_line_tensor.unsqueeze_(-1) hidden = rnn.initHidden() rnn.zero_grad() loss = 0 for i in range(input_line_tensor.size(0)): output, hidden = rnn(category_tensor, input_line_tensor[i], hidden) l = criterion(output, target_line_tensor[i]) loss += l loss.backward() # oftentimes, we'll use other optimizers like Adam, but we'll keep it simple for p in rnn.parameters(): p.data.add_(p.grad.data, alpha=-learning_rate) return output, loss.item() / input_line_tensor.size(0) # + id="fpKjQyGyoaqh" outputId="936416aa-53de-4cff-a872-6f2fcb1d7aab" colab={"base_uri": "https://localhost:8080/"} # The following two lines are very important so that you ge tthe right results! 
random.seed(0)
torch.manual_seed(0)

rnn = RNN(n_letters, 128, n_letters)

# Keep these parameters for your homework submission,
# but feel free to experiment with other settings after.
n_iters = 10000
learning_rate = 0.0005
print_every = 500
plot_every = 50

all_losses = []
total_loss = 0  # Reset every plot_every iters

start = time.time()

# Main training loop: one random (category, line) example per iteration
for iter in range(1, n_iters + 1):
    output, loss = train(*randomTrainingExample(all_categories, category_lines), learning_rate=learning_rate)
    total_loss += loss

    if iter % print_every == 0:
        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))

    if iter % plot_every == 0:
        all_losses.append(total_loss / plot_every)
        total_loss = 0

# + [markdown] id="Gi467O3dzFcM"
# Let's plot the loss.

# + id="St1l92Qpoj6U" outputId="33ddd458-86fe-4760-cf26-8f427c7d2e3c" colab={"base_uri": "https://localhost:8080/", "height": 265}
plt.figure()
plt.plot(all_losses)
plt.show()

# + [markdown] id="PX-oR-gxzwy9"
# ## Generating New Names
#
# Finally, we'd like to generate new names using our RNN. We've defined helper functions below to help you out. For the homework, please generate German names that start with the letters G, E, and R. (capitalization matters!)

# + id="go8oc6Kk0QJp"
max_length = 20

# Sample from a category and starting letter
def sample(category, start_letter='A'):
    """Greedily generate one name for `category` beginning with `start_letter`."""
    # for reproducibility. DO NOT DELETE
    random.seed(0)
    torch.manual_seed(0)
    with torch.no_grad():  # no need to track gradients at test time
        category_tensor = categoryTensor(category, all_categories)
        input = inputTensor(start_letter)
        hidden = rnn.initHidden()

        output_name = start_letter

        for i in range(max_length):
            output, hidden = rnn(category_tensor, input[0], hidden)
            # Take the single most likely next letter (greedy decoding)
            _, topi = output.topk(1)
            topi = topi.item()
            if topi == n_letters - 1:
                # EOS token predicted: the name is complete
                break
            else:
                letter = all_letters[topi]
                output_name += letter
            # use the predicted letter as the input for the next prediction
            input = inputTensor(letter)

        return output_name  # feel free to change this if needed

# Generates multiple samples from one category and multiple starting letters
def samples(category, start_letters='ABC'):
    for start_letter in start_letters:
        print(sample(category, start_letter))

# + id="dYrKbKJ1La7Y" outputId="7232ef13-a980-4abe-c834-3bcecfda9577" colab={"base_uri": "https://localhost:8080/"}
samples('German', 'GER')
MIT_6_036_HW11_Recurrent_Neural_Networks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import logging  # fix: was used below (logging.error) without being imported
import os
from datetime import datetime

import psycopg2
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

notification_id = 5
SENDGRID_API_KEY = '<KEY>'


def send_notification(conn, notification_id):
    """Email one stored notification to every attendee and log the outcome.

    Looks up the notification row by id, sends a personalized SendGrid mail
    to each attendee, then inserts a status row recording how many attendees
    were notified. Always closes the cursor and connection.
    """
    cur = conn.cursor()
    try:
        # Get notification message and subject from database using the notification_id.
        # Parameterized query (fix: was string-formatted SQL, injection-prone and
        # broken for values containing quotes).
        cur.execute("SELECT subject, message FROM notification WHERE id = %s;",
                    (notification_id,))
        subject, body = cur.fetchall()[0]

        # Get attendees email and name
        cur.execute("SELECT email, first_name FROM attendee;")
        attendees = cur.fetchall()

        # Loop through each attendee and send an email with a personalized greeting
        num_attendee_notified = 0
        for (email, first_name) in attendees:
            mail = Mail(
                from_email='<EMAIL>',
                to_emails=email,
                subject=subject,
                plain_text_content="Hi {}, \n {}".format(first_name, body))
            try:
                sg = SendGridAPIClient(SENDGRID_API_KEY)
                response = sg.send(mail)
                print(response.status_code)
                if response.status_code == 202:  # 202 = accepted by SendGrid
                    num_attendee_notified += 1
            except Exception as e:
                # fix: was `logging(e.message)` / `print(e.message)` — `logging` is a
                # module (not callable) and modern exceptions have no `.message`
                logging.error(e)

        status = "Notified {} attendees".format(num_attendee_notified)
        # Record the completed notification with the total number of attendees notified
        cur.execute(
            "INSERT INTO notification(status, message, completed_date, subject) "
            "VALUES (%s, %s, %s, %s);",
            (status, body, datetime.utcnow(), subject))
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        logging.error(error)
        conn.rollback()
    finally:
        # Close connection
        cur.close()
        conn.close()


# Run against the local database
conn = psycopg2.connect(dbname="techconfdb", user="postgres",
                        password="<PASSWORD>", host="localhost")
send_notification(conn, notification_id)
# -

# +
# Run against the Azure-hosted database (same logic, different connection)
conn = psycopg2.connect(dbname="techconfdb",
                        user="dbadmin@sqlserver20210405",
                        password="<PASSWORD>",
                        host="sqlserver20210405.postgres.database.azure.com")
send_notification(conn, notification_id)
# -

# Quick SendGrid smoke test, independent of the database
mail = Mail(
    from_email='<EMAIL>',
    to_emails='<EMAIL>',
    subject='sendgrid test',
    plain_text_content="Hi sendgrid")
try:
    sg = SendGridAPIClient(SENDGRID_API_KEY)
    response = sg.send(mail)
    print(response.body)
    print(response.status_code)
except Exception as e:
    print(e)  # fix: `e.message` does not exist on modern exceptions
notification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Analyze data and then plot
#
# This example demonstrates how to use the `analyze` function to process data
# prior to plotting. The data is a list of numpy arrays representing
# multi-voxel activity patterns (columns) over time (rows). First, analyze function
# normalizes the columns of each matrix (within each matrix). Then the data is
# reduced using PCA (10 dims) and finally it is aligned with hyperalignment. We can
# then plot the data with hyp.plot, which further reduces it so that it can be
# visualized.
#

# +
# Code source: <NAME>
# License: MIT

import hypertools as hyp

# fetch the example 'weights' geo and pull out its raw matrices
weights_geo = hyp.load('weights')
raw_patterns = weights_geo.get_data()

# normalize within each matrix, reduce to 10 PCA dims, then hyperalign
processed = hyp.analyze(raw_patterns, normalize='within', reduce='PCA',
                        ndims=10, align='hyper')

# visualize the processed trajectories
hyp.plot(processed)
docs/auto_examples/analyze.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare DVCS data against reconstructed and generated MC samples, and build
# a one-bin acceptance-corrected phi distribution / cross section.

from utils.epg import *
import icecream as ic
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from copy import copy
from scipy.optimize import curve_fit

# initial settings: LaTeX-rendered text for publication-quality figures
pgf_with_latex = {
    "pgf.texsystem": "pdflatex",
    "text.usetex": True,                # use LaTeX to write all text
    "font.family": "sans-serif",
    "font.sans-serif": "Helvetica",
    "font.size": 25,                    # default font size
    "axes.labelsize": 24,               # x and y label size
    "axes.titlesize": 24,               # subfigure title size
    "legend.fontsize": 22,
    "xtick.labelsize": 23,
    "ytick.labelsize": 23,
    "figure.titlesize": 25,
    "pgf.preamble": r"\usepackage{xcolor}",  # xcolor for colours
}
matplotlib.rcParams.update(pgf_with_latex)


def add_angles(df):
    """Attach lab-frame theta/phi columns for e', p' and gamma, in place.

    fix: this computation was copy-pasted three times (data, rad MC, nonrad MC).
    """
    for px, py, pz, theta_col, phi_col in (
            ("Epx", "Epy", "Epz", "Etheta", "Ephi"),
            ("Ppx", "Ppy", "Ppz", "Ptheta", "Pphi"),
            ("Gpx", "Gpy", "Gpz", "Gtheta", "Gphi")):
        mom = [df[px], df[py], df[pz]]
        df[theta_col] = getTheta(mom)
        df[phi_col] = getPhi(mom)


# inbending and outbending data samples
fname = "~/Dropbox (MIT)/data/dvcs_inb.root"
epg = epgFromROOT(fname)
dvpi0 = epg.getDVpi0()
dvcs = epg.getDVCS(sub2g=True)

fnameoutb = "~/Dropbox (MIT)/data/dvcs_outb.root"
epgoutb = epgFromROOT(fnameoutb)
dvpi0outb = epgoutb.getDVpi0()
dvcsoutb = epgoutb.getDVCS(sub2g=True)

# radiative reconstructed MC
fname_mc = "~/Dropbox (MIT)/data/MC/Feb2020/dvcsRadRECinb.root"
epg_mc = epgFromROOT(fname_mc, rec=True)
dvcs_mc = epg_mc.getDVCS()
dvcs_gen = epg_mc.df_MC
dvcs_gen.keys()

# non-radiative reconstructed MC
fname_mc2 = "~/Dropbox (MIT)/data/MC/Feb2020/dvcsNonRadRECinb.root"
epg_mc2 = epgFromROOT(fname_mc2, rec=False)
dvcs_mc2 = epg_mc2.getDVCS()

add_angles(dvcs)
add_angles(dvcs_mc)
add_angles(dvcs_mc2)

# exclusivity ("wagon") cuts on the radiative-MC epg sample
df_epg = epg.df_epg
df_epg1 = epg_mc.df_epg
cut1_Wagon1 = (df_epg1["ME_epg"] > -1) & (df_epg1["ME_epg"] < 2)
cut1_Wagon2 = (df_epg1["MM2_eg"] > 1 / 16) & (df_epg1["MM2_eg"] < 4)
cut1_Wagon3 = (df_epg1["MM2_epg"] > -0.1) & (df_epg1["MM2_epg"] < 0.1)
cut1_Wagon4 = (df_epg1["MPt"] < 0.75)
cut1_Wagon5 = (df_epg1["reconGam"] < 7.5)
cut1 = cut1_Wagon1 & cut1_Wagon2 & cut1_Wagon3 & cut1_Wagon4 & cut1_Wagon5
df_epg_mc1 = df_epg1[cut1]

# quick data (black) vs rad-MC (blue) shape comparison for one variable
var = "MM2_eg"
bins = np.linspace(-0.01, 1.5, 101)
df_epg[var].hist(bins=100, density=True, histtype='stepfilled',
                 facecolor='none', edgecolor='k')
df_epg1[var].hist(bins=bins, density=True, histtype='stepfilled',
                  facecolor='none', edgecolor='b')

# detector-topology subsets: proton in CD/FD via Pstat, photon in FT/FD via Gstat
# fix: dvpi0FD / dvpi0CD were used below but never defined (NameError); define
# them with the same Pstat split used for the dvcs samples
dvpi0FD = dvpi0[dvpi0["Pstat"] < 4000]
dvpi0CD = dvpi0[dvpi0["Pstat"] > 4000]

dvcsCDFT = dvcs[(dvcs["Pstat"] > 4000) & (dvcs["Gstat"] < 2000)]
dvcsCDFD = dvcs[(dvcs["Pstat"] > 4000) & (dvcs["Gstat"] > 2000)]
dvcsFDFD = dvcs[(dvcs["Pstat"] < 4000) & (dvcs["Gstat"] > 2000)]
dvcsCDFT_mc = dvcs_mc[(dvcs_mc["Pstat"] > 4000) & (dvcs_mc["Gstat"] < 2000)]
dvcsCDFD_mc = dvcs_mc[(dvcs_mc["Pstat"] > 4000) & (dvcs_mc["Gstat"] > 2000)]
dvcsFDFD_mc = dvcs_mc[(dvcs_mc["Pstat"] < 4000) & (dvcs_mc["Gstat"] > 2000)]
dvcsCDFT_mc2 = dvcs_mc2[(dvcs_mc2["Pstat"] > 4000) & (dvcs_mc2["Gstat"] < 2000)]
dvcsCDFD_mc2 = dvcs_mc2[(dvcs_mc2["Pstat"] > 4000) & (dvcs_mc2["Gstat"] > 2000)]
dvcsFDFD_mc2 = dvcs_mc2[(dvcs_mc2["Pstat"] < 4000) & (dvcs_mc2["Gstat"] > 2000)]

# axis-label unit strings
degree = r"${}^{\circ}$"
GeV = "GeV"
GeV2 = "GeV" + r"${}^{2}$"
GeVc = "GeV/c"
GeVc2 = "(GeV/c)" + r"${}^{2}$"


def compare_hists(shape, figsize, frames, colors, variables, titles, units,
                  binstarts, binends, offsets=None, savefig=None):
    """Overlay normalized histograms of several dataframes, one panel per variable.

    fix: the four copy-pasted plotting loops are unified; the third loop
    inconsistently addressed xlabels via axs[2-yind] while titles used
    axs[yind] — labels now go on the same panel as the histogram.
    """
    nrow, ncol = shape
    if offsets is None:
        offsets = [0] * len(frames)
    fig, axs = plt.subplots(nrow, ncol, figsize=figsize)
    for yind in range(nrow):
        for xind in range(ncol):
            ind = ncol * yind + xind
            start, end = binstarts[ind], binends[ind]
            bins = np.linspace(start, end, 101)
            for frame, color, off in zip(frames, colors, offsets):
                axs[yind, xind].hist(frame[variables[ind]] + off, bins=bins,
                                     density=True, histtype='stepfilled',
                                     facecolor='none', edgecolor=color)
            axs[yind, xind].set_title(titles[ind])
            axs[yind, xind].set_xlim([start, end])
            if units[ind]:
                axs[yind, xind].set_xlabel(titles[ind] + " [" + units[ind] + "]")
            else:
                axs[yind, xind].set_xlabel(titles[ind])
    plt.tight_layout()
    plt.show()
    if savefig:
        plt.savefig(savefig)
    plt.clf()


# exclusivity variables: radiative MC (red) vs non-radiative MC (blue)
varstoplot = ["coneAngle", "reconGam", "coplanarity", "ME_epg",
              "MM2_epg", "MM2_eg", "MM2_ep", "MPt"]
title = [r"$\theta_{e'\gamma}$", r"$\theta_{\gamma_{det.}\gamma_{rec.}}$",
         r"$\Delta\phi$", "ME" + r"${}_{epg}$", "MM" + r"${}^{2}_{epg}$",
         "MM" + r"${}^{2}_{eg}$", "MM" + r"${}^{2}_{ep}$", "MPt" + r"${}_{epg}$"]
unit = [degree, degree, degree, GeV, GeV2, GeV2, GeV2, GeVc]
binstarts = [20, 0, 0, -0.5, -0.04, 0.1, -0.4, 0]
binends = [60, 1.1, 10, 1.2, 0.04, 1.7, 0.4, 0.12]
compare_hists((2, 4), (15, 10), [dvcsFDFD_mc, dvcsFDFD_mc2], ['r', 'b'],
              varstoplot, title, unit, binstarts, binends)

# kinematic variables: radiative MC (red) vs non-radiative MC (blue)
varstoplot = ["nu", "W", "Q2", "xB", "t2", "phi2", "t1", "phi1"]
title = [r"$\nu$", r"$W$", r"$Q^{2}$", r"$x_{B}$",
         r"$-t$" + " from " + r"$\gamma$",
         r"$\phi_{trento}$" + " from " + r"$\gamma$",
         r"$-t$" + " from " + r"$p$",
         r"$\phi_{trento}$" + " from " + r"$p$"]
unit = [None, None, GeVc2, None, GeV2, GeV2, GeV2, GeVc]
binstarts = [0, 2, 0, 0, 0, 0, 0, 0]
binends = [10, 4, 10, 1, 4, 360, 4, 360]
compare_hists((2, 4), (15, 10), [dvcsFDFD_mc, dvcsFDFD_mc2], ['r', 'b'],
              varstoplot, title, unit, binstarts, binends)

# momentum components: data (black) vs radiative MC (red)
varstoplot = ["Epx", "Epy", "Epz", "Ppx", "Ppy", "Ppz", "Gpx", "Gpy", "Gpz"]
title = [r"$px_{e'}$", r"$py_{e'}$", r"$pz_{e'}$",
         r"$px_{p'}$", r"$py_{p'}$", r"$pz_{p'}$",
         r"$px_{\gamma}$", r"$py_{\gamma}$", r"$pz_{\gamma}$"]
unit = [GeVc, GeVc, GeVc2, GeVc, GeVc, GeVc, GeVc, GeVc, GeVc]
binstarts = [-1.5, -1.5, 0, -1.2, -1.2, 0, -2, -2, 0]
binends = [1.5, 1.5, 10, 1.2, 1.2, 2, 2, 2, 10]
compare_hists((3, 3), (15, 15), [dvcsFDFD, dvcsFDFD_mc], ['k', 'r'],
              varstoplot, title, unit, binstarts, binends)

# vertices and angles: data (black) vs radiative MC shifted by +0.007 (red)
varstoplot = ["Evx", "Evy", "Evz", "Etheta", "Ptheta", "Gtheta",
              "Ephi", "Gphi", "Gphi"]
title = [r"$vx_{e'}$", r"$vy_{e'}$", r"$vz_{e'}$",
         r"$\theta_{e'}$", r"$\theta_{p'}$", r"$\theta_{\gamma}$",
         r"$\phi_{e'}$", r"$\phi_{p'}$", r"$\phi_{\gamma}$"]
unit = ["cm", "cm", "cm", degree, degree, degree, degree, degree, degree]
binstarts = [-0.2, -1, -6, 0, 0, 0, -180, -180, -180]
binends = [0.2, 1, 0, 45, 45, 45, 180, 180, 180]
compare_hists((3, 3), (15, 15), [dvcsFDFD, dvcsFDFD_mc], ['k', 'r'],
              varstoplot, title, unit, binstarts, binends,
              offsets=[0, 0.007])  # +0.007 shift applied to the MC as before

# iteratively locate the Evy beam-spot centre by re-centering the window
mean = 0
asym = dvcs[(dvcs["Evy"] > mean - 1) & (dvcs["Evy"] < mean + 1)]
print(mean)
for i in range(0, 10):
    mean = asym["Evy"].mean()
    asym = dvcs[(dvcs["Evy"] > mean - 0.3) & (dvcs["Evy"] < mean + 0.3)]
    print(mean)

# mode of the Evx distribution around the fitted centre
data, bins = np.histogram(dvcs["Evx"], bins=np.linspace(-1 + mean, 1 + mean, 101))
bins[np.argmax(data)]

(dvcs_mc["Evx"]).hist(bins=np.linspace(-0.1, 0.1, 201), density=True,
                      histtype='stepfilled', facecolor='none', edgecolor='k')
(dvcs["Evx"]).hist(bins=np.linspace(-0.1, 0.1, 201), density=True,
                   histtype='stepfilled', facecolor='none', edgecolor='r')

# electron sector from phi (60-degree sectors, offset by 80 degrees)
# fix: the original reused a stale `ele` list that was last bound to the
# non-radiative MC electron momenta; rebuild it from `dvcs` — TODO confirm intent
ele = [dvcs["Epx"], dvcs["Epy"], dvcs["Epz"]]
dvcs["neweSector"] = getPhi(ele) + 80
dvcs["neweSector"] = dvcs["neweSector"] // 60
dvcs["neweSector"] = np.where(dvcs["neweSector"] <= 0,
                              dvcs["neweSector"] + 6, dvcs["neweSector"])

# generator-level LUND sample
fname = "/Users/sangbaek/Dropbox (MIT)/data/dvcsgen.dat"
epgLund = epgFromLund(fname, 10)

import importlib
import utils
importlib.reload(utils.epg)
from utils.epg import *

dvcsLund = epgLund.getDVCS()

# kinematic bin edges
xB_edges = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
            0.45, 0.5, 0.55, 0.6, 0.7, 0.85, 1]
Q2_edges = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
            4.5, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 12.]
t_edges = [0.09, 0.15, 0.20, 0.3, 0.4, 0.60, 1.00, 1.5, 2.0]
phi_edges = np.linspace(0, 360, 31)

# truth-level generated MC
# NOTE: this intentionally rebinds dvcs_gen (previously epg_mc.df_MC)
fname_gen = "~/Dropbox (MIT)/data/MC/nonradVzCorrInbMC.root"
epg_gen = epgFromROOT(fname_gen, mc=True)
dvcs_gen = epg_gen.getDVCS()

# run-period subsets of the FDFD sample
dvcs5038 = dvcsFDFD[dvcsFDFD["RunNum"] < 5200]
dvcs5419 = dvcsFDFD[dvcsFDFD["RunNum"] > 5300]
dvcs["RunNum"].unique()

# 2D electron-vertex map of the generated MC
cmap = copy(plt.cm.get_cmap("jet"))
plt.hist2d(dvcs_gen["MCEvx"], dvcs_gen["MCEvy"],
           bins=[np.linspace(-0.05, 0.05, 101), np.linspace(-0.05, 0.05, 101)],
           cmin=1, cmap=cmap)
plt.xlabel("electron vx (cm)")
plt.ylabel("electron vy (cm)")
plt.tight_layout()
plt.show()

dvcs_gen

dvcsoutb["Evy"].hist(bins=np.linspace(-0.25, 0.25, 101))

# raw check of the ROOT tree with uproot
import uproot
file = uproot.open("~/Dropbox (MIT)/data/dvcs_inb.root")
tree = file["T"]
df_ele = pd.DataFrame()
RunNum = tree["RunNum"].array(library="pd", entry_stop=10)
RunNum

# pi0 missing-mass comparison: CD protons (blue) vs FD protons (red)
fig, ax = plt.subplots()
dvpi0CD["MM2_egg"].hist(bins=np.linspace(0.5, 1.5, 101), density=True,
                        histtype='stepfilled', facecolor='none',
                        edgecolor='b', ax=ax)
dvpi0FD["MM2_egg"].hist(bins=np.linspace(0.5, 1.5, 101), density=True,
                        histtype='stepfilled', facecolor='none',
                        edgecolor='r', ax=ax)
ax.set_xlabel("MM" + r"${}^{2}_{egg}$" + " [GeV" + r"${}^{2}$" + "]")
ax.set_xlim([0.5, 1.5])
plt.show()


def _one_bin(df):
    """Select the single kinematic bin 0.3<xB<0.35, 3<Q2<3.5, 0.4<t2<0.6."""
    return df[(df["xB"] > 0.3) & (df["xB"] < 0.35)
              & (df["Q2"] > 3) & (df["Q2"] < 3.5)
              & (df["t2"] > 0.4) & (df["t2"] < 0.6)]


dvcs_data = _one_bin(dvcsFDFD)
dvcs_rec = _one_bin(dvcsFDFD_mc)
dvcs_generated = _one_bin(dvcs_gen)

phi_data = dvcs_data["phi2"]
phi_rec = dvcs_rec["phi2"]
# fix: use the bin-matched generated sample; the original took phi2 from the
# FULL generated set (dvcs_gen), making the acceptance denominator inconsistent
phi_gen = dvcs_generated["phi2"]

phi_edges = np.linspace(0, 360, 31)
phiData_entries, bins = np.histogram(phi_data, bins=phi_edges)
phiRec_entries, bins = np.histogram(phi_rec, bins=phi_edges)
phiGen_entries, bins = np.histogram(phi_gen, bins=phi_edges)
binscenters = np.array([0.5 * (bins[i] + bins[i + 1])
                        for i in range(len(bins) - 1)])
phiGen_entries

# +
fig, axs = plt.subplots(2, 3, figsize=(15, 10))
axs[0, 0].hist(phi_data, bins=phi_edges, histtype='stepfilled',
               facecolor='none', edgecolor='b')
axs[0, 0].set_title("Raw yields, Data")
axs[0, 1].hist(phi_rec, bins=phi_edges, histtype='stepfilled',
               facecolor='none', edgecolor='b')
axs[0, 1].set_title("Reconstructed")
axs[0, 2].hist(phi_gen, bins=phi_edges, histtype='stepfilled',
               facecolor='none', edgecolor='b')
axs[0, 2].set_title("Generated")

acc = phiRec_entries / phiGen_entries
axs[1, 0].step(binscenters, acc, where='mid', color='b', linewidth=1)
axs[1, 0].bar(binscenters, acc, width=np.diff(bins), color='w', facecolor='w')
axs[1, 0].set_title("Acceptance")

# fix: the original aliased accCorrectedData = acc, so filling the corrected
# yields silently overwrote the acceptance array displayed later; copy instead
accCorrectedData = acc.copy()
for ind, val in enumerate(acc):
    if val == 0:
        accCorrectedData[ind] = 0  # empty acceptance bin: no correction possible
    else:
        accCorrectedData[ind] = phiData_entries[ind] / val
axs[1, 1].step(binscenters, accCorrectedData, where='mid', color='b', linewidth=1)
axs[1, 1].bar(binscenters, accCorrectedData, width=np.diff(bins),
              color='w', facecolor='w')
axs[1, 1].set_title("Acc. corrected yields")

# normalization constants: 56.5 (presumably integrated luminosity), bin widths
# dxB=0.05, dQ2=0.5, dt=0.2, dphi=2*pi/30, and a 10^6 scale — TODO confirm
xsec = accCorrectedData / 56.5 / 0.05 / 0.5 / 0.2 / (np.pi * 2 / 30) / 10**6
axs[1, 2].step(binscenters, xsec, where='mid', color='b', linewidth=1)
axs[1, 2].bar(binscenters, xsec, width=np.diff(bins), color='w', facecolor='w')
axs[1, 2].set_title("differential cross sections")

for ax in axs.reshape(6,):
    ax.set_xlim([0, 360])
    ax.set_xticks(np.linspace(0, 360, 5))
    ax.set_xlabel("$\phi$" + " [" + r"${}^{\circ}$" + "]")
plt.tight_layout()
plt.savefig("onebin.pdf")
# -

acc
len(dvcsFDFD_mc)
dvcs_rec
len(df_epg[df_epg["Pstat"] > 4000])
APSApril/simComparison.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Hand-rolled fully-connected neural network (forward pass + backpropagation)
# built with NumPy only.  Transient Colab executionInfo/outputId metadata has
# been dropped from the cell markers; cell ids are kept.

# + id="9ubVx3EUDU9W"
import numpy as np
from matplotlib import pyplot as plt

# + [markdown] id="JQ97WqKes3b-"
# ### Define Model

# + id="fAr38nImf4XO"
'''
Model class for the network model
Store the model, including layer(construct by node), activation function
'''
class Model():
    def __init__(self, layer_nums, create_func, input_arr, lr_rate):
        # layer_nums: node count per position —
        #   index 0  => input size
        #   middle   => hidden-layer sizes
        #   last     => output size
        self.layer_nums = layer_nums
        self.layer_list = []                      # Layers in forward order
        create_func(self, input_arr, layer_nums)  # Builder callback fills layer_list
        self.result = 0                           # Output of the last forward pass
        self.lr_rate = lr_rate                    # Learning rate used for every layer
        self.loss = 0                             # Loss of the last adjust_model() call

    '''
    Calculate the network by using input data
    '''
    def cal_network(self, input):
        # Feed the input into the first layer, then run each layer forward in order.
        self.layer_list[0].set_input(input.copy())
        for i, layer in enumerate(self.layer_list):
            layer.forwrad_pass()  # NOTE(review): method-name typo kept — callers use it
        return self.get_result()

    '''
    Set output errors, for the last layer only
    '''
    def set_output_error(self, error):
        self.layer_list[len(self.layer_nums)-1].set_output_error(error)

    '''
    Adjust nodes in network using backpropagation and ground truth
    '''
    def adjust_model(self, ground_truth):
        # Squared-error loss: L = e.e / 2 with e = prediction - target.
        # print(self.result, ground_truth)
        error = self.result - ground_truth
        # print(error)
        self.loss = np.dot(error, error) / 2
        if ground_truth == 1:
            # NOTE(review): ad-hoc weighting — positive targets get a 3x error
            # boost, presumably to counter class imbalance.  Confirm intent.
            error = error * 3
        self.set_output_error(error)
        # Walk the layers back-to-front so each layer can hand its delta upstream.
        for i in range(len(self.layer_list)-1, -1, -1):
            # print("in ", i, "-th layer")
            self.layer_list[i].adjust_weight(self.lr_rate)
        # print("ans", ground_truth ,"l w: ", self.layer_list[2].w)

    '''
    Return network result
    '''
    def get_result(self):
        # The network output is the last layer's activation.
        self.result = self.layer_list[len(self.layer_nums)-1].get_output()
        return self.result

    def get_loss(self):
        return self.loss

    def get_output_w(self):
        # Copy of the output layer's weight matrix.
        w = self.layer_list[len(self.layer_list)-1].get_output_w()
        return w

# + [markdown] id="Fezmbg647700"
# ### Define Layer

# + cellView="code" id="NwoN8EKNvsnE"
#@title
'''
Layer class for the network model
Help model to handle neurons
'''
class Layer_vec():
    '''
    Initial layer
    @param func - activation function
    @param d_func - diviation of activation function
    @param node_num - number of nodes in this layer
    @param last_layer - last layer's node list
    @param is_first - whether this layer is the first layer
    '''
    def __init__(self, func, d_func, node_num, last_layer, is_first):
        # Activation function and its derivative (used in backprop).
        self.act_func = func
        self.d_act_func = d_func

        # Input size: the first layer reads it from the raw input list,
        # every other layer from the previous layer's node count.
        if not is_first:
            self.i_num = last_layer.get_node_num()  # Number of input nodes
        else:
            self.i_num = len(last_layer)            # Number of input nodes
        self.input_vec = np.full(self.i_num+1, 0.0)  # Input vector, +1 slot for the bias
        self.input_vec[self.i_num] = 1               # Bias input fixed to 1
        self.neuron_num = node_num
        self.last_layer = last_layer

        # Calculation variables
        # Weights: (inputs+bias) x nodes, scaled random init (Xavier-like).
        self.w = np.random.randn(self.i_num+1, node_num) / np.sqrt(self.i_num+1)
        # self.w = np.full((self.i_num+1, node_num), 0.5)  # (debug) constant init
        self.bp_vec = np.full(self.neuron_num, 0.0)  # Error received from the next layer
        self.is_first = is_first                     # True if this is the first layer
        self.weighted_input = np.full(self.i_num, 0.0)  # Pre-activation sums z = x.W
        self.result = np.full(self.neuron_num, 0.0)     # Post-activation output a = f(z)
        self.lr_rate = 0.005                            # Learning rate of the node

    '''
    Adjust weights, using backpropagation
    For error function, e = y_predict - y_desire
    For weight correction, w_n+1 = w_n - delta_w
    '''
    def adjust_weight(self, lr_rate):
        self.lr_rate = lr_rate
        # delta = received error * f'(z); weight gradient = outer(input, delta).
        delta = self.bp_vec * self.d_act_func(self.weighted_input)
        # print("bp_vec: ", self.bp_vec, "winput: ", self.weighted_input, "dwinput: ", self.d_act_func(self.weighted_input))
        delta_w = np.outer(self.input_vec, delta)
        if (not self.is_first):
            # Propagate the error to the previous layer; the bias row of W is
            # dropped because the bias has no upstream neuron.
            pass_v = np.dot(delta, self.w[0:len(self.w)-1, :].transpose())
            # print("passv ", pass_v[0:len(self.input_vec)-1], delta, self.w[0:len(self.w)-1, :].transpose())
            self.last_layer.pass_bp(pass_v[0:len(self.input_vec)-1])
        self.w = self.w - self.lr_rate * delta_w

    # Forward pass: z = x.W, a = f(z).  (Name typo kept — callers use it.)
    def forwrad_pass(self):
        if not self.is_first:
            self.extract_value()  # Pull the previous layer's output
        self.bp_vec = np.full(self.neuron_num, 0.0)  # Reset error for the next backprop
        # print(self.w, self.input_vec)
        self.weighted_input = np.dot(self.input_vec, self.w)
        self.result = self.act_func(self.weighted_input)
        return self.result

    '''
    Pass backpropagation value back to previous layer
    '''
    def pass_bp(self, bp_value):
        self.bp_vec = bp_value.copy()

    '''
    Set input variable, used for first layer which recieve input value
    @param x - input value for the network
    '''
    def set_input(self, x):
        self.input_vec = x.copy()
        if self.is_first:
            self.input_vec = np.append(self.input_vec, 1)  # Append the bias term

    # Fetch the previous layer's output and append the bias term.
    def extract_value(self):
        self.input_vec = self.last_layer.get_output()
        self.input_vec = np.append(self.input_vec, 1)

    def get_node_num(self):
        return self.neuron_num

    # Store the network error at the output layer (this seeds backprop).
    def set_output_error(self, error):
        if self.neuron_num != len(error):
            print("Output layer and error doesn't match")
            return
        self.pass_bp(error)

    def get_output(self):
        return self.result

    def get_output_w(self):
        return self.w.copy()

# + [markdown] id="RSApFupW7ojO"
# ### Activation functions

# + cellView="both" id="e87aLLFDb-ia"
#@title
'''
Activation function for the network
'''
def test_act_func(x):
    return x*11

'''
ReLU
'''
def ReLU(x):
    # NOTE(review): mutates x in place before returning a copy.
    x[x<=0] = 0
    return x.copy()

'''
Sigmoid
'''
def Sigmoid(x):
    return 1/(1+np.exp(-x))

# + [markdown] id="znz66Njx7ueF"
# ### Derivative of Activation function

# + cellView="code" id="YgMss65OdtJS"
#@title
'''
Diviation of the activation function for the network
'''
def d_test_act_func(x):
    return x+2

'''
Diviation of ReLU
'''
def d_ReLU(x):
    # NOTE(review): mutates x in place before returning a copy.
    x[x > 0] = 1
    x[x <= 0] = 0
    return x.copy()

'''
Diviation of Sigmoid
'''
def d_Sigmoid(x):
    # s'(x) = s(x) * (1 - s(x))
    # print("dsig in: ", x)
    s = 1/(1+np.exp(-x))
    # print("dsig a: ", s)
    ans = s * (1 - s)
    # print("dsig b: ", ans)
    return ans

# + [markdown] id="Fg7Rry3AZTia"
# ### Test Layer
#

# + id="ZmAoCb9d0wTY"
##############################################################
#                     test LAYER block                       #
##############################################################
# Sanity check: a 2-node ReLU layer feeding a 1-node ReLU layer,
# trained for 20 rounds towards the constant target 0.7.
lr_rate = 0.5
test_l = Layer_vec(ReLU, d_ReLU, 2, [1,2,3], True)
test_l2 = Layer_vec(ReLU, d_ReLU, 1, test_l, False)
for i in range(20):
    print(i, "'s round !!!")
    test_l.set_input([0.2, 0.1, 0.2])
    # print("test w: ", test.w)
    test_l.forwrad_pass()
    # print("test output: ", test.get_output())
    test_l2.forwrad_pass()
    print("test2 output: ", test_l2.get_output())
    error = test_l2.get_output() - 0.7
    test_l2.set_output_error([error])
    test_l2.adjust_weight(lr_rate)
    test_l.adjust_weight(lr_rate)

# + [markdown] id="TN3Wu8TfkxMB"
# ### Test Model
# -

def construct_model(model, input_arr, layer_num):
    # Build the model's layer list: the first layer reads from the raw input,
    # later layers chain onto the previous layer.
    for i, num in enumerate(layer_num):
        if i == 0:
            model.layer_list.append( Layer_vec(ReLU, d_ReLU, num, input_arr, True) )
        elif i == len(layer_num)-2:
            # NOTE(review): Sigmoid is attached to the *second-to-last* index.
            # For layer_num=[2, 1] this index collides with i == 0, so the
            # output layer ends up with ReLU.  `len(layer_num)-1` looks like
            # the intended condition — confirm before changing.
            model.layer_list.append( Layer_vec(Sigmoid, d_Sigmoid, num, model.layer_list[i-1], False) )
        else:
            model.layer_list.append( Layer_vec(ReLU, d_ReLU, num, model.layer_list[i-1], False) )

# + id="0ADV2VnBk5Ws"
# ##############################################################
# #                       test MODEL block                     #
# ##############################################################
# lr_rate = 0.5
# layer_nums = [2, 1]
# layer_input = [0, 0, 0]
# test_m = Model(layer_nums, construct_model, layer_input, lr_rate)
# input_data = [[0.2], [0.1], [0.2]]
# ground_truth = 0.7
# for i in range(20):
#     print(i, "'s round !!!")
#     test_m.cal_network(input_data)
#     print("test_m output: ", test_m.get_result())
#     test_m.adjust_model(ground_truth)
# test_m.get_loss()
# test_m.get_output_w()
# -

# Boolean-indexing demo: thresholds the array in place to {0, 1}.
x = np.array([1, -1, -1, 1, 0.4, 0.5, -0.5])
print(x)
x[x > 0] = 1
x[x <= 0] = 0
print(x)
code/NN_HW3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="oHSx10gRZ7ly"
# Interactive KM <-> mile converter.
#
# Trains a single Dense(1) Keras model on twelve reference pairs and compares
# its prediction against the exact analytic conversion.  The two menu branches
# used to be near-identical copy-paste blocks; they now share
# _convert_with_model(), which preserves the original prompts, print order and
# output strings exactly.
import logging
import numpy
import tensorflow as tf
import matplotlib.pyplot as plot

logger = tf.get_logger()
logger.setLevel(logging.ERROR)

# Reference training pairs (the same table serves both directions).
KM = numpy.array([0.01, 0.1, 1, 3, 5, 7, 11,20, 40,70,100,150 ], dtype=float)
MIL = numpy.array([0.0062, 0.062,0.62, 1.8641, 3.1069, 4.3496, 6.83508,12.4274,24.8548,43.496,62.1371,93.2057], dtype=float)

MILES_PER_KM = 0.621371  # exact conversion factor


def _convert_with_model(train_x, train_y, src_unit, dst_unit,
                        value_prompt, exact_fmt, predicted_fmt, exact_converter):
    """Run one conversion direction: prompt, train, predict, report.

    train_x, train_y -- reference values (source units -> target units)
    src_unit, dst_unit -- unit labels echoed with the training table
    value_prompt -- stdin prompt for the value to convert
    exact_fmt, predicted_fmt -- format strings for the two result lines
    exact_converter -- callable computing the analytic conversion

    Reads from stdin, prints the report, and plots the training-loss curve.
    """
    trainingTimes = int(input("Enter number of epochs: "))
    print("----- Training Values -----")
    for src_val, dst_val in zip(train_x, train_y):
        print("{} {} = {} {}".format(src_val, src_unit, dst_val, dst_unit))
    print("---------------------------")
    value = float(input(value_prompt))

    # One neuron, one input: the model only has to learn a linear map.
    l1 = tf.keras.layers.Dense(units=1, input_shape=[1])
    model = tf.keras.Sequential([l1])
    model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))

    realExactAnswer = exact_converter(value)
    print(exact_fmt.format(realExactAnswer))
    print("---------------------------")
    print("Num of Epochs: {}".format(trainingTimes))
    history = model.fit(train_x, train_y, epochs=trainingTimes, verbose=False)
    predictedValue = model.predict([value])
    print(predicted_fmt.format(predictedValue))
    plot.xlabel('epoch number')
    plot.ylabel('loss magnitude')
    plot.plot(history.history['loss'])
    print("---------------------------")
    print("Layer variables: {}".format(l1.get_weights()))


choice = input("Select:\n(a) To convert KM to MILE\n(b) To Convert MILE to KM\n")
if choice == "a":
    _convert_with_model(
        KM, MIL, "KM", "MIL",
        "Enter your KM value: ",
        "Exact value in MILES: {} mile",
        "Predicted value in MILES: {} mile",
        lambda km: km * MILES_PER_KM,
    )
elif choice == "b":
    _convert_with_model(
        MIL, KM, "MIL", "KM",
        "Enter your Mile value: ",
        "Exact value in KMs: {} km",
        "Predicted value in KMs: {} km",
        lambda mile: mile / MILES_PER_KM,
    )
else:
    print("You entered a choice that is not in the list")
proje_02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dnldes/Linear-Algebra-58019/blob/main/Vectors.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="LuOyuGKN0l84"
# #Matrix Algebra
#

# + id="8PDZ3h_T0XK3"
# Tour of the basic NumPy constructors.  A single import serves every cell.
import numpy as np

a = np.array([1,2,3])  # 1x3 array from a literal
print(a)

# + id="StJipFsi1sk9"
b = np.array([[1,2,3],[4,5,6]])  # 2x3 array
print(b)

# + id="9vx7xP-C2RoE"
c = np.arange(1, 10).reshape(3, 3)  # 3x3 array holding 1..9
print(c)

# + id="fZBcm6jI2tEt"
d = np.full((3,3), 7)  # 3x3 array, every entry 7
print(d)

# + id="AGKl4qQc3RYb"
e = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(e)
e = e.diagonal()  # main diagonal: [1 5 9]
print(e)

# + id="smfysVzU3pyv"
f = np.eye(3)  # 3x3 identity matrix
print(f)

# + id="EcJR_U8a4I2u"
g = np.zeros((2,2))  # 2x2 array of zeros
print(g)

# + id="BPuf_sek4Nfi"
h = np.empty((0,12))  # zero rows, twelve columns — prints as []
print(h)
Vectors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Extractive summarization with a sentence-level binary classifier (TF1
# graph mode): a multi-layer LSTM reads the token sequence, the hidden state
# at each sentence-start position (`clss`) feeds a dense layer, and a sigmoid
# decides whether that sentence belongs in the summary.

import tensorflow as tf
import numpy as np
import pickle

# Pre-tokenized corpus — presumably produced by an earlier preprocessing
# notebook; confirm the pickle schema against that notebook.
with open('dataset.pkl', 'rb') as fopen:
    dataset = pickle.load(fopen)

dataset.keys()

len(dataset['train_texts'])

with open('dictionary.pkl', 'rb') as fopen:
    dictionary = pickle.load(fopen)
rev_dictionary = dictionary['rev_dictionary']  # id -> token
dictionary = dictionary['dictionary']          # token -> id


class Model:
    """LSTM sentence scorer.

    Placeholders:
      X    -- [batch, time] token ids
      Y    -- [batch, n_sentences] 0/1 "keep this sentence" labels
      mask -- [batch, n_sentences] 1 for real sentence slots, 0 for padding
      clss -- [batch, n_sentences] time-axis index of each sentence start
    """
    def __init__(self, size_layer, num_layers, embedded_size, dict_size, learning_rate):
        def cells(reuse=False):
            return tf.nn.rnn_cell.LSTMCell(size_layer,initializer=tf.orthogonal_initializer(),reuse=reuse)
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.float32, [None, None])
        self.mask = tf.placeholder(tf.int32, [None, None])
        self.clss = tf.placeholder(tf.int32, [None, None])
        mask = tf.cast(self.mask, tf.float32)
        encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        rnn_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
        outputs, _ = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype = tf.float32)
        # Pick the LSTM output at each sentence-start position (per example).
        outputs = tf.gather(outputs, self.clss, axis = 1, batch_dims = 1)
        self.logits = tf.layers.dense(outputs, 1)
        self.logits = tf.squeeze(self.logits, axis=-1)
        self.logits = self.logits * mask
        # Masked mean sigmoid cross-entropy over real sentence slots only.
        crossent = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.Y)
        crossent = crossent * mask
        crossent = tf.reduce_sum(crossent)
        total_size = tf.reduce_sum(mask)
        self.cost = tf.div_no_nan(crossent, total_size)
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        # Accuracy is measured on positive labels only (recall of "keep").
        l = tf.round(tf.sigmoid(self.logits))
        self.accuracy = tf.reduce_mean(tf.cast(tf.boolean_mask(l, tf.equal(self.Y, 1)), tf.float32))


size_layer = 256
num_layers = 2
embedded_size = 256
learning_rate = 1e-3
batch_size = 128
epoch = 20

tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(size_layer,num_layers,embedded_size,len(dictionary),learning_rate)
sess.run(tf.global_variables_initializer())

# +
UNK = 3  # id assigned to out-of-vocabulary tokens

def str_idx(corpus, dic):
    # Map each whitespace-split token to its id, falling back to UNK.
    X = []
    for i in corpus:
        ints = []
        for k in i.split():
            ints.append(dic.get(k,UNK))
        X.append(ints)
    return X

def pad_sentence_batch(sentence_batch, pad_int):
    # Right-pad every sequence in the batch to the batch's maximum length.
    padded_seqs = []
    seq_lens = []
    max_sentence_len = max([len(sentence) for sentence in sentence_batch])
    for sentence in sentence_batch:
        padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
        seq_lens.append(len(sentence))
    return padded_seqs, seq_lens
# -

train_X = str_idx(dataset['train_texts'], dictionary)
test_X = str_idx(dataset['test_texts'], dictionary)
train_clss = dataset['train_clss']
test_clss = dataset['test_clss']
train_Y = dataset['train_labels']
test_Y = dataset['test_labels']

# +
# Smoke test on the first 5 training examples.  clss is padded with -1, which
# becomes mask=0; the -1 entries are then rewritten to 0, a safe dummy index
# for the gather (the masked slots are ignored anyway).
batch_x, _ = pad_sentence_batch(train_X[:5], 0)
batch_y, _ = pad_sentence_batch(train_Y[:5], 0)
batch_clss, _ = pad_sentence_batch(train_clss[:5], -1)
batch_clss = np.array(batch_clss)
batch_mask = 1 - (batch_clss == -1)
batch_clss[batch_clss == -1] = 0
feed = {model.X: batch_x, model.Y: batch_y, model.mask: batch_mask, model.clss: batch_clss}
acc, loss, _ = sess.run([model.accuracy, model.cost,model.optimizer], feed_dict = feed)
acc, loss

# +
import tqdm

for e in range(epoch):
    pbar = tqdm.tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_loss, train_acc, test_loss, test_acc = [], [], [], []
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        batch_x, _ = pad_sentence_batch(train_X[i : index], 0)
        batch_y, _ = pad_sentence_batch(train_Y[i : index], 0)
        batch_clss, _ = pad_sentence_batch(train_clss[i : index], -1)
        batch_clss = np.array(batch_clss)
        batch_mask = 1 - (batch_clss == -1)
        batch_clss[batch_clss == -1] = 0
        feed = {model.X: batch_x, model.Y: batch_y, model.mask: batch_mask, model.clss: batch_clss}
        accuracy, loss, _ = sess.run([model.accuracy,model.cost,model.optimizer], feed_dict = feed)
        train_loss.append(loss)
        train_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    # Evaluation pass (no optimizer op, so no weight updates).
    pbar = tqdm.tqdm(
        range(0, len(test_X), batch_size), desc = 'minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x, _ = pad_sentence_batch(test_X[i : index], 0)
        batch_y, _ = pad_sentence_batch(test_Y[i : index], 0)
        batch_clss, _ = pad_sentence_batch(test_clss[i : index], -1)
        batch_clss = np.array(batch_clss)
        batch_mask = 1 - (batch_clss == -1)
        batch_clss[batch_clss == -1] = 0
        feed = {model.X: batch_x, model.Y: batch_y, model.mask: batch_mask, model.clss: batch_clss}
        accuracy, loss = sess.run([model.accuracy,model.cost], feed_dict = feed)
        test_loss.append(loss)
        test_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    print('epoch %d, training avg loss %f, training avg acc %f'%(e+1, np.mean(train_loss),np.mean(train_acc)))
    print('epoch %d, testing avg loss %f, testing avg acc %f'%(e+1, np.mean(test_loss),np.mean(test_acc)))

# +
from tensor2tensor.utils import rouge
from tensorflow.keras.preprocessing import sequence

def calculate_rouges(predicted, batch_y):
    # Earlier ROUGE helper operating on the 0/1 label vectors directly;
    # superseded by calculate_rouge() below, which scores the actual tokens.
    # NOTE(review): np.bool is deprecated (removed in NumPy 1.24) — would need
    # plain `bool` on a modern NumPy.
    non = np.count_nonzero(batch_y, axis = 1)
    o = []
    for n in non:
        o.append([True for _ in range(n)])
    b = sequence.pad_sequences(o, dtype = np.bool, padding = 'post', value = False)
    batch_y = np.array(batch_y)
    rouges = []
    for i in range(predicted.shape[0]):
        a = batch_y[i][b[i]]
        p = predicted[i][b[i]]
        rouges.append(rouge.rouge_n([p], [a]))
    return np.mean(rouges)

# +
# Predict on the first 5 test examples; keep the -1-padded clss copy so the
# sentence boundaries can be recovered afterwards.
batch_x, _ = pad_sentence_batch(test_X[: 5], 0)
batch_y, _ = pad_sentence_batch(test_Y[: 5], 0)
batch_clss, _ = pad_sentence_batch(test_clss[: 5], -1)
batch_clss = np.array(batch_clss)
batch_y = np.array(batch_y)
batch_x = np.array(batch_x)
cp_batch_clss = batch_clss.copy()
batch_mask = 1 - (batch_clss == -1)
batch_clss[batch_clss == -1] = 0
feed = {model.X: batch_x, model.mask: batch_mask, model.clss: batch_clss}
predicted = sess.run(tf.round(tf.sigmoid(model.logits)), feed_dict = feed)

# +
from tensor2tensor.utils import rouge

def calculate_rouge(predicted, batch_y, cp_batch_clss, batch_x):
    # Rebuild the token sequences of the gold-selected and model-selected
    # sentences, then score ROUGE-N between them.
    f, y_, predicted_ = [], [], []
    for i in range(len(cp_batch_clss)):
        f.append(cp_batch_clss[i][cp_batch_clss[i] != -1])
        y_.append(batch_y[i][cp_batch_clss[i] != -1])
        predicted_.append(predicted[i][cp_batch_clss[i] != -1])
    actual, predict = [], []
    for i in range(len(f)):
        actual_, predict_ = [], []
        for k in range(len(f[i])):
            if k == (len(f[i]) - 1):
                # Last sentence runs to the end of the sequence; strip padding.
                s = batch_x[i][f[i][k]:]
                s = s[s != 0]
            else:
                s = batch_x[i][f[i][k]: f[i][k + 1]]
            # Drop special-token ids before scoring — assumed to be
            # pad/BOS/EOS/UNK-style markers; confirm against the dictionary.
            s = [w for w in s if w not in [0, 1, 2, 3, 5, 6, 7, 8]]
            if y_[i][k]:
                actual_.extend(s)
            if predicted_[i][k]:
                predict_.extend(s)
        actual.append(actual_)
        predict.append(predict_)
    return rouge.rouge_n(predict, actual)

calculate_rouge(predicted, batch_y, cp_batch_clss, batch_x)
# -

from tqdm import tqdm as tqdm_base

def tqdm(*args, **kwargs):
    # Notebook workaround: close any stale progress bars left by interrupted
    # cells before starting a new one.
    if hasattr(tqdm_base, '_instances'):
        for instance in list(tqdm_base._instances):
            tqdm_base._decr_instances(instance)
    return tqdm_base(*args, **kwargs)

# +
# Full test-set ROUGE evaluation.
# NOTE(review): the range steps by 32 but `index` advances by batch_size
# (128), so consecutive batches overlap by 96 examples — looks unintended;
# confirm whether the step or the index bound should change.
rouges = []
pbar = tqdm(
    range(0, len(test_X), 32), desc = 'minibatch loop')
for i in pbar:
    index = min(i + batch_size, len(test_X))
    batch_x, _ = pad_sentence_batch(test_X[i: index], 0)
    batch_y, _ = pad_sentence_batch(test_Y[i: index], 0)
    batch_clss, _ = pad_sentence_batch(test_clss[i: index], -1)
    batch_clss = np.array(batch_clss)
    batch_y = np.array(batch_y)
    batch_x = np.array(batch_x)
    cp_batch_clss = batch_clss.copy()
    batch_mask = 1 - (batch_clss == -1)
    batch_clss[batch_clss == -1] = 0
    feed = {model.X: batch_x, model.mask: batch_mask, model.clss: batch_clss}
    predicted = sess.run(tf.round(tf.sigmoid(model.logits)), feed_dict = feed)
    rouge_ = calculate_rouge(predicted, batch_y, cp_batch_clss, batch_x)
    rouges.append(rouge_)
    pbar.set_postfix(rouge = rouge_)
# -

np.mean(rouges)
extractive-summarization/1.rnn-lstm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Logging setup import logging logging.basicConfig(level=logging.INFO) logging.getLogger().setLevel(level=logging.ERROR) logging.getLogger('orion').setLevel(level=logging.INFO) import warnings warnings.simplefilter("ignore") # - # # Orion Tutorial # # In the following steps we will learn how to set Orion up, run pipelines to detect anomalies # on our timeseries and then explore the results. # # Overall, the steps that we will perform are: # # 1. Add _Datasets_, _Signals_, _Templates_, _Pipelines_ and _Experiments_ to our Database. # 2. Create start _Dataruns_, which create _Signalruns_ and _Events_. # 3. Explore the _Signalrun_ results and the detected _Events_. # 4. Add _Annotations_ to the existing _Events_ as well as new manual _Events_. # # ## Creating an instance of the OrionDBExplorer # # In order to connect to the database, all you need to do is import and create an instance of the # `OrionDBExplorer` class. # # Note that, because of the dynamic schema-less nature of MongoDB, no database initialization # or table creation is needed. All you need to do start using a new database is create the # `OrionDBExplorer` instance with the right connection details and start using it! # # In order to create the `OrionDBExplorer` instance you will need to pass: # # * `user`: An identifier of the user that is running Orion. # * `database`: The name of the MongoDB database to use. This is optional and defaults to `orion`. # + from orion.db import OrionDBExplorer orex = OrionDBExplorer(user='my_username', database='orion-usage-example') # - # This will directly create a connection to the database named `'orion'` at the default # MongoDB host, `localhost`, and port, `27017`. 
# # In case you wanted to connect to a different database, host or port, or in case user authentication # is enabled in your MongoDB instance, you can pass a dictionary or a path to a JSON file containing # any required additional arguments: # # * `host`: Hostname or IP address of the MongoDB Instance. Defaults to `'localhost'`. # * `port`: Port to which MongoDB is listening. Defaults to `27017`. # * `username`: username to authenticate with. # * `password`: password to <PASSWORD> with. # * `authentication_source`: database to authenticate against. # Once we have created the `OrionDBExplorer` instance, and to be sure that we are ready to follow # the tutorial, let's do the following two set-up setps: # 1. Drop the `orion-usage-example` database # # **WARNING**: This will remove all the data that exists in this database! orex.drop_database() # 2. Make sure to have downloaded some demo data using the `orion.data.download_demo()` function # + from orion.data import download_demo download_demo() # - # This will create a folder called `orion-data` in your current directory with the 3 CSV files # that we will use later on. # ## Setting up the Orion Environment # # The first thing that you will need to do to start using **Orion** with a Database will be # to add information about your data and your pipelines. # # This can be done by using the methods of the `OrionDBExplorer` class that are documenteted below, # which allow creating the corresponding objects in the Database. # # ### Add a Dataset # # In order to add a dataset you can use the `add_dataset` method, which has the following arguments: # # * `name (str)`: Name of the dataset # * `entity (str)`: Name or Id of the entity which this dataset is associated to # # Let's create the `Demo Dataset` that we will use for our demo. dataset = orex.add_dataset( name='Demo Dataset', entity='Orion', ) # This call will try to create a new _Dataset_ object in the database and return it. 
# # We can now see the _Dataset_ that we just created using the `get_datasets` method: orex.get_datasets() # ### Add a Signal # # The next step is to add Signals. This can be done with the `add_signal` method, which expects: # # * `name (str)`: Name of the signal # * `dataset (Dataset or ObjectID)`: Dataset Object or Dataset Id. # * `start_time (int)`: (Optional) minimum timestamp to be used for this signal. If not given, it # defaults to the minimum timestamp found in the data. # * `stop_time (int)`: (Optional) maximum timestamp to be used for this signal. If not given, it # defaults to the maximum timestamp found in the data. # * `data_location (str)`: URI of the dataset # * `timestamp_column (int)`: (Optional) index of the timestamp column. Defaults to 0. # * `value_column (int)`: (Optional) index of the value column. Defaults to 1. # # For example, adding the `S-1` signal to the Demo Dataset that we just created could be done like # this: orex.add_signal( name='S-1', dataset=dataset, data_location='orion-data/S-1.csv' ) # Additionally, we can also add all the signals that exist inside a folder by using the `add_signals` # method, passing a `signals_path`: orex.add_signals( dataset=dataset, signals_path='orion-data' ) # After this is done, we can see that one signal has been created for each one of the CSV # files that we downloaded before. orex.get_signals(dataset=dataset) # ### Add a Template # # The next thing we need to add is a _Template_ to the Database using the `add_template` method. # # This method expects: # # * `name (str)`: Name of the template. # * `template (dict or str)`: Optional. Specification of the template to use, which can be one of: # * An MLPipeline instance # * The name of a registered template # * a dict containing the MLPipeline details # * The path to a pipeline JSON file. # # **Orion** comes with a few templates ready to be used, so let's have a look at the ones that exist # using the `orion.get_available_templates` function. 
# + from orion import get_available_templates get_available_templates() # - # And now let's create a _Template_ using the `lstm_dynamic_threshold` template. template = orex.add_template( name='lstmdt', template='lstm_dynamic_threshold', ) # We can now see the _Template_ that we just created orex.get_templates() # Also, during this step, apart from a _Template_ object, a _Pipeline_ object has also been # registred with the same name as the _Template_ and using the default hyperparameter values. orex.get_pipelines() # However, if we want to use a configuration different from the default, we might want to # create another _Pipeline_ with custom hyperparameter values. # # In order to do this we will need to call the `add_pipeline` method passing: # # * `name (str)`: Name given to this pipeline # * `template (Template or ObjectID)`: Template or the corresponding id. # * `hyperparameters (dict or str)`: dict containing the hyperparameter details or path to the # corresponding JSON file. Optional. # # For example, if we want to specify a different number of epochs for the LSTM primitive of the # pipeline that we just created we will run: new_hyperparameters = { 'keras.Sequential.LSTMTimeSeriesRegressor#1': { 'epochs': 1, 'verbose': True } } pipeline = orex.add_pipeline( name='lstmdt_1_epoch', template=template, hyperparameters=new_hyperparameters, ) # And we can see how a new _Pipeline_ was created in the Database. orex.get_pipelines() # ### Add an Experiment # # Once we have a _Dataset_ with _Signals_ and a _Template_, we are ready to add an # _Experiment_. # # In order to run an _Experiment_ we will need to: # # 1. Get the _Dataset_ and the list of _Signals_ that we want to run the _Experiment_ on. # 2. Get the _Template_ which we want to use for the _Experiment_ # 3. Call the `add_experiment` method passing all these with an experiment, a project name and a # username. 
# # For example, if we want to create an experiment using the _Dataset_, the _Signals_ and the # _Template_ that we just created, we will use: experiment = orex.add_experiment( name='My Experiment', project='My Project', template=template, dataset=dataset, ) # This will create an _Experiment_ object in the database using the indicated _Template_ # and all the _Signals_ from the given _Dataset_. orex.get_experiments() # ## Starting a Datarun # # Once we have created our _Experiment_ object we are ready to start executing _Pipelines_ on our # _Signals_. # # For this we will need to use the `orion.runner.start_datarun` function, which expects: # # * `orex (OrionExplorer)`: The `OrionDBExplorer` instance. # * `experiment (Experiment or ObjectID)`: Experiment object or the corresponding ID. # * `pipeline (Pipeline or ObjectID)`: Pipeline object or the corresponding ID. # # This will create a _Datarun_ object for this _Experiment_ and _Pipeline_ in the database, # and then it will start creating and executing _Signalruns_, one for each _Signal_ in the _Experiment_. # Let's trigger a _Datarun_ using the `lstmdt_1_epoch` _Pipeline_ that we created. # + from orion.runner import start_datarun start_datarun(orex, experiment, pipeline) # - # ## Explore the results # # Once a _Datarun_ has finished, we can see can see its status by using the `orex.get_dataruns` method. orex.get_dataruns() # As well as the _Signalruns_ and _Events_ that were created. datarun = orex.get_datarun(experiment=experiment) signalruns = orex.get_signalruns(datarun=datarun) signalruns signalrun_id = signalruns['signalrun_id'].iloc[-1] orex.get_events(signalrun=signalrun_id) # ## Add Manual Events and Annotations # # If we want to add new events manually, we can do so by calling the `add_event` method and # passing: # # * `start_time (int)`: The timestamp at which the event starts # * `stop_time (int)`: The timestamp at which the event ends # * `source (str)`: If manual, the string `MANUALLY_CREATED`. 
# * `signal (Signal or ObjectID or str)`: The id to which the Event is associated. signal = orex.get_signal(name='P-1') event = orex.add_event( start_time=1393758300, stop_time=1408270800, source='MANUALLY_CREATED', signal=signal ) # And, optionally, add annotations to any of the events. orex.add_annotation( event=event, tag='maneuver', comment='satellite was maneuvering during this period' ) unknown_event = orex.get_events().event_id.iloc[0] orex.add_annotation( event=unknown_event, tag='unknown', comment='this needs to be investigated' ) # We can then see the annotations that we just created orex.get_annotations()
notebooks/OrionDBExplorer Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Plotting a Torus
#
# To plot a surface in 3D you first need to find the
# parametrization of the surface you wish to plot.
#
# For a Torus the parametrization looks like this:
# \begin{align}
# x &= [c + a \cos v] \cos u \\
# y &= [c + a \cos v] \sin u \\
# z &= a \sin v
# \end{align}
#
# where $a$ represents the radius of the tube and $c$ is the radius of the
# center hole of the torus tube. See https://mathworld.wolfram.com/Torus.html for further information.
#
# Begin by activating matplotlib notebook and importing numpy and
# matplotlib

# +
# %matplotlib notebook
# This enables you to drag and rotate the figure
from mpl_toolkits.mplot3d import Axes3D  # needed to do the 3D Plots
import numpy as np
import matplotlib.pyplot as plt

# +
fig = plt.figure()
# FIX: fig.gca(projection='3d') was deprecated in matplotlib 3.4 and removed
# in 3.6; add_subplot(projection='3d') is the supported way to get 3D axes.
ax = fig.add_subplot(projection='3d')

# Sample both torus angles over a full revolution.
u = np.linspace(0, 2*np.pi, 100)
v = np.linspace(0, 2*np.pi, 100)
u, v = np.meshgrid(u, v)  # Make coordinate matrices from coordinate vectors.

a, c = 0.2, 1.0  # Set a and c from the torus parametrization

# Parametrization
x = (c + a*np.cos(v))*np.cos(u)
y = (c + a*np.cos(v))*np.sin(u)
z = a*np.sin(v)

ax.plot_surface(x, y, z)

# Equal limits on all three axes so the torus is not visually distorted.
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
# -

# # Make a surface plot of a sphere
#
# Plot a sphere, the parametrization is:
#
# \begin{align}
# x &= r \cos \theta \sin \phi \\
# y &= r \sin \theta \sin \phi \\
# z &= r \cos \phi
# \end{align}
#
# where $\theta$ is the longitude coordinate running from $0$ to $2\pi$,
# $\phi$ is the colatitude coordinate running from $0$ to $\pi$ and $r$ is the radius.
# +
fig = plt.figure()
# FIX: fig.gca(projection='3d') was removed in matplotlib 3.6; use add_subplot.
ax = fig.add_subplot(projection='3d')

# Fill in the rest
# -

# # Make a surface plot of a Mobius strip
#
# The parametrization is:
# \begin{align}
# x &= \left(1 + \frac{v}{2}\cos \frac{u}{2} \right)\cos u \\
# y &= \left(1 + \frac{v}{2}\cos \frac{u}{2} \right)\sin u \\
# z &= \frac{v}{2}\sin \frac{u}{2}
# \end{align}
#
# (FIX: the second line previously read $\cos v$; the Mobius strip uses
# $\cos\frac{u}{2}$ in both the $x$ and $y$ components.)
#
# where $u$ is running from $0$ to $2\pi$ and $v$ is running from $-1$ to $1$.

# +
fig = plt.figure()
ax = fig.add_subplot(projection='3d')

# Fill in the rest
# -
problem_candidates/Plotting surfacees.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# When reading pytorch gradient-descent code you typically see
# ```
# 0. compute cost
# 1. optimizer = optim.SGD([w,b],lr)
# 2. optimizer.zero_grad()
# 3. cost.backward()
# 4. optimizer.step()
# ```
#
# Of these,
# ```
# requires_grad=True
# .backward()
# ```
# are the pieces handled by pytorch's autograd engine
# (autograd is what drives backpropagation).

# ### 1) Autograd in practice
#
# - `requires_grad=True`: tells pytorch to track operations on this tensor so
#   a gradient can be accumulated into it.
# ```python
# w = torch.tensor(2.0, requires_grad=True)
# ```
# After a backward pass, the computed gradient is stored in `w.grad`.
#
# - `.backward()`: differentiates the expression and fills in the gradients.

# +
import torch

# Leaf tensor whose gradient we want: w = 2.
w = torch.tensor(2.0, requires_grad=True)

# Build the computation graph z = 2*w**2 + 2.
y = w ** 2
z = 2 * y + 2

# Backpropagate: computes dz/dw and stores it in w.grad
# (dz/dw = 4*w = 8 at w = 2).
z.backward()
# -

print(w.grad)
1.Study/2. with computer/3.Deep_Learning_code/4. Pytorch/0.basic/4. Auto_grad.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: gold-miner # language: python # name: gold-miner # --- # # Download and store data # This notebook contains information on downloading the Quandl Wiki stock prices and a few other sources that we use throughout the book. # Imports & Settings import warnings warnings.filterwarnings('ignore') # + from pathlib import Path import requests from io import BytesIO from zipfile import ZipFile, BadZipFile import numpy as np import pandas as pd import pandas_datareader.data as web from sklearn.datasets import fetch_openml pd.set_option('display.expand_frame_repr', False) # - # DATA_STORE = Path('assets.h5') DATA_STORE = Path('assets.h5') df = (pd.read_csv('wiki_prices.csv', parse_dates=['date'], index_col=['date', 'ticker'], infer_datetime_format=True) .sort_index()) # # Quandl Wiki Prices # Quandl makes available a dataset with stock prices, dividends and splits for 3000 US publicly-traded companies. Quandl decided to discontinue support in favor of its commercial offerings but the historical data are still useful to demonstrate the application of the machine learning solutions in the book, just ensure you implement your own algorithms on current data. # # ```As of April 11, 2018 this data feed is no longer actively supported by the Quandl community. We will continue to host this data feed on Quandl, but we do not recommend using it for investment or analysis.``` # # Follow the instructions to create a free Quandl) account # Download the entire WIKI/PRICES data # Extract the .zip file, # Move to this directory and rename to wiki_prices.csv # Run the below code to store in fast HDF format (see Chapter 02 on Market & Fundamental Data for details). 
# FIX: DataFrame.info(null_counts=True) was deprecated in pandas 1.2 and
# removed in pandas 2.0; show_counts=True is the replacement.
print(df.info(show_counts=True))

# Persist the price panel into the HDF5 store.
with pd.HDFStore(DATA_STORE) as store:
    store.put('quandl/wiki/prices', df)

# # Wiki Prices Metadata

# As of writing, the following instructions no longer work because Quandl
# changed its API:
#
# Follow the instructions to create a free Quandl) account if you haven't done so yet
# Find link to download wiki metadata under Companies](https://www.quandl.com/databases/WIKIP/documentation) or use the download link with your API_KEY: https://www.quandl.com/api/v3/databases/WIKI/metadata?api_key=
# Extract the .zip file,
# Move to this directory and rename to wiki_stocks.csv
# Run the following code to store in fast HDF format

# Instead, load the file wiki_stocks.csv as described and store in HDF5 format.

# +
df = pd.read_csv('wiki_stocks.csv')
# no longer needed
# df = pd.concat([df.loc[:, 'code'].str.strip(),
#                 df.loc[:, 'name'].str.split('(', expand=True)[0].str.strip().to_frame('name')], axis=1)

print(df.info(show_counts=True))

with pd.HDFStore(DATA_STORE) as store:
    store.put('quandl/wiki/stocks', df)
# -

# # S&P 500 Prices

# The following code downloads historical S&P 500 prices from FRED (only last
# 10 years of daily data is freely available)
df = web.DataReader(name='SP500', data_source='fred', start=2009).squeeze().to_frame('close')
print(df.info())
with pd.HDFStore(DATA_STORE) as store:
    store.put('sp500/fred', df)

# Long-run S&P 500 history from Stooq (downloaded separately as ^spx_d.csv).
sp500_stooq = (pd.read_csv('^spx_d.csv',
                           index_col=0,
                           parse_dates=True)
               .loc['1950':'2019']
               .rename(columns=str.lower))
print(sp500_stooq.info())

# +
# US equities metadata, keyed by ticker.
df = pd.read_csv('us_equities_meta_data.csv')
df.info()
with pd.HDFStore(DATA_STORE) as store:
    store.put('us_equities/stocks', df.set_index('ticker'))
# -
data/my_create_datasets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 4
#
# Welcome to Assignment 4. This will be the most fun. Now we will prepare data
# for plotting.
#
# Just make sure you hit the play button on each cell from top to down. There
# are three functions you have to implement. Please also make sure than on each
# change on a function you hit the play button again on the corresponding cell
# to make it available to the rest of this notebook.
#
# Sampling is one of the most important things when it comes to visualization
# because often the data set gets so huge that you simply
#
# - can't copy all data to a local Spark driver (Watson Studio is using a
#   "local" Spark driver)
# - can't throw all data at the plotting library
#
# Please implement a function which returns a 10% sample of a given data frame:

# +
from pyspark.sql import SparkSession

# initialise sparkContext
spark = SparkSession.builder \
    .master('local') \
    .appName('myAppName') \
    .config('spark.executor.memory', '1gb') \
    .config("spark.cores.max", "2") \
    .getOrCreate()

sc = spark.sparkContext

# using SQLContext to read parquet file
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
# -

import pyspark.sql.functions as F


def getSample(df, spark):
    """Return a ~10% random sample of the given Spark DataFrame.

    FIX: the previous implementation returned df.sample(...).count() — the
    number of sampled rows — while the task asks for the sample itself.

    some reference: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame
    https://spark.apache.org/docs/latest/api/sql/
    """
    return df.sample(fraction=0.1)


# Now we want to create a histogram and boxplot.
Please ignore the sampling for now and return a python list containing all temperature values from the data set def getListForHistogramAndBoxPlot(df,spark): #TODO Please enter your code here, you are not required to use the template code below #some reference: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame #https://spark.apache.org/docs/latest/api/sql/ my_list = df.select(F.collect_list('temperature')).first()[0] if not type(my_list)==list: raise Exception('return type not a list') return my_list # Finally we want to create a run chart. Please return two lists (encapsulated in a python tuple object) containing temperature and timestamp (ts) ordered by timestamp. Please refer to the following link to learn more about tuples in python: https://www.tutorialspoint.com/python/python_tuples.htm #should return a tuple containing the two lists for timestamp and temperature #please make sure you take only 10% of the data by sampling #please also ensure that you sample in a way that the timestamp samples and temperature samples correspond (=> call sample on an object still containing both dimensions) def getListsForRunChart(df,spark): #TODO Please enter your code here, you are not required to use the template code below #some reference: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame #https://spark.apache.org/docs/latest/api/sql/ double_tuple_rdd = spark.sql(""" select temperature, ts from washing where temperature is not null order by ts asc """).sample(False,0.1).rdd.map(lambda row : (row.ts,row.temperature)) result_array_ts = double_tuple_rdd.map(lambda ts_temperature: ts_temperature[0]).collect() result_array_temperature = double_tuple_rdd.map(lambda ts_temperature: ts_temperature[1]).collect() return (result_array_ts,result_array_temperature) result_array_ts = double_tuple_rdd.map(lambda ts_temperature: ts_temperature[0]).collect() result_array_temperature = double_tuple_rdd.map(lambda 
ts_temperature: ts_temperature[1]).collect() return (result_array_ts,result_array_temperature) # Now it is time to grab a PARQUET file and create a dataframe out of it. Using SparkSQL you can handle it like a database. # !wget https://github.com/IBM/coursera/blob/master/coursera_ds/washing.parquet?raw=true # !mv washing.parquet?raw=true washing.parquet df = spark.read.parquet('washing.parquet') df.createOrReplaceTempView('washing') df.show() # Now we gonna test the functions you've completed and visualize the data. # !pip install matplotlib # %matplotlib inline import matplotlib.pyplot as plt plt.hist(getListForHistogramAndBoxPlot(df,spark)) plt.show() plt.boxplot(getListForHistogramAndBoxPlot(df,spark)) plt.show() lists = getListsForRunChart(df,spark) plt.plot(lists[0],lists[1]) plt.xlabel("time") plt.ylabel("temperature") plt.show() # Congratulations, you are done! The following code submits your solution to the grader. Again, please update your token from the grader's submission page on Coursera # !rm -f rklib.py # !wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py # + from rklib import submitAll import json key = "S5PNoSHNEeisnA6YLL5C0g" email = ###_YOUR_CODE_GOES_HERE_### token = ###_YOUR_CODE_GOES_HERE_### #you can obtain it from the grader page on Coursera (have a look here if you need more information on how to obtain the token https://youtu.be/GcDo0Rwe06U?t=276) # + parts_data = {} parts_data["iLdHs"] = json.dumps(str(type(getListForHistogramAndBoxPlot(df,spark)))) parts_data["xucEM"] = json.dumps(len(getListForHistogramAndBoxPlot(df,spark))) parts_data["IyH7U"] = json.dumps(str(type(getListsForRunChart(df,spark)))) parts_data["MsMHO"] = json.dumps(len(getListsForRunChart(df,spark)[0])) submitAll(email, token, key, parts_data) # -
Course-01-Fundamentals-of-Scalable-Data-Science/assignment4.1_spark2.3_python3.6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.1.0
#     language: julia
#     name: julia-1.1
# ---

# GP regression with a Laplace likelihood using AugmentedGaussianProcesses.jl,
# comparing sparse (SVGP) models of increasing size against the full VGP model.
using Plots; pyplot();
using DelimitedFiles, Distributions;
using AugmentedGaussianProcesses;

# Generate N sorted inputs on roughly [-20, 20].
N = 1000
X = reshape((sort(rand(N)).-0.5).*40.0,N,1)

# Latent function: a scaled sinc, 5*sin(x)/x.
function latent(x)
    5.0.*sin.(x)./x
end

# Noisy observations of the latent function.
Y = (latent(X)+randn(N))[:];
scatter(X,Y)

# Run sparse classification with increasing number of inducing points
Ms = [4, 8, 16, 32, 64]
# One slot per sparse model plus a final slot for the full model.
models = Vector{AbstractGP}(undef,length(Ms)+1)
kernel = RBFKernel(1.0)
for (index, num_inducing) in enumerate(Ms)
    m = SVGP(X, Y, kernel,LaplaceLikelihood(),AnalyticVI(),num_inducing)
    println("Training with $(num_inducing) points")
    @time train!(m,iterations=100)
    models[index]=m;
end

# Non-sparse reference model, stored in the last slot of `models`.
mfull = VGP(X, Y, kernel,LaplaceLikelihood(),AnalyticVI())
println("Training with full model")
@time train!(mfull,iterations=5);
models[end]=mfull;

# Evaluate the predictive distribution of `model` on a regular grid.
function compute_Grid(model,nGrid=50)
    mins = -20
    maxs = 20
    Xplot = collect(range(mins[1],stop=maxs[1],length=nGrid))
    y = proba_y(model,Xplot)
    return (y,Xplot)
end;

# Scatter-plot of the raw observations.
function plotdata(X,Y)
    Plots.plot(X,Y,t=:scatter,alpha=0.33,markerstrokewidth=0.0,lab="",size=(300,500));
end;

# Overlay model predictions (and inducing points, when the model has a field
# :Z) on top of the data.
function plotcontour(model,X,Y)
    nGrid = 100
    (predic,x) = compute_Grid(model,nGrid);
    plotdata(X,Y)
    if in(:Z,fieldnames(typeof(model)))
        Plots.plot!(model.Z[1][:,1],zero(model.Z[1][:,1]),msize=2.0,color="black",t=:scatter,lab="")
    end
    return Plots.plot!(x,predic,title=(in(:Z,fieldnames(typeof(model))) ? "M = $(model.nFeatures)" : "full"),color="red",lab="",linewidth=3.0)
end;

# One panel per model: each sparse model followed by the full model.
Plots.plot(broadcast(x->plotcontour(x,X,Y),models)...,layout=(1,length(models)),size=(1000,200))
examples/Regression - Laplace.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- from eulerian_cities.eulerian import eulerian_trail_from_place eulerian_trail_from_place( query='Jonzieux', network_type='drive', start=(4.373818, 45.318257), save_trail_as_gpx=True, gpx_fp = './data/jonzieux.gpx', save_animation=True, animation_fp='./data/jonzieux_for_display.gif', animation_fig_size=10, quiet=True ) eulerian_trail_from_place( query='Jonzieux', network_type='drive', start=(4.373818, 45.318257), save_trail_as_gpx=True, gpx_fp = './data/jonzieux.gpx', save_animation=True, animation_fp='./data/jonzieux.gif', quiet=True ) eulerian_trail_from_place( query=('42, <NAME>', 600), query_type='address', save_trail_as_gpx=True, gpx_fp='./data/caluire_address_test.gpx', quiet=True ) eulerian_trail_from_place( query=(45.79, 45.78, 4.84, 4.80), query_type='bbox', network_type='drive', save_trail_as_gpx=True, gpx_fp='./data/vaise_bbox.gpx', quiet=True ) eulerian_trail_from_place( query='Neuenheim, Heidelberg', network_type='drive', start='west', save_trail_as_gpx=True, gpx_fp='./data/hd_test.gpx', quiet=True )
examples/examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + hide_input=true inputHidden=true language="html"
# <span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at 'In [2]'.</span>
# -

# # AutoML Image Classification: With Rotation (Fashion MNIST)

# +
import warnings

# Silence library deprecation chatter in the notebook output.
warnings.simplefilter(action="ignore", category=FutureWarning)
# -

# ![](imgs/rotation.png)

# +
import random as rn
from abc import ABC, abstractmethod

import autokeras as ak
import h2o
import matplotlib.pyplot as plt
import numpy as np
from h2o.automl import H2OAutoML
from keras.datasets import fashion_mnist
from numpy.random import RandomState
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tpot import TPOTClassifier

from dpemu import runner
from dpemu.filters.common import GaussianNoise, Clip
from dpemu.filters.image import RotationPIL
from dpemu.nodes import Array
from dpemu.nodes.series import Series
from dpemu.plotting_utils import visualize_scores, print_results_by_model
from dpemu.utils import generate_tmpdir
# -


# +
def get_data():
    """Return (x_train, x_test, y_train, y_test) for Fashion-MNIST,
    flattened to float64 vectors of length 28*28.

    The commented-out lines are the alternative sklearn-digits setup.
    """
    # random_state = RandomState(42)
    # x, y = load_digits(return_X_y=True)
    # y = y.astype(np.uint8)
    # return train_test_split(x, y, test_size=.25, random_state=random_state)
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    s = x_train.shape[1]
    x_train = x_train.reshape((len(x_train), s**2)).astype(np.float64)
    x_test = x_test.reshape((len(x_test), s**2)).astype(np.float64)
    return x_train, x_test, y_train, y_test
# -


# +
def get_err_root_node():
    """Build the dpemu error-generation tree: a series of 28x28 images, each
    rotated by up to "max_angle" degrees (the parameter is bound later via
    get_err_params_list). The commented-out lines are the alternative
    Gaussian-noise error model.
    """
    # err_img_node = Array(reshape=(8, 8))
    err_img_node = Array(reshape=(28, 28))
    err_root_node = Series(err_img_node)
    err_img_node.addfilter(RotationPIL("max_angle"))
    return err_root_node
    # err_root_node = Series(err_img_node)
    # err_img_node.addfilter(GaussianNoise("mean", "std"))
    # err_img_node.addfilter(Clip("min_val", "max_val"))
    # return err_root_node
# -


# +
def get_err_params_list(data):
    """Return one parameter dict per error level: max rotation angle from
    0 to 180 degrees in six steps. The commented-out lines are the parameter
    grid for the Gaussian-noise alternative.
    """
    angle_steps = np.linspace(0, 180, num=6)
    err_params_list = [{"max_angle": a} for a in angle_steps]
    return err_params_list
    # min_val = np.amin(data)
    # max_val = np.amax(data)
    # std_steps = np.round(np.linspace(0, max_val, num=6), 3)
    # err_params_list = [{"mean": 0, "std": std, "min_val": min_val, "max_val": max_val} for std in std_steps]
    # return err_params_list
# -


# +
class Preprocessor:
    """Round the (error-perturbed) float data back to uint8 images."""

    def run(self, train_data, test_data, params):
        return np.round(train_data).astype(np.uint8), np.round(test_data).astype(np.uint8), {}
# -


# +
class AbstractModel(ABC):
    """Common driver for the AutoML systems under comparison: fit on the
    (possibly errored) training data, then report train/test accuracy and
    the best pipeline found."""

    def __init__(self):
        # Wall-clock search budget per AutoML run (12 h).
        self.time_limit_mins = 60*12
        self.seed = 42
        self.random_state = RandomState(self.seed)
        np.random.seed(self.seed)

    @abstractmethod
    def get_fitted_model(self, train_data, train_labels, params):
        pass

    @abstractmethod
    def get_accuracy(self, data, labels, fitted_model, params):
        pass

    @abstractmethod
    def get_best_pipeline(self, fitted_model):
        pass

    def run(self, train_data, test_data, params):
        """dpemu runner entry point; returns the scores for one error level."""
        train_labels = params["train_labels"]
        test_labels = params["test_labels"]
        fitted_model = self.get_fitted_model(train_data, train_labels, params)
        results = {
            "test_acc": self.get_accuracy(test_data, test_labels, fitted_model, params),
            "train_acc": self.get_accuracy(train_data, train_labels, fitted_model, params),
            "best_pipeline": self.get_best_pipeline(fitted_model),
        }
        print(type(fitted_model))
        print(results["test_acc"])
        return results


class TPOTClassifierModel(AbstractModel):
    """TPOT genetic-programming AutoML over sklearn pipelines."""

    def __init__(self):
        super().__init__()

    def get_fitted_model(self, train_data, train_labels, params):
        return TPOTClassifier(
            max_time_mins=self.time_limit_mins,
            max_eval_time_mins=self.time_limit_mins,
            n_jobs=-1,
            random_state=self.seed,
            verbosity=1,
            use_dask=True
        ).fit(train_data, train_labels)

    def get_accuracy(self, data, labels, fitted_model, params):
        return round(fitted_model.score(data, labels), 3)

    def get_best_pipeline(self, fitted_model):
        # The sklearn estimators that make up TPOT's winning pipeline.
        return [step[1] for step in fitted_model.fitted_pipeline_.steps]


class H2OAutoMLModel(AbstractModel):
    """H2O AutoML; numpy arrays are converted to H2OFrames with the label
    appended as the last (categorical) column."""

    def __init__(self):
        super().__init__()
        # Random cluster name so parallel notebook runs don't collide.
        h2o.init(name=f"#{rn.SystemRandom().randint(1, 2**30)}", nthreads=48)
        h2o.no_progress()

    def get_fitted_model(self, train_data, train_labels, params):
        train_data = h2o.H2OFrame(np.concatenate((train_data, train_labels.reshape(-1, 1)), axis=1))
        # Last column is the label; mark it categorical for classification.
        x = np.array(train_data.columns)[:-1].tolist()
        y = np.array(train_data.columns)[-1].tolist()
        train_data[y] = train_data[y].asfactor()
        aml = H2OAutoML(max_runtime_secs=60*self.time_limit_mins, seed=self.seed)
        aml.train(x=x, y=y, training_frame=train_data)
        return aml

    def get_accuracy(self, data, labels, fitted_model, params):
        data = h2o.H2OFrame(np.concatenate((data, labels.reshape(-1, 1)), axis=1))
        y = np.array(data.columns)[-1].tolist()
        data[y] = data[y].asfactor()
        pred = fitted_model.predict(data).as_data_frame(header=False)["predict"].values.astype(int)
        return np.round(np.mean(pred == labels), 3)

    def get_best_pipeline(self, fitted_model):
        # Leader model name plus, for stacked ensembles, its base models.
        leader_params = fitted_model.leader.get_params()
        best_pipeline = [leader_params["model_id"]["actual_value"]["name"]]
        if "base_models" in leader_params:
            for base_model in leader_params["base_models"]["actual_value"]:
                best_pipeline.append(base_model["name"])
        # NOTE(review): shutting the H2O cluster down inside this getter is a
        # side effect callers must be aware of — confirm this is intentional.
        h2o.cluster().shutdown()
        return best_pipeline


class AutoKerasModel(AbstractModel):
    """AutoKeras image classifier; flat vectors are reshaped to s x s x 1."""

    def __init__(self):
        super().__init__()
        # Local imports keep tensorflow/torch out of the other models' runs.
        import tensorflow as tf
        tf.set_random_seed(self.seed)
        import torch
        torch.multiprocessing.set_sharing_strategy("file_system")
        torch.manual_seed(self.seed)

    def get_fitted_model(self, x_train, y_train, params):
        s = np.sqrt(x_train.shape[1]).astype(int)
        x_train = x_train.reshape((len(x_train), s, s, 1))
        clf = ak.ImageClassifier(augment=False, path=generate_tmpdir(), verbose=False)
        clf.fit(x_train, y_train, time_limit=60*self.time_limit_mins)
        return clf

    def get_accuracy(self, x, y, clf, params):
        s = np.sqrt(x.shape[1]).astype(int)
        x = x.reshape((len(x), s, s, 1))
        y_pred = clf.predict(x)
        return np.round(accuracy_score(y_true=y, y_pred=y_pred), 3)

    def get_best_pipeline(self, clf):
        # The torch modules of AutoKeras' best CNN (index 0 is the root).
        return [m for i, m in enumerate(clf.cnn.best_model.produce_model().modules()) if i > 0]
# -


# +
def get_model_params_dict_list(train_labels, test_labels):
    """One entry per AutoML system to benchmark. The commented-out entries
    enable training on clean data and/or the AutoKeras model."""
    model_params_base = {"train_labels": train_labels, "test_labels": test_labels}
    return [
        {
            "model": TPOTClassifierModel,
            "params_list": [{**model_params_base}],
            "use_clean_train_data": False
        },
        # {
        #     "model": TPOTClassifierModel,
        #     "params_list": [{**model_params_base}],
        #     "use_clean_train_data": True
        # },
        {
            "model": H2OAutoMLModel,
            "params_list": [{**model_params_base}],
            "use_clean_train_data": False
        },
        # {
        #     "model": H2OAutoMLModel,
        #     "params_list": [{**model_params_base}],
        #     "use_clean_train_data": True
        # },
        # {
        #     "model": AutoKerasModel,
        #     "params_list": [{**model_params_base}],
        #     "use_clean_train_data": False
        # },
        # {
        #     "model": AutoKerasModel,
        #     "params_list": [{**model_params_base}],
        #     "use_clean_train_data": True
        # },
    ]
# -


# +
def visualize(df):
    """Plot train/test accuracy as a function of the rotation error level."""
    visualize_scores(
        df,
        score_names=["test_acc", "train_acc"],
        is_higher_score_better=[True, True],
        err_param_name="max_angle",
        # err_param_name="std",
        title="Classification scores with added error"
    )
    plt.show()
# -


# +
# Run the full benchmark: every model on every error level.
train_data, test_data, train_labels, test_labels = get_data()

df = runner.run(
    train_data=train_data,
    test_data=test_data,
    preproc=Preprocessor,
    preproc_params=None,
    err_root_node=get_err_root_node(),
    err_params_list=get_err_params_list(train_data),
    model_params_dict_list=get_model_params_dict_list(train_labels, test_labels),
    n_processes=1
)
# -

# +
print_results_by_model(df, ["train_labels", "test_labels"],
                       # ["mean", "min_val", "max_val", "train_labels", "test_labels"],
                       err_param_name="max_angle",
                       # err_param_name="std",
                       pipeline_name="best_pipeline"
                       )
visualize(df)
# -

#
docs/case_studies/test4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A look at solution data and processed variables

# Once you have run a simulation the first thing you want to do is have a look
# at the data. Most of the examples so far have made use of PyBaMM's handy
# QuickPlot function but there are other ways to access the data and this
# notebook will explore them. First off we will generate a standard SPMe model
# and use QuickPlot to view the default variables.

# +
# %pip install pybamm -q    # install PyBaMM if it is not installed
import pybamm
import numpy as np
import os
import matplotlib.pyplot as plt

# Run from the repository root so relative paths resolve.
os.chdir(pybamm.__path__[0]+'/..')

# load model
model = pybamm.lithium_ion.SPMe()

# set up and solve simulation
simulation = pybamm.Simulation(model)

dt = 90
t_eval = np.arange(0, 3600, dt)  # time in seconds
solution = simulation.solve(t_eval)

quick_plot = pybamm.QuickPlot(solution)
quick_plot.dynamic_plot();
# -

# Behind the scenes the QuickPlot class has created some processed variables
# which can interpolate the model variables for our solution and has also
# stored the results for the solution steps

solution.data.keys()

solution.data['Negative particle surface concentration [mol.m-3]'].shape

solution.t.shape

# Notice that the dictionary keys are in the same order as the subplots in the
# QuickPlot figure. We can add new processed variables to the solution by
# simply using it like a dictionary. First let's find a few more variables to
# look at. As you will see there are quite a few:

# + tags=["outputPrepend"]
keys = list(model.variables.keys())
keys.sort()
print(keys)
# -

# If you want to find a particular variable you can search the variables
# dictionary

model.variables.search("time")

# We'll use the time in hours

solution['Time [h]']

# This created a new processed variable and stored it on the solution object

solution.data.keys()

# We can see the data by simply accessing the entries attribute of the
# processed variable

solution['Time [h]'].entries

# We can also call the method with specified time(s) in SI units of seconds

time_in_seconds = np.array([0, 600, 900, 1700, 3000 ])
solution['Time [h]'](time_in_seconds)

# If the variable has not already been processed it will be created behind the
# scenes

var = 'X-averaged negative electrode temperature [K]'
solution[var](time_in_seconds)

# In this example the simulation was isothermal, so the temperature remains
# unchanged.

# ## Saving the solution
#
# The solution can be saved in a number of ways:

# to a pickle file (default)
solution.save_data(
    "outputs.pickle", ["Time [h]", "Current [A]", "Terminal voltage [V]", "Electrolyte concentration [mol.m-3]"]
)

# to a matlab file
# need to give variable names without space
solution.save_data(
    "outputs.mat",
    ["Time [h]", "Current [A]", "Terminal voltage [V]", "Electrolyte concentration [mol.m-3]"],
    to_format="matlab",
    short_names={
        "Time [h]": "t", "Current [A]": "I", "Terminal voltage [V]": "V", "Electrolyte concentration [mol.m-3]": "c_e",
    }
)

# to a csv file (time-dependent outputs only, no spatial dependence allowed)
solution.save_data(
    "outputs.csv", ["Time [h]", "Current [A]", "Terminal voltage [V]"], to_format="csv"
)

# ## Stepping the solver
#
# The previous solution was created in one go with the solve method, but it is
# also possible to step the solution and look at the results as we go. In
# doing so, the results are automatically updated at each step.

dt = 360
time = 0
end_time = solution["Time [s]"].entries[-1]
step_simulation = pybamm.Simulation(model)
# Advance the simulation one dt-sized step at a time until the end time of
# the continuous solve is reached.
while time < end_time:
    step_solution = step_simulation.step(dt)
    print('Time', time)
    print(step_solution["Terminal voltage [V]"].entries)
    time += dt

# We can plot the voltages and see that the solutions are the same

voltage = solution["Terminal voltage [V]"].entries
step_voltage = step_solution["Terminal voltage [V]"].entries
plt.figure()
plt.plot(solution["Time [h]"].entries, voltage, "b-", label="SPMe (continuous solve)")
plt.plot(
    step_solution["Time [h]"].entries, step_voltage, "ro", label="SPMe (stepped solve)"
)
plt.legend()

# ## References
#
# The relevant papers for this notebook are:

pybamm.print_citations()
examples/notebooks/solution-data-and-processed-variables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Barplots # + # MIT License # # Copyright (c) 2021 Playtika Ltd. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# -

# Demo/test notebook for abexp's AnalysisPlot: exercises barplot with 1-6
# categories and 2-4 bar groups, then forest_plot and timeseries_plot.
import numpy as np
import sys
from abexp.visualization.analysis_plots import AnalysisPlot

# + 
# 1 category, 2 groups, with custom labels, group labels and tick rotation
labels = ['First']
# Choose the height of the blue bars
bars1 = [200]
# Choose the height of the cyan bars
bars2 = [250]
bars3 = [250]
bars4 = [200]
# Choose the height of the error bars (bars1); format is [lower, upper]
yer1 = [[195], [220]]
# Choose the height of the error bars (bars2)
yer2 = [[200], [260]]
yer3 = [[200], [260]]
yer4 = [[195], [220]]
bars = [bars1, bars2]
yerr = [yer1, yer2]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot', ylabel='kpi', xlabel=labels,
                           groupslabel=['control', 'treatment'], rotation=10.1)

# +
# 1 category, 3 groups
labels = ['G1']
bars1 = [200]
bars2 = [250]
bars3 = [250]
bars4 = [200]
yer1 = [[195], [220]]
yer2 = [[200], [260]]
yer3 = [[200], [260]]
yer4 = [[195], [220]]
bars = [bars1, bars2, bars3]
yerr = [yer1, yer2, yer3]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 1 category, 4 groups, custom legend placement
labels = ['G1']
bars1 = [200]
bars2 = [250]
bars3 = [250]
bars4 = [200]
yer1 = [[195], [220]]
yer2 = [[200], [260]]
yer3 = [[200], [260]]
yer4 = [[195], [220]]
bars = [bars1, bars2, bars3, bars4]
yerr = [yer1, yer2, yer3, yer4]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot', legendloc='lower right')

# +
# 2 categories, 2 groups
labels = ['G1', 'G2']
bars1 = [200, 340]
bars2 = [250, 320]
yer1 = [[200-5, 340-5], [200+20, 340+20]]
yer2 = [[250-50, 320-50], [250+10, 320+10]]
bars = [bars1, bars2]
yerr = [yer1, yer2]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 2 categories, 3 groups
labels = ['G1', 'G2']
bars1 = [200, 340]
bars2 = [250, 320]
bars3 = [250, 320]
bars4 = [200, 340]
yer1 = [[200-5, 340-5], [200+20, 340+20]]
yer2 = [[250-50, 320-50], [250+10, 320+10]]
yer3 = [[250-50, 320-50], [250+10, 320+10]]
yer4 = [[200-5, 340-5], [200+20, 340+20]]
bars = [bars1, bars2, bars3]
yerr = [yer1, yer2, yer3]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 2 categories, 4 groups (note the asymmetric lower bound in yer4)
labels = ['G1', 'G2']
bars1 = [200, 340]
bars2 = [250, 320]
bars3 = [250, 320]
bars4 = [200, 340]
yer1 = [[200-5, 340-5], [200+20, 340+20]]
yer2 = [[250-50, 320-50], [250+10, 320+10]]
yer3 = [[250-50, 320-50], [250+10, 320+10]]
yer4 = [[200-5, 340-80], [200+20, 340+20]]
bars = [bars1, bars2, bars3, bars4]
yerr = [yer1, yer2, yer3, yer4]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 3 categories, 2 groups
labels = ['G1', 'G2', 'G3']
bars1 = [200, 340, 300]
bars2 = [250, 320, 340]
bars3 = [250, 320, 340]
bars4 = [200, 340, 300]
yer1 = [[200-5, 340-5, 300-5], [200+20, 340+20, 300+20]]
yer2 = [[250-50, 320-50, 340-50], [250+10, 320+10, 340+10]]
yer3 = [[250-50, 320-50, 340-50], [250+10, 320+10, 340+10]]
yer4 = [[200-5, 340-5, 300-5], [200+20, 340+20, 300+20]]
bars = [bars1, bars2]
yerr = [yer1, yer2]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 3 categories, 3 groups
labels = ['G1', 'G2', 'G3']
bars1 = [200, 340, 300]
bars2 = [250, 320, 340]
bars3 = [250, 320, 340]
bars4 = [200, 340, 300]
yer1 = [[200-5, 340-5, 300-5], [200+20, 340+20, 300+20]]
yer2 = [[250-50, 320-50, 340-50], [250+10, 320+10, 340+10]]
yer3 = [[250-50, 320-50, 340-50], [250+10, 320+10, 340+10]]
yer4 = [[200-5, 340-5, 300-5], [200+20, 340+20, 300+20]]
bars = [bars1, bars2, bars3]
yerr = [yer1, yer2, yer3]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 3 categories, 4 groups
labels = ['G1', 'G2', 'G3']
bars1 = [200, 340, 300]
bars2 = [250, 320, 340]
bars3 = [250, 320, 340]
bars4 = [200, 340, 300]
yer1 = [[200-5, 340-5, 300-5], [200+20, 340+20, 300+20]]
yer2 = [[250-50, 320-50, 340-50], [250+10, 320+10, 340+10]]
yer3 = [[250-50, 320-50, 340-50], [250+10, 320+10, 340+10]]
yer4 = [[200-5, 340-5, 300-5], [200+20, 340+20, 300+20]]
bars = [bars1, bars2, bars3, bars4]
yerr = [yer1, yer2, yer3, yer4]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 4 categories, 2 groups
labels = ['G1', 'G2', 'G3', 'G4']
bars1 = [200, 340, 300, 360]
bars2 = [250, 320, 340, 270]
bars3 = [250, 320, 340, 270]
bars4 = [200, 340, 300, 360]
yer1 = [[200-5, 340-5, 300-5, 360-8], [200+20, 340+20, 300+20, 360+70]]
yer2 = [[250-50, 320-50, 340-50, 270-34], [250+10, 320+10, 340+10, 270+30]]
yer3 = [[250-50, 320-50, 340-50, 270-34], [250+10, 320+10, 340+10, 270+30]]
yer4 = [[200-5, 340-5, 300-5, 360-8], [200+20, 340+20, 300+20, 360+70]]
bars = [bars1, bars2]
yerr = [yer1, yer2]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 4 categories, 3 groups
labels = ['G1', 'G2', 'G3', 'G4']
bars1 = [200, 340, 300, 360]
bars2 = [250, 320, 340, 270]
bars3 = [250, 320, 340, 270]
bars4 = [200, 340, 300, 360]
yer1 = [[200-5, 340-5, 300-5, 360-8], [200+20, 340+20, 300+20, 360+70]]
yer2 = [[250-50, 320-50, 340-50, 270-34], [250+10, 320+10, 340+10, 270+30]]
yer3 = [[250-50, 320-50, 340-50, 270-34], [250+10, 320+10, 340+10, 270+30]]
yer4 = [[200-5, 340-5, 300-5, 360-8], [200+20, 340+20, 300+20, 360+70]]
bars = [bars1, bars2, bars3]
yerr = [yer1, yer2, yer3]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 4 categories, 4 groups
labels = ['G1', 'G2', 'G3', 'G4']
bars1 = [200, 340, 300, 360]
bars2 = [250, 320, 340, 270]
bars3 = [250, 320, 340, 270]
bars4 = [200, 340, 300, 360]
yer1 = [[200-5, 340-5, 300-5, 360-8], [200+20, 340+20, 300+20, 360+70]]
yer2 = [[250-50, 320-50, 340-50, 270-34], [250+10, 320+10, 340+10, 270+30]]
yer3 = [[250-50, 320-50, 340-50, 270-34], [250+10, 320+10, 340+10, 270+30]]
yer4 = [[200-5, 340-5, 300-5, 360-8], [200+20, 340+20, 300+20, 360+70]]
bars = [bars1, bars2, bars3, bars4]
yerr = [yer1, yer2, yer3, yer4]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 5 categories, 2 groups
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
# Choose the height of the bars
bars1 = [200, 340, 300, 360, 340]
bars2 = [250, 320, 340, 270, 400]
bars3 = [250, 320, 340, 270, 600]
bars4 = [200, 340, 300, 360, 500]
# Choose the height of the error bars
yer1 = [[200-5, 340-5, 300-5, 360-8, 340-12], [200+20, 340+20, 300+20, 360+70, 340+100]]
yer2 = [[250-50, 320-50, 340-50, 270-34, 400-90], [250+10, 320+10, 340+10, 270+30, 400+60]]
yer3 = [[250-50, 320-50, 340-50, 270-34, 600-22], [250+10, 320+10, 340+10, 270+30, 600+12]]
yer4 = [[200-5, 340-5, 300-5, 360-8, 500-30], [200+20, 340+20, 300+20, 360+70, 500+67]]
bars = [bars1, bars2]
yerr = [yer1, yer2]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 5 categories, 3 groups
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
bars1 = [200, 340, 300, 360, 340]
bars2 = [250, 320, 340, 270, 400]
bars3 = [250, 320, 340, 270, 600]
bars4 = [200, 340, 300, 360, 500]
yer1 = [[200-5, 340-5, 300-5, 360-8, 340-12], [200+20, 340+20, 300+20, 360+70, 340+100]]
yer2 = [[250-50, 320-50, 340-50, 270-34, 400-90], [250+10, 320+10, 340+10, 270+30, 400+60]]
yer3 = [[250-50, 320-50, 340-50, 270-34, 600-22], [250+10, 320+10, 340+10, 270+30, 600+12]]
yer4 = [[200-5, 340-5, 300-5, 360-8, 500-30], [200+20, 340+20, 300+20, 360+70, 500+67]]
bars = [bars1, bars2, bars3]
yerr = [yer1, yer2, yer3]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot')

# +
# 5 categories, 4 groups, category labels on the x axis
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
bars1 = [200, 340, 300, 360, 340]
bars2 = [250, 320, 340, 270, 400]
bars3 = [250, 320, 340, 270, 600]
bars4 = [200, 340, 300, 360, 500]
yer1 = [[200-5, 340-5, 300-5, 360-8, 340-12], [200+20, 340+20, 300+20, 360+70, 340+100]]
yer2 = [[250-50, 320-50, 340-50, 270-34, 400-90], [250+10, 320+10, 340+10, 270+30, 400+60]]
yer3 = [[250-50, 320-50, 340-50, 270-34, 600-22], [250+10, 320+10, 340+10, 270+30, 600+12]]
yer4 = [[200-5, 340-5, 300-5, 360-8, 500-30], [200+20, 340+20, 300+20, 360+70, 500+67]]
bars = [bars1, bars2, bars3, bars4]
yerr = [yer1, yer2, yer3, yer4]
fig = AnalysisPlot.barplot(bars, yerr, title='Barplot', xlabel=labels)

# +
# 6 categories, 2 groups, rotated tick labels
labels = ['First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth']
bars1 = [200, 340, 300, 360, 340, 800]
bars2 = [250, 320, 340, 270, 400, 100]
bars3 = [250, 320, 340, 270, 400, 100]
bars4 = [250, 320, 340, 270, 600, 700]
yer1 = [[200-5, 340-5, 300-5, 360-8, 340-12, 800-50], [200+20, 340+20, 300+20, 360+70, 340+100, 800+50]]
yer2 = [[250-50, 320-50, 340-50, 270-34, 400-90, 100-3], [250+10, 320+10, 340+10, 270+30, 400+60, 100+20]]
yer3 = [[250-50, 320-50, 340-50, 270-34, 400-90, 100-3], [250+10, 320+10, 340+10, 270+30, 400+60, 100+20]]
yer4 = [[250-5, 320-5, 340-5, 270-8, 600-12, 700-50], [250+20, 320+20, 340+20, 270+70, 600+100, 700+50]]
bars = [bars1, bars2]
yerr = [yer1, yer2]
fig = AnalysisPlot.barplot(bars, yerr, xlabel=labels, rotation=45)

# +
# 6 categories, 3 groups
labels = ['First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth']
bars1 = [200, 340, 300, 360, 340, 800]
bars2 = [250, 320, 340, 270, 400, 100]
bars3 = [250, 320, 340, 270, 400, 100]
bars4 = [250, 320, 340, 270, 600, 700]
yer1 = [[200-5, 340-5, 300-5, 360-8, 340-12, 800-50], [200+20, 340+20, 300+20, 360+70, 340+100, 800+50]]
yer2 = [[250-50, 320-50, 340-50, 270-34, 400-90, 100-3], [250+10, 320+10, 340+10, 270+30, 400+60, 100+20]]
yer3 = [[250-50, 320-50, 340-50, 270-34, 400-90, 100-3], [250+10, 320+10, 340+10, 270+30, 400+60, 100+20]]
yer4 = [[250-5, 320-5, 340-5, 270-8, 600-12, 700-50], [250+20, 320+20, 340+20, 270+70, 600+100, 700+50]]
bars = [bars1, bars2, bars3]
yerr = [yer1, yer2, yer3]
fig = AnalysisPlot.barplot(bars, yerr, xlabel=labels, rotation=45)

# +
# 6 categories, 4 groups
labels = ['First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth']
bars1 = [200, 340, 300, 360, 340, 800]
bars2 = [250, 320, 340, 270, 400, 100]
bars3 = [250, 320, 340, 270, 400, 100]
bars4 = [250, 320, 340, 270, 600, 700]
yer1 = [[200-5, 340-5, 300-5, 360-8, 340-12, 800-50], [200+20, 340+20, 300+20, 360+70, 340+100, 800+50]]
yer2 = [[250-50, 320-50, 340-50, 270-34, 400-90, 100-3], [250+10, 320+10, 340+10, 270+30, 400+60, 100+20]]
yer3 = [[250-50, 320-50, 340-50, 270-34, 400-90, 100-3], [250+10, 320+10, 340+10, 270+30, 400+60, 100+20]]
yer4 = [[250-5, 320-5, 340-5, 270-8, 600-12, 700-50], [250+20, 320+20, 340+20, 270+70, 600+100, 700+50]]
bars = [bars1, bars2, bars3, bars4]
yerr = [yer1, yer2, yer3, yer4]
fig = AnalysisPlot.barplot(bars, yerr, xlabel=labels, rotation=45)
# -

# # Forest plot

# +
# Single point with a dot marker.
# NOTE(review): p_val here has 4 entries while y has 1 — looks like a copy-paste
# leftover; confirm forest_plot tolerates extra annotations.
y = np.random.choice(np.arange(1, 7, 0.1), 1)
err = np.random.choice(np.arange(0.5, 3, 0.1), 1)
p_val = np.around(np.random.choice(np.arange(0.5, 1, 0.1), 4), 1)
fig = AnalysisPlot.forest_plot(y, err, annotation=p_val, ylabel='diff arpu', marker='.')

# +
# Two points with named x labels
y = np.random.choice(np.arange(1, 7, 0.1), 2)
err = np.random.choice(np.arange(0.5, 3, 0.1), 2)
p_val = np.around(np.random.choice(np.arange(0.5, 1, 0.1), 2), 4)
fig = AnalysisPlot.forest_plot(y, err, annotation=p_val, annotationlabel='p-value',
                               title='Difference in churn rate', xlabel=['NPU', 'Platinum'])

# +
# Three points with an underscore marker
y = np.random.choice(np.arange(1, 7, 0.1), 3)
err = np.random.choice(np.arange(0.5, 3, 0.1), 3)
p_val = np.around(np.random.choice(np.arange(0.5, 1, 0.1), 3), 4)
fig = AnalysisPlot.forest_plot(y, err, annotation=p_val, ylabel='diff arpu',
                               annotationlabel='prob(A>B)', title='A/B test', marker='_')

# +
# Four points, default styling
y = np.random.choice(np.arange(1, 7, 0.1), 4)
err = np.random.choice(np.arange(0.5, 3, 0.1), 4)
p_val = np.around(np.random.choice(np.arange(0.5, 1, 0.1), 4), 4)
fig = AnalysisPlot.forest_plot(y, err, annotation=p_val, ylabel='diff arpu',
                               annotationlabel='p-value')

# +
# Five points with a custom figure size
y = np.random.choice(np.arange(1, 7, 0.1), 5)
err = np.random.choice(np.arange(0.5, 3, 0.1), 5)
p_val = np.around(np.random.choice(np.arange(0.5, 1, 0.1), 5), 4)
fig = AnalysisPlot.forest_plot(y, err, annotation=p_val, ylabel='diff arpu',
                               annotationlabel='p-value', figsize=(15,8))

# +
# Six points with named/rotated x labels
y = np.random.choice(np.arange(1, 7, 0.1), 6)
err = np.random.choice(np.arange(0.5, 3, 0.1), 6)
p_val = np.around(np.random.choice(np.arange(0.5, 1, 0.1), 6), 4)
fig = AnalysisPlot.forest_plot(y, err, annotation=p_val, ylabel='diff arpu',
                               annotationlabel='prob(A>B)', figsize=(18,12),
                               xlabel=['NPU','PU', 'gold', 'platinum', 'new users', 'italians'],
                               rotation=45)

# +
# Timeseries: three random series of 15 points
y1 = np.random.choice(np.arange(1, 7, 0.1), 15)
yerr1 = np.random.choice(np.arange(0.5, 1, 0.1), 15)
y2 = np.random.choice(np.arange(1, 7, 0.1), 15) + 10
yerr2 = np.random.choice(np.arange(0.5, 1, 0.1), 15)
y3 = np.random.choice(np.arange(1, 7, 0.1), 15) + 5
yerr3 = np.random.choice(np.arange(0.5, 1, 0.1), 15)
y = [y1, y2, y3]
yerr = [yerr1, yerr2, yerr3]
fig = AnalysisPlot.timeseries_plot(y, yerr)

# +
# Timeseries: two series of 7 points with date x labels and group labels
y1 = np.random.choice(np.arange(1, 7, 0.1), 7)
yerr1 = np.random.choice(np.arange(0.5, 1, 0.1), 7)
y2 = np.random.choice(np.arange(1, 7, 0.1), 7) + 2
yerr2 = np.random.choice(np.arange(0.5, 1, 0.1), 7)
y = [y1, y2]
yerr = [yerr1, yerr2]
fig = AnalysisPlot.timeseries_plot(y, yerr,
                                   groupslabel=['control group', 'treatment group'],
                                   xlabel=['2020-01-01', '2020-01-02', '2020-01-03',
                                           '2020-01-04', '2020-01-05', '2020-01-06',
                                           '2020-01-07'],
                                   rotation=0, ylabel='dau')

# +
# Timeseries: two series of 30 points
y1 = np.random.choice(np.arange(1, 7, 0.1), 30)
yerr1 = np.random.choice(np.arange(0.5, 1, 0.1), 30)
y2 = np.random.choice(np.arange(1, 7, 0.1), 30) + 10
yerr2 = np.random.choice(np.arange(0.5, 1, 0.1), 30)
y = [y1, y2]
yerr = [yerr1, yerr2]
fig = AnalysisPlot.timeseries_plot(y, yerr)
tests/test_analysis_plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
from sirf.Gadgetron import AcquisitionData, CoilSensitivityData, AcquisitionModel, ImageData
from sirf.Utilities import assert_validity
import numpy as np
import sys
sys.path.append("/home/jovyan/P1-Temp-Reg/modelbased-approach/")
import auxiliary_functions as aux
# -


def get_test_img():
    # Build a test image by reading raw k-space data from disk, computing coil
    # sensitivities from it, and applying the inverse of the acquisition model.
    filepath_y = "/home/jovyan/InputData/y_4.h5"
    y = AcquisitionData(filepath_y)
    u = ImageData()
    u.from_acquisition_data(y)
    csm = CoilSensitivityData()
    csm.calculate(y)
    A = AcquisitionModel(acqs=y, imgs=u)
    A.set_coil_sensitivity_maps(csm)
    u_test = A.inverse(y)
    return u_test


class Gradient1D_Local:
    # Spatially-weighted 1-D finite-difference operator on SIRF ImageData.
    # forward(u) = weights * (roll(u, -1) - u) along `axis` (forward difference
    # with periodic wrap via np.roll); backward(u) is its adjoint,
    # roll(weights*u, +1) - weights*u.

    def __init__(self, axis, weights):
        # axis: array axis (of u.as_array()) along which differences are taken.
        # weights: ImageData of per-voxel weights applied after differencing.
        assert_validity(weights, ImageData)
        self.weights = weights
        self.axis = axis

    def forward(self, u):
        # Weighted forward difference of u; returns a new ImageData.
        assert_validity(u, ImageData)
        return self.weights * u.copy().fill(
            np.roll(u.as_array(), -1, axis=self.axis) - u.as_array())

    def backward(self, u):
        # Adjoint of forward: difference of the weighted image, shifted the
        # other way. Verified below via the adjointness asserts.
        assert_validity(u, ImageData)
        res = np.roll((self.weights * u).as_array(), 1, axis=self.axis) \
            - (self.weights * u).as_array()
        u = u.copy().fill(res)
        return u


# +
import matplotlib.pyplot as plt
import numpy as np


def test_local_Gradient1D():
    # Visual check: compare a ramp-weighted x-gradient against a unit-weighted
    # one on the same test image. Returns True so the call below reads as a pass.
    u = get_test_img()
    alpha = u.copy()
    beta = u.copy()
    unit_weights = u.copy()
    arr_shape = alpha.as_array().shape
    # meshgrid centered on the volume; assumes a 3-D (t, x, y) array — note the
    # (X, T, Y) output order follows meshgrid's default axis convention.
    X, T, Y = np.meshgrid(np.arange(-arr_shape[1]//2, arr_shape[1]//2),
                          np.arange(-arr_shape[0]//2, arr_shape[0]//2),
                          np.arange(-arr_shape[2]//2, arr_shape[2]//2))
    unit_weights.fill(1.0)
    # ramp from 0 to 2 along x
    alpha.fill(1 + X/np.max(X))
    Gx = Gradient1D_Local(weights=alpha, axis=1)
    Gx_unit = Gradient1D_Local(weights=unit_weights, axis=1)
    dxu = Gx.forward(u)
    unit_dxu = Gx_unit.forward(u)
    f, ax = plt.subplots(1, 4)
    ax[0].imshow(np.abs(u.as_array()[0, :, :]), cmap='gray', vmin=0, vmax=50)
    ax[0].axis("off")
    brightness_increase = 1
    ax[1].imshow(np.abs(dxu.as_array()[0, :, :]), cmap='gray', vmin=0, vmax=20)
    ax[1].axis("off")
    ax[1].set_title("Ramp along X")
    ax[2].imshow(np.abs(unit_dxu.as_array()[0, :, :]), cmap='gray', vmin=0, vmax=20)
    ax[2].set_title("Unit weight")
    ax[2].axis("off")
    ax[3].imshow(np.abs(dxu.as_array()[0, :, :]) - np.abs(unit_dxu.as_array()[0, :, :]),
                 cmap='gray')
    ax[3].set_title("Diff")
    ax[3].axis("off")
    plt.show()
    return True


test_local_Gradient1D()

# +
# Random weights for the adjointness tests below.
u = aux.get_test_img()
weights = u.fill(np.random.randn(*u.as_array().shape))
Gt = Gradient1D_Local(weights=weights, axis=0)
Gx = Gradient1D_Local(weights=weights, axis=1)
Gy = Gradient1D_Local(weights=weights, axis=2)
# -

# Check <forward(u), v> == <u, backward(v)> for each axis.
assert aux.test_image_operator_adjointness(Gt), "Gt is not adjoint"
assert aux.test_image_operator_adjointness(Gx), "Gx is not adjoint"
assert aux.test_image_operator_adjointness(Gy), "Gy is not adjoint"

# +
# Same checks for the operators provided by auxiliary_functions.
Dtlocal = aux.Dt_Local(weights)
assert aux.test_image_operator_adjointness(Dtlocal), "Dt local is not adjoint"

Dxlocal = aux.Dx_Local(weights=weights)
assert aux.test_stacked_image_operator_adjointness(Dxlocal), "Dx Local is not adjoint"
# -
modelbased-approach/local_gradient.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbgrader={} # # Interact Exercise 6 # + [markdown] nbgrader={} # ## Imports # + [markdown] nbgrader={} # Put the standard imports for Matplotlib, Numpy and the IPython widgets in the following cell. # + nbgrader={} # %matplotlib inline import matplotlib.pyplot as plt import numpy as np # + nbgrader={"solution": false} from IPython.display import Image from IPython.html.widgets import interact, interactive, fixed # + [markdown] nbgrader={} # ## Exploring the Fermi distribution # + [markdown] nbgrader={} # In quantum statistics, the [Fermi-Dirac](http://en.wikipedia.org/wiki/Fermi%E2%80%93Dirac_statistics) distribution is related to the probability that a particle will be in a quantum state with energy $\epsilon$. The equation for the distribution $F(\epsilon)$ is: # + nbgrader={} Image('fermidist.png') # + [markdown] nbgrader={} # In this equation: # # * $\epsilon$ is the single particle energy. # * $\mu$ is the chemical potential, which is related to the total number of particles. # * $k$ is the Boltzmann constant. # * $T$ is the temperature in Kelvin. # # In the cell below, typeset this equation using LaTeX: # + [markdown] deletable=false nbgrader={"checksum": "84e504c96c29f1c60dbfb4dec13d68a5", "grade": true, "grade_id": "interactex06a", "points": 2, "solution": true} # \begin{equation*} # F(\epsilon) = \frac{1}{e^{(\epsilon-\mu)/kT}+1} # \end{equation*} # + [markdown] nbgrader={} # Define a function `fermidist(energy, mu, kT)` that computes the distribution function for a given value of `energy`, chemical potential `mu` and temperature `kT`. Note here, `kT` is a single variable with units of energy. Make sure your function works with an array and don't use any `for` or `while` loops in your code. 
# + nbgrader={"checksum": "bf335dcbe5278484f0ab3de031cb74a5", "solution": true}
def fermidist(energy, mu, kT):
    """Compute the Fermi-Dirac distribution F(eps) = 1 / (exp((eps - mu)/kT) + 1).

    Parameters
    ----------
    energy : float or ndarray
        Single-particle energy (or energies) epsilon.
    mu : float
        Chemical potential.
    kT : float
        Temperature in units of energy (Boltzmann constant times temperature).

    Returns
    -------
    float or ndarray
        F(energy); an array comes back whenever `energy` is an array, via
        numpy broadcasting.
    """
    # Fix: the original ended with `if type(energy) or type(mu) or typle(kT) == np.array`,
    # which contained a `typle` typo and was always truthy anyway. np.exp handles
    # scalars and arrays uniformly, so no type dispatch is needed.
    return 1.0 / (np.exp((energy - mu) / kT) + 1.0)


# + deletable=false nbgrader={"checksum": "fe62c9137b7ea0acba5b933bcd7c3226", "grade": true, "grade_id": "interactex06b", "points": 2}
assert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)
assert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),
    np.array([ 0.52497919,  0.5222076 ,  0.51943465,  0.5166605 ,  0.51388532,
        0.51110928,  0.50833256,  0.50555533,  0.50277775,  0.5       ]))

# + [markdown] nbgrader={}
# Write a function `plot_fermidist(mu, kT)` that plots the Fermi distribution $F(\epsilon)$ as a function of $\epsilon$ as a line plot for the parameters `mu` and `kT`.
#
# * Use energies over the range $[0,10.0]$ and a suitable number of points.
# * Choose an appropriate x and y limit for your visualization.
# * Label your x and y axis and the overall visualization.
# * Customize your plot in 3 other ways to make it effective and beautiful.

# + nbgrader={"checksum": "6613c80574ecbd6eac3fb18ec6e29798", "solution": true}
def plot_fermidist(mu, kT):
    """Line-plot F(epsilon) for epsilon in [0, 10] at the given mu and kT."""
    # Fix: the original referenced an undefined `energy` variable and passed the
    # `fermidist` function object itself to plt.plot (everything else was
    # commented out). Build the energy grid and evaluate explicitly.
    energy = np.linspace(0.0, 10.0, 100)
    plt.plot(energy, fermidist(energy, mu, kT), color='navy', linewidth=2)
    plt.xlim(0.0, 10.0)
    plt.ylim(0.0, 1.05)  # F is a probability-like quantity in [0, 1]
    plt.xlabel('Energy $\\epsilon$')
    plt.ylabel('$F(\\epsilon)$')
    plt.title('Fermi distribution vs. energy')
    plt.grid(True)
    # de-clutter: hide the top/right spines, keep ticks on the remaining axes
    axis = plt.gca()
    axis.spines['top'].set_visible(False)
    axis.spines['right'].set_visible(False)
    axis.get_xaxis().tick_bottom()
    axis.get_yaxis().tick_left()


# + nbgrader={}
plot_fermidist(4.0, 1.0)

# + deletable=false nbgrader={"checksum": "431fd4e3772f241938f69a76371092f9", "grade": true, "grade_id": "interactex06c", "points": 4}
assert True # leave this for grading the plot_fermidist function

# + [markdown] nbgrader={}
# Use `interact` with `plot_fermidist` to explore the distribution:
#
# * For `mu` use a floating point slider over the range $[0.0,5.0]$.
# * for `kT` use a floating point slider over the range $[0.1,10.0]$.

# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# Both sliders belong in a single interact call (kept commented out, as in the
# original, so the notebook can run headless):
# interact(plot_fermidist, mu=(0.0, 5.0), kT=(0.1, 10.0));

# + [markdown] nbgrader={}
# Provide complete sentence answers to the following questions in the cell below:
#
# * What happens when the temperature $kT$ is low?
# * What happens when the temperature $kT$ is high?
# * What is the effect of changing the chemical potential $\mu$?
# * The number of particles in the system are related to the area under this curve. How does the chemical potential affect the number of particles?
#
# Use LaTeX to typeset any mathematical symbols in your answer.

# + [markdown] deletable=false nbgrader={"checksum": "e2d2eda45d934db7a4dc1cef97eebbcc", "grade": true, "grade_id": "interactex06d", "points": 2, "solution": true}
# YOUR ANSWER HERE
assignments/midterm/InteractEx06.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Climate classification with neural networks
#
# The [Köppen Climate classification](https://en.wikipedia.org/wiki/Köppen_climate_classification) is a widely used climate classification system. It classifies locations around the world as climates like "Tropical rainforest" or "Warm summer continental".
#
# ![By <NAME>., <NAME>., and <NAME>.(University of Melbourne)Enhanced, modified, and vectorized by Ali Zifan. - Hydrology and Earth System Sciences: "Updated world map of the Köppen-Geiger climate classification" (Supplement)map in PDF (Institute for Veterinary Public Health)Legend explanation, CC BY-SA 4.0, https://commons.wikimedia.org/w/index.php?curid=47086879](images/2018-06-11-koppen.png)
#
# (Image from Wikipedia and [1])
#
# One funny thing about the Köppen classification is that it puts Portland, Oregon in the same group as San Francisco and in a very similar group to Los Angeles. I might be wrong, but Southern California (comfortably walking outside in a t-shirt at 2am) seemed like a pretty different climate than Seattle (not seeing the sun for n months)!
#
# I wanted to try classifying climates of locations. In this post, I'll try to classify the climate of continental US weather stations based on the weather they recorded with the help of a neural net. I'll train a neural net on some task that also helps it learn vector representations for each station. Then I'll cluster the vector representations to cluster similar stations into climate classifications.

# ## Data
#
# I used data from the [Global Historical Climatology Network](http://doi.org/10.7289/V5D21VHZ) [2, 3]. I used the subset of weather stations from the U.S. Historical Climatology Network.
#
# This gives 2.4G of data containing decades (sometimes over a century!)
# of daily weather reports from 1,218 stations around the US. It usually gives high temperatures, low temperatures, and precipitation, but also gives information about snow, wind, fog, evaporation rates, volcanic dust, tornados, etc.
#
# ### Getting the data
#
# Wheee, let's download a few GBs of historical weather data.
# The [readme](ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt) is super useful.
# The `tar` file for the US stations is located at `ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd_hcn.tar.gz` (heads up, it uses an underscore, not a hyphen like the readme says!)
# The other useful file is `ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-stations.txt`, which contains coordinates and station names.

# ### Weather stations
#
# If I go through the `.dly` files in `ghcnd_hcn/` folder, and look up the station names in `ghcnd-stations.txt`, I can plot the stations I have daily weather records for using [Basemap](https://github.com/matplotlib/basemap). (See "Basemap" below for code and details.)
#
# ![](images/2018-06-11-stations.png)

# +
from collections import namedtuple
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.cluster import KMeans

# helper functions you can skip over :D

# Global switch: when True, maybe_save_plot() writes the current figure to disk.
SAVE = True

def maybe_save_plot(filename):
    # Save the current matplotlib figure under images/<filename> when SAVE is on.
    if SAVE:
        plt.tight_layout()
        plt.savefig('images/' + filename, bbox_inches="tight")

def hide_ticks(plot):
    # Hide both axes' ticks/labels on the given matplotlib Axes.
    plot.axes.get_xaxis().set_visible(False)
    plot.axes.get_yaxis().set_visible(False)
# -

# ### Reducing the data
#
# [The script I used for processing data is located here](https://github.com/jessstringham/notebooks/blob/master/scripts/process_weather_data.py).
#
# There are two issues with the dataset: it's a little large to easily load and manipulate in memory and some records are missing or are labeled as low-quality.
# I'd love to figure out how to use larger datasets or handle the low-quality data, but I'll save that for another post.
# Instead, I'll create a small subset I can comfortably load into memory.
# For each station, I'll sample up to 2000 daily weather reports, including the maximum temperature, minimum temperature, and precipitation.
#
# I limit the data in a few ways. I dropped data before 1990 to make the datasets more manageable.
# I also require all three weather values are not missing and don't have a quality issue.
# These may introduce bias. If I was doing real science instead of trying to make a pretty graph, I'd justify these decisions better!

# ### Loading the dataset
#
# [See `scripts/process_weather_data.py`](https://github.com/jessstringham/notebooks/blob/master/scripts/process_weather_data.py) for details, but if you ran the command in this folder using `data/weather/data` as the output file, such as:
#
#     python scripts/process_weather_data.py [filtered .dly files, see process_weather_data] data/weather/data
#
# then I think this code should work.

# +
# Fix: os.path.join is used here (and again below for the log paths) but `os`
# was never imported in this notebook's import cell.
import os

DATA_PATH = 'data/weather'

matrix_file = os.path.join(DATA_PATH, 'data/gsn-2000-TMAX-TMIN-PRCP.npz')

# column labels of the preprocessed matrix
STATION_ID_COL = 0
MONTH_COL = 1
DAY_COL = 2
VAL_COLS_START = 3
TMAX_COL = 3
TMIN_COL = 4
PRCP_COL = 5

with np.load(matrix_file) as npz_data:
    weather_data = npz_data['data'].astype(np.int32)

print(weather_data.shape)

# I decided to switch over to using the day of the year instead of two
# separate month/day columns.
# eh, this isn't perfect (it assumes all months have 31 days), but it helps differentiate
# the first of the month vs the last.
weather_data_day_of_year_data = 31 * (weather_data[:, MONTH_COL] - 1) + (weather_data[:, DAY_COL] - 1)
# -

# To give an idea of how this data looks, here's the number of examples per station, the minimum and maximum temperature.
#
# There are a few things I'm overlooking for now: the weather report values are always integers and precipitation is always positive. Precipitation has a larger range and is often 0.
# I'll end up converting the month and day into the approximate day of the year (e.g., 21 = January 21), which means January 1 and December 31 are considered far apart.

# +
NUM_STATIONS = np.max(weather_data[:, STATION_ID_COL]) + 1
print(NUM_STATIONS)

# Quick-look histograms of the loaded matrix.
fig, axs = plt.subplots(2, 2, figsize=(9, 9))
axs[0][0].hist(weather_data[:, STATION_ID_COL], bins=NUM_STATIONS)
axs[0][0].set_title('examples per station')
axs[0][1].hist(weather_data[:, MONTH_COL], bins=12)
axs[0][1].set_title('examples per month')
axs[1][0].hist(weather_data[:, TMIN_COL], bins=range(-400, 400, 20), alpha=0.6, label='min')
axs[1][0].hist(weather_data[:, TMAX_COL], bins=range(-400, 400, 20), alpha=0.6, label='max')
axs[1][0].legend()
axs[1][0].set_title('temperature')
axs[1][1].hist(weather_data[:, PRCP_COL], bins=20)
axs[1][1].set_title('precipitation')
maybe_save_plot('2018-06-11-data')
# -

# ## Neural network
#
# The purpose of the neural network is to learn a good vector representation of the weather station. To do this, I'll set up a task that hopefully encourages the network to learn a good vector representation.
#
# The inputs are the *station_id* and *day of the year* (so the month and day, but the year is missing).
#
# The network needs to predict the *precipitation*, *high temperature* and *low temperature* for that day. I compare its prediction to an arbitrary year's actual weather and use how poorly it does to tell [TensorFlow](https://www.tensorflow.org) how to update the network parameters.
#
# To get the vector representation, I pass the station_id through an [embedding](https://www.tensorflow.org/programmers_guide/embedding) layer.
# Then I concatenate the day to make a bigger vector. I'll pass the station+day vector through a little neural network that needs to predict the three weather values. The full network looks something like this:
#
# ![](images/2018-06-11-nn.png)
#
# In the image above, the weather data from U.S. Historical Climatology Network gives me the blue and green boxes.
# I'm using the blue boxes (station_id and day) as input and the green boxes (precipitation, high temperature, low temperature) as output. As I train the model, backpropagation will find better parameters for the orange boxes (the station vector representations and the neural network.)
#
# When I'm satisfied, I'll take the vector representation and use an unsupervised classifier on it.
# I don't particularly care what the neural network learns. I'm just using it to learn the vector representations.
# Once I have the vector representations, I'll use [K-Means clustering](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html) to cluster stations that have similar vectors. These clusters will become my climate classification!

# +
# network parameters
BATCH_SIZE = 50        # examples per training batch
EMBEDDING_SIZE = 20    # dimensionality of each station's learned vector
HIDDEN_UNITS = 40      # width of the single hidden layer

# and classification parameters. How many climates I want.
CLUSTER_NUMBER = 6

# Tensorflow and classification logs
LOG_PATH = os.path.join(DATA_PATH, 'tf_logs')
TENSORFLOW_SUMMARY_FILE = os.path.join(LOG_PATH, 'tf_summary_{}')
TENSORFLOW_CHECKPOINT_FILE = os.path.join(LOG_PATH, 'model.cpkt')
STATION_CLASSIFICATION_FILE = os.path.join(LOG_PATH, 'station_classification_{run_id}_{step_i}.tsv')

# load the stations file that process_weather_data.py saved.
# This tells us what the first column's ids map to.
with open(os.path.join(DATA_PATH, 'data/stations')) as f:
    list_of_stations = [line.strip() for line in f.readlines()]

# hiding this code block in blog post
# -

# ### Defining the network
#
# Below I implement the network in TensorFlow.

# +
tf.reset_default_graph()

# set up the batch and input data
InputBatch = namedtuple('InputBatch', [
    'station_ids',
    'month_day',
])

# let's try out tf.data! set up the dataset and iterator.
with tf.variable_scope('data_loading'):
    station_id_placeholder = tf.placeholder(tf.int32, (None,), name='station_id_placeholder')
    # day of the year
    month_day_placeholder = tf.placeholder(tf.float32, (None,), name='month_day_placeholder')
    target_placeholder = tf.placeholder(tf.float32, (None, 3), name='target_placeholder')

    dataset = tf.data.Dataset.from_tensor_slices((
        InputBatch(
            station_ids=station_id_placeholder,
            month_day=month_day_placeholder,
        ),
        target_placeholder  # and grab all the weather values
    ))\
        .shuffle(buffer_size=10000)\
        .repeat()\
        .batch(BATCH_SIZE)

    iterator = dataset.make_initializable_iterator()
    input_batch, targets = iterator.get_next()

# Feed the station id through the embedding. This embeddings variable
# is the whole point of this network!
embeddings = tf.Variable(
    tf.random_uniform(
        [NUM_STATIONS, EMBEDDING_SIZE], -1.0, 1.0),
    dtype=tf.float32,
    name='station_embeddings'
)
embedded_stations = tf.nn.embedding_lookup(embeddings, input_batch.station_ids)

# Drop in the month/day data
station_and_day = tf.concat([
    embedded_stations,
    tf.expand_dims(input_batch.month_day, 1),
], axis=1)

# Now build a little network that can learn to predict the weather
dense_layer = tf.contrib.layers.fully_connected(station_and_day, num_outputs=HIDDEN_UNITS)

with tf.variable_scope('prediction'):
    prediction = tf.contrib.layers.fully_connected(
        dense_layer,
        num_outputs=3,
        activation_fn=None,  # don't use an activation on prediction
    )

# Set up loss and optimizer
loss_op = tf.losses.mean_squared_error(prediction, targets)
tf.summary.scalar('loss', loss_op)
train_op = tf.train.AdamOptimizer().minimize(loss_op)

# And additional tensorflow fun things
merged_summaries = tf.summary.merge_all()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# -

# ### Clustering
#
# As the model is training, I'll occasionally cluster the stations and save the results.
def save_classification(save_location, trained_embeddings):
    """Cluster the station embeddings and write the assignments to a TSV.

    Runs KMeans with CLUSTER_NUMBER clusters (fixed random_state so
    successive checkpoints are comparable) over the embedding matrix, then
    writes one "station<TAB>cluster_label" line per station.

    Args:
        save_location: path of the TSV file to (over)write.
        trained_embeddings: array of shape (NUM_STATIONS, EMBEDDING_SIZE);
            row i is the learned vector for list_of_stations[i].
    """
    kmeans = KMeans(n_clusters=CLUSTER_NUMBER, random_state=0).fit(trained_embeddings)
    with open(save_location, 'w') as f:
        # BUG FIX: the original iterated over an undefined name `stations`;
        # the station list loaded earlier is called `list_of_stations`.
        for station, label in zip(list_of_stations, kmeans.labels_):
            f.write('{}\t{}\n'.format(station, label))

# Now I can start training! You can monitor the job through [TensorBoard](https://www.tensorflow.org/programmers_guide/summaries_and_tensorboard).
#
# ### Deciding when to stop
#
# If I had a metric for how good the climate classifications were, I could use the metric to decide when the model is done training. For the purpose of this post (again, pretty pictures), I'll just train the model long enough to run through the dataset a few times.
#
# (I have 2.4M weather reports. Since I'm using batch size of 50, it will take around 50K steps to go through the dataset once.)

# +
MAX_STEPS = 1000000
CHECKPOINT_EVERY_N_STEPS = 20000

# a timestamp makes a unique, sortable id for this training run
run_id = int(time.time())

print('starting run {}'.format(run_id))

with tf.Session() as sess:
    sess.run(init)
    # feed the whole dataset once; the Dataset pipeline shuffles,
    # repeats, and batches from here on
    sess.run(iterator.initializer, {
        station_id_placeholder: weather_data[:, STATION_ID_COL],
        month_day_placeholder: weather_data_day_of_year_data,
        target_placeholder: weather_data[:, VAL_COLS_START:],
    })

    writer = tf.summary.FileWriter(TENSORFLOW_SUMMARY_FILE.format(run_id), sess.graph)

    for step_i in range(MAX_STEPS):
        summary, loss, _ = sess.run([merged_summaries, loss_op, train_op])
        writer.add_summary(summary, global_step=step_i)

        if step_i % CHECKPOINT_EVERY_N_STEPS == 0:
            print('step: {} last loss: {}'.format(step_i, loss))
            saver.save(sess, TENSORFLOW_CHECKPOINT_FILE)

            # extract and save the classification
            embedding_values = sess.run(embeddings)
            save_classification(
                STATION_CLASSIFICATION_FILE.format(run_id=run_id, step_i=step_i),
                embedding_values,
            )

    writer.close()
# -

# ## Climate!
#
# Here's the last saved figure from a run that used 6 classes.
#
# ![](images/2018-06-11-us.png)

# Starting with a disclaimer: it's really easy to start seeing patterns in noise!
#
# That said, I think it's neat that places near each other were assigned to the same group! The neural network didn't know about the latitude or longitude of stations, only random weather reports from those stations.
#
# It's also neat that parts of the map look to me like they follow Köppen! The east is split up by latitude into brown, pink, and green. The West coast gets its own and the West gets another.
#
# Though Portland and Seattle still share California's climate. It's also probably weird that the humid South and the arid South West all have the same climate. The Yellow climate also looks a little arbitrary. It doesn't pick up mountains.

# ## Etc
#
# And that's it! I did a proof of concept of classifying climate with neural nets!

# Here are a few other things I found during this project.
#
# ### Year-long predictions by climate
#
# The point of this model was to train the embeddings. But the model also learned to predict the weather for each station. For fun, let's check them out.

# +
def predict_a_year(station_ids):
    '''Predict a year of weather for each given station.

    Restores the latest checkpoint and, for each station id, feeds the model
    that station paired with every day of a simplified 372-day year.

    Args:
        station_ids: iterable of integer station ids.

    Returns:
        A list with one (month_day, pred) pair per station, where month_day
        is the day-of-year vector that was fed in and pred is the
        (DAY_COUNT, 3) array of model outputs for that station.
    '''
    # shh, just pretend all the months have 31 days
    DAY_COUNT = 31 * 12
    all_days = np.arange(DAY_COUNT)

    station_values = []
    with tf.Session() as sess:
        saver.restore(sess, TENSORFLOW_CHECKPOINT_FILE)
        for station_id in station_ids:
            # one copy of this station's id per day of the year.
            # (renamed from `station_ids`, which shadowed the parameter
            # being iterated)
            repeated_station_ids = station_id * np.ones((DAY_COUNT,))
            sample_input_batch = InputBatch(
                station_ids=repeated_station_ids,
                month_day=all_days,
            )
            month_day, pred = sess.run(
                [input_batch.month_day, prediction],
                {input_batch: sample_input_batch}
            )
            station_values.append((month_day, pred))
    return station_values

# Let's grab all of the stations for each climate.
I got these numbers using this script: # # for class_i in {0..5}; do # cat tf_logs/station_classification_1528754676_980000.tsv\ # | awk -F\t -v class=$class_i '{ if ($2 == class) print NR - 1 }' \ # | paste -s -d"," -; # done # # + PRED_TMAX_COL = 0 PRED_TMIN_COL = 1 PRED_PRCP_COL = 2 stations_per_climate_to_plot = [ [1,3,42,60,108,111,114,121,125,155,180,181,197,201,280,286,309,310,319,344,346,348,384,386,422,439,495,513,560,601,606,617,664,669,675,676,682,697,704,705,710,716,733,794,796,832,838,895,930,974,981,987,1039,1098,1105,1163,1164,1189,1196], [7,12,18,23,25,31,33,35,45,47,50,58,61,62,64,71,80,103,115,116,117,118,120,127,128,129,131,135,137,140,149,154,156,161,164,168,171,183,194,210,213,214,230,232,234,238,239,245,247,251,257,258,259,274,283,297,301,302,313,322,328,336,338,341,342,345,349,351,354,355,357,368,369,381,383,389,391,405,408,411,412,413,416,417,418,423,427,431,433,434,436,446,462,463,467,468,470,471,473,478,480,497,498,509,510,511,530,534,537,542,553,562,564,577,579,585,592,598,603,608,610,615,625,627,628,630,631,632,640,648,665,666,667,670,671,678,679,691,700,713,715,721,722,725,746,755,763,778,780,813,816,817,818,821,828,843,846,852,859,863,870,880,912,917,919,920,921,925,927,932,944,947,952,956,958,959,966,973,990,992,997,1002,1004,1014,1016,1018,1024,1027,1028,1030,1031,1047,1051,1054,1055,1057,1062,1065,1087,1096,1106,1111,1116,1121,1125,1126,1142,1146,1150,1158,1159,1160,1168,1182,1191,1193,1197,1202,1214], 
[22,159,208,248,252,254,256,268,269,281,290,291,318,320,326,329,358,361,367,379,393,400,402,406,419,430,459,474,475,482,483,484,486,489,492,502,508,519,523,524,528,536,555,558,567,574,581,582,593,599,626,633,634,639,642,645,649,650,657,662,677,684,693,695,701,707,709,714,719,728,729,731,732,741,744,761,765,769,770,787,788,793,800,814,819,830,834,835,836,853,855,857,861,862,864,867,875,884,892,894,900,903,908,913,915,916,918,929,941,943,949,953,965,968,978,982,983,988,994,998,1008,1011,1013,1015,1020,1021,1022,1034,1035,1037,1038,1040,1044,1049,1050,1052,1063,1068,1070,1076,1078,1081,1084,1092,1102,1113,1122,1124,1127,1132,1135,1141,1143,1147,1152,1154,1155,1156,1162,1169,1170,1172,1185,1188,1204,1209,1210,1212,1213], [0,2,11,15,20,21,27,29,32,34,36,38,40,49,52,56,72,73,79,82,83,84,90,91,94,96,101,102,104,106,132,136,139,142,143,145,148,152,153,162,163,170,172,174,175,177,178,179,186,187,196,198,202,203,204,205,207,209,216,219,221,223,226,227,229,231,235,236,241,242,243,249,250,255,260,263,264,266,270,275,276,277,278,282,284,288,289,292,293,294,295,298,299,305,306,308,312,314,315,317,323,324,325,327,330,332,333,335,337,339,347,352,356,362,363,364,366,374,375,380,385,388,390,392,394,397,398,399,401,403,404,407,409,410,421,424,425,429,435,438,441,447,448,451,452,454,455,456,458,464,465,466,469,472,477,481,485,493,494,496,501,503,504,505,512,515,517,521,522,527,531,533,535,538,540,546,552,556,557,563,568,569,570,573,576,578,580,583,586,587,589,594,595,600,602,604,607,611,612,614,616,620,622,623,635,636,637,641,644,646,651,661,663,668,672,673,674,685,686,688,692,696,698,699,706,708,711,712,718,723,726,727,734,735,737,738,739,743,747,748,751,752,753,757,762,764,766,767,768,772,777,779,781,783,785,791,798,801,802,804,805,810,815,822,826,827,839,841,842,847,851,865,868,873,876,878,879,882,888,889,891,897,898,899,905,907,914,923,924,926,931,933,934,935,940,945,948,951,963,976,977,980,984,989,993,995,996,1006,1007,1029,1032,1033,1041,1042,1043,1056,1059,1061,1067,1074,1079,10
80,1088,1091,1094,1095,1097,1099,1100,1101,1107,1110,1115,1130,1131,1133,1134,1140,1144,1149,1157,1166,1179,1180,1183,1184,1190,1195,1199,1200,1203,1208,1211,1216], [6,8,9,10,13,14,16,17,19,24,26,37,41,43,44,46,48,51,53,54,57,63,65,67,68,69,75,77,78,81,85,86,87,88,89,92,93,97,99,109,110,112,113,122,126,130,133,134,144,147,151,157,165,169,173,176,182,185,189,190,192,193,195,211,212,217,218,224,228,237,240,265,267,272,279,296,303,304,307,311,321,331,340,350,353,359,360,365,373,378,387,396,414,420,426,437,444,453,457,461,476,487,490,500,507,514,520,526,529,532,539,547,549,554,559,561,575,596,597,605,613,619,629,638,654,655,656,659,660,680,681,683,687,690,702,703,717,736,740,742,749,754,756,759,760,771,782,784,795,803,807,812,823,825,829,831,840,848,850,854,858,881,883,885,886,887,896,901,906,910,911,928,936,937,939,946,950,954,957,961,964,967,970,979,985,999,1000,1001,1003,1005,1009,1010,1012,1019,1025,1026,1045,1046,1048,1053,1064,1069,1071,1072,1073,1075,1077,1085,1089,1093,1104,1109,1112,1114,1117,1120,1123,1128,1129,1136,1137,1138,1139,1145,1148,1161,1165,1171,1174,1175,1176,1177,1181,1186,1187,1192,1194,1205,1206,1207,1217], [4,5,28,30,39,55,59,66,70,74,76,95,98,100,105,107,119,123,124,138,141,146,150,158,160,166,167,184,188,191,199,200,206,215,220,222,225,233,244,246,253,261,262,271,273,285,287,300,316,334,343,370,371,372,376,377,382,395,415,428,432,440,442,443,445,449,450,460,479,488,491,499,506,516,518,525,541,543,544,545,548,550,551,565,566,571,572,584,588,590,591,609,618,621,624,643,647,652,653,658,689,694,720,724,730,745,750,758,773,774,775,776,786,789,790,792,797,799,806,808,809,811,820,824,833,837,844,845,849,856,860,866,869,871,872,874,877,890,893,902,904,909,922,938,942,955,960,962,969,971,972,975,986,991,1017,1023,1036,1058,1060,1066,1082,1083,1086,1090,1103,1108,1118,1119,1151,1153,1167,1173,1178,1198,1201,1215], ] climate_predictions = [] for climate_stations in stations_per_climate_to_plot: 
climate_predictions.append(predict_a_year(climate_stations)) climate_id_to_color = ['red', 'blue', 'orange', 'maroon', 'orchid', 'darkcyan'] # hiding this code block in blog post # + fig, axs = plt.subplots(1, 2, figsize=(8, 3)) _, arbitrary_station = climate_predictions[0][0] axs[0].plot(arbitrary_station[:, PRED_TMAX_COL]) axs[0].plot(arbitrary_station[:, PRED_TMIN_COL]) axs[0].set_title('temperature') axs[1].plot(arbitrary_station[:, PRED_PRCP_COL]) axs[1].set_title('precipitation') maybe_save_plot('2018-06-11-year-temp-single-pred') plt.show() # - # ### Predictions by climate # # It would be cool to see why the model placed each station in each climate. These graphs are pretty cool, but I wouldn't read too much into them! # # I'll group up weather stations by assigned climate, and ask the model to predict the weather every day of the year. Then for each day, I'll plot the prediction of the median of all stations in that climate. To give an idea of the range of predictions in that climate, and shade in the area between the 5th and 95th percentile of stations. # # In other words, on June 11, the precipitation chart will show a solid line where the median of the precipitation predictions for all stations. It shows a shaded region between the 5th and 95th percentile of precipitation predictions. (Note it's not following a particular station! It's just looking at all of the predictions for a day) # #### Temperature # # It looks like blue (the West) and maroon (northern part of east of the Rockies) have colder winters. 
# +
def plot_predictions(climate_predictions, plot_temperature):
    """Plot per-climate prediction bands across the year, one subplot per climate.

    For each climate, draws the median prediction (solid line) and shades the
    band between the 5th and 95th percentile across that climate's stations.

    Args:
        climate_predictions: list (one entry per climate) of lists of
            (month_day, pred) pairs as returned by predict_a_year.
        plot_temperature: if True, plot TMAX and TMIN; otherwise plot PRCP.
    """
    fig, axs = plt.subplots(
        1,
        len(climate_predictions),
        figsize=(15, 2),
        sharey=True
    )

    for climate_i, climate_pred in enumerate(climate_predictions):
        # every station was fed the same days, so any station's day vector works
        x_grid = climate_pred[0][0]
        # stack into shape (station, day, value). np.stack over a list gives
        # the same array as the original vstack-over-a-generator, which newer
        # numpy versions deprecate.
        climate_preds = np.stack([
            station_data for _, station_data in climate_pred
        ])
        central_pred = np.median(climate_preds, axis=0)
        lower_bound = np.percentile(climate_preds, q=5, axis=0)
        upper_bound = np.percentile(climate_preds, q=95, axis=0)

        # use the named column constants instead of bare 0/1/2
        if plot_temperature:
            values = [PRED_TMAX_COL, PRED_TMIN_COL]
        else:
            values = [PRED_PRCP_COL]

        for value_i in values:
            axs[climate_i].fill_between(
                x_grid,
                lower_bound[:, value_i],
                upper_bound[:, value_i],
                alpha=0.2,
                # BUG FIX: the shaded band is the 5th-95th percentile range,
                # not "two standard deviations" as the old label claimed.
                label='5th-95th percentile'
            )
            axs[climate_i].plot(
                x_grid,
                central_pred[:, value_i]
            )
        # title once per subplot (was redundantly re-set inside the value loop)
        axs[climate_i].set_title(climate_id_to_color[climate_i])


plot_predictions(climate_predictions, plot_temperature=True)
maybe_save_plot('2018-06-11-year-temp-predictions')
plt.show()
# -

# #### Precipitation
#
# I think the precipitation predictions look a little weird. That said, I approve that the red dots were mostly on the West Coast, and the precipitation predictions show that climate includes a lot of places with very rainy winters. And that blue, the West, includes more dry places.

plot_predictions(climate_predictions, plot_temperature=False)
maybe_save_plot('2018-06-11-year-precipitation-predictions')
plt.show()

# ### TensorBoard Projector
#
# TensorBoard has a neat [embedding](https://www.tensorflow.org/programmers_guide/embedding) visualization currently called Projector. If you add column headers to the tsv files the classifier outputs above, you can load them in as labels and kind of get an idea of what KMeans is doing!
#
# ![](images/2018-06-11-tensorboard.png)
#
#
# ### Day of the year
#
# This was funny: before I used the day of the year, I used separate values for the month and the year.
Here is what the predictions looked like: # # | | | | # |--|--|--| # | ![](images/2018-06-11-silly-nn-1.png) | ![](images/2018-06-11-silly-nn-2.png) | ![](images/2018-06-11-silly-nn-3.png) | # # ### Basemap # # I used Basemap to generate images. Here's some of the [code](https://gist.github.com/jessstringham/319ab3a98d5d35010e1ac870ae2fbff1) I used to generate the maps. # # ### Inspiration # # My sister told me about the problem with Köppen! # # The neural network to learn embeddings is a little similar to how [CBOW and Skip-Grams](https://en.wikipedia.org/wiki/Word2vec#CBOW_and_skip_grams) work. # # ### What next? # # This was a neat proof-of-concept! There are lots of directions one can take this project, but I'm out of time for today. A few off the top of my head are: # # - Use more data! # - Snow data is often present in the dataset, so I don't even need to deal with missing data. # - Try different representations of days, like using an embedding! # - Find a better way to choose the model. # [1] By <NAME>., <NAME>., and <NAME>.(University of Melbourne)Enhanced, modified, and vectorized by Ali Zifan. - Hydrology and Earth System Sciences: "Updated world map of the Köppen-Geiger climate classification" (Supplement)map in PDF (Institute for Veterinary Public Health)Legend explanation, CC BY-SA 4.0, https://commons.wikimedia.org/w/index.php?curid=47086879 # # [2] <NAME>., <NAME>, <NAME>, <NAME>, and <NAME>, 2012: An overview # of the Global Historical Climatology Network-Daily Database. Journal of Atmospheric # and Oceanic Technology, 29, 897-910, doi:10.1175/JTECH-D-11-00103.1. # # [3] <NAME>., <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, B.E.Gleason, and <NAME>, 2012: Global Historical Climatology Network - # Daily (GHCN-Daily), Version 3. # NOAA National Climatic Data Center. http://doi.org/10.7289/V5D21VHZ 2018/06/09.
2018-06-11-climate-classification-with-neural-nets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # ベクトル・行列演算 # - import numpy as np # + [markdown] slideshow={"slide_type": "slide"} # ## ベクトル # # $x = (x_1,...,x_n)$ # - x = np.arange(5) print('x = ', x) print(x[3]) # + [markdown] slideshow={"slide_type": "-"} # ### ベクトルの演算 # + slideshow={"slide_type": "slide"} x = np.array([3.0, 4.0]) y = np.array([2.0, 1.0]) print('x + y = ', x + y) print('x * y = ', x * y) print('x / y = ', x / y) print('x ** y = ', np.power(x,y)) # + [markdown] slideshow={"slide_type": "slide"} # ## 行列 # we can draw a matrix as a table, # where each entry $a_{ij}$ belongs to the $i$-th row and $j$-th column. # # # $$A=\begin{pmatrix} # a_{11} & a_{12} & \cdots & a_{1m} \\ # a_{21} & a_{22} & \cdots & a_{2m} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{n1} & a_{n2} & \cdots & a_{nm} \\ # \end{pmatrix}$$ # + slideshow={"slide_type": "slide"} A = np.arange(12).reshape((3,4)) print('A =', A) print(A[2,3]) # + [markdown] slideshow={"slide_type": "slide"} # ### 次元 # - print(A.ndim) print(A.shape) print(A.size) # + [markdown] slideshow={"slide_type": "slide"} # ### 転置行列 # if $B = A^T$, then $b_{ij} = a_{ji}$ for any $i$ and $j$. 
# - print(A.T) # + [markdown] slideshow={"slide_type": "slide"} # ### 演算 # - a = 2 x = np.ones((2,3)) y = np.zeros((2,3))-1 print('X =',x) print('Y =',y) print('aX =',a*x) print('aX+Y =',a*x+y) # + [markdown] slideshow={"slide_type": "slide"} # ## 総和と平均 # - print('X=',x) print(np.sum(x)) print(np.sum(x, axis=0)) print(np.sum(x, axis=1)) print('A =',A) print(np.mean(A)) print(np.sum(A) / A.size) print(np.mean(A,axis=0)) print(np.sum(A,axis=0) / A.shape[0]) print(np.mean(A,axis=1)) print(np.sum(A,axis=1) / A.shape[1]) # + [markdown] slideshow={"slide_type": "slide"} # ## ベクトル内積 # # Given two vectors $\mathbf{u}$ and $\mathbf{v}$, the dot product $\mathbf{u}^T \mathbf{v}$ is a sum over the products of the corresponding elements: $\mathbf{u}^T \mathbf{v} = \sum_{i=1}^{d} u_i \cdot v_i$. # - x = np.arange(4) +1.0 y = np.ones(4) print('x =',x) print('y =',y) print('x・y =',np.dot(x, y)) np.sum(x * y) # + [markdown] slideshow={"slide_type": "slide"} # ## 行列とベクトルの積 # # $$A=\begin{pmatrix} # a_{11} & a_{12} & \cdots & a_{1m} \\ # a_{21} & a_{22} & \cdots & a_{2m} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{n1} & a_{n2} & \cdots & a_{nm} \\ # \end{pmatrix},\quad\mathbf{x}=\begin{pmatrix} # x_{1} \\ # x_{2} \\ # \vdots\\ # x_{m}\\ # \end{pmatrix} $$ # # $$A\mathbf{x}= # \begin{pmatrix} # \cdots & \mathbf{a}^T_{1} &... \\ # \cdots & \mathbf{a}^T_{2} & \cdots \\ # & \vdots & \\ # \cdots &\mathbf{a}^T_n & \cdots \\ # \end{pmatrix} # \begin{pmatrix} # x_{1} \\ # x_{2} \\ # \vdots\\ # x_{m}\\ # \end{pmatrix} # = \begin{pmatrix} # \mathbf{a}^T_{1} \mathbf{x} \\ # \mathbf{a}^T_{2} \mathbf{x} \\ # \vdots\\ # \mathbf{a}^T_{n} \mathbf{x}\\ # \end{pmatrix} # $$ # # So you can think of multiplication by a matrix $A\in \mathbb{R}^{m \times n}$ as a transformation that projects vectors from $\mathbb{R}^{m}$ to $\mathbb{R}^{n}$. 
# # - A = A.reshape((3,4)) print('A =',A) print('x =',x) print('A*x =',A * x) print('Ax =',np.dot(A, x)) # + [markdown] slideshow={"slide_type": "slide"} # ## 行列積 # # If you've gotten the hang of dot products and matrix-vector multiplication, then matrix-matrix multiplications should be pretty straightforward. # # Say we have two matrices, $A \in \mathbb{R}^{n \times k}$ and $B \in \mathbb{R}^{k \times m}$: # # $$A=\begin{pmatrix} # a_{11} & a_{12} & \cdots & a_{1k} \\ # a_{21} & a_{22} & \cdots & a_{2k} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{n1} & a_{n2} & \cdots & a_{nk} \\ # \end{pmatrix},\quad # B=\begin{pmatrix} # b_{11} & b_{12} & \cdots & b_{1m} \\ # b_{21} & b_{22} & \cdots & b_{2m} \\ # \vdots & \vdots & \ddots & \vdots \\ # b_{k1} & b_{k2} & \cdots & b_{km} \\ # \end{pmatrix}$$ # + [markdown] slideshow={"slide_type": "slide"} # $$AB = \begin{pmatrix} # \cdots & \mathbf{a}^T_{1} &... \\ # \cdots & \mathbf{a}^T_{2} & \cdots \\ # & \vdots & \\ # \cdots &\mathbf{a}^T_n & \cdots \\ # \end{pmatrix} # \begin{pmatrix} # \vdots & \vdots & & \vdots \\ # \mathbf{b}_{1} & \mathbf{b}_{2} & \cdots & \mathbf{b}_{m} \\ # \vdots & \vdots & &\vdots\\ # \end{pmatrix} # = \begin{pmatrix} # \mathbf{a}^T_{1} \mathbf{b}_1 & \mathbf{a}^T_{1}\mathbf{b}_2& \cdots & \mathbf{a}^T_{1} \mathbf{b}_m \\ # \mathbf{a}^T_{2}\mathbf{b}_1 & \mathbf{a}^T_{2} \mathbf{b}_2 & \cdots & \mathbf{a}^T_{2} \mathbf{b}_m \\ # \vdots & \vdots & \ddots &\vdots\\ # \mathbf{a}^T_{n} \mathbf{b}_1 & \mathbf{a}^T_{n}\mathbf{b}_2& \cdots& \mathbf{a}^T_{n} \mathbf{b}_m # \end{pmatrix} # $$ # # You can think of the matrix-matrix multiplication $AB$ as simply performing $m$ matrix-vector products and stitching the results together. # + slideshow={"slide_type": "slide"} B = np.ones(shape=(4, 3)) print('A =',A) print('B =',B) print('AB =',np.dot(A, B)) # + [markdown] slideshow={"slide_type": "slide"} # ## ノルム # # All norms must satisfy a handful of properties: # # 1. $\|\alpha A\| = |\alpha| \|A\|$ # 1. 
$\|A + B\| \leq \|A\| + \|B\|$ # 1. $\|A\| \geq 0$ # 1. If $\forall {i,j}, a_{ij} = 0$, then $\|A\|=0$ # + slideshow={"slide_type": "slide"} print('||x|| =',np.linalg.norm(x)) print('||A|| =',np.linalg.norm(A)) # -
linalg/linear-algebra.ipynb
# ##### Copyright 2021 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # steel_lns # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/steel_lns.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/examples/contrib/steel_lns.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010 <NAME> <EMAIL>, <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse from ortools.constraint_solver import pywrapcp import random parser = argparse.ArgumentParser() parser.add_argument( '--data', default='examples/data/steel_mill/steel_mill_slab.txt', help='path to data file') parser.add_argument( '--time_limit', default=20000, type=int, help='global time limit') parser.add_argument( '--lns_fragment_size', default=10, type=int, help='size of the random lns fragment') parser.add_argument( '--lns_random_seed', default=0, type=int, help='seed for the lns random generator') parser.add_argument( '--lns_fail_limit', default=30, type=int, help='fail limit when exploring fragments') # ---------- helper for binpacking posting ---------- def BinPacking(solver, binvars, weights, loadvars): """post the load constraint on bins. constraints forall j: loadvars[j] == sum_i (binvars[i] == j) * weights[i]) """ pack = solver.Pack(binvars, len(binvars)) pack.AddWeightedSumEqualVarDimension(weights, loadvars) solver.Add(pack) solver.Add(solver.SumEquality(loadvars, sum(weights))) # ---------- data reading ---------- def ReadData(filename): """Read data from <filename>.""" f = open(filename) capacity = [int(nb) for nb in f.readline().split()] capacity.pop(0) capacity = [0] + capacity max_capacity = max(capacity) nb_colors = int(f.readline()) nb_slabs = int(f.readline()) wc = [[int(j) for j in f.readline().split()] for i in range(nb_slabs)] weights = [x[0] for x in wc] colors = [x[1] for x in wc] loss = [ min([x for x in capacity if x >= c]) - c for c in range(max_capacity + 1) ] color_orders = [[o for o in range(nb_slabs) if colors[o] == c] for c in range(1, nb_colors + 1)] print('Solving steel mill with', nb_slabs, 'slabs') return (nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders) # ---------- dedicated search for this problem ---------- class SteelDecisionBuilder(pywrapcp.PyDecisionBuilder): """Dedicated Decision Builder for steel mill slab. 
Search for the steel mill slab problem with Dynamic Symmetry Breaking during search is an adaptation (for binary tree) from the paper of <NAME> and <NAME> CPAIOR-2008. The value heuristic comes from the paper Solving Steel Mill Slab Problems with Constraint-Based Techniques: CP, LNS, and CBLS, Schaus et. al. to appear in Constraints 2010 """ def __init__(self, x, nb_slabs, weights, loss_array, loads): pywrapcp.PyDecisionBuilder.__init__(self) self.__x = x self.__nb_slabs = nb_slabs self.__weights = weights self.__loss_array = loss_array self.__loads = loads self.__max_capacity = len(loss_array) - 1 def Next(self, solver): var, weight = self.NextVar() if var: v = self.MaxBound() if v + 1 == var.Min(): # Symmetry breaking. If you need to assign to a new bin, # select the first one. solver.Add(var == v + 1) return self.Next(solver) else: # value heuristic (important for difficult problem): # try first to place the order in the slab that will induce # the least increase of the loss loads = self.getLoads() l, v = min( (self.__loss_array[loads[i] + weight], i) for i in range(var.Min(), var.Max() + 1) if var.Contains(i) and loads[i] + weight <= self.__max_capacity) decision = solver.AssignVariableValue(var, v) return decision else: return None def getLoads(self): load = [0] * len(self.__loads) for (w, x) in zip(self.__weights, self.__x): if x.Bound(): load[x.Min()] += w return load def MaxBound(self): """ returns the max value bound to a variable, -1 if no variables bound""" return max([-1] + [ self.__x[o].Min() for o in range(self.__nb_slabs) if self.__x[o].Bound() ]) def NextVar(self): """ mindom size heuristic with tie break on the weights of orders """ res = [(self.__x[o].Size(), -self.__weights[o], self.__x[o]) for o in range(self.__nb_slabs) if self.__x[o].Size() > 1] if res: res.sort() return (res[0][2], -res[0][1]) # returns the order var and its weight else: return (None, None) def DebugString(self): return 'SteelMillDecisionBuilder(' + str(self.__x) + ')' # 
# ----------- LNS Operator ----------


class SteelRandomLns(pywrapcp.BaseLns):
    """Random LNS for Steel: frees a random fragment of the x variables."""

    def __init__(self, x, rand, lns_size):
        pywrapcp.BaseLns.__init__(self, x)
        self.__random = rand        # dedicated RNG so runs are reproducible
        self.__lns_size = lns_size  # number of variables each fragment frees

    def InitFragments(self):
        # no per-search state to reset
        pass

    def NextFragment(self):
        # pick lns_size random positions (duplicates allowed) to relax
        while self.FragmentSize() < self.__lns_size:
            pos = self.__random.randint(0, self.Size() - 1)
            self.AppendToFragment(pos)
        return True


# ----------- Main Function -----------

# BUG FIX: the flags defined above were never parsed, so `args` was an
# undefined name in the code below. parse_known_args (rather than
# parse_args) ignores the extra argv entries a notebook kernel passes.
args, _ = parser.parse_known_args()

# ----- solver and variable declaration -----
(nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders) =\
    ReadData(args.data)
nb_colors = len(color_orders)
solver = pywrapcp.Solver('Steel Mill Slab')

# x[o] = index of the slab that order o is assigned to
x = [solver.IntVar(0, nb_slabs - 1, 'x' + str(i)) for i in range(nb_slabs)]
# load_vars[s] = total weight assigned to slab s
load_vars = [
    solver.IntVar(0, max_capacity - 1, 'load_vars' + str(i))
    for i in range(nb_slabs)
]

# ----- post of the constraints -----
# Bin Packing.
BinPacking(solver, x, weights, load_vars)
# At most two colors per slab.
for s in range(nb_slabs):
    solver.Add(
        solver.SumLessOrEqual([
            solver.Max([solver.IsEqualCstVar(x[c], s) for c in o])
            for o in color_orders
        ], 2))

# ----- Objective -----
# total loss = sum over slabs of the capacity wasted for that slab's load
objective_var = \
    solver.Sum([load_vars[s].IndexOf(loss) for s in range(nb_slabs)]).Var()
objective = solver.Minimize(objective_var, 1)

# ----- start the search and optimization -----
assign_db = SteelDecisionBuilder(x, nb_slabs, weights, loss, load_vars)

# keep the first (greedy) solution around as the LNS starting point
first_solution = solver.Assignment()
first_solution.Add(x)
first_solution.AddObjective(objective_var)
store_db = solver.StoreAssignment(first_solution)
first_solution_db = solver.Compose([assign_db, store_db])
print('searching for initial solution,', end=' ')
solver.Solve(first_solution_db)
print('initial cost =', first_solution.ObjectiveValue())

# To search a fragment, we use a basic randomized decision builder.
# We can also use assign_db instead of inner_db.
inner_db = solver.Phase(x, solver.CHOOSE_RANDOM, solver.ASSIGN_MIN_VALUE) # The most important aspect is to limit the time exploring each fragment. inner_limit = solver.FailuresLimit(args.lns_fail_limit) continuation_db = solver.SolveOnce(inner_db, [inner_limit]) # Now, we create the LNS objects. rand = random.Random() rand.seed(args.lns_random_seed) local_search_operator = SteelRandomLns(x, rand, args.lns_fragment_size) # This is in fact equivalent to the following predefined LNS operator: # local_search_operator = solver.RandomLNSOperator(x, # args.lns_fragment_size, # args.lns_random_seed) local_search_parameters = solver.LocalSearchPhaseParameters( objective_var, local_search_operator, continuation_db) local_search_db = solver.LocalSearchPhase(first_solution, local_search_parameters) global_limit = solver.TimeLimit(args.time_limit) print('using LNS to improve the initial solution') search_log = solver.SearchLog(100000, objective_var) solver.NewSearch(local_search_db, [objective, search_log, global_limit]) while solver.NextSolution(): print('Objective:', objective_var.Value(),\ 'check:', sum(loss[load_vars[s].Min()] for s in range(nb_slabs))) solver.EndSearch()
examples/notebook/contrib/steel_lns.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Parshav-Shah/ISYS5002_portfolio/blob/main/Audio_to__text.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="SIt8ARzEZ253" outputId="b9161703-71d6-4a79-f7e9-0b9a40bc1716" # !wget https://www.pacdv.com/sounds/voices/maybe-next-time.wav # !wget https://www.pacdv.com/sounds/voices/open-the-goddamn-door.wav # + id="JgisLBsscWIA" file_name = "maybe-next-time.wav" audio_file_name = "open-the-goddamn-door.wav" # + colab={"base_uri": "https://localhost:8080/", "height": 75} id="-V7vjrPwcurh" outputId="973b8afa-496b-4794-f2f3-45762b52a9b8" # reference https://stackoverflow.com/questions/16241944/playing-a-sound-in-a-ipython-notebook import IPython IPython.display.Audio(file_name) IPython.display.Audio(audio_file_name) # + colab={"base_uri": "https://localhost:8080/"} id="g1qInNyedfsx" outputId="563d53ad-71a6-44d5-eb36-a000a32f3fc6" pip install SpeechRecognition # + id="3hqSyxBcd0ex" import speech_recognition as sr # + colab={"base_uri": "https://localhost:8080/"} id="fnMhOEBheVeB" outputId="b99d6b08-678b-4b96-f591-cbdd4d9a0ce2" list_of_function_in_sr= dir(sr) print(list_of_function_in_sr) # + id="WiZEXguYfjFz" r = sr.Recognizer() with sr.AudioFile(audio_file_name) as source: audio_file = r.record(source) # read the entire audio file text_file = r.recognize_google(audio_file) print(text_file) # + id="Q7jreFyGik9k" with open('audio_to_text_output.txt','w') as f: f.writelines(text_file) # + id="a0P3K6kRjjkg"
Audio_to__text.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # + # Require the packages require(ggplot2) require(reshape2) library(repr) options(repr.plot.width=10.5, repr.plot.height=6.5) # - # Load the data accuracies <- read.csv("data/accuracies_semeval_verbs_semisupervised.csv") accuracies$metric <- "accuracy" mcp <- read.csv("data/mcp_semeval_verbs_semisupervised.csv") mcp$metric <- "mcp" lcr <- read.csv("data/lcr_semeval_verbs_semisupervised.csv") lcr$metric <- "lcr" # Transform the data accuracies.long <- melt(accuracies, id.vars = c("metric"), variable.name = "experiment", value.name = "result") mcp.long <- melt(mcp, id.vars = c("metric"), variable.name = "experiment", value.name = "result") lcr.long <- melt(lcr, id.vars = c("metric"), variable.name = "experiment", value.name = "result") #data <- accuracies.long data <- rbind(accuracies.long, mcp.long) data <- rbind(data, lcr.long) data$experiment <- factor(data$experiment, levels=c("supervised_bow", "supervised_vec", "supervised_vecpos", "bootstrap_bow", "bootstrap_vec", "bootstrap_vecpos", "ladder_vec", "ladder_vecpos")) data$metric <- factor(data$metric, levels=c("accuracy", "mcp", "lcr")) levels(data$metric) <- c("Accuracy", "Most Frequent Class Precision", "Less Frequent Classes Recall") # Plot p <- ggplot(data, aes(experiment, result)) p <- p + stat_boxplot(geom="errorbar") p <- p + facet_wrap(~ metric) p <- p + geom_boxplot(aes(fill=experiment)) p <- p + scale_y_continuous(breaks=seq(0, 1, 0.1)) p <- p + scale_fill_discrete(name="Experiment", labels=c("Supervised\nBag-of-Words\n& Logistic Regression", "Supervised\nWord Embeddings\n& Multilayer Perceptron", "Supervised\nWord Embeddings\nand PoS\n& Multilayer Perceptron", "Naive Bootstrap\nBag-of-Words\n& Logistic Regression", "Naive Bootstrap\nWord Embeddings\n& Multilayer Perceptron", "Naive Bootstrap\nWord 
Embeddings\nand PoS\n& Multilayer Perceptron", "Ladder Networks\nWord Embeddings\n& Multilayer Perceptron", "Ladder Networks\nWord Embeddings\nand PoS\n& Multilayer Perceptron")) p <- p + labs(title="Semeval (Verbs Only) Semisupervised Experiments Comparison") p <- p + theme( plot.title=element_text(size=15, face="bold", margin=margin(10, 0, 10, 0), vjust=1, lineheight=0.6), strip.text.x=element_text(size=10), axis.title.x=element_blank(), axis.title.y=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), legend.title=element_text(face="bold", size=13), legend.text=element_text(size=11), legend.key.height=unit(3.5,"line") ) p # Save the plot ggsave("plots/semeval_semisupervised_verbs_results.png", plot=p, width=10.5, height=6.5)
graphics/semeval_verbs_boxplots_semisupervised_R.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import shapeworks as sw import numpy as np # ## shapeworks Image from numpy array # + dims = (1,3,2) # NOTE: numpy dims are specified in z, y, x order farr = np.ndarray(dims, dtype=np.float32) ival = 10; jval = 50; kval = 1.75 for i in range(0, farr.shape[2]): for j in range(0, farr.shape[1]): for k in range(0, farr.shape[0]): farr[k][j][i] = ival*(i/farr.shape[2]) + jval*(j/farr.shape[1]) + kval/farr.shape[0] # - farr.mean() farr.dtype farr.flags['OWNDATA'] farrimg = sw.Image(farr) farrimg # NOTE: sw.Image dims are specified in x, y, z order farrimg.mean() # ### While the numpy can still look at the memory, it no longer has ownership: farr.flags['OWNDATA'] farrimg += 100 farrimg.mean() farr.mean() # ### ...so the safest thing to do now is let the array go out of scope: # - having used a temporary during Image construction: `img = sw.Image(np.array(arr))` # - variable replacement after Image construction: `arr = np.zeros(1)` # - explicit deletion after Image construction: `del arr` del farr # ## Only dtype.float32 arrays can be used to initialize an image: # + dims = (12,3,21) darr = np.ndarray(dims, dtype=np.float64) ival = 10; jval = 50; kval = 1.75 for k in range(0, dims[0]): for j in range(0, dims[1]): for i in range(0, dims[2]): darr[k][j][i] = ival*(i/darr.shape[2]) + jval*(j/darr.shape[1]) + kval/darr.shape[0] # - darr.dtype darr.flags['OWNDATA'] # note: this try/catch is only used so the notebook runs to completion; not typically necessary try: darrimg = sw.Image(darr) # Throws an exception because dtype must be same as Image's pixel type except Exception as ex: print(ex) darrimg = sw.Image(np.array(darr, dtype=np.float32)) # Makes a copy of the array when passsed darrimg # ## _No unnecessary copies and no memory leaks!_ # ### 
The numpy array still owns its data since a copy was passed to create the Image. # ### Ownership of the copy's memory was taken by the image before it went out of scope. darr.flags['OWNDATA'] darrimg.mean() darr.mean() darrimg += 50 darrimg.mean() darr.mean() darr *= 10 darrimg.mean() darr.mean() # ### Now for the other way around. # ## numpy array from shapeworks Image ellipsoid_path = "/Users/cam/data/sw/tmp/1x2x2.nrrd" femur_path = "/Users/cam/data/sw/femur.nrrd" img = sw.Image(ellipsoid_path) img arr = img.toArray() arr.dtype arr.mean() img.mean() arr.shape # remember, numpy dims are zyx and Image dims are xyz img.dims() # ### The numpy array references the memory of the current Image and can change it: # arr += 100 img.mean() arr.mean() # ### ...but it still doesn't have ownership. # ### Since many Image operations reallocate internally, it's still safest to let it go out of scope as shown above. arr.flags['OWNDATA'] del arr # ## If a copy is needed, pass `copy=True` to `toArray()` arr = img.toArray(copy=True) arr.flags['OWNDATA'] # ### This can be useful when the array is created from a temporary Image: arr = sw.Image(ellipsoid_path).toArray(copy=True) arr.mean() def use_arr(arr): return arr.mean() use_arr(sw.Image(ellipsoid_path).toArray(copy=True)) # ## viewing the image using pyvista # ### One common reason to get the Image's array is for viewing using pyvista using the `for_viewing` argument: import pyvista as pv pv.set_jupyter_backend(backend="ipyvtklink") # + #help(pv.Plotter) # - plotter = pv.Plotter(shape = (1, 1), notebook = True, border = True) plotter.add_axes() plotter.add_bounding_box() #plotter.show_bounds() # for some reason extremely slow on osx #plotter.show_grid() # for some reason extremely slow on osx # NOTE: pyvisya-wrapped vtk images require 'F' ordering to prevent copying arr = img.toArray(for_viewing = True) # 'F' is `for_viewing` arr.flags arr.flags # sw2vtkImage takes care of this for us vtkimg = sw.sw2vtkImage(img, verbose=True) vol 
= plotter.add_volume(vtkimg, shade=True, show_scalar_bar=True) plotter.show() # ## Finally, we can `assign` a numpy array to an existing Image # ### This retains the Image's origin, scale, and coordsys. plotter = pv.Plotter(shape = (1, 1), notebook = True, border = True) plotter.add_axes() img1 = sw.Image(femur_path) img1.setSpacing((1.5, 0.75, 1)) # set spacing to show that it's preserved on both copy and assign img2 = sw.Image(img1) # make a copy to be processed by a scipy Python filter (spacing preserved) # ### Let's use a scipy operation on the image: from scipy import ndimage ck = ndimage.gaussian_filter(img2.toArray(), 12.0) # ### The return from this filter is the right size and type, but it's a copy: ck.shape ck.dtype ck.flags['OWNDATA'] # ### Let's assign it back to Image so we can retain Image's origin, scale, and coordsys: img2.assign(ck) # notice numpy array ownership has been transferred to Image ck.flags['OWNDATA'] # ### Now we can look at it again in the plotter: # + plotter.add_volume(sw.sw2vtkImage(img2), shade=True, show_scalar_bar=True) plotter.add_volume(sw.sw2vtkImage(img1), shade=True, show_scalar_bar=True) # - plotter.show()
Examples/Python/notebooks/array passing without copying.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Geographic data in Python {#spatial-class} # # ## Introduction # # In this chapter, we introduce the key Python packages (and data structures) for working with the two major types of spatial data, namely: # # * **shapely** and **geopandas** --- for working with vector layers # * **rasterio** and **xarray** --- for working with rasters # # As we will see later on, **shapely** and **geopandas** are related: # # * **shapely** is a "low-level" package for working with individual vector geometry objects # * **geopandas** is a "high-level" package for working with geometry columns (`GeoSeries` objects), which internally contain **shapely** geometries, and vector layers (`GeoDataFrame` objects) # # While **geopandas** (including its **shapely** dependency), at present, comprises a ubiquitous comprehensive approach for working with vector layers in Python, this is not the case for rasters. # Work with rasters in Python is much less unified. # There are several alternative packages, each with its own advantages and disadvantages. 
# We focus on the two most comprehensive and fundamental packages, namely: # # * **rasterio** --- a spatial-oriented package, focused on "simple" raster formats (such as GeoTIFF), representing a raster using a combination of a `numpy` array, and a metadata object (`dict`) specifying the spatial referencing of the array # * **xarray** --- A general-purpose package for working with labeled arrays, thus advantageous for processing "complex" raster format (such as NetCDF), representing a raster using its own native classes, namely `xarray.Dataset` and `xarray.DataArray` # # ## Vector data # # ### Introduction # # When introducing the packages for working with vector layers in Python, we are going to go from the complex class (vector layer), through the intermediate (geometry column), to the simple (geometry). # As we will see, the three classes are hierarchical, meaning that the complex encompasses the simple: # # * A vector layer (class `GeoDataFrame`) contains a geometry column (class `GeoSeries`) as one of the columns # * A geometry column (class `GeoSeries`) is composed of individual geometries (class `shapely`) # # The first two classes (`GeoDataFrame` and `GeoSeries`) are defined in package **geopandas**. # The third class is defined in package **shapely**, which deals with individual geometries, and comprises on of the dependencies of the **geopandas** package. # # ### Vector layers # # The typical data structure for vector data is a vector layer. # There are several methods to work with vector layers in Python, ranging from low-level (e.g., **fiona**) to high-level (**geopandas**). # In this book, we focus on **geopandas**. 
# # Before we begin, we need to import the **geopandas** package, conventionally as `gpd`: import geopandas as gpd # We will also limit the maximum number of printed rows to four, to save space, using the `"display.max_rows"` option of **pandas**: import pandas as pd pd.set_option("display.max_rows", 4) # Most often, we import an existing vector layer from a file, such as a Shapefile or a GeoPackage file. dat = gpd.read_file("data/world.gpkg") # The result is a `GeoDataFrame`: type(dat) # The `GeoDataFrame` class is an extension of the `DataFrame` class. # Thus, we can treat a vector layer as a table and process it using the ordinary, i.e., non-spatial, **pandas** methods. # For example, the following expression creates a subset with just the country name and the geometry (see below): dat = dat[["name_long", "geometry"]] dat # The following expression creates a subset based on a condition, including just `"Egypt"`: dat[dat["name_long"] == "Egypt"] # Finally, to get a sense of the spatial component of the vector layer, it can be plotted using the `.plot` method, as follows: dat.plot() #| eval: false # Todo: eval when fixed import hvplot.pandas dat.hvplot() # ### Geometry columns # # One of the columns in a `GeoDataFrame` is a geometry column, of class `GeoSeries`. # The geometry column contains the geometric part of the vector layer, e.g., the `POLYGON` or `MULTIPOLYGON` geometries of the 177 countries in `dat`: dat["geometry"] # The geometry column also contains the spatial reference information, if any (see below). # # Many of the spatial operators, such as calculating the centroid, buffer, or bounding box of each feature, in fact involve just the geometry. # Therefore, for example, the following expressions give exactly the same result, a `GeoSeries` with country bounding boxes: dat.bounds dat["geometry"].bounds # Another useful property of the geometry column is the geometry type (see below). 
# Note that the types of geometries contained in a geometry column (and, thus, a vector layer) are not necessarily the same. # Accordingly, the `.type` property returns a `Series` (of type `string`), rather than a single value: dat["geometry"].type # To summarize the occurrence of different geometry types in a geometry column, we can use the **pandas** method called `value_counts`: dat["geometry"].type.value_counts() # In this case, we see that the `dat` layer contains `Polygon` and `MultiPolygon` geometries. # # ### Geometries # # Each element in the geometry column is a geometry object, of class `shapely`. # For example, here is one specific geometry selected by implicit index (that of Canada): dat["geometry"].iloc[3] # and here is a specific geometry selected based on the `"name_long"` attribute: dat[dat["name_long"] == "Egypt"]["geometry"].iloc[0] # The **shapely** package is compatible with the Simple Features standard. # Accordingly, seven types of geometries are supported. # The following section demonstrates creating a `shapely` geometry of each type, using a `string` in the WKT format as input. # First, we need to import the `shapely.wkt` module: import shapely.wkt as wkt # Then, we use the `wkt.loads` (stands for "load a WKT *s*tring") to transform a WKT string to a `shapely` geometry object. 
# Here is an example of a `POINT` geometry: point = wkt.loads("POINT (5 2)") point # Here is an example of a `MULTIPOINT` geometry: multipoint = wkt.loads("MULTIPOINT ((5 2), (1 3), (3 4), (3 2))") multipoint # Here is an example of a `LINESTRING` geometry: linestring = wkt.loads("LINESTRING (1 5, 4 4, 4 1, 2 2, 3 2)") linestring # Here is an example of a `MULTILINESTRING` geometry: multilinestring = wkt.loads("MULTILINESTRING ((1 5, 4 4, 4 1, 2 2, 3 2), (1 2, 2 4))") multilinestring # Here is an example of a `POLYGON` geometry: polygon = wkt.loads("POLYGON ((1 5, 2 2, 4 1, 4 4, 1 5), (2 4, 3 4, 3 3, 2 3, 2 4))") polygon # Here is an example of a `MULTIPOLYGON` geometry: multipolygon = wkt.loads("MULTIPOLYGON (((1 5, 2 2, 4 1, 4 4, 1 5)), ((0 2, 1 2, 1 3, 0 3, 0 2)))") multipolygon # And, finally, here is an example of a `GEOMETRYCOLLECTION` geometry: geometrycollection = wkt.loads("GEOMETRYCOLLECTION (MULTIPOINT (5 2, 1 3, 3 4, 3 2), LINESTRING (1 5, 4 4, 4 1, 2 2, 3 2))") geometrycollection # `shapely` geometries act as atomic units of vector data, as spatial operations on a geometry return a single new geometry. # For example, the following expression calculates the difference between the buffered `multipolygon` (using distance of `0.1`) and itself: multipolygon.buffer(0.2).difference(multipolygon) # Internally, many spatial operations on a geometry column (or a vector layer) are basically iterations where the operator is applied on all geometries, one by one, to return a new geometry column (or layer) with the combined results. # # As demonstrated above, a `shapely` geometry object is automatically evaluated to a small image of the geometry (when using an interface capable of displaying it, such as a Jupyter Notebook). 
# To print the WKT string instead, we can use the `print` function: print(linestring) # We can determine the geometry type using the `.geom_type` property, which is a `string`: linestring.geom_type # Finally, it is important to note that raw coordinates of `shapely` geometries are accessible through a combination of the `.coords`, `.geoms`, `.exterior`, and `.interiors`, properties (depending on the geometry type). # These access methods are useful for when we need to develop our own spatial operators for specific tasks. # For example, the following expression returns the coordinates of the `polygon` geometry exterior (note that the returned object is iterable, thus enclosed in a `list` to return all coordinates at once): list(polygon.exterior.coords) # ## Raster data # # ### Introduction # # As mentioned above, working with rasters in Python is less organized around one comprehensive package (such as the case for vector layers and **geopandas**). # Instead, there are several packages providing alternative (subsets of methods) of working with raster data. # # The two most notable approaches for working with rasters in Python are provided by the **rasterio** and **xarray** packages. # As we will see shortly, they differ in their scope and underlying data models. # Specifically, **rasterio** represents rasters as **numpy** arrays associated with a separate object holding the spatial metadata. # The **xarray** package, however, represents rasters with the native `DataArray` object, which is an extension of **numpy** array designed to hold axis labels and attributes, in the same object, together with the array of raster values. # # Both packages are not comprehensive in the same way as **geopandas** is. For example, when working with **rasterio**, on the one hand, more packages may be needed to accomplish (commonly used) tasks such as zonal statistics (package `zonalstats`) or calculating topographic indices (package `richdem`). 
On the other hand, **xarray** was extended to accommodate spatial operators missing from the core package itself, with the **rioxarray** and **xarray-spatial** packages. # # In the following two sections, we introduce the two well-established packages, **rasterio** and **xarray**, which form the basis for most raster functionality in Python. # Using any of the add-on packages, or the extensions, should be straightforward, once the reader is familiar with the basics. # # ### Using **rasterio** # # To work with the **rasterio** package, we first need to import it. We also import **numpy**, since (as we will see shortly), the underlying raster data are stored in **numpy** arrays. # To effectively work with those we therefore expose all **numpy** functions. # Finally, we import the `show` function from the `rasterio.plot` sub-module for quick visualization of rasters. import numpy as np import rasterio from rasterio.plot import show import subprocess # Rasters are typically imported from existing files. # When working with **rasterio**, "importing" a raster is actually a two-step process: # # * First, we open a raster file "connection", using `rasterio.open` # * Second, we read raster values from the connection using the `.read` method # # This kind of separation is analogous to basic Python functions for reading from files, such as `open` and `.readline` to read from a text file. # The rationale is that we do not always want to read all information from the file into memory, which is particularly important as rasters size can be larger than RAM size. # Accordingly, the second step (`.read`) is selective. For example, we may want to read just one raster band rather than reading all band. # # In the first step, to create a file connection, we pass a file path to the `rasterio.open` function. 
# For this example, we use a single-band raster representing elevation in Zion National Park: src = rasterio.open("data/srtm.tif") # To get a first impression of the raster values, we can plot it using the `show` function: show(src) # The "connection" object contains the raster metadata, that is, all of the information other than the raster values. # Let us examine it: src.meta # Importantly, we can see: # # * The raster data type (`dtype`) # * Raster dimensions (`width`, `height`, and `count`, i.e., number of layers) # * Raster Coordinate Reference System (`crs`) # * The raster affine transformation matrix (`transform`) # # The last item (i.e., `transform`) deserves a few more words. # To position a raster in geographical space, in addition to the CRS we must specify the raster *origin* ($x_{min}$, $y_{max}$) and resolution ($delta_{x}$, $delta_{y}$). # In the transform matrix notation, these data items are stored as follows: # # ```{text} # Affine(delta_x, 0.0, x_min, # 0.0, delta_y, y_max) # ``` # # Note that, by convention, raster y-axis origin is set to the maximum value ($y_{max}$) rather than the minimum, and, accordingly, the y-axis resolution ($delta_{y}$) is negative. # # The `.read` method of a raster file connection object is used to read the last but not least piece of information: the raster values. # Importantly, we can read: # # * A particular layer, passing a numeric index (as in `.read(1)`) # * A subset of layers, passing a `list` of indices (as in `.read([1,2])`) # * All layers (as in `.read()`) # # Note that the layer indices start from `1` contrary to the Python convention of the first index being `0`. # # The resulting object is a **numpy** array, with either two or three dimensions: # # * *Three* dimensions, when reading all layers or more than one layer (e.g., `.read()` or `.read([1,2])`). 
In such case, the dimensions pattern is `(layers, rows, columns)` # * *Two* dimensions, when reading one specific layer (e.g., `.read(1)`) # # For example, let us read the first (and only) layer from the `srtm.tif` raster, using the file connection object `src`: s = src.read(1) s # ### Using `xarray` # # ... import xarray as xr # Reading: #| eval: false # Todo: uncomment when fixed on ci url = "https://github.com/geocompr/py/releases/download/0.1/air.2x2.250.mon.anom.comb.nc" subprocess.run(["wget", "-P", "data", "-nc", url]) x = xr.open_dataset("data/air.2x2.250.mon.anom.comb.nc") x #| eval: false x["air"] # Plot: #| eval: false x["air"].plot() # ## Coordinate Reference Systems dat.crs src.crs # ## Exercises # # ... #
ipynb/tmp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ''' Sistemas de ecuaciones para resolver con el método de NewtonRaphson ''' import math import numpy #Primer sistema de ecuaciones def F1(x,y): R=pow(x,2)+x*y-10 return R def G1(x,y): R=y+3*x*pow(y,2)-50 return R #Derivadas Parciales Primer sistema de ecuaciones def Dx_F1(x,y): R=2*x+y return R def Dy_F1(x,y): R=x return R def Dx_G1(x,y): R=3*pow(y,2) return R def Dy_G1(x,y): R=6*x*y+1 return R def NewtonR1(): x=float(input("Ingresar X: ")) y=float(input("Ingresar Y: ")) punto=numpy.zeros((2)) ecuaciones=numpy.zeros((2)) punto[0]=x punto[1]=y jacobiana=numpy.zeros((2,2)) jacobiana[0][0]=Dx_F1(x,y) jacobiana[0][1]=Dy_F1(x,y) jacobiana[1][0]=Dx_G1(x,y) jacobiana[1][1]=Dy_G1(x,y) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F1(x,y) ecuaciones[1]=G1(x,y) itera=int(input("Ingresa el número de iteraciones: ")) tol=float(input("Ingresa el error de tolerancia: ")) counter=0 error=0 print("\nIteración",counter) print(punto) while(counter<=itera): counter=counter+1 other=numpy.zeros((2)) other=punto punto=punto-numpy.dot(jacobianainv,ecuaciones) error=punto-other error=max(abs(error)) jacobiana[0][0]=Dx_F1(punto[0],punto[1]) jacobiana[0][1]=Dy_F1(punto[0],punto[1]) jacobiana[1][0]=Dx_G1(punto[0],punto[1]) jacobiana[1][1]=Dy_G1(punto[0],punto[1]) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F1(punto[0],punto[1]) ecuaciones[1]=G1(punto[0],punto[1]) print("\nIteración",counter) print("Punto:",punto) print("Error. 
",error) if(error<tol): break print("\n\nSOLUCIÓN DEL SISTEMA: ",punto) print("ERROR ALCANZADO:",error) #Segundo Sistema de ecuaciones def F2(x,y): R=pow(x,2)+pow(y,2)-9 return R def G2(x,y): R=-math.exp(x)-2*y-3 return R #Derivadas Parciales Segundo Sistema de ecuaciones def Dx_F2(x,y): R=2*x return R def Dy_F2(x,y): R=2*y return R def Dx_G2(x,y): R=-math.exp(x) return R def Dy_G2(x,y): R=-2 return R def NewtonR2(): x=float(input("Ingresar X: ")) y=float(input("Ingresar Y: ")) punto=numpy.zeros((2)) ecuaciones=numpy.zeros((2)) punto[0]=x punto[1]=y jacobiana=numpy.zeros((2,2)) jacobiana[0][0]=Dx_F2(x,y) jacobiana[0][1]=Dy_F2(x,y) jacobiana[1][0]=Dx_G2(x,y) jacobiana[1][1]=Dy_G2(x,y) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F2(x,y) ecuaciones[1]=G2(x,y) itera=int(input("Ingresa el número de iteraciones: ")) tol=float(input("Ingresa el error de tolerancia: ")) counter=0 error=0 print("\nIteración",counter) print(punto) while(counter<=itera): counter=counter+1 other=numpy.zeros((2)) other=punto punto=punto-numpy.dot(jacobianainv,ecuaciones) error=punto-other error=max(abs(error)) jacobiana[0][0]=Dx_F2(punto[0],punto[1]) jacobiana[0][1]=Dy_F2(punto[0],punto[1]) jacobiana[1][0]=Dx_G2(punto[0],punto[1]) jacobiana[1][1]=Dy_G2(punto[0],punto[1]) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F2(punto[0],punto[1]) ecuaciones[1]=G2(punto[0],punto[1]) print("\nIteración",counter) print("Punto:",punto) print("Error:",error) if(error<tol): break print("\n\nSOLUCIÓN DEL SISTEMA: ",punto) print("ERROR ALCANZADO:",error) #Tercer sistema de ecuaciones def F3(x,y,z): R=2*pow(x,2)-4*x+pow(y,2)+3*pow(z,2)+6*z+2 return R def G3(x,y,z): R=pow(x,2)+pow(y,2)-2*y+2*pow(z,2)-5 return R def H3(x,y,z): R=3*pow(x,2)-12*x+pow(y,2)-3*pow(z,2)+8 return R #Derivadas Parciales Tercer sistema de ecuaciones def Dx_F3(x,y,z): R=4*x-4 return R def Dy_F3(x,y,z): R=2*y return R def Dz_F3(x,y,z): R=6*z+6 return R def Dx_G3(x,y,z): R=2*x return R def Dy_G3(x,y,z): R=2*y-2 return R 
def Dz_G3(x,y,z): R=4*z return R def Dx_H3(x,y,z): R=6*x-12 return R def Dy_H3(x,y,z): R=2*y return R def Dz_H3(x,y,z): R=-6*z return R def NewtonR3(): x=float(input("Ingresar X: ")) y=float(input("Ingresar Y: ")) z=float(input("Ingresar Z: ")) punto=numpy.zeros((3)) ecuaciones=numpy.zeros((3)) punto[0]=x punto[1]=y punto[2]=z jacobiana=numpy.zeros((3,3)) jacobiana[0][0]=Dx_F3(x,y,z) jacobiana[0][1]=Dy_F3(x,y,z) jacobiana[0][2]=Dz_F3(x,y,z) jacobiana[1][0]=Dx_G3(x,y,z) jacobiana[1][1]=Dy_G3(x,y,z) jacobiana[1][2]=Dz_G3(x,y,z) jacobiana[2][0]=Dx_H3(x,y,z) jacobiana[2][1]=Dy_H3(x,y,z) jacobiana[2][2]=Dz_H3(x,y,z) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F3(x,y,z) ecuaciones[1]=G3(x,y,z) ecuaciones[2]=H3(x,y,z) itera=int(input("Ingresa el número de iteraciones: ")) tol=float(input("Ingresa el error de tolerancia: ")) counter=0 error=0 print("\nIteración",counter) print(punto) while(counter<=itera): counter=counter+1 other=numpy.zeros((3)) other=punto punto=punto-numpy.dot(jacobianainv,ecuaciones) error=punto-other error=max(abs(error)) jacobiana[0][0]=Dx_F3(punto[0],punto[1],punto[2]) jacobiana[0][1]=Dy_F3(punto[0],punto[1],punto[2]) jacobiana[0][2]=Dz_F3(punto[0],punto[1],punto[2]) jacobiana[1][0]=Dx_G3(punto[0],punto[1],punto[2]) jacobiana[1][1]=Dy_G3(punto[0],punto[1],punto[2]) jacobiana[1][2]=Dz_G3(punto[0],punto[1],punto[2]) jacobiana[2][0]=Dx_H3(punto[0],punto[1],punto[2]) jacobiana[2][1]=Dy_H3(punto[0],punto[1],punto[2]) jacobiana[2][2]=Dz_H3(punto[0],punto[1],punto[2]) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F3(punto[0],punto[1],punto[2]) ecuaciones[1]=G3(punto[0],punto[1],punto[2]) ecuaciones[2]=H3(punto[0],punto[1],punto[2]) print("\nIteración",counter) print("Punto:",punto) print("Error:",error) if(error<tol): break print("\n\nSOLUCIÓN DEL SISTEMA: ",punto) print("ERROR ALCANZADO:",error) #Cuarto sistema de ecuaciones def F4(x,y,z): R=pow(x,2)-4*x+pow(y,2) return R def G4(x,y,z): R=pow(x,2)-x-12*y+1 return R def H4(x,y,z): 
R=3*pow(x,2)-12*x+pow(y,2)-3*pow(z,2)+8 return R #Derivadas Parciales Cuarto sistema de ecuaciones def Dx_F4(x,y,z): R=2*x-4 return R def Dy_F4(x,y,z): R=2*y return R def Dz_F4(x,y,z): R=0 return R def Dx_G4(x,y,z): R=2*x-1 return R def Dy_G4(x,y,z): R=-12 return R def Dz_G4(x,y,z): R=0 return R def Dx_H4(x,y,z): R=6*x-12 return R def Dy_H4(x,y,z): R=2*y return R def Dz_H4(x,y,z): R=-6*z return R def NewtonR4(): x=float(input("Ingresar X: ")) y=float(input("Ingresar Y: ")) z=float(input("Ingresar Z: ")) punto=numpy.zeros((3)) ecuaciones=numpy.zeros((3)) punto[0]=x punto[1]=y punto[2]=z jacobiana=numpy.zeros((3,3)) jacobiana[0][0]=Dx_F4(x,y,z) jacobiana[0][1]=Dy_F4(x,y,z) jacobiana[0][2]=Dz_F4(x,y,z) jacobiana[1][0]=Dx_G4(x,y,z) jacobiana[1][1]=Dy_G4(x,y,z) jacobiana[1][2]=Dz_G4(x,y,z) jacobiana[2][0]=Dx_H4(x,y,z) jacobiana[2][1]=Dy_H4(x,y,z) jacobiana[2][2]=Dz_H4(x,y,z) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F4(x,y,z) ecuaciones[1]=G4(x,y,z) ecuaciones[2]=H4(x,y,z) itera=int(input("Ingresa el número de iteraciones: ")) tol=float(input("Ingresa el error de tolerancia: ")) counter=0 error=0 print("\nIteración",counter) print(punto) while(counter<=itera): counter=counter+1 other=numpy.zeros((3)) other=punto punto=punto-numpy.dot(jacobianainv,ecuaciones) error=punto-other error=max(abs(error)) jacobiana[0][0]=Dx_F4(punto[0],punto[1],punto[2]) jacobiana[0][1]=Dy_F4(punto[0],punto[1],punto[2]) jacobiana[0][2]=Dz_F4(punto[0],punto[1],punto[2]) jacobiana[1][0]=Dx_G4(punto[0],punto[1],punto[2]) jacobiana[1][1]=Dy_G4(punto[0],punto[1],punto[2]) jacobiana[1][2]=Dz_G4(punto[0],punto[1],punto[2]) jacobiana[2][0]=Dx_H4(punto[0],punto[1],punto[2]) jacobiana[2][1]=Dy_H4(punto[0],punto[1],punto[2]) jacobiana[2][2]=Dz_H4(punto[0],punto[1],punto[2]) jacobianainv=numpy.linalg.inv(jacobiana) ecuaciones[0]=F4(punto[0],punto[1],punto[2]) ecuaciones[1]=G4(punto[0],punto[1],punto[2]) ecuaciones[2]=H4(punto[0],punto[1],punto[2]) print("\nIteración",counter) 
print("Punto:",punto) print("Error:",error) if(error<tol): break print("\n\nSOLUCIÓN DEL SISTEMA: ",punto) print("ERROR ALCANZADO:",error) def Interpolacion(): #Llenamos y solicitamos informaciòn para la tabla de valores n=int(input("Ingrese el nùmero de puntos en la tabla: ")) matrix=numpy.zeros((n,2)) matrix1=numpy.zeros((n,n+1)) for i in range(n): for j in range(2): matrix[i][j]=float(input("Ingrese un valor para la casilla ({0},{1}): ".format(i+1,j+1))) print() print(matrix) while(True): print() op=input("""¿La tabla de valores es correcta?, Introduzca SI o NO: """) if(op.lower()!='si' and op.lower()!='no'): print("ERROR") break if(op.lower()=='no'): coorx=int(input("Ingrese el renglón del elemento que quiere cambiar: ")) coory=int(input("Ingrese la columna Y del elemento que quiere cambiar: ")) for i in range(n): for j in range(2): if(i==coorx-1 and j==coory-1): matrix[i][j]=float(input("Ingrese el elemento correcto: ")) print() print(matrix) print() if(op.lower()=='si'): break matrix.sort(axis=0) #Presentamos la tabla de valores ordenada print("\n\nLa tabla de valores ordenada es: \n") print(matrix) print() for i in range(n): for j in range(2): matrix1[i][j]=matrix[i][j] for j in range(2,n+1): for k in range(0,n-1): if(k+j<n+1): div=(matrix1[k+j-1][0]-matrix1[k][0]) num=(matrix1[k+1][j-1]-matrix1[k][j-1]) matrix1[k][j]=num/div print("\nLa matriz de diferencias divididas es: \n") matrix1=numpy.round(matrix1,4) print(matrix1) #Empezamos a solicitar informaciòn para interpolar while True: x=float(input("\nIngrese el punto a interpolar: ")) m=int(input("\nIngrese el grado del polinomio requerido: ")) #Extraemos datos necesarios para interpolar vector1=numpy.zeros((m)) vector2=numpy.zeros((m+1)) if(x>=matrix1[0][0] and x<=matrix1[n-1][0] and m<=n): for i in range(m): vector1[i]=matrix1[i][0] j=0 for i in range(1,m+2): vector2[j]=matrix1[0][i] j=j+1 for i in range(len(vector1)): vector1[i]=x-vector1[i] interpol=vector2[0] aux=1 for i in range(1,len(vector2)): for j 
in range(i): aux*=vector1[j] interpol+=vector2[i]*aux aux=1 print("\n********************************************") print("El resultado de interpolacion es: ",interpol) print("********************************************") op=input("\n¿Desea interpolar otro punto? (escribe SI o NO): ") if(op.lower()=='no'): break #Flujo principal del programa print("\n\t\tEQUIPO DE TRABAJO") print("\n\t<NAME>") print("\n\t<NAME>") print("\n\t<NAME>") print("\n\t<NAME>") while(True): print("\n\n\tBienvenido al programa de mètodos numèricos ") print("\n\tMètodos Disponibles: ") print("\n\tNEWTON-RAPHSON ---> (1)") print("\n\tINTERPOLACION POLINOMIAL ---> (2)") print("\n\tSalir ---> (3)") a=int(input("\n\tSeleccione el mètodo a utilizar: ")) if(a==1): while(True): print("\nMétodo Utilizar: Newton-Raphson ") print("\nSistema 1 (presiona 1)") print("Sistema 2 (presiona 2)") print("Sistema 3 (presiona 3)") print("Sistema 4 (presiona 4)") print("Salir (presiona 5)") r=int(input("\nSelecciona tu opción: ")) if(r>5): print("\nERROR") print("\nSelecione una opción de las anteriormente presentadas") if(r==1): while(True): NewtonR1() print("\n¿Desea calcular otra raíz con este método?") print("SI.....(1)") print("NO.....(2)") op=int(input("\nSeleccione: ")) if(op==2): break if(r==2): while(True): NewtonR2() print("\n¿Desea calcular otra raíz con este método?") print("SI.....(1)") print("NO.....(2)") op=int(input("\nSeleccione: ")) if(op==2): break if(r==3): while(True): NewtonR3() print("\n¿Desea calcular otra raíz con este método?") print("SI.....(1)") print("NO.....(2)") op=int(input("\nSeleccione: ")) if(op==2): break if(r==4): while(True): NewtonR4() print("\n¿Desea calcular otra raíz con este método?") print("SI.....(1)") print("NO.....(2)") op=int(input("\nSeleccione: ")) if(op==2): break if(r==5): break if(a==2): while(True): Interpolacion() b=input("\n¿Desea ingresar una nueva tabla de interpolación?: ") if(b.lower()=='no'): break if(a==3): break #Salimos del bucle grande 
print("\n\n\tGRACIAS POR USAR EL PROGRAMA")
ProyectoMetodos_actualizacion2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib as mpl

mpl.rcParams['figure.figsize'] = (30, 8)
# -

import cv2 as cv
import numpy as np

# !ls -l

# Open the sample video and grab the first frame to pick the tracking window from.
capture = cv.VideoCapture('./slow_traffic_small.mp4')
_, frame = capture.read()
plt.imshow(frame);

# +
# setup initial location of window
x, y, w, h = 300, 200, 100, 50  # simply hardcoded the values
track_window = (x, y, w, h)

# set up the ROI for tracking: build a hue histogram of the region,
# masking out low-saturation / low-value pixels that carry unreliable hue.
roi = frame[y:y+h, x:x+w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)

# Setup the termination criteria: either 10 iterations or a move of at least 1 pt.
term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = capture.read()
    if not ret:
        # end of video stream
        break
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    # apply camshift to get the new location (rotated rect + updated window)
    ret, track_window = cv.CamShift(dst, track_window, term_crit)
    # Draw the rotated bounding box on the frame.
    pts = cv.boxPoints(ret)
    # np.int0 was an alias removed in NumPy 2.0; np.intp is the documented replacement.
    pts = np.intp(pts)
    img2 = cv.polylines(frame, [pts], True, 255, 2)
    cv.imshow('img2', img2)
    k = cv.waitKey(30) & 0xff
    if k == 27:  # Esc key stops the demo
        break

# Release the capture handle so the file/device is not left open.
capture.release()
cv.destroyAllWindows()
# -
tracking.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Insurance and Incentives
#
# *By <NAME> and <NAME>*
#
# This notebook computes optimal contracts for the three examples that lead off chapter 21 of
# **Recursive Macroeconomic Theory, Fourth edition** by <NAME> and <NAME>.
#
# The examples illustrate different sorts of tradeoffs between insurance and incentives that emerge under different
# limits on enforcement and information.
#
# In each of the three economies, a planner or money-lender designs an efficient contract to supply insurance to a risk-averse consumer who receives an exogenous random stream of a non-storable endowment.
#
# The only way for the consumer to smooth consumption across states and time is to interact with the planner.
#
# The three models differ in the constraints that they impose on the planner.
#
# These constraints express the planner's limited ability either to enforce a contract or to observe the consumer's endowment.
#
# Each of the examples uses a version of what we have nicknamed **dynamic programming squared**.
#
# In a dynamic programming squared problem, a value function from one Bellman equation is an argument of another Bellman equation.
#
# In the examples below, a planner or money lender's value function will have as an argument the value of a villager
# that satisfies a Bellman equation.
#
#
# ### Three models of a villager and a money lender
#
# Imagine a village with a large number of ex ante
# identical households.
Each household has preferences over # consumption streams that are ordered by # $$ E_{-1}\sum_{t=0}^\infty \beta^t u(c_t), $$ # where $u(c)$ is an increasing, strictly concave, and twice # continuously differentiable function, # $\beta \in (0,1)$ is a discount factor, and $E_{-1}$ is the mathematical expectation # not conditioning on any information available at time $0$ or later. # # Each household # receives a stochastic endowment stream $\{y_t\}_{t=0}^\infty$, # where for each $t \geq 0$, $y_t$ is independently and # identically distributed according to the discrete # probability distribution ${\rm Prob} (y_t = \overline y_s) = \Pi_s,$ # where $s \in \{1, 2, \ldots ,S\}\equiv {\bf S}$ and # $\overline y_{s+1}>\overline y_s$. # # The consumption # good is not storable. # # At time $t \geq 1$, the # household has received a history of endowments # $h_t = (y_t, y_{t-1}, \ldots, y_0).$ # # Endowment processes are distributed independently and identically # both across time and # across households. # # # ##### Competitive equilibrium # # In this setting, if there were a competitive equilibrium with # complete markets, at date # $0$ households would trade history- and date-contingent claims. # # Since households are ex ante # identical, each household would consume the per capita # endowment in every period, and its lifetime utility would be # # $$ v_{\rm pool} = \sum_{t=0}^\infty # \beta^t \, u\!\left(\sum_{s=1}^S \Pi_s \overline y_s\right) = # {1 \over 1-\beta}\, u\!\left(\sum_{s=1}^S \Pi_s \overline y_s\right) . # $$ # # Households would thus insure away all # risks from their individual endowment processes. # # But the # incentive constraints that we are about to specify make # this allocation unattainable. # # For each specification of incentive # constraints, we shall solve a planning problem for an efficient # allocation that respects those constraints. 
#
#
# Following a tradition started by
# <NAME> (1987) [*Lending and the Smoothing of Uninsurable
# Income*, in <NAME> and <NAME>, editors, **Contractual Arrangements for
# Intertemporal Trade**, Minnesota Studies in Macroeconomics series, Vol.
# 1, Minneapolis: University of Minnesota Press, pp. 3--25], we assume that a *moneylender* or *planner* is
# the only person in the village who has access to
# a risk-free loan market outside the village.
#
# The moneylender can borrow or lend at a constant one-period
# risk-free gross interest rate $R=\beta^{-1}$.
#
# Households cannot borrow or lend with each other,
# and can trade only with the moneylender.
#
# Furthermore,
# we assume that the moneylender is committed to honor his
# promises.
#
# We will study three distinct environments in which there are three alternative types of incentive constraints.
#
#
# **Environment a.** Both the money lender and the household observe the household's history of endowments at each time $t$.
# Although the moneylender can commit to honor a
# contract, households cannot commit and at any time are
# free to walk away from an arrangement
# with the moneylender
# and live in perpetual autarky thereafter. They must be induced not to do so
# by the structure of
# the contract.
# This is a model of *one-sided commitment* in which the
# contract must be *self-enforcing*. That is, it must be structured to induce the household to prefer to
# conform to it.
#
# **Environment b.** Households *can* make commitments and enter
# into enduring and binding contracts with the moneylender,
# but they have private
# information about their own incomes. The moneylender
# can see neither their income nor their consumption. Instead,
# exchanges between the moneylender and a household must
# be based on the household's own reports about income
# realizations. An incentive-compatible contract induces
# a household to report its income truthfully.
# # **Environment c.** The environment is the same as b except that now households have access to a storage technology that # cannot be observed by the moneylender. # Households can store nonnegative amounts of goods at a risk-free # gross return of $R$ equal to the interest rate that # the moneylender faces in the outside credit market. # Since the moneylender can both borrow and lend at the interest # rate $R$ outside of the village, # the private storage technology does not change the economy's # aggregate resource constraint, but it does affect the set of # incentive-compatible contracts between the moneylender and the # households. # # # #### Preview # # # When we compute efficient allocations for each of these three # environments, we find that the dynamics of the implied # consumption allocations differ dramatically. # # # We shall see that the dynamics # of consumption outcomes evidently differ substantially across the # three environments, increasing monotonically and then flattening out in environment a, # stochastically heading south in environment b, and stochastically heading north in # environment c. # These sample path properties will reflect how the optimal contracts cope with the three different frictions that we have put into the environment. # # Chapter 21 of RMT4 explains why sample paths of consumption differ # so much across these three settings. # ### Three computed contracts # # # For all three environments discussed, consumers have a utility function: # # $$u(c) = - \gamma^{-1} \exp(-\gamma c)$$ # # We set $\gamma = 0.7$, and the discount factor, $\beta$ to 0.8. # # The consumers receive an iid endowment that can take any integer in the range $[\bar y_1,...,\bar y_{5}] = [6,...,10]$. # # The probability of each realisation is $\Pi_s = \frac{1-\lambda}{1-\lambda^{5}}\lambda^{s-1}$ with $\lambda = 0.4$. # # As mentioned above, an interesting benchmark case is a complete markets environment. 
#
#
# Because all households are *ex ante* identical, in a complete markets economy each household would consume the per capita endowment in every period, and its lifetime utility would be:
#
# $$ v_{pool} = \frac{1}{1-\beta} u \left( \sum_{s=1}^S \Pi_s \bar y_s \right) = \frac{u(c_{pool})}{1-\beta} $$
#
# Later we will compare the consumption paths for each environment to that which would occur in the complete markets environment.
#
# In each environment, we compute allocations for the situation in which the planner or money lender just breaks even.
#
# ## Environment a
#
# The first environment is one in which the planner is able to commit, but households are not.
#
# At any time households are free to walk away from an arrangement with the planner, and live in perpetual autarky thereafter.
#
# RMT4 shows how this problem can be written in a recursive form.
#
# Equations 21.3.4 to 21.3.8 in RMT4 express the planner's problem as:
#
# \begin{align}
# &P(v) = \max_{c_s,w_s} \sum_{s=1}^S \Pi_s \left[ (\bar y_s - c_s) + \beta P(w_s) \right] \\
# &\text{subject to} \\
# &\sum_{s=1}^S \Pi_s \left[ u(c_s) + \beta w_s \right] \geq v \\
# &u(c_s) + \beta w_s \geq u(\bar y_s) + \beta v_{aut} \text{ , s = 1,...,S} \\
# &c_s \in [c_{min},c_{max}] \\
# &w_s \in [v_{aut},\bar v]
# \end{align}
#
# where $w_s$ is the promised value with which the consumer will enter the next period, given that $y = \bar y_s$ this period.
#
# The first constraint is a promise keeping constraint, while the second set of constraints are participation constraints. $[c_{min},c_{max}]$ is a bounded set, while $\bar v$ just needs to be a very large number.
#
#
# The value of autarky to the households is:
#
# $$ v_{aut} = \frac{1}{1-\beta} \sum_{s=1}^S \Pi_s u(\bar y_s) $$
#
# Below we solve the moneylender's problem in this environment by approximating $P(v)$ using Chebyshev polynomials.
import numpy as np
from scipy.optimize import minimize, fsolve
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
import numpy.polynomial.chebyshev as cheb
# %matplotlib inline

# +
# Parameter values
gamma = 0.7   # CARA risk-aversion coefficient
beta = 0.8    # discount factor
lamb = 0.4    # decay parameter of the endowment distribution
S = 5         # number of endowment states

y_grid = np.linspace(6, 5 + S, S)     # endowment support: integers 6,...,10
prob_grid = np.zeros(S)
for i in range(S):
    # Pi_s = (1-lambda)/(1-lambda^S) * lambda^(s-1), a truncated geometric distribution
    prob_grid[i] = (1 - lamb)/(1 - lamb**S)*lamb**(i)

# Utility function and its inverse
u = lambda c: -gamma**(-1)*np.exp(-gamma*c)
u_inv = lambda u: np.log(-gamma*u)/(-gamma)

# Calculate complete markets consumption (per capita endowment)
c_pool = np.dot(prob_grid, y_grid)

# Calculate value of autarky
v_aut = 1/(1 - beta)*np.dot(prob_grid, u(y_grid))
# -

# +
# Functions used in each environment

# Nodes and basis matrix for Chebyshev approximation
def Cheb_basis(order, lb, ub):
    """Return Chebyshev nodes mapped to [lb, ub] and the Vandermonde basis matrix."""
    # Calculate roots of the order-th Chebyshev polynomial on [-1, 1]
    k = np.linspace(order, 1, order)
    roots = np.cos((2*k - 1)*np.pi/(2*order))
    # Scale from [-1, 1] to the approximation space [lb, ub]
    s = lb + (roots + 1)/2*(ub - lb)
    # Create basis matrix
    Phi = cheb.chebvander(roots, order - 1)
    return s, Phi

# Value Function Iteration
def Bellman_Iterations(s, Phi, P_fun, x_store, coeff, tolc=1e-6, bnds=None, cons=(), max_iters=100):
    """Iterate on the Bellman operator until the Chebyshev coefficients converge.

    s        : approximation nodes (promised values / assets)
    Phi      : Chebyshev basis matrix at the nodes
    P_fun    : objective to minimize; reads the module-level globals `x`
               (current state) and `c` (current value-function coefficients)
    x_store  : dict of warm-start guesses per node, updated in place
    coeff    : initial coefficient vector
    Returns the converged coefficient vector.
    """
    # P_fun and the constraint functions are written in terms of the globals
    # `x` (state) and `c` (coefficients), so both must be rebound here.
    global x, c
    c = coeff
    order = Phi.shape[1]
    iters = 0
    diff = 1
    while diff > tolc:
        # 1. Maximization at every node, given the current value function guess
        P_iter = np.zeros(order)
        for i in range(order):
            x = s[i]
            res = minimize(P_fun, x_store[i], method='SLSQP', bounds=bnds,
                           constraints=cons, tol=1e-15)
            x_store[i] = res.x
            P_iter[i] = -P_fun(res.x)
        # 2. Bellman updating of Value Function coefficients
        c1 = np.linalg.solve(Phi, P_iter)
        # 3. Compute distance and update
        diff = max(abs(c1 - c))
        print(diff)
        c = np.copy(c1)
        iters = iters + 1
        if iters >= max_iters:
            print('Convergence failed after {} iterations'.format(iters))
            break
        if diff < tolc:
            print('Convergence achieved after {} iterations'.format(iters))
    return c
# -

# +
# Value Function Approximation

# Set bounds and approximation order
v_min = v_aut
v_max = -0.065
c_min = 0
c_max = 50
order = 70

# Calculate nodes and basis matrix
s, Phi = Cheb_basis(order, v_min, v_max)

# Bounds for Maximisation: choice vector is (c_1..c_S, w_1..w_S)
lb = np.concatenate([np.ones(S)*c_min, np.ones(S)*v_min], axis=0)
ub = np.concatenate([np.ones(S)*c_max, np.ones(S)*v_max], axis=0)

# Initialize Value Function coefficients and guess for (c, w)
y = (c_pool - u_inv(s*(1 - beta)))/(1 - beta)
c = np.linalg.solve(Phi, y)
x_init = np.concatenate([np.ones(S)*c_min, np.ones(S)*v_min], axis=0)

# Function to minimize and constraints
def P_fun(x):
    """Negative of the moneylender's expected value at the choice vector x."""
    scale = -1 + 2*(x[S:2*S] - v_min)/(v_max - v_min)
    P = np.dot(cheb.chebvander(scale, order - 1), c)
    val = -prob_grid.dot((y_grid - x[0:S]) + beta*P)
    return val

def cons12(y):
    # Promise keeping: expected utility delivered must be at least the
    # promised value, held in the global `x` during Bellman iterations.
    global x
    return prob_grid.dot(u(y[0:S]) + beta*y[S:2*S]) - x

cons1 = ({'type': 'ineq',   # participation constraints, one per state
          'fun': lambda y: u(y[0:S]) + beta*y[S:2*S] - u(y_grid) - beta*v_aut},
         {'type': 'ineq',   # promise keeping
          'fun': cons12})
bnds1 = np.concatenate([lb.reshape(2*S, 1), ub.reshape(2*S, 1)], axis=1)

# Bellman Iterations
tolc = 1e-6
x_store = {}
for i in range(order):
    x_store[i] = x_init
c = Bellman_Iterations(s, Phi, P_fun, x_store, c, bnds=bnds1, cons=cons1)
# -

# +
# Time Series Simulation
T = 100
np.random.seed(2)
y_series = np.random.choice(y_grid, size=T, p=prob_grid)
c_series = np.zeros(T)
w_series = np.zeros(T)

# Initialize v such that P(v) = 0 (moneylender just breaks even)
v_find = lambda v: cheb.chebvander(-1 + 2*(v - v_min)/(v_max - v_min), order - 1).dot(c)
x = fsolve(v_find, v_max)
res = minimize(P_fun, x_init, method='SLSQP', bounds=bnds1, constraints=cons1, tol=1e-15)
c_series[0] = res.x[np.where(y_grid == y_series[0])[0][0]]
w_series[0] = res.x[S + np.where(y_grid == y_series[0])[0][0]]

# Simulate: feed last period's promised value in as the new state
for t in range(1, T):
    x = w_series[t-1]
    res = minimize(P_fun, x_init, method='SLSQP', bounds=bnds1, constraints=cons1, tol=1e-15)
    c_series[t] = res.x[np.where(y_grid == y_series[t])[0][0]]
    w_series[t] = res.x[S + np.where(y_grid == y_series[t])[0][0]]

plt.plot(c_series, label='Environment (a)')
plt.plot(np.ones(T)*c_pool, label='Complete Markets')
plt.ylabel('Consumption')
plt.xlabel('Time')
plt.legend(loc='best');
plt.title('Environment (a)');
# -

# The above simulation is equivalent to Figure 21.2.1.a in RMT.
#
# The discussion in RMT4 confirms that the household's consumption ratchets upwards over time.
#
# The consumption level is constant after the first time that the household receives the highest possible endowment.
#
# ## Environment b
#
# The second environment is one in which households *can* make commitments to enter into binding contracts with the planner, but they have private information about their incomes.
#
# Consequently, incentive compatibility constraints are required to ensure that households truthfully report their incomes.
#
# Equations 21.5.1 to 21.5.5 in RMT4 express the planner's problem.
#
# \begin{align}
# &P(v) = \max_{b_s,w_s} \sum_{s=1}^S \Pi_s \left[ -b_s + \beta P(w_s) \right] \\
# &\text{s.t.} \\
# &\sum_{s=1}^S \Pi_s \left[ u(\bar y_s + b_s) + \beta w_s \right] = v \\
# & C_{s,k} \equiv u(\bar y_s + b_s) + \beta w_s - [ u(\bar y_s + b_k) + \beta w_k ] \geq 0 \hspace{2mm} \forall \hspace{2mm} s,k \in S \times S\\
# &b_s \in [a - \bar y_s,\infty ] \\
# &w_s \in [- \infty, v_{max}]
# \end{align}
#
# Here $b_s$ is the transfer that the moneylender gives to a household who reports income $y_s$ if their promised value was $v$.
#
# The promise keeping constraint remains, while the participation constraint has been replaced by a large set of incentive compatibility constraints.
#
# RMT4 shows that we can discard many of the incentive compatibility constraints.
#
# In solving the model below, we keep only the local upward and downward incentive compatibility constraints.

# +
# Set bounds and approximation order
b_min = -20
b_max = 20
w_min = -150
w_max = -0.04
v_min = -150
v_max = -0.04
v_pool = u(c_pool)/(1 - beta)   # complete-markets value (reference benchmark)
order = 70

# Calculate nodes and basis matrix
s, Phi = Cheb_basis(order, v_min, v_max)

# Bounds for Maximisation: choice vector stacks (b_1..b_S, w_1..w_S)
lb = np.concatenate([np.ones(S)*b_min, np.ones(S)*w_min], axis=0)
ub = np.concatenate([np.ones(S)*b_max, np.ones(S)*w_max], axis=0)

# For the initial guess, use the upper bound given in RMT:
cbar = np.zeros(order)
upper = np.zeros(order)
for node in range(order):
    cbar[node] = u_inv((1 - beta)*s[node])
    upper[node] = np.dot(prob_grid, (y_grid - cbar[node])/(1 - beta))
c = np.linalg.solve(Phi, upper)

# Function to minimize and constraints
def P_fun2(x):
    """Negative of the moneylender's expected value when paying transfers x[0:S]
    and promising continuation values x[S:2S]."""
    scale = -1 + 2*(x[S:2*S] - v_min)/(v_max - v_min)
    P = np.dot(cheb.chebvander(scale, order - 1), c)
    return -prob_grid.dot(-x[0:S] + beta*P)

def cons23(y):
    # Promise keeping (equality): expected utility equals the promised value,
    # which Bellman_Iterations places in the global `x`.
    global x
    return prob_grid.dot(u(y_grid + y[0:S]) + beta*y[S:2*S]) - x

cons2 = (
    # local downward truth-telling constraints
    {'type': 'ineq',
     'fun': lambda x: u(y_grid[1:S] + x[1:S]) + beta*x[S+1:2*S]
                      - u(y_grid[1:S] + x[0:S-1]) - beta*x[S:2*S-1]},
    # local upward truth-telling constraints
    {'type': 'ineq',
     'fun': lambda x: u(y_grid[0:S-1] + x[0:S-1]) + beta*x[S:2*S-1]
                      - u(y_grid[0:S-1] + x[1:S]) - beta*x[S+1:2*S]},
    {'type': 'eq', 'fun': cons23},
)
bnds2 = np.concatenate([lb.reshape(2*S, 1), ub.reshape(2*S, 1)], axis=1)

x_store = {}
for node in range(order):
    x_store[node] = np.concatenate([np.zeros(S), np.ones(S)*s[node]], axis=0)
c = Bellman_Iterations(s, Phi, P_fun2, x_store, c, tolc, bnds=bnds2, cons=cons2)
# -

# +
# Time Series Simulation
T = 800
np.random.seed(2)
y_series = np.random.choice(y_grid, size=T + 1, p=prob_grid)
c_series = np.zeros(T)
w_series = np.zeros(T)

# Initialize v such that P(v) = 0 (moneylender just breaks even)
v_find = lambda v: cheb.chebvander(-1 + 2*(v - v_min)/(v_max - v_min), order - 1).dot(c)
x = fsolve(v_find, v_aut)
x_init = np.concatenate([np.zeros(S), np.ones(S)*x], axis=0)
res = minimize(P_fun2, x_init, method='SLSQP', bounds=bnds2, constraints=cons2, tol=1e-10)
c_series[0] = y_series[0] + res.x[np.where(y_grid == y_series[0])[0][0]]
w_series[0] = res.x[S + np.where(y_grid == y_series[0])[0][0]]
x_init = res.x

# Simulate, warm-starting each solve from the previous optimum
for t in range(1, T):
    x = w_series[t-1]
    res = minimize(P_fun2, x_init, method='SLSQP', bounds=bnds2, constraints=cons2, tol=1e-10)
    c_series[t] = y_series[t] + res.x[np.where(y_grid == y_series[t])[0][0]]
    w_series[t] = res.x[S + np.where(y_grid == y_series[t])[0][0]]
    x_init = res.x

# Plot
plt.plot(c_series, label='Environment (b)')
plt.plot(np.ones(T)*c_pool, label='Complete Markets')
plt.ylabel('Consumption')
plt.xlabel('Time')
plt.title('Environment (b)')
plt.legend(loc='best');
# -

# This simulation reported in the graph above confirms that in environment **b** the incentive compatibility constraints induce the planner to introduce a downward tilt into consumption paths.
#
# ## Environment c
#
# The third environment is the same as in (b), except for the additional assumption that households have access to a storage technology.
#
# A household can store nonnegative amounts that cannot be observed by the planner.
#
# The text of RMT4 chapter 21 shows that the solution to this problem is the same as in an economy in which each household can lend *or borrow* at the risk-free gross interest rate R, subject to the natural debt limit.
# # Thus, the planner enables the household to relax the no-borrowing constraint implied by the restriction that it can store only nonnegative amounts # # We can find the natural debt limit by iterating forward on the households budget constraint: # # \begin{equation} # c + k' = y + Rk # \end{equation} # This iteration gives: # \begin{equation} # k = \frac{1}{R} \sum_{j=0}^\infty \frac{c - y}{R^j} # \end{equation} # # Imposing non-negativity on consumption: # # \begin{equation} # k \geq - \frac{1}{R} \sum_{j=0}^\infty \frac{y}{R^j} # \end{equation} # # Finally, the natural debt limit is found by choosing the lowest possible value of the endowment, so that for any possible endowment stream the household can always pay back its debts: # # \begin{equation} # k \geq - \frac{1}{R} \sum_{j=0}^\infty \frac{\bar y_{min}}{R^j} = - \frac{\bar y_{min}}{R-1} \equiv \phi # \end{equation} # # A recursive presentation of the household's problem is then: # \begin{align} # &V(k,y) = \max_{c,k'} u(c) + \beta E [V(k',y')] \\ # &\text{s.t.} \\ # &c + k' = y + Rk \\ # & k' \geq \phi # \end{align} # # As income is iid, we can re-write the household's problem with only one state. # # Define a = k + y. # # Then # \begin{align} # &V(a) = \max_{c,k'} u(c) + \beta E [V(Rk' + y')] \\ # &\text{subject to} \\ # &c + k' = a \\ # & k' \geq \phi # \end{align} # # Below we solve this latter problem using Value Function Iteration, again with Chebyshev polynomials. 
# +
# Update parameter values
# Set bounds and approximation order
R = 1/beta
k_min = - y_grid[0]/(R - 1)          # natural debt limit phi = -ybar_min/(R-1)
k_max = 100
a_min = R*k_min + min(y_grid)        # a = Rk + y, so bounds follow from k and y
a_max = R*k_max + max(y_grid)
order = 150

# Calculate nodes and basis matrix
s, Phi = Cheb_basis(order, a_min, a_max)

# Create bounds on the storage choice k'
bnds3 = np.array([[k_min, k_max]])

# Value function
def P_fun3(kprime):
    """Negative of the household's value when saving kprime out of assets `x`
    (the current state, set globally by Bellman_Iterations)."""
    global x, c
    # Function to minimize: -( u(a - k') + beta * E[V(Rk' + y')] )
    scale = -1 + 2*(R*kprime + y_grid - a_min)/(a_max - a_min)
    val = -(u(x - kprime) + beta * prob_grid.dot(cheb.chebval(scale, c)))
    return val

# Initialize guess and VF coefficients
c = np.zeros(order)
x_store = {}
for i in range(order):
    x_store[i] = k_min
c = Bellman_Iterations(s, Phi, P_fun3, x_store, c, bnds=bnds3)
# -

# +
# Time Series Simulation
T = 800
np.random.seed(2)
y_series = np.random.choice(y_grid, size=T + 1, p=prob_grid)
a_series = np.zeros(T + 1)
c_series = np.zeros(T)

# Initialise at v_aut: find initial storage k0 whose expected value equals autarky
def k_find(k):
    scale = -1 + 2*(R*k + y_grid - a_min)/(a_max - a_min)
    return prob_grid.dot(cheb.chebval(scale, c)) - v_aut

k0 = fsolve(k_find, 0)
# fsolve and minimize return length-1 arrays; index [0] so we assign scalars.
# (Assigning a size-1 array to an array element was deprecated in NumPy 1.25
# and is an error in current NumPy.)
a_series[0] = k0[0] + y_series[0]

# Simulate
for t in range(T):
    x = a_series[t]
    res = minimize(P_fun3, k_min, method='SLSQP', bounds=bnds3, tol=1e-15)
    c_series[t] = a_series[t] - res.x[0]
    a_series[t+1] = R * res.x[0] + y_series[t+1]

# Plot
plt.plot(c_series, label='Environment (c)')
plt.plot(np.ones(T)*c_pool, label='Complete Markets')
plt.ylabel('Consumption')
plt.xlabel('Time')
plt.title('Environment (c)')
plt.legend(loc='best')
# -

# Notice that the introduction of a storage technology for the household means that the consumption path now has an upward trend.
#
# This occurs because our parameter values satisfy $\beta R = 1$.
insurance_incentives.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Topic Modeling
#
# For more details on how topic modeling works, [see here](https://topix.io/tutorial/tutorial.html)

# ### Execute this cell to install required python module
#
# After you've installed this once, you can delete this cell.

# !pip install pyldavis

# ### Import dependencies

# +
import pandas as pd
import numpy as np
#from sklearn.datasets import fetch_20newsgroups

# module to visualize topics
import pyLDAvis.sklearn
pyLDAvis.enable_notebook()

import warnings
warnings.filterwarnings('ignore')
# -

# ### Load 20newsgroups data

# +
#news = fetch_20newsgroups(remove=('headers', 'footers', 'quotes'))
#df = pd.DataFrame({"body": news.data})
#df = pd.read_csv('all_listings.csv')
sydney_listings = pd.read_csv('Reviews_ASCII.csv')
df = sydney_listings
df.head(10)
# -

df.shape

# ### Preprocess text

# +
#from utils import clean_text
#df['body'] = df['body'].apply(lambda x: clean_text(x))
# -

# ### Generate feature vectors

# +
from sklearn.feature_extraction.text import CountVectorizer

# LDA can only use raw term counts for LDA because it is a probabilistic graphical model
tf_vectorizer = CountVectorizer(stop_words='english')
tf = tf_vectorizer.fit_transform(df['comments'].values.astype('U'))
# get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() (available since 1.0) is the replacement.
tf_feature_names = tf_vectorizer.get_feature_names_out()
# -

# ### Fit feature vectors to the LDA topic model

# +
from sklearn.decomposition import LatentDirichletAllocation

no_topics = 20
lda4 = LatentDirichletAllocation(n_components=no_topics, random_state=4,
                                 evaluate_every=1).fit(tf)
# -

# ### Display top words for each topic

# +
def display_topics(model, feature_names, no_top_words):
    """Print the `no_top_words` highest-weight terms for every topic of a fitted LDA model."""
    for topic_idx, topic in enumerate(model.components_):
        print(f"Topic: {topic_idx}")
        # argsort is ascending, so take the last no_top_words entries in reverse
        print(" ".join([feature_names[i] for i in topic.argsort()[:-no_top_words - 1:-1]]))

no_top_words = 5
display_topics(lda4, tf_feature_names, no_top_words)
# -

# ### Visualizing our topics in 2-dimensional space
#
# How to interpret this visualization:
# 1. Each bubble represents a topic
# 2. Larger topics are more frequent in the corpus
# 3. Topics closer together are more similar
# 4. When you click on a topic, the most relevant terms for that topic show in red on the right, and in blue is the frequency of that term in all other topics
# 5. When you hover over a word in the chart on the right, the bubbles will adjust according to how relevant that term is to each topic

# pyLDAvis.sklearn.prepare(lda4, tf, tf_vectorizer)

# ### Create Document - Topic Matrix

# +
lda_output = lda4.transform(tf)

# column names
topicnames = ["Topic" + str(i) for i in range(no_topics)]

# index names
docnames = ["Doc" + str(i) for i in range(len(df))]

# Make the pandas dataframe
df_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames, index=docnames)

# Get dominant topic for each document
dominant_topic = np.argmax(df_document_topic.values, axis=1)
df_document_topic['dominant_topic'] = dominant_topic

# Styling
def color_green(val):
    color = 'green' if val > .1 else 'black'
    return 'color: {col}'.format(col=color)

def make_bold(val):
    weight = 700 if val > .1 else 400
    return 'font-weight: {weight}'.format(weight=weight)

# Apply Style
df_document_topics = df_document_topic.head(15).style.applymap(color_green).applymap(make_bold)
df_document_topics
# -
Primative Text Analysis/topic modeling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # pyjanitor Usage Walkthrough # `pyjanitor` is a Python-based API on top of `pandas` inspired by the [janitor](https://github.com/sfirke/janitor) R package. It aims to provide a clean, understandable interface based on method chaining for common and less-common tasks involving data cleaning and `DataFrame` manipulation. # # The core philosophy and augmentations on `pandas`' approach to data cleaning and `DataFrame` manipulation include: # # - A method-chaining paradigm for coding efficiency & clarity of code through greatly improved readability. # - Implementation of common, useful `DataFrame` manipulation tasks that saves on repetitive code. # - Focus on active tense / verb approaches to function naming to provide at-a-glance understanding of a data manipulation pipeline. # # ## Why `pyjanitor`? # # Originally a simple port of the R package, `pyjanitor` has evolved from a set of convenient data cleaning routines into an experiment with the method-chaining paradigm. # # Data preprocessing is best expressed as a [directed acyclic graph (DAG)](https://en.wikipedia.org/wiki/Directed_acyclic_graph) of actions taken on data. We take a base data file as the starting point and perform actions on it such as removing null/empty rows, replacing them with other values, adding/renaming/removing columns of data, filtering rows, and more. # # The `pandas` API has been invaluable for the Python data science ecosystem and implements method chaining for a subset of methods as part of the API. For example, resetting indexes (`.reset_index()`), dropping null values (`.dropna()`), and more are accomplished via the appropriate `pd.DataFrame` method calls. 
# # Inspired by the R statistical language ecosystem, where consistent and good API design in the `dplyr` package enables end-users, who are not necessarily developers, to concisely express data processing code, `pyjanitor` has evolved into a language for expressing the data processing DAG for `pandas` users. # # ## What is method chaining? # # To accomplish these goals, actions for which we would need to invoke imperative-style statements can be replaced with method chains that allow the user to read off the logical order of actions taken. Note the annotated example below. First, we introduce the textual description of a sample data cleaning pipeline: # # - Create `DataFrame`. # - Delete one column. # - Drop rows with empty values in two particular columns. # - Rename another two columns. # - Add a new column. # - Reset index to account for the missing row we removed above # # In `pandas` code, this would look as such: # # ```python # df = pd.DataFrame(...) # create a pandas DataFrame somehow. # del df['column1'] # delete a column from the dataframe. # df = df.dropna(subset=['column2', 'column3']) # drop rows that have empty values in column 2 and 3. # df = df.rename({'column2': 'unicorns', 'column3': 'dragons'}) # rename column2 and column3 # df['new_column'] = ['iterable', 'of', 'items'] # add a new column. # df.reset_index(inplace=True, drop=True) # reset index to account for the missing row we removed above # ``` # # ## The `pyjanitor` approach # # With `pyjanitor`, we enable method chaining with method names that are _verbs_ which describe the action taken: # # ```python # df = ( # pd.DataFrame(...) 
# .remove_columns(['column1']) # .dropna(subset=['column2', 'column3']) # .rename_column('column2', 'unicorns') # .rename_column('column3', 'dragons') # .add_column('new_column', ['iterable', 'of', 'items']) # .reset_index_inplace(drop=True) # ) # ``` # # We believe the `pyjanitor` chaining-based approach leads to much cleaner code where the intent of a series of `DataFrame` manipulations is much more immediately clear. # # `pyjanitor`’s etymology has a two-fold relationship to “cleanliness”. Firstly, it’s about extending `pandas` with convenient data cleaning routines. Secondly, it’s about providing a cleaner, method-chaining, verb-based API for common `pandas` routines. # # ## A survey of `pyjanitor` functions # # - Cleaning column names (multi-indexes are possible!) # - Removing empty rows and columns # - Identifying duplicate entries # - Encoding columns as categorical # - Splitting your data into features and targets (for machine learning) # - Adding, removing, and renaming columns # - Coalesce multiple columns into a single column # - Convert excel date (serial format) into a Python datetime format # - Expand a single column that has delimited, categorical values into dummy-encoded variables # # A full list of functionality that `pyjanitor` implements can be found in the [API docs](https://pyjanitor.readthedocs.io/). # # ## Some things that are different # # Some `pyjanitor` methods are `DataFrame`-mutating operations, i.e., in place. Given that in a method-chaining paradigm, `DataFrame`s that would be created at each step of the chain cannot be accessed anyway, duplication of data at each step would lead to unnecessary, potential considerable slowdowns and increased memory usage due to data-copying operations. The severity of such copying scales with `DataFrame` size. Take care to understand which functions change the original `DataFrame` you are chaining on if it is necessary to preserve that data. 
If it is, you can simply `.copy()` it as the first step in a `df.copy().operation1().operation2()...` chain. # # ## How it works # # `pyjanitor` relies on the [Pandas Flavor](https://github.com/Zsailer/pandas_flavor) package to register new functions as object methods that can be called directly on `DataFrame`s. For example: # # ```python # import pandas as pd # import pandas_flavor as pf # # @pf.register_dataframe_method # def remove_column(df, column_name: str): # del df[column_name] # return df # # df = ( # pd.read_csv('my_data.csv') # .remove_column('my_column_name') # .operation2(...) # ) # ``` # # Importing the `janitor` package immediately registers these functions. The fact that each `DataFrame` method `pyjanitor` registers returns the `DataFrame` is what gives it the capability to method chain. # # Note that `pyjanitor` explicitly does not modify any existing `pandas` methods / functionality. # ## Demo of various `DataFrame` manipulation tasks using `pyjanitor` # Here, we'll walk through some useful `pyjanitor`-based approaches to cleaning and manipulating `DataFrame`s. # ### Code preamble: import pandas as pd import janitor import pandas_flavor as pf import numpy as np # ### Let's take a look at our dataset: df = pd.read_excel('dirty_data.xlsx') df # We can see that this dataset is dirty in a number of ways, including the following: # # * Column names contain spaces, punctuation marks, and inconsistent casing # * One row (`7`) with completely missing data # * One column (`do not edit! 
--->`) with completely missing data # ### Clean up our data using a `pyjanitor` method-chaining pipeline # # Let's run through a demo `DataFrame` cleaning procedure: # + cleaned_df = ( pd.read_excel('dirty_data.xlsx') .clean_names() .remove_empty() .rename_column("%_allocated", "percent_allocated") .rename_column("full_time_", "full_time") .coalesce(["certification", "certification_1"], "certification") .encode_categorical(["subject", "employee_status", "full_time"]) .convert_excel_date("hire_date") .reset_index_inplace(drop=True) ) cleaned_df # - # The cleaned `DataFrame` looks much better and quite a bit more usable for our downstream tasks. # ### Step-by-step walkthrough of `pyjanitor` `DataFrame` manipulations # # Just for clearer understanding of the above, let's see how `pyjanitor` progressively modified the data. # # Loading data in: df = pd.read_excel('dirty_data.xlsx') df # Clean up names by removing whitespace, punctuation / symbols, capitalization: df = df.clean_names() df # Remove entirely empty rows / columns: df.remove_empty() df # Rename particular columns: df = ( df.rename_column("%_allocated", "percent_allocated") .rename_column("full_time_", "full_time") ) df # Take first non-`NaN` row value in two columns: df = df.coalesce(["certification", "certification_1"], "certification") df # Convert string object rows to categorical to save on memory consumption and speed up access: df.dtypes df.encode_categorical(["subject", "employee_status", "full_time"]) df.dtypes # Convert Excel date-formatted column to a more interpretable format: df.convert_excel_date("hire_date") df # ## Example analysis of the data # Let's perform analysis on the above, cleaned `DataFrame`. First we add some additional, randomly-generated data. 
Note that we `.copy()` the original to preserve it, given that the following would otherwise modify it: # + data_df = ( cleaned_df .copy() .add_columns( lucky_number=np.random.randint(0, 10, len(cleaned_df)), age=np.random.randint(10, 100, len(cleaned_df)), employee_of_month_count=np.random.randint(0, 5, len(cleaned_df)) ) ) data_df # - # Calculate mean, median of all numerical columns after grouping by employee status. Use `.collapse_levels()`, a `pyjanitor` convenience function, to convert the `DataFrame` returned by `.agg()` from having multi-level columns (because we supplied a list of aggregation operations) to single-level by concatenating the level names with an underscore: # + stats_df = ( data_df.groupby('employee_status') .agg(['mean', 'median']) .collapse_levels() .reset_index_inplace() ) stats_df # - # `.reset_index_inplace()` is a `pyjanitor` stand-in for `df.reset_index(inplace=True)`, because the latter does not return the `DataFrame` when the in place mode is used and is therefore not chainable.
examples/notebooks/pyjanitor_intro.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TimTree/DS-Unit-2-Kaggle-Challenge/blob/master/module2/assignment_kaggle_challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Kaggle Challenge, Module 2
#
# ## Assignment
# - [ ] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.
# - [ ] Continue to participate in our Kaggle challenge.
# - [ ] Try Ordinal Encoding.
# - [ ] Try a Random Forest Classifier.
# - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
# ## Stretch Goals
#
# ### Doing
# - [ ] Add your own stretch goal(s) !
# - [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.
# - [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/).
# - [ ] Get and plot your feature importances.
# - [ ] Make visualizations and share on Slack.
#
# ### Reading
#
# Top recommendations in _**bold italic:**_
#
# #### Decision Trees
# - A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_
# - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)
# - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)
# - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)
# - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU)
#
# #### Random Forests
# - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods
# - [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)
# - _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_
#
# #### Categorical encoding for trees
# - [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)
# - [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)
# - _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_
# - _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_
# - [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)
# - [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html)
#
# #### Imposter Syndrome
# - [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)
# - [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)
# - ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html)
# - _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_

# + [markdown] id="C3ZQUdup8hhm" colab_type="text"
# ### Setup
#
# You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).

# + colab_type="code" id="o9eSnDYhUGD7" outputId="28f1379b-4a4e-4462-9867-1c58fb2f06f8" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Detect whether this notebook is running on Google Colab; if so, pull the
# course repo and install its requirements before changing into module2.
import os, sys
in_colab = 'google.colab' in sys.modules

# If you're in Colab...
if in_colab:
    # Pull files from Github repo
    os.chdir('/content')
    # !git init .
    # !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
    # !git pull origin master

    # Install required python packages
    # !pip install -r requirements.txt

    # Change into directory for module
    os.chdir('module2')

# + id="o3seS6dUJIum" colab_type="code" colab={}
# NOTE(review): this undoes the os.chdir('module2') above, so the relative
# data paths below are resolved from /content — confirm this is intentional.
os.chdir('/content')

# + colab_type="code" id="QJBD4ruICm1m" outputId="f5234983-4ad7-4190-88a1-7149355e0551" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Load the Tanzania Waterpumps data; merge features with labels for training.
import pandas as pd
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv('data/waterpumps/train_features.csv'), pd.read_csv('data/waterpumps/train_labels.csv'))
test = pd.read_csv('data/waterpumps/test_features.csv')
sample_submission = pd.read_csv('data/waterpumps/sample_submission.csv')
train.shape, test.shape

# + id="1nyHLekPIESk" colab_type="code" colab={}
# Hold out a validation split (sklearn default proportions) with a fixed seed.
train, validate = train_test_split(train, random_state=84)

# + id="SJV8LRhkJ9TM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="12a3492c-0607-4df6-c5e3-68b4a7822dc7"
train.shape, validate.shape

# + id="9WoTgusGJ_XU" colab_type="code" colab={}
import numpy as np

def wrangle(X):
    # Wrangle the train, validate, and test datasets in one function

    # Prevent SettingWithCopyWarning
    X = X.copy()

    # About 3% of the time, latitude has small values near zero,
    # outside Tanzania, so we'll treat these values like zero.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)

    # There are some values labeled as 0 that should be null values
    # (ex: Construction year can't realistically be 0). Let's convert
    # such values into nulls.
    cols_with_zeros = ['longitude', 'latitude', 'construction_year','population','amount_tsh','gps_height']
    for col in cols_with_zeros:
        X[col] = X[col].replace(0, np.nan)

    # drop duplicate columns
    X = X.drop(columns=['quantity_group', 'payment_type'])

    # Generate column that gives time difference from construction to inspection
    X['construction_to_inspection'] = pd.to_datetime(X['date_recorded']).dt.year - X['construction_year']

    # return the wrangled dataframe
    return X

# + id="JDVI2aqVKFry" colab_type="code" colab={}
train = wrangle(train)
validate = wrangle(validate)
test = wrangle(test)

# + id="6MDjz4mmKHod" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="6c6a22e0-a1ae-4bbc-dd42-8f917f3c87ab"
train.head()

# + id="MCkSsUJbKIWx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="0d3d5123-bd93-46fc-aa43-292c68611ac1"
# The target is the status group
target = 'status_group'

# There's a lot of features here, numeric and categorical.
# Begin by getting all the features that are not the ID or status_group
train_features = train.drop(columns=[target,'id'])

# Let's take in all the numeric features, which is all of them minus
# the target (and id in this case).
numerics = train_features.select_dtypes(include='number').columns.tolist()

# And here's all the categorical features
categoricals = train_features.select_dtypes(exclude='number').columns.tolist()

# Some categorical variables may have a ton of unique values. Not only will
# so many unique values make our model difficult to generalize, we'll overflow
# our computer's RAM if we did a one-hot encode of them (will explain one-hot
# encode shortly)
# So let's only accept low cardinality categoricals to analyze (that is, in
# this case, the categorical variables with 21 or less unique values.)
low_cardinality_categoricals = [col for col in categoricals if train_features[col].nunique() <= 21]

# Now here are our features.
features = numerics + low_cardinality_categoricals
print(features)

# + id="NQH-GOafiCIq" colab_type="code" colab={}
# Override features in attempt to improve validation score
# NOTE(review): this keeps ALL categoricals (not only the low-cardinality
# ones); the ordinal encoding used below makes that feasible memory-wise.
features = train_features.select_dtypes(include='number').columns.tolist() + train_features.select_dtypes(exclude='number').columns.tolist()

# + id="cqJFTDkkRPeY" colab_type="code" colab={}
X_train = train[features]
y_train = train[target]
X_validate = validate[features]
y_validate = validate[target]
X_test = test[features]

# + id="VSTtaUIcQ9NZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="6f54e7ce-253e-4ce7-9899-93db0296fd38"
# %%time
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_regression, SelectKBest, f_classif
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

# Ordinal-encode the categoricals, mean-impute missing values, then fit a
# 100-tree random forest (fixed seed, all cores).
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=100,random_state=235,n_jobs=-1)
)

# Fit on train
pipeline.fit(X_train, y_train)

# Score on validate
print('Validation Accuracy', pipeline.score(X_validate, y_validate))

# Predict on test
y_pred = pipeline.predict(X_test)

# + id="PTEQFFyqRN_4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 694} outputId="5a28c80a-41c3-4189-cd18-f4ea81371052"
# Inspect per-feature importances from the fitted forest.
encoder = pipeline.named_steps['ordinalencoder']
encoded = encoder.transform(X_train)
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, encoded.columns)
importances

# + id="EuuzbirXhdg3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="3e1d0564-db98-4651-a6f2-aef1644ac60d"
# Build the Kaggle submission file: id plus predicted status_group.
submission = test[['id']].copy()
submission['status_group'] = y_pred
submission.head()

# + id="Gas9WDJqi3zZ" colab_type="code" colab={}
submission.to_csv('kaggleChallenge.csv', index=False)

# + id="cTfjBacdi5oC" colab_type="code" colab={}
module2/assignment_kaggle_challenge_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Separating data into training, validation and test datasets
# and assigning labels to the data

import os
import shutil
import glob
import numpy as np
from PIL import Image


# Copy data from the existing folder into train, valid and test folders
def copy_data(dir_s, fname):
    """Split one subject folder of the ORL dataset into train/valid/test.

    Files are expected to be numbered ``1.pgm`` .. ``N.pgm``; images 1-6 go
    to ``train``, 7-8 to ``valid`` and the rest to ``test``.

    Parameters
    ----------
    dir_s : str
        Root directory of the dataset.
    fname : str
        Name of the subject sub-folder (e.g. ``s1``).
    """
    src_dir = os.path.join(dir_s, fname)
    # One destination folder per split; exist_ok avoids the explicit
    # os.path.exists checks (and the duplicated makedirs code).
    splits = {
        'train': os.path.join(dir_s, 'train', fname),
        'valid': os.path.join(dir_s, 'valid', fname),
        'test': os.path.join(dir_s, 'test', fname),
    }
    for split_dir in splits.values():
        os.makedirs(split_dir, exist_ok=True)

    # Specific for the ORL dataset: iterate the numbered files directly
    # instead of counting listdir entries with a manual index (the original
    # also did a stray os.chdir into the folder that nothing used).
    for i in range(1, len(os.listdir(src_dir)) + 1):
        src = os.path.join(src_dir, str(i) + '.pgm')
        if i <= 6:
            shutil.copy(src, splits['train'])
        elif i <= 8:
            shutil.copy(src, splits['valid'])
        else:
            shutil.copy(src, splits['test'])


# Assign Labels
def assign_addr_label(path):
    """Return ``(file_path, label)`` pairs for every file under *path*.

    Each immediate sub-directory name of *path* is used as the label for
    all files that sub-directory contains.
    """
    pairs = []
    for label in os.listdir(path):
        # glob with an absolute pattern; no need to chdir into the folder.
        for addr in glob.glob(os.path.join(path, label, '*')):
            pairs.append((addr, label))
    return pairs


def _load_images(addrs, split_name):
    """Load every image in *addrs* as a numpy array, printing progress
    every 20 images (matching the original notebook's output)."""
    storage = []
    for i, addr in enumerate(addrs):
        if i % 20 == 0 and i > 1:
            print('{} data: {}/{}'.format(split_name, i, len(addrs)))
        storage.append(np.array(Image.open(addr)))
    return storage


# Load images and save them
def create_data_array(data):
    """Load the images for the train/valid/test address-label pairs.

    Parameters
    ----------
    data : list
        ``[train_pairs, valid_pairs, test_pairs]``, each a list of
        ``(file_path, label)`` tuples.

    Returns
    -------
    list
        ``[train_data, valid_data, test_data, mean_storage]`` where each
        ``*_data`` is a list of ``(image_array, label)`` tuples and
        ``mean_storage`` holds, for each training image, its pixel sum
        divided by the number of training images (same quantity the
        original computed inline).
    """
    train_addrs, train_labels = zip(*data[0])
    valid_addrs, valid_labels = zip(*data[1])
    test_addrs, test_labels = zip(*data[2])

    # One shared loader replaces the three copy-pasted loops.
    train_storage = _load_images(train_addrs, 'Train')
    valid_storage = _load_images(valid_addrs, 'Valid')
    test_storage = _load_images(test_addrs, 'Test')

    # Per-image contribution to the mean pixel sum of the training set.
    n_train = float(len(train_labels))
    mean_storage = [np.sum(img) / n_train for img in train_storage]

    train_data = list(zip(train_storage, train_labels))
    valid_data = list(zip(valid_storage, valid_labels))
    test_data = list(zip(test_storage, test_labels))
    return [train_data, valid_data, test_data, mean_storage]


# Create train, valid and test data from the dataset
def load_data_label(dir_src, create=0):
    """Prepare the ORL dataset: optionally split it, then label and load it.

    Parameters
    ----------
    dir_src : str
        Dataset root, e.g. ``/home/dvveera/db/orl``.
    create : int, optional
        When truthy, first copy each subject folder (``s*``) into
        train/valid/test sub-folders.

    Returns
    -------
    list
        ``[train_data, valid_data, test_data, mean_storage]`` as produced
        by :func:`create_data_array`.
    """
    if create:
        for filename in os.listdir(dir_src):
            # Specific for orl dataset: subject folders are named s1..s40
            if filename.startswith('s'):
                copy_data(dir_src, filename)
        print('Separated Training, Validation and Test Data')

    # Assign labels to the data
    train_addr_label = assign_addr_label(os.path.join(dir_src, 'train'))
    valid_addr_label = assign_addr_label(os.path.join(dir_src, 'valid'))
    test_addr_label = assign_addr_label(os.path.join(dir_src, 'test'))
    print('Assigned Labels to Training, Validation and Test Data')

    # Create train, valid and test data arrays
    data_label = create_data_array([train_addr_label, valid_addr_label, test_addr_label])
    return data_label
Veera/load_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: venv-authorrank
#     language: python
#     name: venv-authorrank
# ---

# # CORD-19 AuthorRank
#
# An example of how AuthorRank can be applied to datasets that contain authorship information about documents, like the [CORD-19](https://www.semanticscholar.org/cord19) dataset.

# ## Imports

import author_rank as ar
import json
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import random

# ## Read Data

# Load the CORD-19 metadata dump (one row per paper).
cord_df = pd.read_csv("../data/CORD-19/2020-07-16/metadata.csv", low_memory=False)

# Shuffle (frac=1.) purely so that .head(10) shows a random sample of rows.
cord_df.sample(frac=1.).head(10)

cord_df.shape

# ### Subsetting the Data
#
# Facilitate a "search" by restricting to documents that feature the word "bronchiolitis".

cord_df_search = cord_df[cord_df["title"].astype(str).str.contains("bronchiolitis")]

cord_df_search.shape

# The "authors" column is a single semicolon-delimited string; split it into
# a list of author-name strings per document.
authors_by_document = cord_df_search["authors"].astype(str).apply(
    lambda row: [r.strip() for r in row.split(";")]
)

# Re-shape into the document structure AuthorRank expects:
# [{"authors": [{"name": ...}, ...]}, ...]
documents = list()
for doc in authors_by_document:
    doc_dict = {
        "authors": list()
    }
    for auth in doc:
        doc_dict["authors"].append(
            {"name": auth}  # cord 19 has full name as represented on document
        )
    documents.append(doc_dict)

# ## Fit AuthorRank

# create an AuthorRank object
ar_graph = ar.Graph()

documents[0:10]

# fit to the data
# NOTE(review): random.sample is unseeded, so the selected documents (and
# all results below) differ between runs — confirm this is acceptable.
ar_graph.fit(
    documents=random.sample(documents, 25),  # limit to a small number of documents
    progress_bar=True,  # use a progress bar to indicate how far along processing is
    authorship_key="authors",
    keys=set(["name"]),
)

# ### Show the Scores

# get the top authors for a set of documents
top = ar_graph.top_authors(
    normalize_scores=True,
    n=10
)

# print the results
for i, j in zip(top[0], top[1]):
    print(i, j)

# ## Visualize

G = ar_graph.graph

# +
plt.figure(figsize=(20,10))
plt.axis('off')
pos = nx.shell_layout(G)

# Edge widths follow the co-authorship edge weights.
edgewidth = [d['weight'] for (u,v,d) in G.edges(data=True)]
# NOTE(review): the next line is a no-op copy of edgewidth.
edgewidth = [d for d in edgewidth]

# Collect scores for graph nodes that appear in the top-authors list.
# NOTE(review): author_scores is built but never used below — confirm
# whether it was meant to drive node sizes/colors.
author_scores = list()
for i in G.nodes:
    try:
        index = top[0].index(i)
        author_scores.append(top[1][index])
    except ValueError:
        # node not among the top authors; skip it (best-effort lookup)
        pass

nx.draw_networkx_nodes(G, pos, node_size=2)
nx.draw_networkx_edges(G, pos, width=edgewidth, edge_color="grey")
nx.draw_networkx_labels(G, pos=pos)
plt.show()
# -

# Export the graph and the scores as JSON for the visualization app.
G_json = ar_graph.as_json()

with open("../visualization/data/cord_graph.json", 'w') as f_out:
    json.dump(G_json, f_out)

# Keys are the author-name tuples joined into a single display string.
scores_json = dict()
for t in zip(top[0], top[1]):
    scores_json[" ".join(t[0])] = t[1]
scores_json

with open("../visualization/data/cord_scores.json", 'w') as f_out:
    json.dump(scores_json, f_out)
notebooks/CORD-19.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Query a pandas DataFrame
#
# Returning a portion of the data in a DataFrame is called slicing or dicing the data
#
# There are many different ways to query a pandas DataFrame, here are a few to get you started

import pandas as pd

# Demo data: one row per airport, with Name / City / Country columns.
# (Fixed the 'Seatte-Tacoma' typo in the original demo data.)
airports = pd.DataFrame(
    [
        ['Seattle-Tacoma', 'Seattle', 'USA'],
        ['Dulles', 'Washington', 'USA'],
        ['London Heathrow', 'London', 'United Kingdom'],
        ['Schiphol', 'Amsterdam', 'Netherlands'],
        ['Changi', 'Singapore', 'Singapore'],
        ['Pearson', 'Toronto', 'Canada'],
        ['Narita', 'Tokyo', 'Japan'],
    ],
    columns=['Name', 'City', 'Country'],
)

airports

# ## Return one column
# Specify the name of the column you want to return
# * *DataFrameName*['*columnName*']

airports['City']

# ## Return multiple columns
# Provide a list of the columns you want to return
# * *DataFrameName*[['*FirstColumnName*','*SecondColumnName*',...]]

airports[['Name', 'Country']]

# ## Using *iloc* to specify rows and columns to return
# **iloc**[*rows*,*columns*] allows you to access a group of rows or columns by row and column index positions.
# You specify the specific row and column you want returned
# * First row is row 0
# * First column is column 0

# Return the value in the first row, first column
airports.iloc[0, 0]

# Return the value in the third row, third column
airports.iloc[2, 2]

# A value of *:* returns all rows or all columns

airports.iloc[:, :]

# You can request a range of rows or a range of columns
# * [x:y] will return rows or columns x through y
#   (as with Python slices, the end position y is exclusive)

# Return the first two rows and display all columns
airports.iloc[0:2, :]

# Return all rows and display the first two columns
airports.iloc[:, 0:2]

# You can request a list of rows or a list of columns
# * [x,y,z] will return rows or columns x,y, and z

airports.iloc[:, [0, 2]]

# ## Using *loc* to specify columns by name
# If you want to list the column names instead of the column positions use **loc** instead of **iloc**

airports.loc[:, ['Name', 'Country']]
even-more-python-for-beginners-data-tools/05 - Query a pandas Dataframe/05 - Querying DataFrames.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %%writefile README.md

# CarND-Path-Planning-Project
---

## Introduction
The goal of the project is to have an autonomous vehicle safely navigate around a virtual highway with other traffic that is driving within +-10 MPH of the 50 MPH speed limit.

### Objectives
* The vehicle must not exceed the speed limit, and should not drive much slower than the speed limit unless there is traffic.
* The vehicle does not exceed a total acceleration of 10 m/s^2 and a jerk of 10 m/s^3.
* The vehicle must drive safely, without any collisions with other vehicles.
* The vehicle must stay in its lane, except when changing lanes.
* Lane changes must be smooth and must not take more than 3 seconds.

---
## Implementation
This section describes the implementation of driving safely on a highway. I used the car's localization and sensor fusion data to determine where the other vehicles on the road are with respect to the vehicle. In addition, previous path information is used in order to ensure a smooth transition from cycle to cycle.

### Sensor Fusion
Localizing the other vehicles determines the state of the vehicle — either it should stay in its lane, or change lanes — in lines 274 to 348 of the `main.cpp` file. I used the sensor fusion data provided by the simulation, `sensor_fusion`, which is a vector of vectors that contains `[ id, x, y, vx, vy, s, d]`. The x, y values are in global map coordinates, and the vx, vy values are the velocity components in the global map; they are saved in `vx,vy`, and the magnitude of the velocity is saved in `check_speed`. s and d are the Frenet coordinates for that car (`d`, `check_car_s`). Firstly, lines 290 to 295 check whether there is a car ahead at a distance smaller than 30 m, and if there is a car ahead, then `too_close = true`.
```cpp
check_car_s+=((double)previous_size*.02*check_speed);
if ((check_car_s>car_s)&&(check_car_s-car_s<30)){
    cout<<"too close"<<i<<endl;
    too_close=true;
}
```

Lines 299 to 345 determine whether there are cars in lanes 0, 1 and 2; the result is saved in `car_lane0, car_lane1, car_lane2`. They check for cars ahead or behind within 30 m, so that while changing lanes our vehicle can ensure that there are no close cars ahead or behind.

Lines 346 to 392 determine the next state of the car: stay in lane, change lane left, or change lane right. It starts by decreasing the speed by 0.5 m/s in each cycle, and if the target lane is free, the car's lane is set to a new target lane according to the logic in the code.

### Trajectory Generation
Lines 400 to 492 in `main.cpp` compute the trajectory based on the target lane determined from sensor fusion and the historical path. The Frenet coordinates `(s, d)` are used for path planning, based on `ref_vel` (at most 49.5 mph) and the lane assigned to `lane`. At 30-meter intervals, three waypoints are chosen as anchor points, and a smooth path is interpolated between them using spline interpolation. A constant acceleration is added to the reference velocity to ensure the acceleration stays under 10 m/s^2. The three anchor points are converted to the local coordinate space (via shift and rotation), and interpolated points are evenly spaced out such that each point is traversed in 0.02 seconds (the time interval).

---
## Result
The vehicle navigates safely around the virtual highway without any collisions and within the speed limit.
Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Problem 3.3 Learning From Data

from matplotlib import pyplot as plt
import numpy as np
import random
from random import seed

# # Visualizing the dataset

# +
# Double semi-circle dataset parameters:
# thk = ring thickness, sep = vertical separation, rad = inner radius.
thk = 5
sep = -5
rad = 10

# Top (red) half-ring centred at the origin: keep grid points whose distance
# from the centre lies between rad and rad+thk.
xs_red = []
ys_red = []
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
for x_coord in np.arange(-(rad+thk),rad+thk,0.15):
    for y_coord in np.arange(0 ,rad+thk,0.15):
        if rad**2 <= (x_coord - 0)**2 + (y_coord - 0)**2 <= (rad+thk)**2:
            xs_red.append(x_coord)
            ys_red.append(y_coord)

# Bottom (blue) half-ring, shifted right by thk/2 + rad and down by sep.
# NOTE(review): 'rad++sep' parses as rad + (+sep), i.e. rad + sep; the
# doubled '+' looks like a typo but is harmless.
xs_blue = []
ys_blue = []
for x_coord in np.arange(-(thk/2),(thk/2 + (2*rad) + thk),0.15):
    for y_coord in np.arange(-sep ,-(rad++sep+thk),-0.15):
        if rad**2 <= (x_coord - ((thk/2) + rad))**2 + (y_coord - (-sep))**2 <= (rad+thk)**2:
            xs_blue.append(x_coord)
            ys_blue.append(y_coord)

plt.scatter(xs_red, ys_red,color = 'red')
plt.scatter(xs_blue, ys_blue,color = 'blue')
plt.show()
# -

# # PLA Algorithm

# + pycharm={"name": "#%%\n"}
"""
A function for prediction of Y
"""
def Y_predict(x_vector,w):
    # Prepend the bias term x0 = 1, then return sign(w . x) in {-1, 0, +1}.
    x_new = [1]
    for i in x_vector:
        x_new.append(i)
    x_new = np.array((x_new))
    res = (np.dot(x_new,w))
    if res > 0:
        Y = 1
        return Y
    elif res < 0:
        Y = -1
        return Y
    elif res ==0:
        Y = 0
        return Y


"""
The main training function for the data, with the
Attributes
----------
X - The data set
iterations - the number of times the weights are iterated
eta - the learning rate
"""
def train(X,iterations,eta):
    # Mutates the module-level weight vector `w` and step counter `count`,
    # and reads the labels from the module-level `all_combined_targets`.
    global count
    global w
    global all_combined_targets
    for y_idx in range (len(X)):
        # Classify a randomly chosen point with the current weights.
        ran_num = random.randint(0,len(X)-1)
        x_train = X[ran_num]
        y_t = Y_predict(x_train,w)
        # Collect the indices of labels that disagree with that prediction.
        # NOTE(review): this compares every label against the single
        # prediction y_t, not against each point's own prediction —
        # confirm this is the intended "misclassified" selection.
        misrepresented_list = []
        for i,j in enumerate(all_combined_targets):
            if j!=y_t:
                misrepresented_list.append(i)
        if len(misrepresented_list)==0:
            print('Full accuracy achieved')
            break
        # Pick one disagreeing point at random and update the weights.
        random_selection = random.randint(0,len(misrepresented_list)-1)
        random_index = misrepresented_list[random_selection]
        x_selected = X[random_index]
        y_selected = all_combined_targets[random_index]
        x_with1 = [1]
        for i in x_selected:
            x_with1.append(i)
        x_with1 = np.array((x_with1))
        s_t = np.matmul(w,x_with1)
        # NOTE(review): this is an Adaline-style update gated on margin <= 1
        # rather than the classic PLA update w += eta*y*x — confirm intent.
        if (y_selected*s_t)<=1:
            w = w+(eta*(y_selected-s_t)*x_with1)
        if (count==iterations):
            print('maximum iterations reached in the training block')
            break
        count+=1

# + pycharm={"name": "#%%\n"}
# Assemble the combined dataset: red points labelled -1, blue labelled +1.
xs_red = np.array(xs_red)
ys_red = np.array(ys_red)
xs_blue = np.array(xs_blue)
ys_blue = np.array(ys_blue)

points_1 = []
res1 = []
for i in range(len(xs_red)):
    points_1.append([xs_red[i],ys_red[i]])
    res1.append(-1)
points_1 = np.array(points_1)

points_2 = []
res2 = []
for i in range(len(xs_blue)):
    points_2.append([xs_blue[i],ys_blue[i]])
    res2.append(1)
points_2 = np.array(points_2)

all_input = np.concatenate((points_1, points_2)) #creating a combined dataset
# NOTE(review): all_d appears unused below; all_combined_targets (defined
# next) is what train() actually reads.
all_d = np.concatenate((res2,res1))
# -

#Visualizing the linearly separable dataset
plt.scatter(xs_red, ys_red, color='red')
plt.scatter(xs_blue,ys_blue, color='blue')

# NOTE(review): the label vector built here has length len(xs_red) (half
# +1, half -1), while all_input has len(xs_red)+len(xs_blue) rows — the
# sizes only match if the two rings happen to contain the same number of
# points; confirm this is intended.
length_dataset = len(xs_red)
d1 = -1 * (np.ones(int(length_dataset/2)))
d2 = np.ones(int(length_dataset/2))
all_combined_targets = np.concatenate((d2,d1))

# # WARNING! Running the below snippet of code, will not reach an end. This is how the PLA would react in this case, hence the next two blocks haven't been executed.

# # Answer (a) This showcases that the PLA will keep running for infinite time, without reaching an end.

# + pycharm={"name": "#%%\n"}
#initializing all parameters
count = 0
# w0 = random.randint(1,4)
# w1 = random.randint(1,4)
# w2 = random.randint(1,4)
w0,w1,w2 = 0,0,0
w = np.array((w0,w1,w2))
# NOTE(review): `weight` is assigned here but never used.
weight= 0
iterations = 100
eta = 0.01
#calling the function
train(all_input,iterations,eta)

# + pycharm={"name": "#%%\n"}
#Visualizing the linearly separable dataset after
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
plt.scatter(xs_red, ys_red, color='red')
plt.scatter(xs_blue,ys_blue, color='blue')
# Decision boundary w0 + w1*x + w2*y = 0 rewritten as a line.
m = -(w[1]/w[2])
c = -(w[0]/w[2])
# NOTE(review): arguments here are (m*all_input + c, all_input) — the
# reverse (y, x) order compared with the later plots; confirm.
plt.plot( m*all_input + c,all_input ,'g--')
plt.xlim([-20, 30])
plt.ylim([-30, 20])
# -

# # Answer (b) Pocket Algorithm

# to reset all the variables. ONLY TO BE RUN IF YOU HAVE RUN THE CODES BEFORE THIS
# %reset

from matplotlib import pyplot as plt
import numpy as np
import random
from sklearn import linear_model
from random import seed
np.random.seed(1)

# +
# Rebuild the double semi-circle dataset (coarser grid: step = 0.44).
thk = 5
sep = -5
rad = 10

xs_red = []
ys_red = []
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
step= 0.44
for x_coord in np.arange(-(rad+thk),rad+thk,step):
    for y_coord in np.arange(0 ,rad+thk,step):
        if rad**2 <= (x_coord - 0)**2 + (y_coord - 0)**2 <= (rad+thk)**2:
            xs_red.append(x_coord)
            ys_red.append(y_coord)

xs_blue = []
ys_blue = []
for x_coord in np.arange(-(thk/2),(thk/2 + (2*rad) + thk),step):
    for y_coord in np.arange(-sep ,-(rad++sep+thk),-step):
        if rad**2 <= (x_coord - ((thk/2) + rad))**2 + (y_coord - (-sep))**2 <= (rad+thk)**2:
            xs_blue.append(x_coord)
            ys_blue.append(y_coord)

plt.scatter(xs_red, ys_red,color = 'red')
plt.scatter(xs_blue, ys_blue,color = 'blue')
plt.show()
# -

# dataset preparation
x_combined = xs_red +xs_blue
y_combined = ys_red + ys_blue
# NOTE(review): np.float is deprecated (removed in NumPy >= 1.24); the
# builtin float would be needed on newer NumPy versions.
y_train_1 = np.ones((len(xs_red), ), dtype=np.float)
y_train_2 = -1 * np.ones((len(xs_blue), ), dtype=np.float)
y_train = np.concatenate((y_train_1,y_train_2))
x_train = list(zip(x_combined, y_combined))
x_train = np.array(x_train)
n_train = len(x_train)
learningRate = 0.01
Y = y_train

# Prepend the bias column x0 = 1 to every sample.
oneVector = np.ones((x_train.shape[0], 1))
x_train = np.concatenate((oneVector, x_train), axis=1)
X_train = x_train

# Pocket-algorithm bookkeeping: best weights seen so far plus histories of
# the misclassification counts and error values per epoch.
plotData = []
weights = np.random.rand(3, 1)
w_hat = weights
misClassifications = 1
minMisclassifications = 10000
iteration = 0
err_train_now = []
err_train_hat = []
train_err_now = 1
train_err_min = 1

def evaluate_error(w, X, y):
    # Predict sign(X.w); the '- (pred == 0)' maps a 0 prediction to -1.
    # NOTE(review): despite its name this returns the fraction of CORRECT
    # predictions (accuracy), not the error rate; the pocket logic below
    # keeps the weights with the MINIMUM of this value — confirm whether
    # 1 - accuracy was intended here.
    n = X.shape[0]
    pred = np.matmul(X, w)
    pred = np.sign(pred) - (pred == 0)
    pred = pred.reshape(-1)
    return np.count_nonzero(pred == y) / n

# Run the pocket PLA: sweep all points each epoch, updating on every
# misclassified point, until a perfect epoch or 100000 epochs.
while (misClassifications != 0 and (iteration<100000)):
    iteration += 1
    #for keeping track of the progress for 100000 iterations
    if (iteration%1000) == 0:
        print(iteration)
    misClassifications = 0
    for i in range(0, len(X_train)):
        currentX = X_train[i].reshape(-1, X_train.shape[1])
        currentY = Y[i]
        wTx = np.dot(currentX, weights)[0][0]
        if currentY == 1 and wTx < 0:
            misClassifications += 1
            weights = weights + learningRate * np.transpose(currentX)
        elif currentY == -1 and wTx > 0:
            misClassifications += 1
            weights = weights - learningRate * np.transpose(currentX)
    # Pocket step: remember the best weights seen so far.
    train_err_now = evaluate_error(weights, X_train, y_train)
    err_train_now.append(train_err_now)
    if train_err_now < train_err_min :
        train_err_min = train_err_now
        err_train_hat.append(train_err_min)
        w_hat = weights
    plotData.append(misClassifications)
    if misClassifications<minMisclassifications:
        minMisclassifications = misClassifications

print(weights.transpose())
print ("Best Case Accuracy of Pocket Learning Algorithm is: ",(((X_train.shape[0]-minMisclassifications)/X_train.shape[0])*100),"%")
plt.title('Number of misclassifications over the number of iterations')
plt.plot(np.arange(0,iteration),plotData)
plt.xlabel("Number of Iterations")
plt.ylabel("Number of Misclassifications")
plt.show()

# # Answer(c) The plot between the data and the final hypothesis

# +
#Visualizing the linearly separable dataset
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
plt.scatter(xs_red, ys_red, color='red')
plt.scatter(xs_blue,ys_blue, color='blue')
# Decision boundary from the final pocket weights.
m = -(weights[1]/weights[2])
c = -(weights[0]/weights[2])
plt.plot( x_train, m*x_train + c ,'g--')
plt.xlim([-20, 30])
plt.ylim([-30, 20])
# -

# # Answer (d) Linear regression

reg = linear_model.LinearRegression()
reg.fit(x_train,y_train)
weights_linear_regression = reg.coef_
l = []
for i in weights_linear_regression:
    l.append(i)
print(weights_linear_regression)

# +
#Visualizing the linearly separable dataset
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
plt.scatter(xs_red, ys_red, color='red')
plt.scatter(xs_blue,ys_blue, color='blue')
# Line from the regression coefficients on the bias-augmented inputs.
m = weights_linear_regression[1]
c = weights_linear_regression[0]
plt.plot( x_train, m*x_train + c ,'g--')
plt.xlim([-20, 30])
plt.ylim([-30, 20])
# -

# # Answer (d) It is seen that the linear regression offers better results as opposed to PLA, in terms of speed and fit
Problem_3_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (Data Science)
#     language: python
#     name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:ap-southeast-1:492261229750:image/datascience-1.0
# ---

# # Data Scientist - Feature Engineering
#
# This notebook demonstrates a sample of the activities and artifacts prepared by a Data Scientist to establish the Feature Engineering pipelines.
#
# ***
# *This notebook should work well with the Python 3 (Data Science) kernel in SageMaker Studio*
# ***

# #### Environment setup
# Import libraries, setup logging, and define few variables.

# +
import logging
import json
import sagemaker
import string
from pathlib import Path
from sagemaker.utils import name_from_base
from sagemaker.feature_store.feature_group import FeatureGroup
import shutil

from utils.feature_store_utils import format_feature_defs
# -

# %load_ext autoreload
# %autoreload 2

# Set up a logger.
# BUGFIX: use the __name__ variable, not the literal string "__name__",
# so the logger is named after this module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

# Define SageMaker and Boto3 sessions and few additional parameters

# +
sagemaker_session = sagemaker.Session()
boto_session = sagemaker_session.boto_session
sagemaker_client = sagemaker_session.sagemaker_client
region = sagemaker_session.boto_region_name
role = sagemaker.get_execution_role()
bucket = sagemaker_session.default_bucket()
prefix = "mlops-demo"

# Local working folders for generated pipeline code and configuration files.
feature_eng_base_path = Path("feature_engineering")
feature_eng_base_path.mkdir(exist_ok=True)

# +
feat_eng_pipelines_path = feature_eng_base_path / "pipelines"
feat_eng_pipelines_path.mkdir(exist_ok=True)

feat_eng_conf_path = feature_eng_base_path / "configurations"
feat_eng_conf_path.mkdir(exist_ok=True)
# -

# You'll store the offline FeatureStore in a prefix in the default S3 bucket

feature_store_offline_s3_uri = f"s3://{bucket}/{prefix}/fs/"

# Retrieve the URI of the raw data files stored by [DataScientist-00-DataDownload.ipynb](DataScientist-00-DataDownload.ipynb).

# %store -r claims_uri
# %store -r customers_uri

# ## Data Wrangler
# Editing the template `flow` files to point at the correct dataset in S3

# +
# Substitute the S3 data URI into each Data Wrangler flow template and write
# the resulting .flow files next to the pipeline code.
with (feature_eng_base_path / "claims_flow_template").open("r") as f, (
    feat_eng_pipelines_path / "claims.flow"
).open("w") as g:
    variables = {"data_uri": claims_uri}
    template = string.Template(f.read())
    claims_flow = template.substitute(variables)
    claims_flow = json.loads(claims_flow)
    json.dump(claims_flow, g, indent=2)
    logger.info("Created claims.flow ")

with (feature_eng_base_path / "customers_flow_template").open("r") as f, (
    feat_eng_pipelines_path / "customers.flow"
).open("w") as g:
    variables = {"data_uri": customers_uri}
    template = string.Template(f.read())
    # Consistency fix: use a customers-specific local name (was claims_flow).
    customers_flow = template.substitute(variables)
    customers_flow = json.loads(customers_flow)
    json.dump(customers_flow, g, indent=2)
    logger.info("Created customers.flow ")
# -

# We can review the feature engineering:
# - Let's look at the feature engineering for the [Claims Dataset](feature_engineering/claims.flow)
#
# - Let's look at the feature engineering for the [Customers Dataset](feature_engineering/customers.flow)

# ## Feature Store
# For development purposes, you could create the *Feature Groups* using the Data Wrangle export option to generate a Jupyter Notebook for each flow file.
# In this case instead, you'll still generate the Notebooks, but you'll use to extract the `column_schemas` that we need for the *Feature Groups*.
#
#
# You'll encode the relevant feature groups configurations, including the `column_schemas`, in `*.fg.json` files in `feature_engineering folder`.
# These configurations can be parsed by `get_fg_conf()` (in [feature_store_utils.py](utils/feature_store_utils.py)) and can be included in the CI/CD.
# Here's a template of a `*.fg.json` file
#
# ```
# {
#     "feature_group_name": "customers",
#     "event_time_feature_name": "event_time",
#     "record_identifier_feature_name": "policy_id",
#     "disable_glue_table_creation": false,
#     "enable_online_store": false,
#     "column_schemas":<Copy and paste from the Jupyter Notebook>
# }
# ```
# (Note: the template previously listed "feature_group_name" twice, which is
# not valid JSON — the duplicate key has been removed.)

# +
# Dedicated runtime client + session for Feature Store operations.
featurestore_runtime = boto_session.client(
    service_name="sagemaker-featurestore-runtime", region_name=region
)

feature_store_session = sagemaker.Session(
    boto_session=boto_session,
    sagemaker_client=sagemaker_client,
    sagemaker_featurestore_runtime_client=featurestore_runtime,
)
# -

# ### Claims Feature Group

# claims_column_schemas = <> # <--- Copy here the column_schemas from the Jupyter Notebook generated with DataWrangler
claims_column_schemas = [
    {"name": "policy_id", "type": "long"},
    {"name": "incident_severity", "type": "long"},
    {"name": "num_vehicles_involved", "type": "long"},
    {"name": "num_injuries", "type": "long"},
    {"name": "num_witnesses", "type": "long"},
    {"name": "police_report_available", "type": "long"},
    {"name": "injury_claim", "type": "float"},
    {"name": "vehicle_claim", "type": "float"},
    {"name": "total_claim_amount", "type": "float"},
    {"name": "incident_month", "type": "long"},
    {"name": "incident_day", "type": "long"},
    {"name": "incident_dow", "type": "long"},
    {"name": "incident_hour", "type": "long"},
    {"name": "fraud", "type": "long"},
    {"name": "driver_relationship_self", "type": "long"},
    {"name": "driver_relationship_na", "type": "long"},
    {"name": "driver_relationship_spouse", "type": "long"},
    {"name": "driver_relationship_child", "type": "long"},
    {"name": "driver_relationship_other", "type": "long"},
    {"name": "incident_type_collision", "type": "long"},
    {"name": "incident_type_breakin", "type": "long"},
    {"name": "incident_type_theft", "type": "long"},
    {"name": "collision_type_front", "type": "long"},
    {"name": "collision_type_rear", "type": "long"},
    {"name": "collision_type_side", "type": "long"},
    {"name": "collision_type_na", "type": "long"},
    {"name": "authorities_contacted_police", "type": "long"},
    {"name": "authorities_contacted_none", "type": "long"},
    {"name": "authorities_contacted_ambulance", "type": "long"},
    {"name": "authorities_contacted_fire", "type": "long"},
    {"name": "event_time", "type": "float"},
]

# We can now build the Feature Group configuration dictionary

claim_fg_props = dict(
    FeatureGroupName="dev-claims",
    FeatureDefinitions=format_feature_defs(claims_column_schemas),
    RecordIdentifierFeatureName="policy_id",
    EventTimeFeatureName="event_time",
    OnlineStoreConfig={
        "EnableOnlineStore": False,
    },
    OfflineStoreConfig={
        "S3StorageConfig": {
            "S3Uri": feature_store_offline_s3_uri,
        },
        "DisableGlueTableCreation": False,
    },
    Description="Claim feature group",
    Tags=[
        {"Key": "stage", "Value": "dev"},
    ],
)

try:
    response = sagemaker_client.create_feature_group(**claim_fg_props, RoleArn=role)
    # Consistency: log success as is done for the customers feature group below.
    logger.info("FeatureGroup created")
except sagemaker_client.exceptions.ResourceInUse:
    logger.exception("The FeatureGroup exist already", exc_info=False)

# For ease of use, you can also create a FeatureGroup object using SageMaker SDK

claims_feature_group = FeatureGroup(
    name=claim_fg_props["FeatureGroupName"],
    sagemaker_session=feature_store_session,
)

claims_feature_group.describe()

# ### Customers Feature Group

# customers_column_schemas = <> # <--- Copy here the column_schemas from the Jupyter Notebook generated with DataWrangler
customers_column_schemas = [
    {"name": "policy_id", "type": "long"},
    {"name": "customer_age", "type": "long"},
    {"name": "customer_education", "type": "long"},
    {"name": "months_as_customer", "type": "long"},
    {"name": "policy_deductable", "type": "long"},
    {"name": "policy_annual_premium", "type": "long"},
    {"name": "policy_liability", "type": "long"},
    {"name": "auto_year", "type": "long"},
    {"name": "num_claims_past_year", "type": "long"},
    {"name": "num_insurers_past_5_years", "type": "long"},
    {"name": "customer_gender_male", "type": "long"},
    {"name": "customer_gender_female", "type": "long"},
    {"name": "policy_state_ca", "type": "long"},
    {"name": "policy_state_wa", "type": "long"},
    {"name": "policy_state_az", "type": "long"},
    {"name": "policy_state_or", "type": "long"},
    {"name": "policy_state_nv", "type": "long"},
    {"name": "policy_state_id", "type": "long"},
    {"name": "event_time", "type": "float"},
]

# We can now build the Feature Group configuration dictionary

customers_fg_props = dict(
    FeatureGroupName="dev-customers",
    FeatureDefinitions=format_feature_defs(customers_column_schemas),
    RecordIdentifierFeatureName="policy_id",
    EventTimeFeatureName="event_time",
    OnlineStoreConfig={
        "EnableOnlineStore": False,
    },
    OfflineStoreConfig={
        "S3StorageConfig": {
            "S3Uri": feature_store_offline_s3_uri,
        },
        "DisableGlueTableCreation": False,
    },
    Description="Customers feature group",
    Tags=[
        {"Key": "stage", "Value": "dev"},
    ],
)

try:
    response = sagemaker_client.create_feature_group(**customers_fg_props, RoleArn=role)
    logger.info("FeatureGroup created")
except sagemaker_client.exceptions.ResourceInUse:
    logger.exception("The FeatureGroup exist already", exc_info=False)

# For ease of use, you can also create a FeatureGroup object using SageMaker SDK

customers_feature_group = FeatureGroup(
    name=customers_fg_props["FeatureGroupName"],
    sagemaker_session=feature_store_session,
)

customers_feature_group.describe()

# ## Data Processing Pipelines
# Prepare a subfolder in the `feature_engineering` folder to store the script with the pipeline definition and any additional library we need.
shutil.copy("utils/parse_flow.py", feat_eng_pipelines_path) # + # %%writefile {feat_eng_pipelines_path}/feature_ingestion_pipeline.py import json import sagemaker from sagemaker.processing import ( FeatureStoreOutput, ProcessingInput, ProcessingJob, ProcessingOutput, Processor, ) from sagemaker.utils import name_from_base from sagemaker.workflow.parameters import ParameterInteger, ParameterString from sagemaker.workflow.pipeline import Pipeline from sagemaker.workflow.steps import ProcessingStep from sagemaker.wrangler.processing import DataWranglerProcessor from .parse_flow import FlowFile def get_pipeline( role: str, pipeline_name: str, prefix: str, sagemaker_session: sagemaker.Session=None, **kwarg, )-> Pipeline: """[summary] Args: role ([type]): [description] pipeline_name ([type]): [description] sagemaker_session ([type], optional): [description]. Defaults to None. Returns: Pipeline: [description] """ flow_file_path = kwarg["flow_file_path"] feature_group_name = kwarg["feature_group_name"] bucket = sagemaker_session.default_bucket() flow_file = FlowFile(flow_file_path) instance_count = ParameterInteger(name="InstanceCount", default_value=1) instance_type = ParameterString(name="InstanceType", default_value="ml.m5.4xlarge") input_data_uri = ParameterString(name="InputDataURI") flow_file_uri = sagemaker.s3.S3Uploader.upload( local_path=flow_file_path, desired_s3_uri=f"s3://{bucket}/{prefix}/feature_ingestion/{name_from_base(pipeline_name)}", sagemaker_session=sagemaker_session, ) output_content_type = "CSV" output_config = {flow_file.output_name: {"content_type": output_content_type}} job_argument = [f"--output-config '{json.dumps(output_config)}'"] data_sources = [ ProcessingInput( input_name="InputData", source=input_data_uri, destination=f"/opt/ml/processing/{flow_file.input_name}", ) ] outputs = [ ProcessingOutput( output_name=flow_file.output_name, app_managed=True, feature_store_output=FeatureStoreOutput( feature_group_name=feature_group_name ), ) ] 
data_wrangler_processor = DataWranglerProcessor( role=role, data_wrangler_flow_source=flow_file_uri, instance_count=instance_count, instance_type=instance_type, sagemaker_session=sagemaker_session, ) data_wrangler_step = ProcessingStep( name="data-wrangler-step", processor=data_wrangler_processor, inputs=data_sources, outputs=outputs, job_arguments=job_argument, ) pipeline = Pipeline( name=pipeline_name, parameters=[ instance_count, instance_type, input_data_uri, ], steps=[data_wrangler_step], sagemaker_session=sagemaker_session, ) return pipeline # - # You can now import the function that create the pipeline object. Thanks to the `autoreload` extension, we can update the script and rerun the call above, and the function will be automatically reloaded. from feature_engineering.pipelines.feature_ingestion_pipeline import get_pipeline # ### Claims feature ingestion pipeline claims_pipeline_args = { "flow_file_path": (feat_eng_pipelines_path / "claims.flow").as_posix(), "feature_group_name": claims_feature_group.name, } claims_pipeline = get_pipeline( role=role, pipeline_name="dev-claims-pipeline", sagemaker_session=sagemaker_session, prefix=prefix, **claims_pipeline_args ) json.loads(claims_pipeline.definition()) # ### Customers feature ingestion pipeline customers_pipeline_conf = { "flow_file_path": (feat_eng_pipelines_path / "customers.flow").as_posix(), "feature_group_name": customers_feature_group.name, } customers_pipeline = get_pipeline( role=role, pipeline_name="dev-customers-pipeline", prefix=prefix, sagemaker_session=sagemaker_session, **customers_pipeline_conf ) json.loads(customers_pipeline.definition()) # ### Create the pipelines try: claims_pipeline.update( role_arn=role, description="Claims feature ingestion pipeline", ) logging.info("Pipeline updated") except: claims_pipeline.create( role_arn=role, description="Claims feature ingestion pipeline", ) logging.info("Pipeline created") try: customers_pipeline.update( role_arn=role, description="Claims 
feature ingestion pipeline", ) logging.info("Pipeline updated") except: customers_pipeline.create( role_arn=role, description="Claims feature ingestion pipeline", ) logging.info("Pipeline created") # ## Run the pipelines claims_pipeline_execution = claims_pipeline.start( parameters={"InputDataURI": claims_uri}, execution_display_name="dev-run", ) claims_pipeline_execution.describe() customers_pipeline_execution = customers_pipeline.start( parameters={"InputDataURI": customers_uri}, execution_display_name="dev-run", ) customers_pipeline_execution.describe() # ## Write configuration files for operationalization # Feature Groups configurations # + claims_fg_props_prod = dict( FeatureGroupName="mlops-demo-claims", FeatureDefinitions=format_feature_defs(claims_column_schemas), RecordIdentifierFeatureName="policy_id", EventTimeFeatureName="event_time", OnlineStoreConfig={ "EnableOnlineStore": True, # <-- In production we want the online store turned on }, OfflineStoreConfig={ "S3StorageConfig": { "S3Uri": feature_store_offline_s3_uri, }, "DisableGlueTableCreation": False, }, Description="Claim feature group", ) with (feat_eng_conf_path / "claims.fg.json").open("w") as f: json.dump(claims_fg_props_prod, f, indent=2) customers_fg_props_prod = dict( FeatureGroupName="mlops-demo-customers", FeatureDefinitions=format_feature_defs(customers_column_schemas), RecordIdentifierFeatureName="policy_id", EventTimeFeatureName="event_time", OnlineStoreConfig={ "EnableOnlineStore": True, # <-- In production we want the online store turned on }, OfflineStoreConfig={ "S3StorageConfig": { "S3Uri": feature_store_offline_s3_uri, }, "DisableGlueTableCreation": False, }, Description="Customers feature group", ) with (feat_eng_conf_path / "customers.fg.json").open("w") as f: json.dump(customers_fg_props_prod, f, indent=2) # - # Pipelines configurations claims_config = dict( pipeline_name="claims-preprocessing", code_file_path="pipelines/feature_ingestion_pipeline.py", 
pipeline_configuration=claims_pipeline_args, ) with (feat_eng_conf_path / "claims.pipeline.json").open("w") as f: json.dump(claims_config, f, indent=2) customers_config = dict( pipeline_name="customers-preprocessing", code_file_path="pipelines/feature_ingestion_pipeline.py", pipeline_configuration=customers_pipeline_conf, ) with (feat_eng_conf_path / "customers.pipeline.json").open("w") as f: json.dump(customers_config, f, indent=2) # ## Clean-up # + # customers_pipeline.delete() # claims_pipeline.delete() # + # claims_feature_group.delete() # customers_feature_group.delete()
demo-workspace/DataScientist-01-FeatureEng.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + init_cell=true nbsphinx="hidden" pycharm={"is_executing": false} # %matplotlib inline # %config InlineBackend.figure_format = 'svg' import scqubits as scq import scqubits.legacy.sweep_plotting as splot from scqubits import HilbertSpace, InteractionTerm, ParameterSweep import numpy as np # - # .. note:: # This describes a legacy version of the `HilbertSpace` class which is deprecated with scqubits v1.4. # # # # Composite Hilbert Spaces, QuTiP Interface # The `HilbertSpace` class provides data structures and methods for handling composite Hilbert spaces which may consist of multiple qubits or qubits and oscillators coupled to each other. To harness the power of QuTiP, a toolbox for studying stationary and dynamical properties of closed and open quantum systems (and much more), `HilbertSpace` provides a convenient interface: it generates `qutip.qobj` objects which are then directly handled by QuTiP. # ## Example: two transmons coupled to a harmonic mode # # Transmon qubits can be capacitively coupled to a common harmonic mode, realized by an LC oscillator or a transmission-line resonator. The Hamiltonian describing such a composite system is given by: # \begin{equation} # H=H_\text{tmon,1} + H_\text{tmon,2} + \omega_r a^\dagger a + \sum_{j=1,2}g_j n_j(a+a^\dagger), # \end{equation} # where $j=1,2$ enumerates the two transmon qubits, $\omega_r$ is the (angular) frequency of the resonator. Furthermore, $n_j$ is the charge number operator for qubit $j$, and $g_j$ is the coupling strength between qubit $j$ and the resonator. # # ### Create Hilbert space components # # The first step consists of creating the objects describing the individual building blocks of the full Hilbert space. 
Here, these will be the two transmons and one oscillator:

# + pycharm={"is_executing": false}
# Two transmon qubits with different parameters; truncated_dim sets how many
# dressed levels are kept after diagonalization.
tmon1 = scq.Transmon(
    EJ=40.0,
    EC=0.2,
    ng=0.3,
    ncut=40,
    truncated_dim=4     # after diagonalization, we will keep 4 levels
)

tmon2 = scq.Transmon(
    EJ=15.0,
    EC=0.15,
    ng=0.0,
    ncut=30,
    truncated_dim=4
)

# Harmonic resonator mode with (angular) frequency E_osc.
resonator = scq.Oscillator(
    E_osc=4.5,
    truncated_dim=4  # up to 3 photons (0,1,2,3)
)
# -

# The system objects are next grouped into a Python list, and in this form used for the initialization of a `HilbertSpace` object. Once created, a print call to this object outputs a summary of the composite Hilbert space.

# + pycharm={"is_executing": false}
hilbertspace = scq.HilbertSpace([tmon1, tmon2, resonator])
print(hilbertspace)
# -

# One useful method of the `HilbertSpace` class is `.bare_hamiltonian()`. This yields the bare Hamiltonian of the non-interacting subsystems, expressed as a `qutip.Qobj`:

bare_hamiltonian = hilbertspace.bare_hamiltonian()
bare_hamiltonian

# ### Set up the interaction between subsystems
# The pairwise interactions between subsystems are assumed to have the general form
#
# $V=\sum_{i\not= j} g_{ij} A_i B_j$,
#
# where $g_{ij}$ parametrizes the interaction strength between subsystems $i$ and $j$. The operator content of the coupling is given by the two coupling operators $A_i$, $B_j$, which are operators in the two respective subsystems.
# This structure is captured by setting up an `InteractionTerm` object:

# + pycharm={"is_executing": false}
# Coupling strengths between each transmon and the resonator.
g1 = 0.1  # coupling resonator-CPB1 (without charge matrix elements)
g2 = 0.2  # coupling resonator-CPB2 (without charge matrix elements)

# Each term implements g * n_j * (a + a^dagger).
interaction1 = InteractionTerm(
    hilbertspace = hilbertspace,
    g_strength = g1,
    op1 = tmon1.n_operator(),
    subsys1 = tmon1,
    op2 = resonator.creation_operator() + resonator.annihilation_operator(),
    subsys2 =resonator
)

interaction2 = InteractionTerm(
    hilbertspace = hilbertspace,
    g_strength = g2,
    op1 = tmon2.n_operator(),
    subsys1 = tmon2,
    op2 = resonator.creation_operator() + resonator.annihilation_operator(),
    subsys2 = resonator
)
# -

# Each `InteractionTerm` object is initialized by specifying
# 1. the Hilbert space object to which it will belong
# 2. the interaction strength coefficient $g_{ij}$
# 3. `op1`, `op2`: the subsystem operators $A_i$, $B_j$ (these should be operators within the subsystems' respective Hilbert spaces only)
# 4. `subsys1`: the subsystem objects to which `op1` and `op2` belong
#
# Note: interaction Hamiltonians of the alternative form $V=g_{ij}A_i B_j^\dagger + g_{ij}^* A_i^\dagger B_J$ (a typical form when performing rotating-wave approximation) can be specified by setting `op1` to $A_i$ and `op2` to $B_j^\dagger$, and providing the additional keyword parameter `add_hc = True`.
#
# Now, collect all interaction terms in a list, and insert into the HilbertSpace object.

# + pycharm={"is_executing": false}
interaction_list = [interaction1, interaction2]
hilbertspace.interaction_list = interaction_list
# -

# With the interactions specified, the full Hamiltonian of the coupled system can be obtained via the method `.hamiltonian()`. Again, this conveniently results in a `qutip.Qobj` operator:

# + pycharm={"is_executing": false}
dressed_hamiltonian = hilbertspace.hamiltonian()
dressed_hamiltonian
# -

# ### Obtaining the eigenspectrum via QuTiP
#
# Since the Hamiltonian obtained this way is a proper `qutip.qobj`, all QuTiP routines are now available. In the first case, we are still making use of the scqubit `HilbertSpace.eigensys()` method. In the second, case, we use QuTiP's method `.eigenenergies()`:

# + pycharm={"is_executing": false}
evals, evecs = hilbertspace.eigensys(evals_count=4)
print(evals)
# -

dressed_hamiltonian = hilbertspace.hamiltonian()
dressed_hamiltonian.eigenenergies()
docs/source/guide/ipynb/hilbertspace_legacy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Load the data (python dict).
from data import exam_data, browser_data, employee_data, sample_data

#1 Write a Python program to delete the 'attempts' column from the DataFrame
exam_data = pd.DataFrame(exam_data)
del exam_data['attempts']
exam_data

#2 Create a data frame called "Browser" and reset the index to the new list of labels.
browser = pd.DataFrame(browser_data, index=['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'])
browser.index = ['Iceweasel', 'Safari', 'IE 10', 'Chrome', 'Comodo Dragon']
browser

# +
#3 Write a program using pandas library in Python to display all of the employees who are of age \
# greater than or equal to 68 from the following data?
employees = pd.DataFrame(employee_data)
employees[employees.Age >= 68]

# +
# PART II - Question 4
# 1. Create a data frame and assign it to a variable called "Army". (Include the columns names)
Army = pd.DataFrame(sample_data)
Army

# +
# 2. Set the 'Origin' column as the index of the data frame
Army.set_index('Origin', inplace=True)
Army

# +
# 3. Select the 'Regiment', 'Deaths' and 'Size' columns from 'Georgia' and 'Florida', 'California'.
df = Army.loc[['Georgia', 'Florida', 'California'], ['Regiment', 'Deaths', 'Size']]
df

# +
# 4. Select the rows from 2 to 8 and the columns from 3 to 6
# (zero-based positions: rows 2..8, columns 3..6 — slice ends are exclusive)
Army.iloc[2:9, 3:7]

# +
# 5. Select every row after the third row
Army.iloc[3:]

# +
# 6. Select the columns from 4 to 8 (zero-based positions 4..8)
Army.iloc[:, 4:9]

# +
# 7. Select rows where Battles are greater than 5
Army[Army.Battles > 5]

# +
# 8. Select rows where Deaths are greater than 500 or less than 50
Army[(Army.Deaths > 500) | (Army.Deaths < 50)]

# +
# 9. Select all the regiments except 'Scouts'
Army[~(Army.Regiment == 'Scouts')]
# If you only want 'Regiment' column without 'Scouts', RUN:
# Army[~(Army.Regiment == 'Scouts')].Regiment

# +
# 10. Select the third cell in the row named 'Alaska'
# NOTE(review): `[2]` is positional indexing on a labeled Series; recent pandas
# deprecates this — `.iloc[2]` would be the explicit form. Behavior kept as-is.
Army.loc['Alaska'][2]
# -
assignment5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from web3 import Web3 import matplotlib.pyplot as plt import pandas as pd from datetime import datetime import time COLLECTION_NAME = "sss" COLLECTION_FOLDER = "SSS" ENDPOINT = "" w3 = Web3(Web3.HTTPProvider(ENDPOINT)) WEI_TO_GWEI_CONSTANT = 1e9 # to GWEI START_BLOCK = 13276866 END_BLOCK = 13276877 EDGE_BLOCKS = 100 MAX_BLOCK_PULL = 1024 first_block = START_BLOCK - EDGE_BLOCKS last_block = END_BLOCK + EDGE_BLOCKS blocks_left = last_block - first_block + 1 blocks_to_grab = min(MAX_BLOCK_PULL, blocks_left) block_list = [] print("Total Blocks: ", blocks_left) while blocks_left > 0: blocks = w3.eth.fee_history( blocks_to_grab, last_block, [10, 90] ) # args: block_count, newest_block, last_block = blocks["oldestBlock"] blocks_to_grab = min(MAX_BLOCK_PULL, last_block - first_block + 1) blocks_left -= len(blocks["baseFeePerGas"][:-1]) new_start_of_list = blocks["baseFeePerGas"][:-1] new_start_of_list.extend(block_list[1:]) block_list = new_start_of_list print("Total Length of List: ", len(block_list)) print("first base fee: ", block_list[0] / WEI_TO_GWEI_CONSTANT) print("last base fee: ", block_list[-1] / WEI_TO_GWEI_CONSTANT) # get the timestamp for every block block_numbers = list(range(first_block, END_BLOCK + EDGE_BLOCKS + 1)) time_list = [] for i in range(0, len(block_list)): time_list.append(int(w3.eth.getBlock(block_numbers[i]).timestamp)) block_list[i] = block_list[i] / WEI_TO_GWEI_CONSTANT data = pd.DataFrame() data["block_numbers"] = block_numbers data["timestamps"] = time_list data["timestamps"] = pd.to_datetime(data["timestamps"], unit="s") data["base_fee"] = block_list data.to_csv("../../{}/data/gas_data.csv".format(COLLECTION_NAME)) # -
case_studies/general/code/get_gas_prices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # SVM-Kernels # # # Three different types of SVM-Kernels are displayed below. # The polynomial and RBF are especially useful when the # data-points are not linearly separable. # # + print(__doc__) # Code source: <NAME> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import svm # Our dataset and targets X = np.c_[(.4, -.7), (-1.5, -1), (-1.4, -.9), (-1.3, -1.2), (-1.1, -.2), (-1.2, -.4), (-.5, 1.2), (-1.5, 2.1), (1, 1), # -- (1.3, .8), (1.2, .5), (.2, -2), (.5, -2.4), (.2, -2.3), (0, -2.7), (1.3, 2.1)].T Y = [0] * 8 + [1] * 8 # figure number fignum = 1 # fit the model for kernel in ('linear', 'poly', 'rbf'): clf = svm.SVC(kernel=kernel, gamma=2) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none', zorder=10, edgecolors='k') plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired, edgecolors='k') plt.axis('tight') x_min = -3 x_max = 3 y_min = -3 y_max = 3 XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.figure(fignum, figsize=(4, 3)) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
sklearn/sklearn learning/demonstration/auto_examples_jupyter/svm/plot_svm_kernels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Task2: Bengali Transfer Learning Binary Classifier

# imports
from data import BengaliData
from model import BengaliLSTMClassifier
from train import train_model
from eval import evaluate_test_set
import torch
import pickle
from config import config_dict
from torch import nn

## load vocab and embedding weights
data = BengaliData(config_dict['file_paths'])
with open(config_dict['file_paths']['embeddings_path'], 'rb') as f:
    embedding_weights = pickle.load(f)

## check whether the pre-trained embeddings are the same shape as of train vocabulary
assert embedding_weights.T.shape == (len(data.vocab), config_dict['embedding_size']), "Pre-trained embeddings size not equal to size of embedding layer"

# +
## create model instance with configurations coming from config file
model = BengaliLSTMClassifier(pretrained_state_dict_path= config_dict['file_paths']['pretrained_path'],
                              batch_size=config_dict['batch_size'],
                              output_size=config_dict['out_size'],
                              vocab_size=len(data.vocab),
                              hidden_size=config_dict['hidden_size'],
                              embedding_size=config_dict['embedding_size'],
                              weights=torch.FloatTensor(embedding_weights.T),
                              lstm_layers=config_dict['lstm_layers'],
                              device=config_dict['device']).to(config_dict['device'])
# -

# ## Loading pretrained LSTM & FC weights from Hindi Classifier

## load pretrained weights
model.load_pretrained_layers()

# +
## get dataloaders for train and test set
bengali_dataloader = data.get_data_loader(batch_size=config_dict['batch_size'])

## filtering out embedding weights since they won't be optimized
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
# -

# Best-effort resume: load the best checkpoint saved during training if one
# exists, otherwise start from the pretrained/transfer weights.
# Fix: narrowed the bare `except:` (which also swallowed KeyboardInterrupt /
# SystemExit) to `except Exception`.
try:
    ## loading the best model saved during training from disk
    model.load_state_dict(torch.load('{}.pth'.format(config_dict['model_name']), map_location=torch.device(config_dict['device'])))
    print('model loaded...')
except Exception:
    print('no prior model')

# +
## training the model on train set
#model = train_model(model, optimizer, bengali_dataloader, data, max_epochs=config_dict['epochs'],config_dict=config_dict)
# -

## evaluate model on test set
evaluate_test_set(model, data, bengali_dataloader, device=config_dict['device'])
src/task2/2_bengali_lstm_pret/Bengali_Transfer_Learning_Binary_Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pytorch] # language: python # name: conda-env-pytorch-py # --- # # Modeling with PyTorch # + import audiomod import ptmod import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim import pickle import matplotlib.pyplot as plt # import seaborn as sns # plt.style.use('seaborn') # %matplotlib inline # - # ## Train/Test Dataset Creation # Now that it's labeled in the database, I load the datagroup as a pd dataframe, add a train/test column, and then create PyTorch dataset objects. These spectros will be at 1/4 scale, larger than the 1/8 scale spectros used in development. sax1203_datagroup = audiomod.pull_datagroup_from_db('sax1203') # look at number of records with each label sax1203_datagroup.actual.value_counts() sax1203_datagroup.sample(5) # + # creation of dataset objects for train and test sets train_df, test_df = audiomod.tts(sax1203_datagroup) train_4 = ptmod.SpectroDataset(train_df, scaling=0.25) test_4 = ptmod.SpectroDataset(test_df, scaling=0.25) print("Train set length:", len(train_4)) print("Test set length:", len(test_4)) # - # A look at 5 records in the train set: for i in range(5): item = train_4[i] print("\nChunk:", item[2]) print("Label:", item[1]) print("---") ptmod.tensor_stats(item[0]) # ## CNN Architecture # Because of the (1,128,108) dimension of the images, we'll have to re-design the layers of the CNN. # + cnn_layers_test = ( (5,1,10), (2,2,0), (5,1,20), (2,2,0) ) ptmod.cnn_pixels_out((1,128,108), cnn_layers_test, drop_last=True) # - # I could try running it with 13920 values going into the FC layer, but will look at getting it down to the 400-3000 range. 
# + cnn_layers = ( (5,2,10), (2,2,0), (5,2,20), (2,2,0) ) ptmod.cnn_pixels_out((1,128,108), cnn_layers, drop_last=True) # - # With a stride of 2 on each convolutional layer, this gets it down to 700. Worth a shot. # + params_1 = { 'c1': [5,2,10], 'p1': [2,2], 'c2': [5,2,20], 'p2': [2,2], 'f1': [700,100], 'f2': [100,2] } cnn_700 = ptmod.CNN_cpcpff(params_1) print(cnn_700) # - # ## Testing First Model ptmod.fit( cnn_700, train_4, optim.SGD(cnn_700.parameters(), lr=0.01), nn.CrossEntropyLoss(), 20 ) cnn_700.save_myself('cnn700_171203_1109') train_4_pred = ptmod.predict(cnn_700, train_4) test_4_pred = ptmod.predict(cnn_700, test_4) cnn_700_scores = ptmod.get_scores(train_4_pred, test_4_pred) # Much better than the first attempt! Doesn't look like it's overfitting. What happens with more epochs? # ### Bumping Up Number of Epochs # instantiate a new model object cnn_700_2 = ptmod.CNN_cpcpff(params_1) print(cnn_700_2) cnn_700_2, cnn_700_2_loss = ptmod.fit( cnn_700_2, train_4, optim.SGD(cnn_700_2.parameters(), lr=0.01), nn.CrossEntropyLoss(), 50 ) # + x, y = list(range(1,51)), cnn_700_2_loss fig = plt.figure(figsize=(12,8)) plt.plot(x,y) plt.grid(b=True) plt.xlabel("Epoch") plt.ylabel("Cross-Entropy Loss") plt.title("CNN_700: Loss Per Epoch"); # - train_4_pred2 = ptmod.predict(cnn_700_2, train_4) test_4_pred2 = ptmod.predict(cnn_700_2, test_4) cnn_700_scores = ptmod.get_scores(train_4_pred2, test_4_pred2) # Clearly overfitting -- seriously learned the training set. I'll have to back off on model complexity and/or epochs. Need to do some CV/Grid Search. # + # cnn_700_50_loss_and_predicts = { # 'loss': cnn_700_2_loss, # 'train_df': train_4_pred2, # 'test_df': test_4_pred2 # } # + # save for later # with open('../data/cnn_700_50_loss_and_predicts.p', 'wb') as pf: # pickle.dump(cnn_700_50_loss_and_predicts, pf)
5-sax-detector/code/11-pytorch-tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Conv1d" data-toc-modified-id="Conv1d-0.0.1"><span class="toc-item-num">0.0.1&nbsp;&nbsp;</span>Conv1d</a></span></li><li><span><a href="#Attentions" data-toc-modified-id="Attentions-0.0.2"><span class="toc-item-num">0.0.2&nbsp;&nbsp;</span>Attentions</a></span></li><li><span><a href="#STFT" data-toc-modified-id="STFT-0.0.3"><span class="toc-item-num">0.0.3&nbsp;&nbsp;</span>STFT</a></span></li><li><span><a href="#Global-style-tokens" data-toc-modified-id="Global-style-tokens-0.0.4"><span class="toc-item-num">0.0.4&nbsp;&nbsp;</span>Global style tokens</a></span></li></ul></li></ul></li><li><span><a href="#VITS-common" data-toc-modified-id="VITS-common-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>VITS common</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#LayerNorm" data-toc-modified-id="LayerNorm-1.0.1"><span class="toc-item-num">1.0.1&nbsp;&nbsp;</span>LayerNorm</a></span></li><li><span><a href="#Flip" data-toc-modified-id="Flip-1.0.2"><span class="toc-item-num">1.0.2&nbsp;&nbsp;</span>Flip</a></span></li><li><span><a href="#Log" data-toc-modified-id="Log-1.0.3"><span class="toc-item-num">1.0.3&nbsp;&nbsp;</span>Log</a></span></li><li><span><a href="#ElementWiseAffine" data-toc-modified-id="ElementWiseAffine-1.0.4"><span class="toc-item-num">1.0.4&nbsp;&nbsp;</span>ElementWiseAffine</a></span></li><li><span><a href="#DDSConv" data-toc-modified-id="DDSConv-1.0.5"><span class="toc-item-num">1.0.5&nbsp;&nbsp;</span>DDSConv</a></span></li><li><span><a href="#ConvFLow" data-toc-modified-id="ConvFLow-1.0.6"><span 
class="toc-item-num">1.0.6&nbsp;&nbsp;</span>ConvFLow</a></span></li><li><span><a href="#WN" data-toc-modified-id="WN-1.0.7"><span class="toc-item-num">1.0.7&nbsp;&nbsp;</span>WN</a></span></li><li><span><a href="#ResidualCouplingLayer" data-toc-modified-id="ResidualCouplingLayer-1.0.8"><span class="toc-item-num">1.0.8&nbsp;&nbsp;</span>ResidualCouplingLayer</a></span></li><li><span><a href="#ResBlock" data-toc-modified-id="ResBlock-1.0.9"><span class="toc-item-num">1.0.9&nbsp;&nbsp;</span>ResBlock</a></span></li></ul></li></ul></li></ul></div> # + # default_exp models.common # + # export import numpy as np from scipy.signal import get_window import torch from torch.autograd import Variable from torch import nn from torch.nn import functional as F from torch.nn.utils import remove_weight_norm, weight_norm from librosa.filters import mel as librosa_mel from librosa.util import pad_center, tiny from uberduck_ml_dev.utils.utils import * from uberduck_ml_dev.vendor.tfcompat.hparam import HParams # - # ### Conv1d # + # export class Conv1d(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain="linear", ): super().__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = nn.Conv1d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, ) nn.init.xavier_uniform_( self.conv.weight, gain=nn.init.calculate_gain(w_init_gain) ) def forward(self, signal): return self.conv(signal) # + # export class LinearNorm(torch.nn.Module): def __init__(self, in_dim, out_dim, bias=True, w_init_gain="linear"): super().__init__() self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) torch.nn.init.xavier_uniform_( self.linear_layer.weight, gain=torch.nn.init.calculate_gain(w_init_gain) ) def forward(self, x): return self.linear_layer(x) # - # ### Attentions # + # export from numpy import 
finfo class LocationLayer(nn.Module): def __init__(self, attention_n_filters, attention_kernel_size, attention_dim): super(LocationLayer, self).__init__() padding = int((attention_kernel_size - 1) / 2) self.location_conv = Conv1d( 2, attention_n_filters, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1, ) self.location_dense = LinearNorm( attention_n_filters, attention_dim, bias=False, w_init_gain="tanh" ) def forward(self, attention_weights_cat): processed_attention = self.location_conv(attention_weights_cat) processed_attention = processed_attention.transpose(1, 2) processed_attention = self.location_dense(processed_attention) return processed_attention class Attention(nn.Module): def __init__( self, attention_rnn_dim, embedding_dim, attention_dim, attention_location_n_filters, attention_location_kernel_size, fp16_run, ): super(Attention, self).__init__() self.query_layer = LinearNorm( attention_rnn_dim, attention_dim, bias=False, w_init_gain="tanh" ) self.memory_layer = LinearNorm( embedding_dim, attention_dim, bias=False, w_init_gain="tanh" ) self.v = LinearNorm(attention_dim, 1, bias=False) self.location_layer = LocationLayer( attention_location_n_filters, attention_location_kernel_size, attention_dim ) if fp16_run: self.score_mask_value = finfo("float16").min else: self.score_mask_value = -float("inf") def get_alignment_energies(self, query, processed_memory, attention_weights_cat): """ PARAMS ------ query: decoder output (batch, n_mel_channels * n_frames_per_step) processed_memory: processed encoder outputs (B, T_in, attention_dim) attention_weights_cat: cumulative and prev. 
att weights (B, 2, max_time) RETURNS ------- alignment (batch, max_time) """ processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer(attention_weights_cat) energies = self.v( torch.tanh(processed_query + processed_attention_weights + processed_memory) ) energies = energies.squeeze(-1) return energies def forward( self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask, attention_weights=None, ): """ PARAMS ------ attention_hidden_state: attention rnn last output memory: encoder outputs processed_memory: processed encoder outputs attention_weights_cat: previous and cummulative attention weights mask: binary mask for padded data """ if attention_weights is None: alignment = self.get_alignment_energies( attention_hidden_state, processed_memory, attention_weights_cat ) if mask is not None: alignment.data.masked_fill_(mask, self.score_mask_value) attention_weights = F.softmax(alignment, dim=1) attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) attention_context = attention_context.squeeze(1) return attention_context, attention_weights # + from numpy import finfo finfo("float16").min # - F.pad(torch.rand(1, 3, 3), (2, 2), mode="reflect") # ### STFT # + # export class STFT: """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" def __init__( self, filter_length=1024, hop_length=256, win_length=1024, window="hann", padding=None, device="cpu", rank=None, ): self.filter_length = filter_length self.hop_length = hop_length self.win_length = win_length self.window = window self.forward_transform = None scale = self.filter_length / self.hop_length fourier_basis = np.fft.fft(np.eye(self.filter_length)) self.padding = padding or (filter_length // 2) cutoff = int((self.filter_length / 2 + 1)) fourier_basis = np.vstack( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) if device == "cuda": dev = torch.device(f"cuda:{rank}") forward_basis = 
torch.cuda.FloatTensor( fourier_basis[:, None, :], device=dev ) inverse_basis = torch.cuda.FloatTensor( np.linalg.pinv(scale * fourier_basis).T[:, None, :].astype(np.float32), device=dev, ) else: forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) inverse_basis = torch.FloatTensor( np.linalg.pinv(scale * fourier_basis).T[:, None, :].astype(np.float32) ) if window is not None: assert filter_length >= win_length # get window and zero center pad it to filter_length fft_window = get_window(window, win_length, fftbins=True) fft_window = pad_center(fft_window, filter_length) fft_window = torch.from_numpy(fft_window).float() if device == "cuda": fft_window = fft_window.cuda(rank) # window the bases forward_basis *= fft_window inverse_basis *= fft_window self.fft_window = fft_window self.forward_basis = forward_basis.float() self.inverse_basis = inverse_basis.float() def transform(self, input_data): num_batches = input_data.size(0) num_samples = input_data.size(1) self.num_samples = num_samples # similar to librosa, reflect-pad the input input_data = input_data.view(num_batches, 1, num_samples) input_data = F.pad( input_data.unsqueeze(1), ( self.padding, self.padding, 0, 0, ), mode="reflect", ) input_data = input_data.squeeze(1) forward_transform = F.conv1d( input_data, Variable(self.forward_basis, requires_grad=False), stride=self.hop_length, padding=0, ) cutoff = self.filter_length // 2 + 1 real_part = forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part**2 + imag_part**2) phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data)) return magnitude, phase def inverse(self, magnitude, phase): recombine_magnitude_phase = torch.cat( [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1, ) inverse_transform = F.conv_transpose1d( recombine_magnitude_phase, Variable(self.inverse_basis, requires_grad=False), stride=self.hop_length, padding=0, ) if self.window is not None: 
window_sum = window_sumsquare( self.window, magnitude.size(-1), hop_length=self.hop_length, win_length=self.win_length, n_fft=self.filter_length, dtype=np.float32, ) # remove modulation effects approx_nonzero_indices = torch.from_numpy( np.where(window_sum > tiny(window_sum))[0] ) window_sum = torch.autograd.Variable( torch.from_numpy(window_sum), requires_grad=False ) window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ approx_nonzero_indices ] # scale by hop ratio inverse_transform *= float(self.filter_length) / self.hop_length inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :] inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2) :] return inverse_transform def forward(self, input_data): self.magnitude, self.phase = self.transform(input_data) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction # + # export class MelSTFT: def __init__( self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0, mel_fmax=8000.0, device="cpu", padding=None, rank=None, ): self.n_mel_channels = n_mel_channels self.sampling_rate = sampling_rate if padding is None: padding = filter_length // 2 self.stft_fn = STFT( filter_length, hop_length, win_length, device=device, rank=rank, padding=padding, ) mel_basis = librosa_mel( sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax ) mel_basis = torch.from_numpy(mel_basis).float() if device == "cuda": mel_basis = mel_basis.cuda() self.mel_basis = mel_basis def spectral_normalize(self, magnitudes): output = dynamic_range_compression(magnitudes) return output def spectral_de_normalize(self, magnitudes): output = dynamic_range_decompression(magnitudes) return output def spec_to_mel(self, spec): mel_output = torch.matmul(self.mel_basis, spec) mel_output = self.spectral_normalize(mel_output) return mel_output def spectrogram(self, y): 
assert y.min() >= -1 assert y.max() <= 1 magnitudes, phases = self.stft_fn.transform(y) return magnitudes.data def mel_spectrogram(self, y, ref_level_db=20, magnitude_power=1.5): """Computes mel-spectrograms from a batch of waves PARAMS ------ y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1] RETURNS ------- mel_output: torch.FloatTensor of shape (B, n_mel_channels, T) """ assert y.min() >= -1 assert y.max() <= 1 magnitudes, phases = self.stft_fn.transform(y) magnitudes = magnitudes.data return self.spec_to_mel(magnitudes) def griffin_lim(self, mel_spectrogram, n_iters=30): mel_dec = self.spectral_de_normalize(mel_spectrogram) # Float cast required for fp16 training. mel_dec = mel_dec.transpose(0, 1).cpu().data.float() spec_from_mel = torch.mm(mel_dec, self.mel_basis).transpose(0, 1) spec_from_mel *= 1000 out = griffin_lim(spec_from_mel.unsqueeze(0), self.stft_fn, n_iters=n_iters) return out # + from IPython.display import Audio stft = STFT() mel_stft = MelSTFT() mel = mel_stft.mel_spectrogram(torch.clip(torch.randn(1, 1000), -1, 1)) assert mel.shape[0] == 1 assert mel.shape[1] == 80 mel = torch.load("./test/fixtures/stevejobs-1.pt") aud = mel_stft.griffin_lim(mel) # - # hide Audio(aud, rate=22050) # ### Global style tokens # + # export from torch.nn import init class ReferenceEncoder(nn.Module): """ inputs --- [N, Ty/r, n_mels*r] mels outputs --- [N, ref_enc_gru_size] """ def __init__(self, hp): super().__init__() K = len(hp.ref_enc_filters) filters = [1] + hp.ref_enc_filters convs = [ nn.Conv2d( in_channels=filters[i], out_channels=filters[i + 1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), ) for i in range(K) ] self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList( [nn.BatchNorm2d(num_features=hp.ref_enc_filters[i]) for i in range(K)] ) out_channels = self.calculate_channels(hp.n_mel_channels, 3, 2, 1, K) self.gru = nn.GRU( input_size=hp.ref_enc_filters[-1] * out_channels, hidden_size=hp.ref_enc_gru_size, batch_first=True, ) 
self.n_mel_channels = hp.n_mel_channels self.ref_enc_gru_size = hp.ref_enc_gru_size def forward(self, inputs, input_lengths=None): out = inputs.view(inputs.size(0), 1, -1, self.n_mel_channels) for conv, bn in zip(self.convs, self.bns): out = conv(out) out = bn(out) out = F.relu(out) out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] N, T = out.size(0), out.size(1) out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] if input_lengths is not None: input_lengths = torch.ceil(input_lengths.float() / 2 ** len(self.convs)) input_lengths = input_lengths.cpu().numpy().astype(int) out = nn.utils.rnn.pack_padded_sequence( out, input_lengths, batch_first=True, enforce_sorted=False ) self.gru.flatten_parameters() _, out = self.gru(out) return out.squeeze(0) def calculate_channels(self, L, kernel_size, stride, pad, n_convs): for _ in range(n_convs): L = (L - kernel_size + 2 * pad) // stride + 1 return L class MultiHeadAttention(nn.Module): """ input: query --- [N, T_q, query_dim] key --- [N, T_k, key_dim] output: out --- [N, T_q, num_units] """ def __init__(self, query_dim, key_dim, num_units, num_heads): super().__init__() self.num_units = num_units self.num_heads = num_heads self.key_dim = key_dim self.W_query = nn.Linear( in_features=query_dim, out_features=num_units, bias=False ) self.W_key = nn.Linear(in_features=key_dim, out_features=num_units, bias=False) self.W_value = nn.Linear( in_features=key_dim, out_features=num_units, bias=False ) def forward(self, query, key): querys = self.W_query(query) # [N, T_q, num_units] keys = self.W_key(key) # [N, T_k, num_units] values = self.W_value(key) split_size = self.num_units // self.num_heads querys = torch.stack( torch.split(querys, split_size, dim=2), dim=0 ) # [h, N, T_q, num_units/h] keys = torch.stack( torch.split(keys, split_size, dim=2), dim=0 ) # [h, N, T_k, num_units/h] values = torch.stack( torch.split(values, split_size, dim=2), dim=0 ) # [h, N, T_k, num_units/h] # score = softmax(QK^T / (d_k 
** 0.5)) scores = torch.matmul(querys, keys.transpose(2, 3)) # [h, N, T_q, T_k] scores = scores / (self.key_dim**0.5) scores = F.softmax(scores, dim=3) # out = score * V out = torch.matmul(scores, values) # [h, N, T_q, num_units/h] out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze( 0 ) # [N, T_q, num_units] return out class STL(nn.Module): """ inputs --- [N, token_embedding_size//2] """ def __init__(self, hp): super().__init__() self.embed = nn.Parameter( torch.FloatTensor(hp.token_num, hp.token_embedding_size // hp.num_heads) ) d_q = hp.ref_enc_gru_size d_k = hp.token_embedding_size // hp.num_heads self.attention = MultiHeadAttention( query_dim=d_q, key_dim=d_k, num_units=hp.token_embedding_size, num_heads=hp.num_heads, ) init.normal_(self.embed, mean=0, std=0.5) def forward(self, inputs): N = inputs.size(0) query = inputs.unsqueeze(1) keys = ( torch.tanh(self.embed).unsqueeze(0).expand(N, -1, -1) ) # [N, token_num, token_embedding_size // num_heads] style_embed = self.attention(query, keys) return style_embed class GST(nn.Module): def __init__(self, hp): super().__init__() self.encoder = ReferenceEncoder(hp) self.stl = STL(hp) def forward(self, inputs, input_lengths=None): enc_out = self.encoder(inputs, input_lengths=input_lengths) style_embed = self.stl(enc_out) return style_embed # - DEFAULTS = HParams( n_symbols=100, symbols_embedding_dim=512, mask_padding=True, fp16_run=False, n_mel_channels=80, # encoder parameters encoder_kernel_size=5, encoder_n_convolutions=3, encoder_embedding_dim=512, # decoder parameters n_frames_per_step=1, # currently only 1 is supported decoder_rnn_dim=1024, prenet_dim=256, prenet_f0_n_layers=1, prenet_f0_dim=1, prenet_f0_kernel_size=1, prenet_rms_dim=0, prenet_fms_kernel_size=1, max_decoder_steps=1000, gate_threshold=0.5, p_attention_dropout=0.1, p_decoder_dropout=0.1, p_teacher_forcing=1.0, # attention parameters attention_rnn_dim=1024, attention_dim=128, # location layer parameters attention_location_n_filters=32, 
attention_location_kernel_size=31, # mel post-processing network parameters postnet_embedding_dim=512, postnet_kernel_size=5, postnet_n_convolutions=5, # speaker_embedding n_speakers=123, # original nvidia libritts training speaker_embedding_dim=128, # reference encoder with_gst=True, ref_enc_filters=[32, 32, 64, 64, 128, 128], ref_enc_size=[3, 3], ref_enc_strides=[2, 2], ref_enc_pad=[1, 1], ref_enc_gru_size=128, # style token layer token_embedding_size=256, token_num=10, num_heads=8, ) GST(DEFAULTS) # # VITS common # ### LayerNorm # export class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): x = x.transpose(1, -1) x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) return x.transpose(1, -1) LayerNorm(3) # ### Flip # export class Flip(nn.Module): def forward(self, x, *args, reverse=False, **kwargs): x = torch.flip(x, [1]) if not reverse: logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) return x, logdet else: return x # ### Log # export class Log(nn.Module): def forward(self, x, x_mask, reverse=False, **kwargs): if not reverse: y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask logdet = torch.sum(-y, [1, 2]) return y, logdet else: x = torch.exp(x) * x_mask return x # ### ElementWiseAffine # export class ElementwiseAffine(nn.Module): def __init__(self, channels): super().__init__() self.channels = channels self.m = nn.Parameter(torch.zeros(channels, 1)) self.logs = nn.Parameter(torch.zeros(channels, 1)) def forward(self, x, x_mask, reverse=False, **kwargs): if not reverse: y = self.m + torch.exp(self.logs) * x y = y * x_mask logdet = torch.sum(self.logs * x_mask, [1, 2]) return y, logdet else: x = (x - self.m) * torch.exp(-self.logs) * x_mask return x # ### DDSConv # export class DDSConv(nn.Module): """ Dialted and Depth-Separable Convolution 
""" def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): super().__init__() self.channels = channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = p_dropout self.drop = nn.Dropout(p_dropout) self.convs_sep = nn.ModuleList() self.convs_1x1 = nn.ModuleList() self.norms_1 = nn.ModuleList() self.norms_2 = nn.ModuleList() for i in range(n_layers): dilation = kernel_size**i padding = (kernel_size * dilation - dilation) // 2 self.convs_sep.append( nn.Conv1d( channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding, ) ) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) def forward(self, x, x_mask, g=None): if g is not None: x = x + g for i in range(self.n_layers): y = self.convs_sep[i](x * x_mask) y = self.norms_1[i](y) y = F.gelu(y) y = self.convs_1x1[i](y) y = self.norms_2[i](y) y = F.gelu(y) y = self.drop(y) x = x + y return x * x_mask # ### ConvFLow # + # export import math from uberduck_ml_dev.models.transforms import piecewise_rational_quadratic_transform class ConvFlow(nn.Module): def __init__( self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, # tail_bound=5.0, tail_bound=10.0, ): super().__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.n_layers = n_layers self.num_bins = num_bins self.tail_bound = tail_bound self.half_channels = in_channels // 2 self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) self.proj = nn.Conv1d( filter_channels, self.half_channels * (num_bins * 3 - 1), 1 ) self.proj.weight.data.zero_() self.proj.bias.data.zero_() def forward(self, x, x_mask, g=None, reverse=False): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) h = self.convs(h, x_mask, g=g) h = self.proj(h) * x_mask b, c, t = x0.shape h = 
h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( self.filter_channels ) unnormalized_derivatives = h[..., 2 * self.num_bins :] x1, logabsdet = piecewise_rational_quadratic_transform( x1, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=reverse, tails="linear", tail_bound=self.tail_bound, ) x = torch.cat([x0, x1], 1) * x_mask logdet = torch.sum(logabsdet * x_mask, [1, 2]) if not reverse: return x, logdet else: return x # - cf = ConvFlow(192, 2, 3, 3) # NOTE(zach): figure out the shape of the forward stuff. # cf(torch.rand(2, 2, 1), torch.ones(2, 2, 1)) # ### WN # + # export from uberduck_ml_dev.utils.utils import fused_add_tanh_sigmoid_multiply class WN(torch.nn.Module): def __init__( self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, ): super(WN, self).__init__() assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels self.p_dropout = p_dropout self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() self.drop = nn.Dropout(p_dropout) if gin_channels != 0: cond_layer = nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) self.cond_layer = weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i padding = int((kernel_size * dilation - dilation) / 2) in_layer = nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding, ) in_layer = weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2 * hidden_channels else: res_skip_channels = hidden_channels res_skip_layer = nn.Conv1d(hidden_channels, 
res_skip_channels, 1) res_skip_layer = weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward(self, x, x_mask, g=None, **kwargs): output = torch.zeros_like(x) n_channels_tensor = torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) for i in range(self.n_layers): x_in = self.in_layers[i](x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in) acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask output = output + res_skip_acts[:, self.hidden_channels :, :] else: output = output + res_skip_acts return output * x_mask def remove_weight_norm(self): if self.gin_channels != 0: remove_weight_norm(self.cond_layer) for l in self.in_layers: remove_weight_norm(l) for l in self.res_skip_layers: remove_weight_norm(l) # - # ### ResidualCouplingLayer # export class ResidualCouplingLayer(nn.Module): def __init__( self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=0, mean_only=False, ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.n_layers = n_layers self.half_channels = channels // 2 self.mean_only = mean_only self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) self.enc = WN( hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels, ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() def forward(self, x, x_mask, g=None, reverse=False): x0, x1 = torch.split(x, 
[self.half_channels] * 2, 1) h = self.pre(x0) * x_mask h = self.enc(h, x_mask, g=g) stats = self.post(h) * x_mask if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1) else: m = stats logs = torch.zeros_like(m) if not reverse: x1 = m + x1 * torch.exp(logs) * x_mask x = torch.cat([x0, x1], 1) logdet = torch.sum(logs, [1, 2]) return x, logdet else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) return x # ### ResBlock # + # export class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.convs1 = nn.ModuleList( [ weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]), ) ), weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]), ) ), weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]), ) ), ] ) self.convs1.apply(init_weights) self.convs2 = nn.ModuleList( [ weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), ] ) self.convs2.apply(init_weights) def forward(self, x, x_mask=None): for c1, c2 in zip(self.convs1, self.convs2): xt = F.leaky_relu(x, LRELU_SLOPE) if x_mask is not None: xt = xt * x_mask xt = c1(xt) xt = F.leaky_relu(xt, LRELU_SLOPE) if x_mask is not None: xt = xt * x_mask xt = c2(xt) x = xt + x if x_mask is not None: x = x * x_mask return x def remove_weight_norm(self): for l in self.convs1: remove_weight_norm(l) for l in self.convs2: remove_weight_norm(l) class ResBlock2(torch.nn.Module): def __init__(self, channels, kernel_size=3, 
dilation=(1, 3)): super(ResBlock2, self).__init__() self.convs = nn.ModuleList( [ weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]), ) ), weight_norm( nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]), ) ), ] ) self.convs.apply(init_weights) def forward(self, x, x_mask=None): for c in self.convs: xt = F.leaky_relu(x, LRELU_SLOPE) if x_mask is not None: xt = xt * x_mask xt = c(xt) x = xt + x if x_mask is not None: x = x * x_mask return x def remove_weight_norm(self): for l in self.convs: remove_weight_norm(l) # - # export LRELU_SLOPE = 0.1
nbs/models.common.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="ecYnWlyCm7NO"
# # **Regular Expressions**
#
# - A sequence of characters that defines a search pattern.
# - '\d' means any character between 0 and 9 where d is digit (Meta character)
# - Literal characters are characters that are specified and will always occur.
# - Meta characters are characters that are more generalized, not specific.
# - E.g. to match a number in the format "917-55-1234", the regular expression will be "\d\d\d-\d\d\d-\d\d\d\d"

# + colab={"base_uri": "https://localhost:8080/"} id="KVI-Fijrm-0J" outputId="0325bec7-1e75-41ee-a2ea-b39de556f435"
# For example matching a time format
import re

line = "Jan 3 07:57:39 Kali sshd[1397]: Failed password 02:12:36 for root from 172.16.12.55 port 34380 ssh2"
regex = "\d+"
result = re.findall(regex, line)  # returns every run of digits as a list
first_result = re.findall(regex, line)[0]  # returns only the first match
print(result)
print(first_result)

# + [markdown] id="vvaHNnp9pAhK"
# - '.' means any character, and '*' means 0 or more
# - For example, if we write a regular expression "rainbow.*", it means all data that begins with rainbow, could be rainbow123, rainbow boy, rainbow city etc.
# - '.*' is a wildcard that matches the universe

# + [markdown] id="X0xsYv9RpK7J"
# ## Meta characters
#
# ### Single Characters
#
# - '\d' matches any character between 0 and 9 where d means digit
# - '\w' matches any character A-Za-z0-9 where w means word
# - '\s' matches any whitespace (can match a space, a tab etc.)
# - '.' matches any character whatsoever
# - Capitalizing 'd' or 'w' or 's' makes the expression the opposite
#
# ### Quantifiers
#
# - They are meta characters that modify the previous character in a regular expression (e.g. how many of those things you want to match in a row)
# - '*' matches 0 or more
# - '+' matches 1 or more
# - '?' matches 0 or 1 (optional)
# - {n} matches exactly n
# - {min, max}
#
# For example, '\w\w\w' means match 3 word characters in a row. Also, '\w{3}' does the same thing

# + colab={"base_uri": "https://localhost:8080/"} id="FxVFfyEPovyc" outputId="ba9cd2ca-4c9b-4080-a094-f3dc04d5b0e0"
word = "I just realized how interesting coding is"
regex = "\w+"
result = re.findall(regex, word)  # returns each word as a list
print(result)

# + colab={"base_uri": "https://localhost:8080/"} id="5BqAIt4AqQ9k" outputId="7efaeee7-1720-46f9-e365-130039236822"
word = "The colors of the rainbow has many colours and the rainbow does not have a single colour"
regex = "colou?rs?"  # '?' makes the character directly before it optional
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + colab={"base_uri": "https://localhost:8080/"} id="IFp21L0Sq307" outputId="425f948f-a51d-4a93-b565-2c5397faa128"
word = "I just realized how interesting coding is"
regex = "\w{3}"
# NOTE: this returns every non-overlapping run of 3 word characters
# (e.g. "realized" yields "rea" and "liz"), not just the first three
# characters of each word.
result = re.findall(regex, word)
print(result)

# + [markdown] id="vQTqzoL1rdrK"
# In the example below, we get to see that '.*' is greedy by default. It will continue to match until it can no more match

# + colab={"base_uri": "https://localhost:8080/"} id="oAD9hgn8rKQU" outputId="5606cc84-9d89-4570-8c6a-7abb770c81d9"
word = "[Google](http://google.com), [test] \n [itp](http:itp.nyu.edu)"
regex = r"\[.*\]"
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="nYcl9VrisUNq"
# Note that '?' paired with a quantifier makes '.*' not greedy

# + colab={"base_uri": "https://localhost:8080/"} id="5i_NgQtNsV9k" outputId="ffcd052d-8937-43e3-ef25-3297c8c004a3"
word = "[Google](http://google.com), [test] \n [itp](http:itp.nyu.edu)"
regex = "\[.*?\]"
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="cKHmcyuHs1_F"
# ### Position
#
# - They are meta characters that match the position of a character in the string itself.
# - '^' means beginning
# - '$' means end
# - '\b' word boundary (it is advisable to escape it, i.e. "\\b", or use a raw string, otherwise \b means a backspace character)

# + colab={"base_uri": "https://localhost:8080/"} id="3avxo-dtseO6" outputId="a57a79e7-47cc-4c5b-fdcb-7d9f3820aae2"
word = "The colors of the rainbow has many colours and the rainbow does not have a single colour"
# regex = "\w+$"  # means 1 or more word characters at the end of a line.
# regex = "^\w+$"  # means 1 or more word characters at the beginning and end of a line (equally just a line with just one word).
regex = "^\w+"  # means the beginning of a line followed by 1 or more word characters
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + colab={"base_uri": "https://localhost:8080/"} id="gRbjRDmZtzvT" outputId="d0de001b-f15a-42ea-e131-5d557b564a1d"
word = "The colors of the rainbow has many colours and the rainbow does not have a single colour"
regex = "\\b\w{3}\\b"  # this matches whole words of exactly 3 characters
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + colab={"base_uri": "https://localhost:8080/"} id="RqxzN1iMuclr" outputId="b278de26-d1cc-425e-976e-3cbfad082762"
word = "The colors of the rainbow has many colours and the rainbow does not have a single colour"
regex = "\\b\w{5,9}\\b"  # this matches whole words of 5 to 9 characters
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + colab={"base_uri": "https://localhost:8080/"} id="NC7XLpLQupX4" outputId="579fdea6-71c2-4e7a-a999-c1d3802c538b"
# NOTE(review): this cell is about object copying, not regular expressions --
# it looks out of place in this notebook.
# A shallow copy still references the original nested values, while a deep copy
# recursively copies with no shared references.
import copy

x = [1, [2]]
y = copy.copy(x)
z = copy.deepcopy(x)
y is z

# + [markdown] id="Z0n3xq8vwJIx"
# ## Character classes
#
# - Character classes are the constructs that appear in between square brackets.
# - Each character inside the square brackets is an alternative to the others
# - Also, characters in the square brackets do not possess their meta characteristics, instead they are just literal characters.

# + colab={"base_uri": "https://localhost:8080/"} id="AgYmw9nvwa-I" outputId="5e842d02-e1af-457f-b5ef-8e8225deb88e"
word = "lynk is not the correct spelling of link"
regex = "l[yi]nk"  # this matches either link or lynk
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="9UvkOvrXxtLL"
# - The only two special characters inside the square brackets are the '-' and '^'
# - '-' inside a square brackets can be used when we want to get a range of strings, e.g. 'a-z1-9' matches any character from a to z and from 1 to 9

# + colab={"base_uri": "https://localhost:8080/"} id="ja2D89HMwbsS" outputId="362e2f8c-06c8-4813-8a03-b51772d276dd"
word = "I am in my 400L, I am currently XX years of age in the year 2018"
regex = "[0-3]{2}"  # matches two consecutive characters, each in the range 0 to 3
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="YbSqhsEkzXYb"
# - '^' inside a square brackets can be used when we want to get anything that is not amongst the remaining characters after it.
# - Note that if '^' is not located at the beginning, right after the opening square bracket, then it isn't a special/meta character anymore, but a literal one.

# + colab={"base_uri": "https://localhost:8080/"} id="2nenn6vhx2Yk" outputId="f84254a5-ee67-4cd9-b196-20b215f2fd1c"
word = "I am in my 400L, I am currently XX years of age in the year 2018"
# '^' negates the class: this matches two consecutive characters that are
# NOT in the range 0 to 3 (the previous cell's comment did not apply here).
regex = "[^0-3]{2}"
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="edolsnTD0HuO"
# ### Alternation
#
# - We know that in Character classes, each character inside the square brackets is an alternative to the others, which is a limitation.
# - With alternation, multiple strings can be alternatives to each other.
# - For example, in '(com|net)', we mean 'com' or 'net'.

# + colab={"base_uri": "https://localhost:8080/"} id="kBO9RpE2zci7" outputId="74d00aa8-63d8-4028-9045-af4faf1a1afe"
word = "I am in my 400L, I am currently XX years of age in the year 2018. My email addresses are <EMAIL>, <EMAIL>, <EMAIL>"
regex = "\w+@\w+\.(?:com|net|org|live|edu)"  # this matches email addresses
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="lPbMXJlf2b2x"
# > ## Capturing Groups
#
# - Suppose we have a string '212-555-1234', and we want to match it, we use:
#   "\d{3}-(\d{3})-(\d{4})"
# - Note that the whole string is automatically grouped by regex as "GROUP 0".
# - Also, using a bracket in this context signifies that the content of the bracket is "Group 1" and "Group 2" respectively.
# - Accessing each group is with the use of a '$' or '\'. e.g. $1 or \1 signifies 'GROUP 1'

# + colab={"base_uri": "https://localhost:8080/"} id="G1ZcInXL2ZMS" outputId="3cf3d87f-45a6-4f6e-da3a-0ed555223313"
word = "These are some phone numbers 917-555-1234. Also, you can call me at 646.555.1234 and of course I'm always reachable at (212)867-5509"
regex = "\(?\d{3}[-.)]\d{3}[-.]\d{4}"  # this matches phone numbers
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + colab={"base_uri": "https://localhost:8080/"} id="qBPe5h4m3BFc" outputId="f77fbe9a-fcd5-4954-cbfe-ba834b59a93f"
word = "[Google](http://google.com), [test] \n [itp](http:itp.nyu.edu)"
regex = "\[.*?\]\(http.*?\)"  # matches a markdown link: the link text and the URL
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="qrgQAAzZ3-dR"
# - To replace with the name of the link and the link itself in an html format, we first group them (i.e. "\[(.*?)\]\((http.*?)\)"

# + colab={"base_uri": "https://localhost:8080/"} id="SFVmTgVs4Cyp" outputId="4a3d7799-7d02-4f8c-bb19-7787ab43156c"
word = "2017-07-05 16:04:18.000000"
regex = "\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}"
result = re.findall(regex, word)  # returns all of the matches as a list
print(result)

# + [markdown] id="fAWMXj3u4-rn"
# ### re.sub

# + colab={"base_uri": "https://localhost:8080/"} id="kOfTqvHB4Khd" outputId="e2b877fc-6921-4f0d-a114-fd7575528d0d"
print(re.sub('vegetables', 'pie', 'I would like some vegetables.'))

# + colab={"base_uri": "https://localhost:8080/"} id="CuiZSLyG5LQ1" outputId="ce47511a-83c7-47ab-8407-4fcaabdf04ae"
veggie_request = 'I would like some vegetables, vitamins, and water.'
print(re.sub('vegetables|vitamins|water', 'pie', veggie_request))

# + colab={"base_uri": "https://localhost:8080/"} id="ZsFSD1ZU5msY" outputId="4d49fd8e-86c9-41a8-af1d-79f31687e6b3"
messy_phone_number = '(123) 456-7890'
print(re.sub(r'\D', '', messy_phone_number))  # \D matches all non-digit characters

# + id="q9BVx4rA6PCn"
Miscellaneous/02. Regular Expressions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %run imports.py
from datetime import datetime

import pandas as pd


def dateparse(value):
    """Parse a 'dd-mm-yy' sampling-date string into a datetime.

    Bug fix: the original used ``pd.datetime.strptime`` -- ``pd.datetime``
    was deprecated in pandas 0.25 and removed in pandas 1.0, so that call
    crashes on any current pandas.  ``datetime.strptime`` is the direct
    stdlib equivalent.
    """
    return datetime.strptime(value, '%d-%m-%y')


# +
# Load the 2014 Delhi air-quality readings, parse the sampling date, drop the
# station-location column (not needed for monthly averages) and sort by date.
data14 = pd.read_csv(
    "cpcb_dly_aq_delhi-2014.csv",
    parse_dates=['Sampling Date'],
    date_parser=dateparse,
).drop("Location of Monitoring Station", axis=1).sort_values("Sampling Date")

# Exclude industrial-area monitoring stations.
data14 = data14[data14["Type of Location"] != "Industrial Area"]
# -

# Bucket each sample into its calendar month, then average the numeric
# pollutant columns per month.  "PM 2.5" is dropped from the result
# (presumably it is too sparse in the 2014 data -- confirm against the CSV).
data14["DateMonth"] = data14["Sampling Date"].dt.to_period("M")
data14 = data14.groupby("DateMonth").mean().drop("PM 2.5", axis=1)
data14
Air-Pollution-Levels-Exploratory-Data-Analysis-master/data_2014.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from binance.spot import Spot


# + tags=[]
def _side_frame(symbol, side, levels):
    """Build one side of the order book as a numeric DataFrame.

    symbol -- trading pair, e.g. 'ETHBRL'
    side   -- 'bids' or 'asks' (stored in the 'type' column)
    levels -- list of [price, qty] pairs as returned by the Binance depth API
    """
    # Unzip the [[price, qty], ...] pairs once instead of calling
    # zip(*levels) twice as the original did.
    prices, quantities = zip(*levels)
    frame = pd.DataFrame({'symb': symbol, 'type': side, 'price': prices, 'qty': quantities})
    for col in ['price', 'qty']:
        frame[col] = pd.to_numeric(frame[col])  # the API returns strings
    return frame


def orders(crypto='ETHBRL'):
    """Fetch the current order book for *crypto* from Binance.

    Returns a tuple ``(combined, bids_df, asks_df)`` where *combined* is the
    concatenation of the bid and ask frames.
    """
    spot_client = Spot(base_url="https://api.binance.com")
    # Renamed from 'orders': the local previously shadowed this function's name.
    depth = spot_client.depth(crypto)
    df1 = _side_frame(crypto, 'bids', depth['bids'])
    df2 = _side_frame(crypto, 'asks', depth['asks'])
    df_final = pd.concat([df1, df2])
    return df_final, df1, df2
# -

# Bug fix: the original did ``orders, buy, sell = orders()``, which rebound the
# name ``orders`` from the function to a DataFrame -- the function could never
# be called a second time.  Use a distinct name for the result.
order_book, buy, sell = orders()
order_book.to_csv('orders.csv')
Orders.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Dimensionality Reduction & Visualization

# +
import numpy as np

# import sklearn stuff
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

# set up for plotting as interactive figures in the notebook
# %matplotlib notebook
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# -

# ## Load some data

# load the iris dataset
iris = datasets.load_iris()

# note that the iris data is 4-dimensional
iris.data.shape

# let's look at the first 10 elements
iris.data[:10]

# ## Plot the data
# Since the iris dataset has 4 features, we'll need to plot them as pairs;
# we can use color to represent class label.

# +
# The original notebook repeated the same ~20-line plotting cell six times
# (once per feature pair); the only differences between copies were the two
# feature indices and the title, so the cell is factored into a helper.
COLORS = ['darkred', 'blue', 'orange']
SPECIES = ['setosa', 'versicolor', 'virginica']


def plot_feature_pair(x_dim, y_dim, title):
    """Scatter-plot two of the four iris features against each other.

    x_dim, y_dim -- column indices (0-3) into iris.data / iris.feature_names
    title        -- figure title
    Each sample is drawn as one point, colored by its class label.
    """
    plt.figure()  # make a new figure to plot in
    # plot the points one at a time, using the target (0, 1, or 2) to pick the color
    for i in range(len(iris.data)):
        plt.scatter(iris.data[i][x_dim], iris.data[i][y_dim],
                    color=COLORS[iris.target[i]])
    # axis labels come straight from the data set
    plt.xlabel(iris.feature_names[x_dim])
    plt.ylabel(iris.feature_names[y_dim])
    # legend "handles" attach each color to a species name
    handles = [mpatches.Patch(color=c, label=s) for c, s in zip(COLORS, SPECIES)]
    plt.legend(handles=handles)
    plt.title(title)


plot_feature_pair(0, 1, 'Iris dataset (first two dimensions)')
# -

# # Plot the other combinations of axes
# A total of 6 plots (including the one above) covers all possible pairs of
# the 4 dimensions.

# +
plot_feature_pair(1, 2, 'Iris dataset (second and third dimensions)')
plot_feature_pair(2, 3, 'Iris dataset (third and fourth dimensions)')
plot_feature_pair(3, 0, 'Iris dataset (fourth and first dimensions)')
plot_feature_pair(0, 2, 'Iris dataset (first and third dimensions)')
plot_feature_pair(1, 3, 'Iris dataset (second and fourth dimensions)')
# -

# ## Run PCA
# Here, we'll apply principal component analysis (PCA) to the dataset.
# We'll use `n_components=2` to indicate we want to reduce our dimensionality to 2

# set up a PCA learner
pca = PCA(n_components=2)

# actually run the fit algorithm
eigenbasis = pca.fit(iris.data)

# transform our data using the learned transform
iris2d = eigenbasis.transform(iris.data)

# note that our transformed data is now 2-dimensional
iris2d.shape

# again, let's look at the first 10 elements; note that they are 2 dimensional, rather than 4
iris2d[:10]

# ### Examining components
# We can look at the actual "principal components," which we're using as the basis for
# our transformed data space. Since each component is a vector in the original data
# space, we can see what "axis" in the original space is the one of primary variance.
#
# Since we said to use the top 2 components, we're going to have two vectors, each of
# length 4 (since our original data was 4 dimensional).
#
# We can also show the amount of the total variance explained by each component, which
# tells us how "important" they are.

# the actual components
print("principal components:\n", pca.components_)

# let's also look at how much of the total variance we were able to cover with 2 dimensions
print('percentage of variance explained by first 2 principal components:', pca.explained_variance_ratio_)

# ## Make a plot of the 2D "transformed" data
# example adapted from http://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py

# +
# make a new figure
plt.figure()

# pick some colors to use
colors = ['navy', 'turquoise', 'darkorange']

# plot our points with colors and labels.
# Bug fix: the original loop target was ``iris.target_name``, which assigned an
# attribute onto the iris Bunch on every iteration instead of binding a local
# variable.  A plain local name gives the same plot without mutating `iris`.
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
    plt.scatter(iris2d[iris.target == i, 0], iris2d[iris.target == i, 1],
                color=color, label=target_name)
plt.legend(loc='best')
plt.title('PCA of IRIS dataset')
# -

# here's an alternative version of plotting this data that may be easier to understand:
colors = ['red', 'blue', 'green']
plt.figure()
# loop over examples, and plot each one
for i in range(len(iris2d)):
    point = iris2d[i]
    classLabel = iris.target[i]
    # plot a dot at an (x, y) coordinate, using the specified color.
    plt.scatter(point[0], point[1], color=colors[classLabel])

# # Compare this to the 6 plots from before
#
# One common thing to observe is that Setosa remains the same and can be distinguished
# easily. The other two classes are very close to each other here also. We can try
# separating them with a line but there would be a few points on the other side of the
# line for each of these two classes.
#
# Pros - allows estimating probabilities in high-dimensional data
#      - Faster processing
# Cons - might be too expensive for many applications
Homeworks/example 2 _ML_Dimensionality reduction_12_Mummadi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Data Exploration # # The idea of this notebook is to explore the data in the satelite images that come in. Aim to explore the different channels of the satelite data as well and understand the strcuture of the data produced by the 'data generator'. # # *Unclear if this work has been done before, but still useful to explore the data for myself # + import os from nowcasting_dataset.dataset import NetCDFDataset, SAT_VARIABLE_NAMES import plotly.graph_objects as go import plotly import pandas as pd DATA_PATH = 'gs://solar-pv-nowcasting-data/prepared_ML_training_data/v4/' TEMP_PATH = '.' # + # set up data generator train_dataset = NetCDFDataset( 24_900, os.path.join(DATA_PATH, 'train'), os.path.join(TEMP_PATH, 'train')) train_dataset.per_worker_init(1) train_dataset_iterator = iter(train_dataset) # - # get batch of data, this may take a few seconds to run data = next(train_dataset_iterator) # get the timestamp of the image sat_datetime = pd.to_datetime(data['sat_datetime_index'][0, 0],unit='s') # + print(list(data.keys())) print('') print('Shape of data') for k,v in data.items(): print(k,v.shape) # -
notebooks/2021-08/2021-08-20/data_exploration.ipynb
# ##### Copyright 2020 Google LLC.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# # rostering_with_travel

# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/rostering_with_travel.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/rostering_with_travel.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>

# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.

# !pip install ortools

# +
from ortools.sat.python import cp_model


def SolveRosteringWithTravel():
    """Build and solve a small CP-SAT model that assigns jobs to machines.

    Each job has a duration, a release/due window and a location; each job must
    be performed on exactly one machine, machines process at most one interval
    at a time, and an optional unit-length 'travel' interval can follow a job.
    Minimizes a cost that charges more for higher-numbered machines, then
    prints the solver status and search statistics.
    """
    model = cp_model.CpModel()

    # [duration, start, end, location]
    jobs = [[3, 0, 6, 1], [5, 0, 6, 0], [1, 3, 7, 1], [1, 3, 5, 0],
            [3, 0, 3, 0], [3, 0, 8, 0]]

    # NOTE(review): max_length is only referenced by the commented-out
    # AddCumulative below, so it is currently unused.
    max_length = 20
    num_machines = 3
    all_machines = range(num_machines)
    horizon = 20
    travel_time = 1

    num_jobs = len(jobs)
    all_jobs = range(num_jobs)

    # Per-model bookkeeping: main intervals, per-machine optional copies,
    # per-job performed/travel literals, and start/end variables.
    intervals = []
    optional_intervals = []
    performed = []
    starts = []
    ends = []
    travels = []

    # One list of optional intervals per machine (feeds AddNoOverlap below).
    for m in all_machines:
        optional_intervals.append([])

    for i in all_jobs:
        # Create main interval: start within [release, horizon], end by due date.
        start = model.NewIntVar(jobs[i][1], horizon, 'start_%i' % i)
        duration = jobs[i][0]
        end = model.NewIntVar(0, jobs[i][2], 'end_%i' % i)
        interval = model.NewIntervalVar(start, duration, end, 'interval_%i' % i)
        starts.append(start)
        intervals.append(interval)
        ends.append(end)

        job_performed = []
        job_travels = []
        for m in all_machines:
            performed_on_m = model.NewBoolVar('perform_%i_on_m%i' % (i, m))
            job_performed.append(performed_on_m)

            # Create an optional copy of interval to be executed on a machine.
            # location0 is fixed to the job's location (both bounds equal).
            location0 = model.NewIntVar(jobs[i][3], jobs[i][3],
                                        'location_%i_on_m%i' % (i, m))
            start0 = model.NewIntVar(jobs[i][1], horizon,
                                     'start_%i_on_m%i' % (i, m))
            end0 = model.NewIntVar(0, jobs[i][2], 'end_%i_on_m%i' % (i, m))
            interval0 = model.NewOptionalIntervalVar(
                start0, duration, end0, performed_on_m,
                'interval_%i_on_m%i' % (i, m))
            optional_intervals[m].append(interval0)

            # We only propagate the constraint if the task is performed on
            # the machine: the optional copy starts with the main interval.
            model.Add(start0 == start).OnlyEnforceIf(performed_on_m)

            # Adding travel constraint: an optional unit-length travel interval
            # that, when present, starts exactly when the job copy ends.
            travel = model.NewBoolVar('is_travel_%i_on_m%i' % (i, m))
            startT = model.NewIntVar(0, horizon, 'start_%i_on_m%i' % (i, m))
            endT = model.NewIntVar(0, horizon, 'end_%i_on_m%i' % (i, m))
            intervalT = model.NewOptionalIntervalVar(
                startT, travel_time, endT, travel,
                'travel_interval_%i_on_m%i' % (i, m))
            optional_intervals[m].append(intervalT)
            job_travels.append(travel)
            model.Add(end0 == startT).OnlyEnforceIf(travel)

        performed.append(job_performed)
        travels.append(job_travels)
        # Each job is performed on exactly one machine.
        model.Add(sum(job_performed) == 1)

    # NOTE(review): this block only creates is_job_earlier literals for
    # machine 1 / job 2 and never uses them in any other constraint, so it
    # appears to be leftover experimentation -- confirm intent.
    for m in all_machines:
        if m == 1:
            for i in all_jobs:
                if i == 2:
                    for c in all_jobs:
                        if (i != c) and (jobs[i][3] != jobs[c][3]):
                            is_job_earlier = model.NewBoolVar(
                                'is_j%i_earlier_j%i' % (i, c))
                            model.Add(starts[i] < starts[c]).OnlyEnforceIf(
                                is_job_earlier)
                            model.Add(starts[i] >= starts[c]).OnlyEnforceIf(
                                is_job_earlier.Not())

    # Max Length constraint (modeled as a cumulative)
    # model.AddCumulative(intervals, demands, max_length)

    # Choose which machine to perform the jobs on: intervals scheduled on the
    # same machine (job copies and their travel intervals) may not overlap.
    for m in all_machines:
        model.AddNoOverlap(optional_intervals[m])

    # Objective variable: performing a job on machine m costs 10 * (m + 1),
    # so the solver prefers lower-numbered machines.
    total_cost = model.NewIntVar(0, 1000, 'cost')
    model.Add(total_cost == sum(performed[j][m] * (10 * (m + 1))
                                for j in all_jobs for m in all_machines))
    model.Minimize(total_cost)

    # Solve model.
    solver = cp_model.CpSolver()
    result = solver.Solve(model)

    print()
    print(result)
    print('Statistics')
    print(' - conflicts : %i' % solver.NumConflicts())
    print(' - branches : %i' % solver.NumBranches())
    print(' - wall time : %f ms' % solver.WallTime())


SolveRosteringWithTravel()
examples/notebook/contrib/rostering_with_travel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The following exercise is from <u> Computational Physics </u> by <NAME>

# ### Exercise 2.6: Planetary Orbits
# The orbit in space of one body around another, such as planets around the Sun, need
# not be circular. In general it takes the form of an ellipse, with the body sometimes
# closer in and sometimes further out. If you are given the distance $l_1$ of the
# closest approach that a planet makes to the Sun, also called <i>perihelion</i>, and
# its linear velocity $v_1$ at perihelion, then any other property of the orbit can be
# calculated from these two as follows.

# <b>a)</b> Kepler's second law tells us that the distance $l_2$ and velocity $v_2$ of
# the planet at its most distant point, or <i>aphelion</i>, satisfies $l_2v_2 = l_1v_1$.
# At the same time the total energy, kinetic plus gravitational, of a planet with
# velocity $v$ and distance $r$ from the Sun is given by:
# $$ E = \frac{1}{2} m v^{2} - G \frac{mM}{r}$$
# where $m$ is the planet's mass, $M = 1.9891 \times 10^{30}\ kg$ is the mass of the
# Sun, and $G = 6.6738 \times 10^{-11}\ m^3kg^{-1}s^{-2}$ is Newton's gravitational
# constant.
#
# Given that energy must be conserved, $v_2$ is the smaller root of the quadratic
# equation:
#
# $$v_2^2 - \frac{2GM}{v_1 l_1}v_2 - \left[v_1^2 - \frac{2GM}{l_1}\right] = 0 $$
# Once we solve for $v_2$ we can calculate $l_2$ using the relation $l_2 = l_1 v_1 / v_2$.

# +
# TO DO: By hand, solve for v_2 using quadratic equation above (note: if you do it
# right, it reduces very nicely)
# BONUS CHALLENGE: Can you write the equation for v_2 using LaTex?
# To format an equation using Latex, make sure you're in a Markdown cell.
# Wrap everything you want formatted in dollar signs, for example: $ y = ax + b $
# Reference my equations above to see how it works!
# (Also Google is your best friend when it comes to LaTex)
# -

# $v_2 = \frac{2GM}{v_1l_1}-v_1$

# <b>b)</b> Given the value of $v_1$, $l_1$, and $l_2$, other parameters of the orbit
# are given by simple formulas that can be derived from Kepler's laws and the fact
# that the orbit is an ellipse:
#
# Semi-major axis: $a = \frac{1}{2} (l_1 + l_2) $
#
# Semi-minor axis: $ b = \sqrt{l_1 l_2} $
#
# Orbital period: $ T = \frac{2\pi a b}{l_1 v_1} $
#
# Orbital eccentricity: $e = \frac{l_2 - l_1}{l_2 + l_1} $

# Write a function that takes the distance to the Sun and the velocity at perihelion,
# then calculates and prints the quantities $l_2$, $v_2$, $T$ and $e$.

from numpy import sqrt  # imports sqrt() function
import math


def orbit_calculations(v1, l1, G, M):
    """Compute, print and return the orbit properties from perihelion conditions.

    Parameters:
        v1 -- linear velocity at perihelion (m/s)
        l1 -- perihelion distance to the Sun (m)
        G  -- Newton's gravitational constant (m^3 kg^-1 s^-2)
        M  -- mass of the central body, e.g. the Sun (kg)

    Returns:
        (v2, l2, T, e): aphelion velocity (m/s), aphelion distance (m),
        orbital period (s) and orbital eccentricity.

    The original version ended in a useless ``pass`` and returned None;
    returning the computed values makes the function testable while keeping
    the printed output unchanged.
    """
    # Aphelion velocity: the smaller root of the energy-conservation
    # quadratic, which reduces to v2 = 2GM/(v1*l1) - v1 (part a).
    v2 = (2 * G * M) / (v1 * l1) - v1
    print("v2 = ", v2)

    # Kepler's second law: l2 * v2 == l1 * v1.
    l2 = (l1 * v1) / v2
    print("l2 = ", l2)

    a = (l1 + l2) / 2  # semi-major axis
    b = sqrt(l1 * l2)  # semi-minor axis

    # Orbital period from the ellipse area swept at constant rate l1*v1/2.
    T = (2 * math.pi * a * b) / (l1 * v1)
    print("T = ", T)

    e = (l2 - l1) / (l2 + l1)  # orbital eccentricity
    print("e = ", e)

    return v2, l2, T, e


# <b> c) </b> Test your program by having it calculate the properties of the orbits of
# the Earth (for which $l_1 = 1.4710 \times 10^{11} m$ and
# $v_1 = 3.0287 \times 10^4 ms^{-1}$)
#
# (You should find the orbital period of the Earth is one year)

# TO DO: Calculate the properties of Earth's orbit
v1 = 3.0287E4  # velocity of earth around the sun
l1 = 1.4710E11  # distance of earth from the sun
G = 6.67E-11  # gravitational constant
M = 1.98E30  # mass of the sun
orbit_calculations(v1, l1, G, M)

# Period = 31879548.178257972 seconds or 1.01 years

# <b> d) </b> Test your program by having it calculate the properties of the orbit of
# Halley's comet ($l_1 = 8.7830 \times 10^{10} m$ and
# $v_1 = 5.4529 \times 10^4 ms^{-1}$).
#
# What is the orbital period of Halley's comet?

# TO DO: Calculate the properties of Halley's comet's orbit
v1 = 5.4529E4  # velocity of Halley's comet
l1 = 8.7830E10  # distance from Halley's comet to sun
G = 6.67E-11  # gravitational constant
M = 1.98E30  # mass of the sun
orbit_calculations(v1, l1, G, M)
Assignment2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="wwKqWGYJF4jX"
# # "Reproducing Reformer: Our Amazing Submission & Team Experience"
#
# > A fast.ai collaboration with amazing peeps participating in the 2020 Papers With Code Reproducibility Challenge
#
# - badges: true
# - categories: [nlp, reformer, transformers, language-modelling]
# - image: images/papers_with_code.png
# -

# *Where we all met?* **[here](https://forums.fast.ai/t/reproducibility-challenge-2020-fastai-folks-interested/80336)**❤️

# ### The Challenge

# Way back in October 2020 the Papers With Code [ML Reproducibility Challenge 2020](https://paperswithcode.com/rc2020) was launched and shared in the fast.ai [forums](https://forums.fast.ai/). A few of us jumped at the chance to test our ML knowledge and push our skills. Fast forward 110 days since that initial post and we delivered [our Reformer Reproducibility submission](https://openreview.net/forum?id=3s8Y7dHYkN-) via OpenReview!!🤩
#
# Our whole project is documented here: [Project](https://arampacha.github.io/reformer_fastai/)
#
# The Wandb reports we made: [reports](https://wandb.ai/fastai_community/reformer-fastai/reports/Reformer-Reproducibility-Report---Vmlldzo0MzQ1OTg)
#
# ```Here are a few reflections on our experience: what we enjoyed, tools we used and what we would have done differently:```
#
# ### TLDR;
#
# * Working as a team pushes your motivation, your skills and your throughput
# * [nbdev](https://nbdev.fast.ai/) for development, Weights & Biases for tracking and Discord for communication
# * We could have made better use of task/project management tools, or maybe we needed a different tool
# * Next time we’ll start experiments sooner and maybe pick a more practical paper
# * It was a massive learning experience and a lot of fun
#
![](my_icons/20210211_reformer_reproducibiliy/flags.png) # # ### Why participate # # Implementing code from scratch is much more enjoyable and meaningful when there is a direct application, e.g. working towards this reproducibility challenge. Spending weeks and months focussed on a single paper forces you to understand the paper down to the last full stop. It also gives you a great appreciation of how difficult writing a good paper is, you see almost every word and sentence is chosen carefully to communicate a particular concept, problem or model setting. # # ### N heads are better than one a.k.a. Multihead Attention # # Our team was distributed across 6 countries and everyone had a somewhat different background, set of skills and personality. This mix was definitely beneficial for getting things done much more smoothly. Having 2 x N eyes researching implementation information or reviewing code really improved coverage and sped up the entire process. It also makes debugging much faster! # # ![](my_icons/20210211_reformer_reproducibiliy/doh.png) # # # Writing code that the entire team will use also meant writing cleaner code with more tests so that it was as clear as possible for your teammates. And finally, during a long project like this it’s easy to get distracted or lazy, however seeing everyone else delivering great work quickly pulls you back into line! # # ![](my_icons/20210211_reformer_reproducibiliy/christmas.png) # # ### Good tools Are key for us : A good tool improves the way you work. A great tool improves the way you think. # # Read more: https://www.wisesayings.com/tool-quotes/#ixzz6mZj38LCP # # # **nbdev** # # The [nbdev](https://nbdev.fast.ai/) literate programming environment from fast.ai was super convenient to minimise the project’s development friction. Writing tests as we developed meant that we caught multiple bugs early and auto-generation of docs lends itself immensely to the reproducibility of your code. 
Most of us will be using this again for our next projects. # # **Weights & Biases** # # Weights & Biases generously gave us a team account which enabled us all to log our experiments to a single project. Being directly able to link your runs and results to the final report was really nice. Also it's pretty exciting monitoring 10+ experiments live! # # **Discord** # # A Discord server worked really well for all our chat and voice communication. Frequent calls to catchup and agree on next steps were super useful. Todo lists and core pieces of code often ended up as pinned messages for quick reference and linking Github activity to a channel was useful for keeping an eye on new commits to the repo. # # **Overleaf** # # When it came to writing the final report in latex, Overleaf was a wonderful tool for collaborative editing. # # **ReviewNB** # # The ReviewNB app on GitHub was very useful for visualizing diffs in notebooks. # # ![](my_icons/20210211_reformer_reproducibiliy/cuts.png) # # # ### Learn from the best # # The Reformer architecture had several complex parts, and having [Phil Wang's](https://github.com/lucidrains/reformer-pytorch) and [HuggingFace's](https://huggingface.co/transformers/model_doc/reformer.html) # Github code was very helpful to understand design decisions and fix issues. # # ### Things we can improve for the next time # # **Start experiments early** # # We started our experiments quite late in the project; as we aimed to reimplement Reformer in Pytorch (with reference to existing implementations) about ~90% of our time was spent on ensuring our implementation was faithful to the paper and that it was working correctly. In retrospect starting experiments earlier would have allowed more in depth exploration of what we observed while testing. Full scale experiments have a way of inducing problems you didn’t foresee during the implementation phase... 
# # **Task distribution and coordination** # # When working in a distributed and decentralized team, efficient task allocation and tracking is important. Early in the project todo lists lived in people’s heads, or were quickly buried under 50 chat messages. This was suboptimal for a number of reasons, including that it made involving new people in the project more challenging as they could not easily identify where they could best contribute. # # We made a switch to Trello to better track open tasks. It worked reasonably well however its effectiveness was probably proportional to how much time a couple of team members had to review the kanban board, advocate for its use and focus the team’s attention there. The extra friction associated with needing to use another tool unconnected to Github or Discord was probably the reason for why we didn’t use it as much as we could have. Integrating Trello into our workflow or giving Github Projects a trial could have been useful. # # # **More feedback** # # We had originally intended to get feedback from the fastai community during the project. In the end we were too late in sharing our material, so there wasn’t time for much feedback. Early feedback would have been very useful and the project might have benefited from some periodic summary of accomplishments and current problems. We could have solicited additional feedback from the authors too. # # **Distributed training** # # This was our first exposure to distributed training and unfortunately we had a lot of issues with it. We were also unable to log the results from distributed runs properly to Weights & Biases. This slowed down our experiment iteration speed and is why we could not train our models for as long as we would have preferred. # # ![](my_icons/20210211_reformer_reproducibiliy/gcp.png) # # **Choice of paper to reproduce** # # It would have been useful to calculate a rough estimate of the compute budget the paper’s experiments required before jumping into it. 
In the latter stages of the project we realised that we would be unable to fully replicate some of the paper’s experiments, but instead had to run scaled down versions. In addition, where your interest sits between theoretical and practical papers should be considered when selecting a paper for the challenge. # # **More tools** # # We could have tried even more handy tools such as [knockknock](https://github.com/huggingface/knockknock) to alert us when models are finished training and Github Projects for task management. # # ### Some final thoughts # # We came out of this project even more motivated compared to how we entered; a great indication that it was both enjoyable and useful for us! Our advice would be to not hesitate to join events like this one and challenge yourself, and try and find one or more other folks in the forums or Discord to work with. After successfully delivering our submission to the challenge we are all eager to work together again on our next project, stay tuned for more! # # ![](my_icons/20210211_reformer_reproducibiliy/launch.png) # + [markdown] id="odjoARyxw-yE" # # Thanks for Reading This Far 🙏 # # As always, I would love to hear your feedback, what could have been written better or clearer, you can find me on twitter & Linkedin: **[twitter](https://twitter.com/PriyanK_7n)** # **[Linkedin](https://www.linkedin.com/in/priyank-n-707019195)** # -
_notebooks/2021-02-19-reformer-reproducibility-challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Translating RNA into Protein

# ## Problem
#
# The 20 commonly occurring amino acids are abbreviated by using 20 letters from
# the English alphabet (all letters except for B, J, O, U, X, and Z). Protein
# strings are constructed from these 20 symbols. Henceforth, the term genetic
# string will incorporate protein strings along with DNA strings and RNA strings.
#
# The RNA codon table dictates the details regarding the encoding of specific
# codons into the amino acid alphabet.
#
# Given: An RNA string s corresponding to a strand of mRNA (of length at most 10 kbp).
#
# Return: The protein string encoded by s.

# +
# RNA codon table as alternating "codon amino-acid" tokens; "Stop" marks the
# three stop codons that terminate translation.
_CODON_TOKENS = """UUU F CUU L AUU I GUU V
UUC F CUC L AUC I GUC V
UUA L CUA L AUA I GUA V
UUG L CUG L AUG M GUG V
UCU S CCU P ACU T GCU A
UCC S CCC P ACC T GCC A
UCA S CCA P ACA T GCA A
UCG S CCG P ACG T GCG A
UAU Y CAU H AAU N GAU D
UAC Y CAC H AAC N GAC D
UAA Stop CAA Q AAA K GAA E
UAG Stop CAG Q AAG K GAG E
UGU C CGU R AGU S GGU G
UGC C CGC R AGC S GGC G
UGA Stop CGA R AGA R GGA G
UGG W CGG R AGG R GGG G
""".split()

# codon -> amino acid (or "Stop") lookup table
CODON_DICT = dict(zip(_CODON_TOKENS[::2], _CODON_TOKENS[1::2]))


def translate_rna(rna: str) -> str:
    """Translate an mRNA string into the protein string it encodes.

    Reads consecutive, non-overlapping codons (base triplets) from the start
    of *rna*; translation ends at the first stop codon (UAA, UAG or UGA) or
    when fewer than three bases remain (a trailing partial codon is ignored,
    as in the original cell).

    Args:
        rna: RNA string over the alphabet {A, C, G, U}.

    Returns:
        The encoded protein string (possibly empty).

    Raises:
        KeyError: if a complete triplet is not a valid RNA codon.
    """
    # Accumulate in a list and join once, instead of the original quadratic
    # `O += codon_dict[i]` string concatenation.
    protein = []
    for start in range(0, len(rna) - len(rna) % 3, 3):
        amino_acid = CODON_DICT[rna[start:start + 3]]
        if amino_acid == "Stop":
            break
        protein.append(amino_acid)
    return "".join(protein)


# Guarded so that running the cell/script still prompts and prints as before,
# while importing the module (e.g. for testing) no longer blocks on input().
if __name__ == "__main__":
    print(translate_rna(input()))
# -
Bioinformatics Stronghold/LEVEL 1/PROT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Demo 1~3
# You can find a simple version in `elegantrl/tutorial/run.py`
# You can also find demo 1~3 in `elegantrl/run.py` (formal version)
#
# elegantrl/tutorial <1000 lines
# ```
# net.py # 160 lines
# agent.py # 530 lines
# run.py # 320 lines
# env.py # 160 lines (not necessary)
# ```
# The structure of the formal version is similar to the tutorial version.

# `Arguments` bundles the training hyper-parameters, `train_and_evaluate` runs
# the train/eval loop, and `PreprocessEnv` wraps a gym env for ElegantRL.
from elegantrl.tutorial.run import Arguments, train_and_evaluate
from elegantrl.tutorial.env import PreprocessEnv
import gym

gym.logger.set_level(40)  # Block warning

# ## Demo 1: Discrete action space

# +
'''choose an DRL algorithm'''
from elegantrl.tutorial.agent import AgentDoubleDQN  # AgentDQN

args = Arguments(agent=None, env=None, gpu_id=None)
args.agent = AgentDoubleDQN()

# +
'''choose environment'''
args.env = PreprocessEnv(env=gym.make('CartPole-v0'))
args.net_dim = 2 ** 7  # change a default hyper-parameters
args.batch_size = 2 ** 7
# Bare string below records the expected training budget/outcome for this env.
"TotalStep: 2e3, TargetReward: , UsedTime: 10s"

# Alternative environment (uncomment to use instead of CartPole):
# args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
# args.net_dim = 2 ** 8
# args.batch_size = 2 ** 8
# -

'''train and evaluate'''
train_and_evaluate(args)

# ## Demo 2: Continuous action space

'''DEMO 2.1: choose an off-policy DRL algorithm'''
# NOTE(review): this imports from the formal package `elegantrl.agent`, unlike
# the other demos which use `elegantrl.tutorial.agent` — confirm intentional.
from elegantrl.agent import AgentSAC  # AgentTD3, AgentDDPG

args = Arguments(if_off_policy=True)
args.agent = AgentSAC()

'''DEMO 2.2: choose an on-policy DRL algorithm'''
from elegantrl.tutorial.agent import AgentPPO

# Running both 2.1 and 2.2 leaves the on-policy PPO configuration in `args`.
args = Arguments(if_off_policy=False)  # hyper-parameters of on-policy is different from off-policy
args.agent = AgentPPO()

'''choose environment'''
env = gym.make('Pendulum-v0')
env.target_reward = -200  # set target_reward manually for env 'Pendulum-v0'
args.env = PreprocessEnv(env=env)
args.reward_scale = 2 ** -3  # RewardRange: -1800 < -200 < -50 < 0
args.net_dim = 2 ** 7
args.batch_size = 2 ** 7
"TotalStep: 3e5, TargetReward: -200, UsedTime: 300s"

# Alternative environments (uncomment one to use instead of Pendulum):
# args.env = PreprocessEnv(env=gym.make('LunarLanderContinuous-v2'))
# args.reward_scale = 2 ** 0  # RewardRange: -800 < -200 < 200 < 302
# "TotalStep: 9e4, TargetReward: 200, UsedTime: 2500s"

# args.env = PreprocessEnv(env=gym.make('BipedalWalker-v3'))
# args.reward_scale = 2 ** 0  # RewardRange: -200 < -150 < 300 < 334
# args.break_step = int(2e5)
# args.if_allow_break = False
# "TotalStep: 2e5, TargetReward: 300, UsedTime: 5000s"

'''train and evaluate'''
train_and_evaluate(args)
# train_and_evaluate__multiprocessing(args)  # try multiprocessing in formal version

# ## Demo 3: Custom Env from AI4Finance

# +
args = Arguments(if_off_policy=False)

'''choose an DRL algorithm'''
from elegantrl.tutorial.agent import AgentPPO

args.agent = AgentPPO()

from elegantrl.tutorial.env import FinanceMultiStockEnv  # a standard env for ElegantRL, not need PreprocessEnv()

args.env = FinanceMultiStockEnv(if_train=True)
args.env_eval = FinanceMultiStockEnv(if_train=False)  # eva_len = 1699 - train_len
args.reward_scale = 2 ** 0  # RewardRange: 0 < 1.0 < 1.25 <
args.break_step = int(5e6)
args.max_step = args.env.max_step
# replay-buffer capacity: 8 full episodes' worth of transitions
args.max_memo = (args.max_step - 1) * 8
args.batch_size = 2 ** 11
"TotalStep: 2e5, TargetReward: 1.25, UsedTime: 200s"
# -

'''train and evaluate'''
train_and_evaluate(args)
# args.rollout_num = 8
# train_and_evaluate__multiprocessing(args)  # try multiprocessing in formal version

# ## Demo 4: train in PyBullet (MuJoCo) (wait for adding)

import gym  # don't worry about 'WARN: Box bound precision lowered by casting to float32'
import pybullet_envs  # PyBullet is free, but MuJoCo is paid

# NOTE(review): these imports use older flat module names (AgentEnv, AgentRun,
# AgentZoo) rather than the `elegantrl` package — confirm they still resolve.
from AgentEnv import decorate_env
from AgentRun import Arguments, train_and_evaluate
from AgentZoo import AgentTD3, AgentSAC, AgentPPO

env_name = 'AntBulletEnv-v0'
# sanity check: restrict the demo to the supported PyBullet locomotion envs
assert env_name in {
    "AntBulletEnv-v0",
    "Walker2DBulletEnv-v0",
    "HalfCheetahBulletEnv-v0",
    "HumanoidBulletEnv-v0",
    "HumanoidFlagrunBulletEnv-v0",
    "HumanoidFlagrunHarderBulletEnv-v0",
}
env = gym.make(env_name)
env = decorate_env(env, if_print=True)

args = Arguments()
args.agent_rl = AgentSAC  # AgentSAC can't reach target_reward=2500, try AgentModSAC
args.env = env
args.reward_scale = 2 ** -3
args.break_step = int(1e6 * 8)
args.eval_times = 2

# ## Demo 5: Atari game (wait for adding)

env_name = 'breakout-v0'  # 'SpaceInvaders-v0'
eRL_demos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # **Predicting whether Titanic passengers survived, using Logistic Regression**
#
# 1. Import libraries and data
# 2. Inspect missing data
#    * 2.1. Age
#    * 2.2. Cabin
#    * 2.3. Port of embarkation
#    * 2.4. Adjust the data
#      * 2.4.1 Extra variables
# 3. Data analysis
# 4. Logistic Regression

# + [markdown]
# # 1. Import libraries and data

# +
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
plt.rc("font", size=14)
import seaborn as sns
sns.set(style="white")  # use a white background for seaborn plots
sns.set(style="whitegrid", color_codes=True)

# +
# Read the data into a DataFrame
df = pd.read_csv("./titanic_data.csv")

# Preview the data
df.head()

# +
# Print the number of rows in the data set
print('数据集包含的数据个数 {}.'.format(df.shape[0]))

# + [markdown]
# # 2. Inspect missing data

# +
# Count the missing values of each feature in the data set
df.isnull().sum()

# + [markdown]
# ## 2.1. Age

# +
# Percentage of rows with a missing "age"
print('"age" 缺失的百分比 %.2f%%' %((df['age'].isnull().sum()/df.shape[0])*100))

# + [markdown]
# About 20% of the passengers are missing their age. Let's look at the age
# distribution.

# +
ax = df["age"].hist(bins=15, color='teal', alpha=0.6)
ax.set(xlabel='age')
plt.xlim(-10,85)
plt.show()

# + [markdown]
# Since the skewness of "age" is not 0, replacing the missing values with the
# mean is not the best choice; the median can be used instead.
#
# Note: in probability theory and statistics, skewness measures the asymmetry of
# the probability distribution of a real-valued random variable. Negative skew
# means the left tail of the density is longer and most values lie to the right
# of the mean; positive skew means the opposite; zero skew means the values are
# spread fairly evenly on both sides of the mean, though not necessarily
# symmetrically.

# +
# Mean age
print('The mean of "Age" is %.2f' %(df["age"].mean(skipna=True)))
# Median age
print('The median of "Age" is %.2f' %(df["age"].median(skipna=True)))

# + [markdown]
# ## 2.2. Cabin

# +
# Percentage of rows with a missing cabin
print('"Cabin" 缺失的百分比 %.2f%%' %((df['cabin'].isnull().sum()/df.shape[0])*100))

# + [markdown]
# About 77% of the passengers are missing the cabin value; the best option is
# not to use this feature at all.

# + [markdown]
# ## 2.3. Port of embarkation

# +
# Missing rate of the port of embarkation
print('"Embarked" 缺失的百分比 %.2f%%' %((df['embarked'].isnull().sum()/df.shape[0])*100))

# + [markdown]
# Only 0.23% of the passengers are missing the port of embarkation; the mode can
# be used to replace the missing values.

# +
print('按照登船地点分组 (C = Cherbourg, Q = Queenstown, S = Southampton):')
print(df['embarked'].value_counts())
sns.countplot(x='embarked', data=df, palette='Set2')
plt.show()

# +
# Print the most common port of embarkation (the mode)
print('乘客登船地点的众数为 %s.' %df['embarked'].value_counts().idxmax())

# + [markdown]
# Since most people embarked at Southampton, missing values can be replaced
# with "S".

# + [markdown]
# ## 2.4. Adjust the data according to the missing values

# + [markdown]
# Based on the analysis above, we make the following adjustments:
# * If "Age" is missing for a row, use the median age, 28, instead.
# * If "Embarked" is missing for a row, use the most common port, "S", instead.
# * Since too many passengers are missing the "Cabin" value, drop this feature
#   from the data entirely.

# +
data = df.copy()
data["age"].fillna(df["age"].median(skipna=True), inplace=True)
data["embarked"].fillna(df['embarked'].value_counts().idxmax(), inplace=True)
data.drop('cabin', axis=1, inplace=True)

# +
# Confirm whether the data still contains missing values
data.isnull().sum()
# -

# Handle the remaining columns that may still contain missing values, in the
# same way (each filled with that column's mode)
# TODO:
data["pclass"].fillna(df['pclass'].value_counts().idxmax(), inplace=True)
data["survived"].fillna(df['survived'].value_counts().idxmax(), inplace=True)
data["name"].fillna(df['name'].value_counts().idxmax(), inplace=True)
data["sex"].fillna(df['sex'].value_counts().idxmax(), inplace=True)
data["sibsp"].fillna(df['sibsp'].value_counts().idxmax(), inplace=True)
data["parch"].fillna(df['parch'].value_counts().idxmax(), inplace=True)
data["ticket"].fillna(df['ticket'].value_counts().idxmax(), inplace=True)
data["fare"].fillna(df['fare'].value_counts().idxmax(), inplace=True)

# +
# Preview the adjusted data
data.head()
# -

# ### Compare the age distribution before and after the adjustment

# +
plt.figure(figsize=(15,8))
# NOTE(review): `normed=` was removed from Matplotlib's hist in the 3.x series
# (replaced by `density=`) — confirm the pinned matplotlib version.
ax = df["age"].hist(bins=15, normed=True, stacked=True, color='teal', alpha=0.6)
df["age"].plot(kind='density', color='teal')
ax = data["age"].hist(bins=15, normed=True, stacked=True, color='orange', alpha=0.5)
data["age"].plot(kind='density', color='orange')
ax.legend(['Raw Age', 'Adjusted Age'])
ax.set(xlabel='Age')
plt.xlim(-10,85)
plt.show()

# + [markdown]
# ## 2.4.1. Handling other features

# + [markdown]
# The features "sibsp" (number of siblings/spouses aboard) and "parch" (number
# of parents/children aboard) both indicate whether the passenger had company on
# board. To prevent multicollinearity between the two, we can merge them into a
# single variable, "TravelAlone" (whether the passenger travelled alone).
#
# Note: multicollinearity refers to inaccurate estimates in multivariate linear
# regression caused by highly correlated predictor variables; the dummy variable
# trap is one way to trigger it.

# +
## Create a new variable 'TravelAlone' recording whether the passenger travelled
## alone, then drop "sibsp" and "parch"
data['TravelAlone']=np.where((data["sibsp"]+data["parch"])>0, 0, 1)
data.drop('sibsp', axis=1, inplace=True)
data.drop('parch', axis=1, inplace=True)

# + [markdown]
# Apply one-hot encoding to the categorical variables, converting string
# categories into numeric columns.

# +
# One-hot encode "embarked" and "sex"; drop 'name' and 'ticket'
final =pd.get_dummies(data, columns=["embarked","sex"])
final.drop('name', axis=1, inplace=True)
final.drop('ticket', axis=1, inplace=True)

final.head()

# + [markdown]
# # 3. Data analysis

# + [markdown]
# ## 3.1. Age

# +
plt.figure(figsize=(15,8))
ax = sns.kdeplot(final["age"][final.survived == 1], color="darkturquoise", shade=True)
sns.kdeplot(final["age"][final.survived == 0], color="lightcoral", shade=True)
plt.legend(['Survived', 'Died'])
plt.title('Density Plot of Age for Surviving Population and Deceased Population')
ax.set(xlabel='Age')
plt.xlim(-10,85)
plt.show()

# + [markdown]
# ### The distributions of the surviving and deceased groups are similar; the one
# big difference is the share of young passengers among the survivors, suggesting
# that children were given priority.

# + [markdown]
# ## 3.2. Fare

# +
plt.figure(figsize=(15,8))
ax = sns.kdeplot(final["fare"][final.survived == 1], color="darkturquoise", shade=True)
sns.kdeplot(final["fare"][final.survived == 0], color="lightcoral", shade=True)
plt.legend(['Survived', 'Died'])
plt.title('Density Plot of Fare for Surviving Population and Deceased Population')
ax.set(xlabel='Fare')
plt.xlim(-20,200)
plt.show()

# + [markdown]
# The fare distributions of the two groups differ considerably, so this feature
# matters a lot for predicting survival. Fare correlates with passenger class,
# which may have affected evacuation — see the class analysis next.

# + [markdown]
# ## 3.3. Passenger class

# +
# NOTE(review): positional x/y arguments were removed from seaborn's barplot in
# 0.12 (keyword-only since then) — confirm the pinned seaborn version.
sns.barplot('pclass', 'survived', data=df, color="darkturquoise")
plt.show()

# + [markdown]
# As expected, first-class passengers had the highest survival rate.

# + [markdown]
# ## 3.4. Port of embarkation

# +
sns.barplot('embarked', 'survived', data=df, color="teal")
plt.show()

# + [markdown]
# Passengers who embarked at Cherbourg (France) had the highest survival rate.

# + [markdown]
# ## 3.5. Travelling alone

# +
sns.barplot('TravelAlone', 'survived', data=final, color="mediumturquoise")
plt.show()

# + [markdown]
# Passengers travelling alone had a lower survival rate; in that era most solo
# travellers were men.

# + [markdown]
# ## 3.6. Sex

# +
sns.barplot('sex', 'survived', data=df, color="aquamarine")
plt.show()

# + [markdown]
# Clearly, women had a higher survival rate.

# + [markdown]
# # 4. Predicting with Logistic Regression

# + [markdown]
# ### Split the data into training and test sets to evaluate the model

# +
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Features used for prediction
cols = ["age","fare","TravelAlone","pclass","embarked_C","embarked_S","sex_male"]

# Create X (features) and y (class labels)
X = final[cols]
y = final['survived']

# Split X and y into two parts
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)

# Evaluate the logistic regression model
# TODO add code:
# 1. train the model,
# feature scaling
# X_normalizer = StandardScaler() # N(0,1)
# X_train = X_normalizer.fit_transform(X_train)
# X_test = X_normalizer.transform(X_test)

logreg = LogisticRegression()
logreg.fit(X_train, y_train)

# 2. using the model, generate y_pred from the input X_test
y_pred = logreg.predict(X_test)

print('Train/Test split results:')
print("准确率为 %2.3f" % accuracy_score(y_test, y_pred))
assets/source/titanic-logistic-regression-homework.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Bag of Colors
# This is my personal attempt at making a bag of colors implementation. It was
# made into an interactive notebook, so that it ends up well documented and easy
# to pick up by someone else.
#
# For more details on this algorithm, please see the original paper:
#
# > <NAME>, <NAME>, <NAME>, "Bag-of-colors for improved image search".
# > Online: <https://dl.acm.org/citation.cfm?id=2072298.2072034>
#

import os
from os import path
import random

import numpy as np
from PIL import Image, ImageCms, ImageFile
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import faiss
import h5py as h5

# %matplotlib inline

# Tolerate truncated image files in the data set instead of raising on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# #### Constants
# In my experiments, I have used the data set from the [ImageCLEF](http://imageclef.org)
# 2018 caption challenge. Any other sufficiently large image data set will work
# as well. Nevertheless, checking other implementation details is recommended:
# the image chunking block size was adjusted to work for smaller images.

# +
N_BLOCKS = 256  # blocks per image (a 16x16 grid of blocks)
BLOCK_SIZE = 10  # original is 16

# update constants to point to a directory of images
DATA_DIR = "training_data"
TEST_DATA_DIR = "testing_data"

# we'll pick a small sample of the data set for experimentation purposes
N_DATA = 25
SAMPLE_FILES = [path.join(DATA_DIR, fname) for (i, fname) in zip(range(N_DATA), os.listdir(DATA_DIR))]

# all files!
ALL_FILES = [path.join(DATA_DIR, fname) for (i, fname) in enumerate(os.listdir(DATA_DIR))]
ALL_FILES.sort()
# sanity check: expects the full training set to be present
# (NOTE: `assert` statements are skipped when Python runs with -O)
assert len(ALL_FILES) > 10000

TEST_FILES = [path.join(TEST_DATA_DIR, fname) for (i, fname) in enumerate(os.listdir(TEST_DATA_DIR))]
TEST_FILES.sort()
# -

# Now, let's pick up one image and show how it should be adapted for the algorithm.

sample_image = Image.open(SAMPLE_FILES[4])
sample_image = sample_image.resize([160, 160])
_ = plt.imshow(sample_image)

# Here's what the article says about BoCs:
#
# 1. Resize each image to 256×256 pixels, convert it to CIE-Lab and split it in
# blocks of 16×16 pixels (i.e., 256 blocks in total).
#
# 2. For each block, find the most occurring color. Ties are randomly resolved.
# If this color corresponds to less than 5 occurrences (out of 256), select an
# arbitrary color from the block.
#
# 3. At this point, we have extracted 256 Lab colors per image. The set of
# 256×$N$ colors from all images is clustered using a k-means algorithm,
# producing a $k_c$ Lab colors palette.
#
# My data set is already converted to a smaller dimension, so I'll be using
# blocks of 10x10 on 160x160 images instead (still 256 blocks in total).

# +
# create color profiles for RGB <-> LAB conversions
srgb_profile = ImageCms.createProfile("sRGB")
lab_profile = ImageCms.createProfile("LAB")

rgb2lab_transform = ImageCms.buildTransformFromOpenProfiles(srgb_profile, lab_profile, "RGB", "LAB")
lab2rgb_transform = ImageCms.buildTransformFromOpenProfiles(lab_profile, srgb_profile, "LAB", "RGB")
# -

# +
def convert_to_cielab(img: Image.Image) -> Image.Image:
    """Convert *img* to the CIE-Lab color space (converting to RGB mode first)."""
    # first make sure it's RGB
    if img.mode != "RGB":
        img = img.convert("RGB")
    # then apply transformation
    return ImageCms.applyTransform(img, rgb2lab_transform)

def convert_to_rgb(img: Image.Image) -> Image.Image:
    """Convert a CIE-Lab image back to sRGB."""
    return ImageCms.applyTransform(img, lab2rgb_transform)
# -

sample_image_cie = convert_to_cielab(sample_image)

# Let's turn color block extraction into a function.
#
def extract_dominant_colors(image: np.ndarray) -> np.ndarray:
    """Extract the dominant Lab color of each block of *image*.

    The image is walked in BLOCK_SIZE x BLOCK_SIZE blocks; the most frequent
    color of each block is kept. Assumes the image yields exactly N_BLOCKS
    blocks (i.e. a 160x160 image with the current constants).

    Args:
        image: np.array [W, H, C = 3] dtype=uint8

    Returns:
        np.array [256, 3] dtype=uint8 (one dominant color per block)
    """
    from collections import Counter  # local import; file header has no `collections`

    assert len(image.shape) == 3
    (w, h, c) = image.shape
    assert c == 3

    def dominant_color(block, occurrence_threshold=4):
        """Most occurring color of one block.

        Args:
            block: np.array [W, H, 3]
            occurrence_threshold: int
                if the most occurring color has fewer occurrences than this,
                pick a random color from the block instead. Using 4 instead of
                5 because the blocks are also a bit smaller
        """
        block = np.reshape(block, [-1, 3])
        # Counter replaces the previous hand-rolled histogram dict; ties are
        # resolved arbitrarily, as the paper allows.
        (color, count) = Counter(map(tuple, block)).most_common(1)[0]
        if count < occurrence_threshold:
            # not significant enough, choose a random color
            return list(random.choice(block))
        return list(color)

    colors = np.zeros([N_BLOCKS, 3], dtype=np.uint8)
    k = 0
    for i in range(0, w, BLOCK_SIZE):
        for j in range(0, h, BLOCK_SIZE):
            block = image[i: i + BLOCK_SIZE, j: j + BLOCK_SIZE]
            colors[k] = dominant_color(block)
            k += 1
    return colors

# Let's see what colors we get with the sample image.

sample_colors = extract_dominant_colors(np.array(sample_image_cie))

s = sample_colors
#s = np.sort(sample_colors, axis=0)
#s = np.unique(sample_colors, axis=0)
s = np.reshape(s, [16, 16, 3])
sample_colors_img = Image.fromarray(s, 'LAB')
sample_colors_img = convert_to_rgb(sample_colors_img)

plt.figure(figsize=(3, 3))
plt.imshow(sample_image)
plt.figure(figsize=(3, 3))
_ = plt.imshow(sample_colors_img)

# This looks ok on this end!

# ### Visual color codebook generation
# Now that we can fetch the dominant colors of each image, let's produce a color
# vocabulary (codebook) with k-means clustering. I'll be using Faiss for this, by
# accumulating all colors into a 2-dimensional array. Let's experiment with
# multiple values of $k$.

def collect_dominant_colors(files: list) -> np.ndarray:
    """Collect the dominant colors of the set into a single ndarray.

    Args:
        files: list of image file names

    Returns:
        np.ndarray [N * 256, 3] dtype=f32
    """
    all_colors = np.zeros([len(files) * 256, 3], dtype=np.float32)
    for i, file in enumerate(files):
        # Fix: close each file handle promptly — the previous version leaked
        # one open file per image. `resize` forces the lazy load, so the
        # handle can be released right after.
        with Image.open(file) as src:
            img = src.resize([160, 160])
        img = np.array(convert_to_cielab(img))
        colors = extract_dominant_colors(img)
        all_colors[i * 256: (i + 1) * 256] = colors.astype(np.float32)
    return all_colors

def generate_codebook(colors, k, niter=25, gpu_res=None, gpu_device=None) -> (np.ndarray, faiss.Index):
    """Cluster *colors* into a k-color codebook with Faiss k-means.

    Args:
        colors : np.ndarray [N, 3] of colors
        k : the size of the codebook
        niter : number of k-means clustering iterations
        gpu_res : faiss.GpuResources or None, required for a GPU backed index
        gpu_device : int or None, whether to make a GPU backed index

    Returns:
        tuple (centroids, index)
        centroids : np.array [k, 3]
        index : faiss.Index trained with the codebook (L2 metric)
    """
    # we'll use the Clustering API so that we can choose the clustering index
    cp = faiss.ClusteringParameters()
    cp.niter = niter
    cp.verbose = False
    cp.spherical = False
    clus = faiss.Clustering(3, k, cp)
    index = faiss.IndexFlatL2(3)
    if gpu_res is not None and gpu_device is not None:
        index = faiss.index_cpu_to_gpu(gpu_res, gpu_device, index)
    clus.train(colors, index)
    # report the final k-means objective value as the training loss
    obj = faiss.vector_float_to_array(clus.obj)
    loss = obj[-1]
    print("Finished training codebook of size {}. Loss: {}".format(k, loss))
    centroids = faiss.vector_float_to_array(clus.centroids).reshape([k, 3])
    return centroids, index

x_colors = collect_dominant_colors(SAMPLE_FILES)

kmeans_16 = generate_codebook(x_colors, 16, niter=50)
kmeans_32 = generate_codebook(x_colors, 32, niter=50)
kmeans_64 = generate_codebook(x_colors, 64, niter=50)
kmeans_128 = generate_codebook(x_colors, 128, niter=50)
kmeans_256 = generate_codebook(x_colors, 256, niter=50)

# We now have the codebook in the index. We can use it directly to build our
# bags of colors. We can also see how the codebook looks like.
def view_codebook(centroids, figsize=(6, 2)):
    """Render a codebook of CIELAB centroid colors as a small image.

    Args:
        centroids: np.ndarray [k, 3] of CIELAB colors (float32)
        figsize: matplotlib figure size
    """
    # sort the colors (by the third channel) so that they look pretty
    carr = centroids.tolist()
    carr = sorted(carr, key=lambda v: v[2])
    carr = np.reshape(np.array(carr, dtype=np.float32), [-1, 16, 3])
    # convert to image
    # NOTE(review): PIL's 'LAB' mode nominally expects uint8 data; this float32
    # array apparently worked with the Pillow version used here — confirm.
    codebook_img = Image.fromarray(carr, 'LAB')
    codebook_img = convert_to_rgb(codebook_img)
    plt.figure(figsize=figsize)
    _ = plt.imshow(codebook_img)


view_codebook(kmeans_16[0])

view_codebook(kmeans_32[0])

view_codebook(kmeans_64[0])

view_codebook(kmeans_128[0])

view_codebook(kmeans_256[0], figsize=(12, 2))

# Release the small sample codebooks before building the real ones.
kmeans_16 = None
kmeans_32 = None  # fixed typo: was `kmean_32`, which kept the old codebook alive
kmeans_64 = None
kmeans_128 = None
kmeans_256 = None

# Ok, time to use more of the data set! Since our data set is too big for k-means clustering, we'll pick a random portion to serve as a template set.

# +
N_TEMPLATE = 25000  # twenty-five thousand
RANDOM_SEED = 386104

print("Using {} template samples".format(N_TEMPLATE))
random.seed(RANDOM_SEED)
TEMPLATE_FILES = random.sample(ALL_FILES, k=N_TEMPLATE)
# -

template_colors = collect_dominant_colors(TEMPLATE_FILES)

# Power up! We'll use the GPU for building and retrieving from these codebooks.

res = faiss.StandardGpuResources()

kmeans_64 = generate_codebook(template_colors, 64, niter=100, gpu_res=res, gpu_device=0)
kmeans_128 = generate_codebook(template_colors, 128, niter=100, gpu_res=res, gpu_device=0)
kmeans_256 = generate_codebook(template_colors, 256, niter=100, gpu_res=res, gpu_device=0)
kmeans_512 = generate_codebook(template_colors, 512, niter=100, gpu_res=res, gpu_device=0)

view_codebook(kmeans_64[0])

view_codebook(kmeans_128[0])

view_codebook(kmeans_256[0])

view_codebook(kmeans_512[0])

# ### Bag of Color generation

# For each color in an image, look for the nearest color in the codebook, and increment that position in the bag.


def generate_bags(codebook: faiss.Index, files: list) -> np.ndarray:
    """Generate the bags of colors.

    Args:
        codebook: faiss.Index containing the codebook
        files: list of file names (length N)
    Returns:
        np.array [N, k]
    """
    # fixed: the annotations previously referenced `faiss.Idex` and the
    # undefined name `iterable`, both of which raise at definition time
    assert codebook.ntotal > 0
    all_bags = np.zeros([len(files), codebook.ntotal], dtype=np.float32)
    for i, file in enumerate(files):
        img = Image.open(file).resize([160, 160])
        img = np.array(convert_to_cielab(img), dtype=np.float32).reshape([-1, 3])
        # batch search for the code of pixels
        codes = codebook.assign(img, 1)
        # accumulate the histogram in one vectorized pass; np.add.at handles
        # repeated codes correctly (a plain fancy-index += would not)
        np.add.at(all_bags[i], np.ravel(codes), 1)
    return all_bags


sample_bags = generate_bags(kmeans_256[1], SAMPLE_FILES)

# Let's see how a bag looks like.


def view_bag(x: np.ndarray):
    # the histogram of the data
    plt.figure(figsize=(8, 2))
    plt.bar(range(len(x)), x, facecolor='blue', alpha=0.75)


view_bag(sample_bags[0])

view_bag(sample_bags[12])

view_bag(sample_bags[14])

# Bags are often sparse, with some colors of high frequency. These bags can be normalized to attenuate this effect.

# ## Bag normalization techniques


def max_normalize(bocs: np.ndarray) -> np.ndarray:
    """Linearly normalize the bags so that the maximum of each bag is 1."""
    return bocs / np.max(bocs, axis=1, keepdims=True)


def tf_idf_normalize(bocs: np.ndarray) -> np.ndarray:
    """tf-idf normalization."""
    tf = bocs / np.sum(1e-10 + bocs, axis=1, keepdims=True)
    # document count per color
    # fixed: `np.bool` / `np.float` aliases were removed in NumPy 1.24 —
    # use the builtin types instead (same float64 result as before)
    dcount = np.sum(bocs.astype(bool).astype(float), axis=0)
    idf = np.log(len(bocs) / dcount)
    return tf * idf


def power_normalize(bocs: np.ndarray) -> np.ndarray:
    """Power-law and L1 vector normalization."""
    # element-wise square root, then L1 normalization
    o = np.sqrt(bocs)
    o /= np.sum(o, axis=1, keepdims=True)
    return o


# ### Examples

# max normalization
nbags = max_normalize(sample_bags)
view_bag(nbags[0])
view_bag(nbags[1])
view_bag(nbags[2])

# td-idf normalization
nbags = tf_idf_normalize(sample_bags)
view_bag(nbags[0])
view_bag(nbags[1])
view_bag(nbags[2])

# power-law + L1 normalization
nbags = power_normalize(sample_bags)
view_bag(nbags[0])
view_bag(nbags[1])
view_bag(nbags[2])

# ### Save all bags in the
# training set

# This code will save the outcome in an hdf5 file.

OUTPUT_FILE = "bocs-256-train.h5"

z = generate_bags(kmeans_256[1], ALL_FILES)  # no normalization, this can be done later

k = z.shape[1]
n_samples = len(ALL_FILES)
with h5.File(OUTPUT_FILE, mode='w') as f:
    # the whole bag matrix is written in one shot here ...
    f.create_dataset('data', data=z, shape=[n_samples, k], dtype='float32')
    h5ids = f.create_dataset('id', shape=[n_samples], dtype=h5.special_dtype(vlen=str))
    # ... so only the per-image ids need filling in row by row.
    # fixed: the original loop also wrote each row to the undefined name
    # `h5set` (NameError); the data was already saved via `data=z` above.
    for i in range(n_samples):
        h5ids[i] = path.basename(ALL_FILES[i])[:-4]
print("Bags of colors (palette of size {}) was saved in {}".format(k, OUTPUT_FILE))

# ### Save all bags in the testing set

OUTPUT_FILE = "bocs-256-test.h5"

z = generate_bags(kmeans_256[1], TEST_FILES)

k = z.shape[1]
n_samples = len(TEST_FILES)
with h5.File(OUTPUT_FILE, mode='w') as f:
    f.create_dataset('data', data=z, shape=[n_samples, k], dtype='float32')
    h5ids = f.create_dataset('id', shape=[n_samples], dtype=h5.special_dtype(vlen=str))
    for i, filename in enumerate(TEST_FILES):
        h5ids[i] = path.basename(filename)[:-4]
print("Bags of colors (palette of size {}) was saved in {}".format(k, OUTPUT_FILE))

# That's all, folks!
Bag of Colors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + language="javascript" # IPython.OutputArea.prototype._should_scroll = function(lines) { # return false; # } # # - import numpy as np import matplotlib.pyplot as plt from scipy.constants import c from functions import * import warnings warnings.filterwarnings('ignore') WDMS_pars = ([1050, 1200], # WDM up downs in wavelengths [m] [930, 1200], [930,1050], [930, 1200]) WDMS_pars = ([1051.5, 1090], # WDM up downs in wavelengths [m] [1007., 1090]) lams = 1095 spl_loss = -0 #[db] lstart,lend = 780,1400 N = 2**14 fv = np.linspace(1e-3*c/lend,1e-3*c/lstart, N) lv = 1e-3*c/fv WDM_vec = [WDM(i[0], i[1],fv,c) for i in WDMS_pars] def db(x): return 10*np.log10(x) def perc(l): return 100*10**(l/10) def Total_loss(l,spl_loss): return db(WDM_vec[0].il_port2(l)) + db(WDM_vec[1].il_port2(l)) + spl_loss fig = plt.figure(figsize=(20,10)) plt.plot(lv,perc(Total_loss(lv,spl_loss))) #plt.ylim([-60,0]) plt.axvline(lams, label = 'Signal',color='b') plt.title("Transitence at the signal wavelength:"+str(lams)+" is "+ str(perc(Total_loss(lams,spl_loss)))) plt.legend() plt.show() for spl_loss in (-1,-1.1,-1.2,-1.3,-1.4): print(perc(Total_loss(lams,spl_loss))) # ## Moving the first WDM to get the some signal in spl_loss = -1 lams_vec = np.arange(1091, 1107, 1)[:-1] def Total_inserted_seed(lams_spoil,lams): lamp = 1051.5 WDM1 = WDM(lamp, lams_spoil,fv,c) return db(WDM1.il_port1(fv_sp = lams)) def Total_Trans_in_fibre(lams, lams_spoil, spl_loss): lamp = 1051.5 Omega = 2*pi*c/(lamp*1e-9) - 2*pi*c/(lams*1e-9) omegai = 2*pi*c/(lamp*1e-9) +Omega lami = 1e9*2*pi*c/(omegai) WDMS_pars = ([lamp,lams_spoil], # WDM up downs in wavelengths [m] [lami, lams]) WDM_vec = [WDM(i[0], i[1],fv,c) for i in WDMS_pars] return db(WDM_vec[0].il_port2(lams)) + db(WDM_vec[1].il_port2(lams)) + spl_loss 
for lams in lams_vec: lams_spoil = lams + 20 print("Transmitence of the signal "+ str(lams)+" in the FOPO is :", str(perc(Total_Trans_in_fibre(lams,lams_spoil,spl_loss)))+', With ' +str(perc(Total_inserted_seed(lams_spoil,lams)))+ ' of signal inserted') spoil = 0 T = [perc(Total_Trans_in_fibre(lams,lams+spoil,spl_loss)) for lams in lams_vec] Insertion = [perc(Total_inserted_seed(lams+spoil ,lams)) for lams in lams_vec] lams_vec spoil_vec = np.arange(-28,42,0.2) lams_vec = 1095 spl_loss = -1 insertion,T = [],[] for spoil in spoil_vec: T.append(perc(Total_Trans_in_fibre(lams,lams+spoil,spl_loss))) insertion.append(perc(Total_inserted_seed(lams+spoil ,lams)) ) import pickle as pl fig = plt.figure(figsize=(15,10)) plt.plot(spoil_vec, T, label = 'Transmitence in cavity') plt.plot(spoil_vec,insertion, label = 'Inserted seed') plt.xlabel(r'$\delta [nm]$') plt.ylabel(r'%') plt.legend(fontsize = 14) plt.savefig('inserted_transmited.png') data = (spoil_vec, T, insertion) with open('inserted_transmited.pickle','wb') as f: pl.dump((fig,data),f) plt.show() for i in (10e-3, 100e-3,1): print(0.1565*i, 23.7365365533*0.01*i) fig = plt.figure(figsize=(20,10)) plt.plot(lv,perc(Total_loss(lv,spl_loss))) #plt.ylim([-60,0]) plt.axvline(lams, label = 'Signal_trans',color='b') plt.title("Transitence at the signal wavelength:"+str(lams)+" is "+ str(perc(Total_loss(lams,spl_loss)))) plt.legend() plt.savefig('../12517/Trans.png') plt.show()
Research_logs_and_notebooks/Stable_logs/total_loss_per_round.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 2. Quickstart: Running a single Schwarzschild model # By the end of the notebook, you will have run a Schwarzschild model. This will involve, # 1. understanding the configuration file # 2. executing commands to create and run a Schwarzschild model # 3. plotting some output for this model # ## Setup # You should be in the directory ```docs/tutorial_notebooks```. If this is the first time you have run this tutorial, this directory should have the following structure, # # ``` # tutorial_notebooks # |- # | |- NGC6278_input # | |- mge.ecvs # | |- gauss_hermite_kins.ecvs # | |- bins.dat # | |- aperture.dat # | # |- NGC6278_config.yaml # |- *.ipynb # ``` # # We have provided example input data files and a configution file for the galaxy NGC6278. The input data files are: # # - ``mge.ecvs`` - the Multi Gaussian Expansion (MGE) describing the stellar surface density # - ``gauss_hermite_kins.ecvs`` - the kinematics extracted for this galaxy # - ``aperture.dat`` - information about the spatial apertures/bins using for kinematic extraction # - ``bins.dat`` - information about the spatial apertures/bins using for kinematic extraction # # The MGE and kinematics files must be in the form of [Astropy ECSV files](https://docs.astropy.org/en/stable/api/astropy.io.ascii.Ecsv.html). This is a convenient and flexible file format, so we use it wherever possible for storing our input and output data. # # We have provided test data for one galaxy. A very basic set of instructions for generating your own input data are as follows, # # - fit and MGE to a photometric image e.g. using [mge](http://www-astro.physics.ox.ac.uk/~mxc/software/#mge) # - Voronoi bin your IFU datacube e.g. using [e.g. 
vorbin](http://www-astro.physics.ox.ac.uk/~mxc/software/#binning) # - extract kinematics from the binned datacube e.g. using [e.g. PPXF](http://www-astro.physics.ox.ac.uk/~mxc/software/#ppxf) # - make the apertures and binfile files. An example script which has created these files is the file ``generate_input_califa.py`` in [this directory](https://github.com/dynamics-of-stellar-systems/triaxschwarz/tree/sabine/schwpy/data_prepare) # # In the future, we will provide more examples of test data, and more detailed instructions for preparing your own data. # # To get started, let's import DYNAMITE and print the version and installation path, # + import dynamite as dyn print('DYNAMITE') print(' version', dyn.__version__) print(' installed at ', dyn.__path__) # - # ## Reading the configuration file # The configuration file controls all of the settings that you may wish to vary when running your Schwarschild models, e.g. # # - specifying the components of the gravitational potential # - specifying the potential parameters, or parameter ranges, etc # - specify what type of kinematic data you are providing, e.g. # - discrete vs continuous, # - Gauss-Hermite vs Histograms # - the location of the input and output files # - the number of models you want to run # # This list of options is incomplete - for a more detailed description of the configuration file, see the documentation. # # The configuration file for this tutorial is # ``` # NGC6278_config.yaml # ``` # Open this file in a text editor, alongside this notebook, to see how it is organised. The file is in ``yaml`` format. The basic structure of a yaml files are pairs of keys and values # ``` # key : value # ``` # which can be organised into hierarchical levels separated by tabs # ``` # main_key: # sub_key1 : value1 # sub_key2 : value2 # ``` # Comments begin with a ``#``. Values can be any type of variable e.g. integers, floats, strings, booleans etc. 
# # To read in the congfiguration file we can use the following command, creating a configuration object which here we call ``c``, fname = 'NGC6278_config.yaml' c = dyn.config_reader.Configuration(fname, reset_logging=True) # On making this object, some output is printed telling us whether any previous models have been found. Assuming that you're running this tutorial for the first time, then no models will be found and an empty table is created at ``AllModels.table``. This table holds holds information about all the models which have been run so far. # # The configuration object ``c`` is structured in a similar way to the the configuration file itself. For example, the configuration file is split into two sections. The top section defines aspects the physical system we wish to model - e.g. the globular cluster, galaxy or galaxy cluster - while the second section contains all other settings we need for running a model - e.g. settings about the orbit integration and input/output options. The two sections are stored in the ``system`` and ``settings`` attributes of the configuration object, respectively, print(type(c.system)) print(type(c.settings)) # The physical system is comprised of components, which are stored in a list ``c.system.cmp_list`` print(f'cmp_list is a {type(c.system.cmp_list)}') print(f'cmp_list has length {len(c.system.cmp_list)}') # Let's print information about the components, for i in range(3): print(f'Information about component {i}:') # extract component i from the component list component = c.system.cmp_list[i] # print the name print(f' name = {component.name}') # print a list of the names of the parameters of this component parameters = component.parameters parameter_names = [par0.name for par0 in parameters] string = ' has parameters : ' for name in parameter_names: string += f'{name} , ' print(string) # print the type of this component print(f' type = {type(component)}') # does it contribute to the potential? 
print(f' contributes to the potential? - {component.contributes_to_potential}') # Each component has a name, some parameters, and a type. **Currently, dynamite only supports this exact combination of component types**, i.e. # # - one ``Plummer`` component representing the black hole # - one ``NFW`` component representing the dark halo (as of v1.0.0, DYNAMITE additionally supports ``Hernquist``, ``TriaxialCoredLogPotential``, and ``GeneralisedNFW`` dark halos, see documentation) # - one ``TriaxialVisibleComponent`` representing the stellar body of the galaxy # # In the future, we want to support other components, and more flexible combinations of components. # # For the stars - i.e. component 2 - we must provide some input data files. The location of these files is specified in the configuration file, at # ``` # settings -> io_settings -> input_directory # ``` # which takes the value, c.settings.io_settings['input_directory'] # The names of the following files are also specified in the configuration file, in the locations # ``` # system_components -> stars -> mge_pot # system_components -> stars -> mge_lum # system_components -> stars -> kinematics -> <kinematics name> --> datafile # system_components -> stars -> kinematics -> <kinematics name> --> aperturefile # system_components -> stars -> kinematics -> <kinematics name> --> binfile # ``` # which take values of the appropriate filenames. You are free to give these files whatever name you like, as long as it is specified in the configuration file. # # Let's have a look at the MGE, c.system.cmp_list[2].mge_pot, c.system.cmp_list[2].mge_lum # The kinematics are stored here, type(c.system.cmp_list[2].kinematic_data) # Note that this object has type ``list``. This is because a single component can have multiple different sets of kinematics. In this example, the first (and only) entry in the list is type(c.system.cmp_list[2].kinematic_data[0]) # We see that this kinematics object has type ``GaussHermite``. 
This has also been specified in the configuration file, under # ``` # system_components -> stars -> kinematics --> kinset1 --> type # ``` # In addition to ``GaussHermite``, DYNAMITE also supports ``BayesLOSVD`` kinematics (see documentation). # # The kinemtic data itself can be accessed as follows, c.system.cmp_list[2].kinematic_data[0].data # ## Creating a Schwarzschild model # Our next step will be to create a model i.e. a ``dyn.model.Model`` object. To help understand what the Model object is, let's read the internal documentation (i.e. the docstring) for the class: # # ``` # class Model(object): # """A DYNAMITE model. # # The model can be run by running the methods (i) get_orblib, (ii) get_weights # and (iii) (in the future) do_orbit_colouring. Running each of these methods # will return the appropriate object, e.g. model.get_orblib() --> returns an # OrbitLibrary object model.get_weights(...) --> returns a WeightSolver object # # Parameters # ---------- # config : a ``dyn.config_reader.Configuration`` object # parset : row of an Astropy Table # contains the values of the potential parameters for this model # directory : str # The model directory name (without path). If None or not specified, # the all_models_file will be searched for the directory name. If the # all_models file does not exist, the model directory will be set to # ``orblib_000_000/ml{ml}``. # # Returns # ------- # Nothing returned. Attributes holding outputs are are added to the # object when methods are run. # # """ # def __init__(self, config=None, parset=None, directory=None): # ``` # Looking at the init signature for this class, we see that a ``Model`` requires 3 input arguments. We've already met the first, the configuration object ``c``. # The remaining input parameter we need to provide is ``parset``. This is a particular set of values for each of the parameters of the model. In the configuration file, every parameter has a been given a ``value``. 
We can extract a parameter set specified by these values as follows, parset = c.parspace.get_parset() print(parset) # Note that, compared to values specified in the configuration file, parameters which have been specified as logarithmic, i.e. those configured as # ``` # parameters -> XXX -> logarithmic : True # ``` # have been exponentiated in this table. More details can be found in the tutorial/documentation `parameter_space.ipynb`. # # With the first 2 input arguments at hand, we can now create our model object, model = dyn.model.Model(config=c, parset=parset) # As this is our first model and we did not specify an explicit model directory, a standard directory name has been assigned to the model. Note that this path has not been created in the file system so far. # # Having created the model object, we can now run it. First, let's create directories for the output, model.setup_directories() # This should have created a directory, where the name has been specified in the config file, c.settings.io_settings['output_directory'] # inside of which you should find # ``` # NGC6278_output/models/ # ``` # inside of which a unique directory for *this particular* model has been created, called model.get_model_directory() # The directory name is constructed from the standard name for new, directly created models and their ``ml`` value. # # The next step is to calculate the orbit library. This step will take a few minutes, model.get_orblib() # Having calculated an orbit library, we now need to find out which orbits are useful for reproducing the observations. This is an Non-Negative Least Squares (NNLS) optimization problem, which can be solved as follows model.get_weights() # Congratulations! You have run your first Schwarzschild model using DYNAMITE. The chi-squared of this model is, model.chi2 # Is that good? I don't know! To find out, we'll have to run more models and compare their chi-squared values. 
For information about this, see the tutorial `running_a_grid_of_models.ipynb`. # ## Plot the Models # # Now let's look at some output for the model that we have just run # + plotter = dyn.plotter.Plotter(config=c) figure = plotter.plot_kinematic_maps(model) # - # The top row shows the data, the middle row shows the model, and the bottom row shows the residuals. The columns, from left to right, are the stellar surface density, $V$, $\sigma$, $h_3$ and $h_4$. We can see the following features in the fit, # # - the model and data surface densities are very similar # - the sense of rotation of the $v$ map is reproduced well, though the amplitude is lower than observed # - the $\sigma$, $h_3$ and $h_4$ maps are less well reproduced # # While the fit is certainly not perfect, it's reassuring to see that some features are reproduced well. To improve the fit, we will have to explore parameter space more fully. See the tutorial on "running a grid of models" for more details. # ## Exercise # # Change one of the potential parameters, then run another Schwarzschild model. You could do this by creating a new configuration file, or manually changing one of the parameters in this notebook. For example, to change the parameter ``ml`` - which is the mass-to-light ratio of the stars - to six, then you can uncomment the next line, # + # parset['f'] = 6. # - # Change any parameter you would like, then re-run a model and save the output plots.
docs/tutorial_notebooks/2_quickstart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Homework 3 Problem 1 # In this homework, you'll learn how to model the sentences with recurrent neural networks(RNNs). We'll provide you with basic skeleton codes for preprocessing sequences and performing sentimental analysis with RNNs. However, provided codes can be improved with some simple modifications. The purpose of this homework is to implement several advanced techniques for improving the performance of vanilla RNNs. # # First, we'll import required libraries. # + # !pip install torchtext # !pip install spacy # !python -m spacy download en import random import time import torch import torch.nn as nn import torch.optim as optim from torchtext import data from torchtext import datasets # - # ## Preprocessing # # For your convenience, we will provide you with the basic preprocessing steps for handling IMDB movie dataset. 
For more information, see https://pytorch.org/text/ TEXT = data.Field(tokenize='spacy', include_lengths=True) LABEL = data.LabelField(dtype=torch.float) train_data, test_data = datasets.IMDB.splits(TEXT, LABEL) # + train_data, valid_data = train_data.split(random_state=random.seed(1234)) print('Number of training examples: {:d}'.format(len(train_data))) print('NUmber of validation examples: {:d}'.format(len(valid_data))) print('Number of testing examples: {:d}'.format(len(test_data))) # - TEXT.build_vocab(train_data, max_size=25000) LABEL.build_vocab(train_data) # Tokens include <unk> and <pad> print('Unique tokens in text vocabulary: {:d}'.format(len(TEXT.vocab))) # Label is either positive or negative print('Unique tokens in label vocabulary: {:d}'.format(len(LABEL.vocab))) device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') batch_size = 64 train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=False, device=device) # + # Note that the sequence is padded with <PAD>(=1) tokens after the sequence ends. for batch in train_iterator: text, text_length = batch.text break print(text[:, -1]) print(text[-10:, -1]) print(text_length[-1]) # - # We will re-load dataset since we already loaded one batch in above cell. device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') batch_size = 64 train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, device=device) # # Problems # # We will provide you with skeleton codes for training RNNs below. Run this code and you'll notice that the training / validation performance is not better than random guessing (50\~60%). # In this homework, you'll have to improve the performance of this network above 80% with several techniques commonly used in RNNs. 
**Please provide your answer in your report and attach notebook file which contains source code for below techniques.** # # (a) (3pt) Look at the shape of tensor `hidden` and `embedded`. Have you noticed what is the problem? Explain what is the issue and report the test performance when you fix the issue. (Hint: This is related to the length of sequences. See how sequence is padded. You may use `nn.utils.rnn.pack_padded_sequence`.) # # (b) (3pt) Use different architectures, such as LSTM or GRU, and report the test performance. "Do not" change hyperparameters from (a), such as batch_size, hidden_dim,... # # Now, try to use below techniques to further improve the performance of provided source codes. Compare the test performance of each component with/without it. # # (c) (1pt) For now, the number of layers in RNN is 1. Try to stack more layers, up to 3. # # (d) (1pt) Use bidirectional RNNs. # # (e) (1pt) Use dropout for regularization with stacked layers (recommended: 3 layers and dropout rate 0.5). # # (f) (1pt) Finally, apply all techniques and have an enough time to play with introduced techniques (e.g., changing hyperparameters, train more epochs, try other techniques you know, ...). Report the final test performance with your implementation and hyperparameter choice. Please note that this is not a competition assignment. We will not evaluate your assignment strictly! 
# +
## CODE FOR STUDENTs ##
class SimpleRNN(nn.Module):
    """Vanilla RNN sentiment classifier (skeleton handed out to students)."""

    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
        super(SimpleRNN, self).__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
        self.rnn = nn.RNN(embedding_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, text, text_lengths):
        # note for part (a): padded positions are fed to the RNN here,
        # which is the issue students are asked to find
        embedded = self.embedding(text)
        output, hidden = self.rnn(embedded)
        hidden = hidden[-1]
        return self.fc(hidden.squeeze(0))


## CODE FOR TAs ##
# NOTE: this reference solution intentionally shadows the student skeleton above.
class SimpleRNN(nn.Module):
    """Reference solution: packed, stackable, optionally bidirectional LSTM."""

    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim,
                 dropout, bidirectional, num_layers, pad_idx):
        super(SimpleRNN, self).__init__()
        self.bidirectional = bidirectional
        self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, bidirectional=bidirectional,
                           dropout=dropout, num_layers=num_layers)
        # bidirectional LSTMs concatenate forward/backward final states
        if self.bidirectional:
            self.fc = nn.Linear(hidden_dim * 2, output_dim)
        else:
            self.fc = nn.Linear(hidden_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
        self.dropout_rate = dropout

    def forward(self, text, text_lengths):
        embedded = self.embedding(text)
        if self.dropout_rate > 0.:
            embedded = self.dropout(embedded)
        # pack sequence so the LSTM skips the <pad> positions
        # fixed: torch >= 1.7 requires the lengths tensor to live on the CPU
        packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.cpu())
        packed_output, (hidden, cell) = self.rnn(packed_embedded)
        if self.bidirectional:
            hidden = torch.cat([hidden[-2], hidden[-1]], dim=1)
        else:
            hidden = hidden[-1]
        if self.dropout_rate > 0.:
            hidden = self.dropout(hidden)
        return self.fc(hidden.squeeze(0))
# -


def binary_accuracy(preds, y):
    """Fraction of sigmoid(preds), rounded to 0/1, that matches the labels y."""
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()
    acc = correct.sum() / len(correct)
    return acc


# +
## CODE FOR STUDENTs
input_dim = len(TEXT.vocab)
embedding_dim = 100
hidden_dim = 128
output_dim = 1
num_epochs = 10
val_iter = 1
pad_idx = TEXT.vocab.stoi[TEXT.pad_token]
# fixed: this call raised a TypeError, because the student skeleton is
# shadowed by the TA class above whose __init__ takes more arguments; the
# model is rebuilt with the full TA signature just below, so the student
# instantiation is kept only as a reference.
# model = SimpleRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)

## CODE FOR TAs
input_dim = len(TEXT.vocab)
embedding_dim = 100
hidden_dim = 128
output_dim = 1
num_epochs = 10
dropout = 0.5
bidirectional = True
val_iter = 1
num_layers = 3
pad_idx = TEXT.vocab.stoi[TEXT.pad_token]
model = SimpleRNN(input_dim, embedding_dim, hidden_dim, output_dim,
                  dropout, bidirectional, num_layers, pad_idx)
# -

# +
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss().to(device)
model = model.to(device)
model.train()

best_valid_loss = float('inf')
for epoch in range(num_epochs):
    running_loss = 0
    running_acc = 0
    start_time = time.time()
    for batch in train_iterator:
        text, text_lengths = batch.text
        predictions = model(text, text_lengths).squeeze(-1)
        loss = criterion(predictions, batch.label)
        acc = binary_accuracy(predictions, batch.label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_acc += acc.item()
    running_loss /= len(train_iterator)
    running_acc /= len(train_iterator)

    if epoch % val_iter == 0:
        model.eval()
        valid_loss = 0
        valid_acc = 0
        with torch.no_grad():
            for batch in valid_iterator:
                text, text_lengths = batch.text
                eval_predictions = model(text, text_lengths).squeeze(1)
                valid_loss += criterion(eval_predictions, batch.label).item()
                valid_acc += binary_accuracy(eval_predictions, batch.label).item()
        model.train()
        valid_loss /= len(valid_iterator)
        valid_acc /= len(valid_iterator)
        # checkpoint the best model seen so far on validation loss
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), './simplernn.pth')

    training_time = time.time() - start_time
    print('#####################################')
    print('Epoch {:d} | Training Time {:.1f}s'.format(epoch+1, training_time))
    print('Train Loss: {:.4f}, Train Acc: {:.2f}%'.format(running_loss, running_acc*100))
    if epoch % val_iter == 0:
        print('Valid Loss: {:.4f}, Valid Acc: {:.2f}%'.format(valid_loss, valid_acc*100))
# -

# +
## THIS IS THE TEST PERFORMANCE YOU SHOULD REPORT ##
model.load_state_dict(torch.load('./simplernn.pth'))
model.eval()

test_loss, test_acc = 0, 0
with torch.no_grad():
    for batch in test_iterator:
        text, text_lengths = batch.text
        test_preds = model(text, text_lengths).squeeze(1)
        test_loss += criterion(test_preds, batch.label).item()
        test_acc += binary_accuracy(test_preds, batch.label).item()
test_loss /= len(test_iterator)
test_acc /= len(test_iterator)

print('Test Loss: {:.4f}, Test Acc: {:.2f}%'.format(test_loss, test_acc*100))
# -
AI502-TA/hw3-1(TA).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import wisps import splat import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix,accuracy_score, precision_score from sklearn.preprocessing import MinMaxScaler, StandardScaler from sklearn.model_selection import train_test_split, KFold,RepeatedKFold, GridSearchCV import seaborn as sns import pandas as pd # %matplotlib inline # - comb=pd.read_hdf(wisps.COMBINED_PHOTO_SPECTRO_FILE, key='new_stars') comb=comb.dropna(how='all') comb=comb.loc[(comb[wisps.INDEX_NAMES]).dropna().index] comb.shape # + train_df=pd.read_pickle(wisps.LIBRARIES+'/training_set.pkl').reset_index(drop=True) pred_df=wisps.Annotator.reformat_table(comb).reset_index(drop=True) # - pred_df=pred_df.drop_duplicates(subset='grism_id') pred_df.shape, train_df.shape # + def apply_scale(x): ##put features on a log scale #replace nans y=np.log10(x) if np.isnan(y) or np.isinf(y): y=np.random.uniform(-99, -98) return y def create_labels(row): #use multiclass system label=0 if row.label ==0.: label=0 if (row.label==1) & (row.spt <20): label=1 if (row.label==1) & np.logical_and(row.spt >=20, row.spt<30): label=2 if (row.label==1) & np.logical_and(row.spt >=30, row.spt<45): label=3 return label # + pred_df['grism_id']=pred_df.grism_id.apply(lambda x: x.lower()) # - #features=wisps.INDEX_NAMES features=np.concatenate([['snr2','f_test', 'line_chi', 'spex_chi'], wisps.INDEX_NAMES]) #features=['snr2','snr1', 'snr3', 'snr4', 'f_test'] pred_df=pred_df[pred_df.snr2>3.] 
train_df['spt']=train_df.spt.apply(wisps.make_spt_number) pred_df['spt']=pred_df.spt.apply(wisps.make_spt_number) labels=train_df.apply(create_labels, axis=1).values train_df[features]=(train_df[features]).applymap(apply_scale) pred_df[features]=(pred_df[features]).applymap(apply_scale) scaler = MinMaxScaler(feature_range=(-100, 100)) scaler.fit(train_df[features]) X=scaler.transform(train_df[features]) y=labels #scale the data set to predict for the prediction set pred_set=scaler.transform(pred_df[features]) class_weigths={0:1., 1:40/10000, 2:1/10000, 3:5/10000} X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=np.random.randint(1000)) np.linspace(10, 1000, 10, dtype=int) import dask from dask.distributed import Client, progress client = Client(processes=False, threads_per_worker=4, n_workers=1, memory_limit='2GB', silence_logs='error') client import warnings warnings.filterwarnings('ignore') # + np.linspace(2, 10, 10, dtype=int) # + #do some grid search, use dask to make it faster? 
# Hyper-parameter grid to search over.
# NOTE: min_samples_split must be an integer >= 2 in scikit-learn, so the
# grid starts at 2 -- a lower bound of 1 makes GridSearchCV fail on that
# candidate (the np.linspace(2, 10, ...) probe in the previous cell hints at
# the intended range).
parameters = {'n_estimators': np.linspace(10, 1000, 10, dtype=int),
              'min_samples_split': np.linspace(2, 10, 10, dtype=int)}

#random forest
mrf= RandomForestClassifier()

#initialize grid search object; refit=False because we only want the scores,
#not a refit best estimator
clf = GridSearchCV(mrf, parameters, refit=False, verbose=2, scoring='precision_macro')

#fit the training data
clf.fit(X, y)
# -

ressc=pd.DataFrame(clf.cv_results_)

plt.plot(ressc['mean_test_score'])

clf.best_score_, clf.best_params_

# final classifier trained with the tuned hyper-parameters and the
# class weights defined earlier to counter the class imbalance
rf = RandomForestClassifier(n_estimators=670, min_samples_split=3, verbose=True,bootstrap=True, n_jobs=-1, class_weight=class_weigths, criterion='entropy', random_state=np.random.randint(1000), warm_start=False)

rf.fit(X_train, y_train)

pred_labels = rf.predict(X_test)

model_accuracy = accuracy_score(y_test, pred_labels)
print ('accuracy score {}'.format(model_accuracy))

classes=['non-UCD', 'M7-L0', 'L', 'T']

# sklearn.metrics.confusion_matrix: rows are the TRUE labels, columns the
# PREDICTED labels
cm = pd.DataFrame(confusion_matrix(y_test, pred_labels), columns=classes, index=classes)

precision_score(y_test, pred_labels, average='macro')

# +
#create a table a confusion matrix
fig, ax=plt.subplots(figsize=(8, 6))
#matr=(cm/cm.sum()).applymap(lambda x: np.round(x, 2)).values
matr=cm.values
im = ax.imshow(matr, cmap='Blues')

# We want to show all ticks...
ax.set_xticks(np.arange(len(classes)))
ax.set_yticks(np.arange(len(classes)))
# ... and label them with the respective list entries
ax.set_xticklabels(classes)
ax.set_yticklabels(classes)

# imshow maps matrix rows to the y axis; since the matrix rows are the true
# labels, the y axis is "True" and the x axis "Predicted" (the original
# labels were swapped)
ax.set_xlabel('Predicted Label', fontsize=18)
ax.set_ylabel('True Label', fontsize=18)

# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="right",
         rotation_mode="anchor")

# Loop over data dimensions and create text annotations.
for i in range(len(classes)): for j in range(len(classes)): text = ax.text(j, i, matr[i, j], ha="center", va="center", color="k", fontsize=18) ax.set_xlim([-0.5, 3.5]) ax.set_ylim([3.5, -0.5]) plt.tight_layout() plt.savefig(wisps.OUTPUT_FIGURES+'/confusion_matrix.pdf') # - rlabels=rf.predict(pred_set) len(rlabels[rlabels>0]) cands=pd.read_pickle(wisps.OUTPUT_FILES+'/true_spectra_cands.pkl') cands['grism_id']=cands.grism_id.apply(lambda x: x.lower()) cands['spt']=[x.spectral_type for x in cands.spectra] len(cands), len( pred_df[pred_df.grism_id.isin(cands.grism_id.values)]) strs=wisps.datasets['stars'] # + #cands[~ cands.grism_id.isin(pred_df.grism_id.values) ] # - cands[~ cands.grism_id.isin(strs.grism_id.values) ] true=(pred_df[(rlabels>0) & pred_df.grism_id.isin(cands.grism_id.values)])#.drop_duplicates(subset='grism_id') truep=len(true) ps=len(rlabels[rlabels>0]) len(true[true.spt.between(17,19)]), len(true[true.spt.between(20,29)]), len(true[true.spt.between(30,40)]) 'FP rate {}'.format((ps-truep)/ps) rf_dict={'classifier': rf, 'sclr':scaler, 'feats':features} import pickle #save the random forest output_file=wisps.OUTPUT_FILES+'/random_forest_classifier.pkl' with open(output_file, 'wb') as file: pickle.dump(rf_dict,file) sv_df=pred_df[(rlabels>0)] sv_df.to_pickle(wisps.LIBRARIES+'/labelled_by_rf.pkl') slbyids=pd.read_pickle(wisps.OUTPUT_FILES+'/selected_by_indices.pkl') # + #slbyids # - len(sv_df[(sv_df.grism_id.isin(slbyids.grism_id)) & (sv_df.grism_id.isin(cands.grism_id))]) len(sv_df[( ~sv_df.grism_id.isin(slbyids.grism_id)) & (sv_df.grism_id.isin(cands.grism_id))]) len(slbyids[( ~slbyids.grism_id.isin(sv_df.grism_id)) & (slbyids.grism_id.isin(cands.grism_id))]) pred_df.shape not_in=cands[~cands.grism_id.isin(comb.grism_id)] (not_in[['spt', 'grism_id']]).values s=wisps.Source(filename='goodss-04-G141_17402', is_ucd=True) # + #low=pd.read_pickle(wisps.LIBRARIES+'/lowsnr_obejcts.pkl') # + #high=pd.read_pickle(wisps.LIBRARIES+'/highsnr_obejcts.pkl') # + 
#comb=pd.concat([low, high], ignore_index=True)

# +
#comb['grism_id']=comb.grism_id.apply(lambda x: str(x).lower())

# +
#comb.to_hdf(wisps.COMBINED_PHOTO_SPECTRO_FILE, key='new_stars')

# +
#comb.loc[578]
# -

comb.shape

s=wisps.Source(filename='goodss-28-G141_14876')

s.plot()

# Read every ATMO 2020 evolutionary-track file into one table.
# NOTE(review): glob, tqdm and ascii (presumably astropy.io.ascii) are
# assumed to be imported elsewhere in the notebook -- confirm.
files=glob.glob('/users/caganze/desktop/ATMO_2020_models/evolutionary_tracks/ATMO_CEQ/MKO_WISE_IRAC/*')

dframes=[]
for f in tqdm(files):
    dframes.append(ascii.read(f).to_pandas())

# Concatenate all tracks. The previous .rename(columns='Teff') call was
# removed: a bare string is not a valid column mapper (pandas raises), and
# the columns are renamed properly just below.
df=pd.concat(dframes)

# normalise column names and rename teff -> temperature
df.columns=[x.lower() for x in df.columns]
df=df.rename(columns={'teff':'temperature'})

df.to_csv('/users/caganze/research/wisps/evmodels/phillips2020.csv')
notebooks/lsstdsfp_supervised_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Peruvian Fiscal Numbers # ## Introduction # The function `clean_pe_ruc()` cleans a column containing Peruvian fiscal number (RUC) strings, and standardizes them in a given format. The function `validate_pe_ruc()` validates either a single RUC strings, a column of RUC strings or a DataFrame of RUC strings, returning `True` if the value is valid, and `False` otherwise. # RUC strings can be converted to the following formats via the `output_format` parameter: # # * `compact`: only number strings without any seperators or whitespace, like "20512333797". # * `standard`: RUC strings with proper whitespace in the proper places. Note that in the case of RUC, the compact format is the same as the standard one. # * `dni`: return the DNI (CUI) part of the number for natural persons. Note if the RUC is not for natural persons, return NaN. # # Invalid parsing is handled with the `errors` parameter: # # * `coerce` (default): invalid parsing will be set to NaN # * `ignore`: invalid parsing will return the input # * `raise`: invalid parsing will raise an exception # # The following sections demonstrate the functionality of `clean_pe_ruc()` and `validate_pe_ruc()`. # ### An example dataset containing RUC strings import pandas as pd import numpy as np df = pd.DataFrame( { "ruc": [ "20512333797", "20512333798", '7542011030', '7552A10004', '8019010008', "hello", np.nan, "NULL", ], "address": [ "123 Pine Ave.", "main st", "1234 west main heights 57033", "apt 1 789 s maple rd manhattan", "robie house, 789 north main street", "1111 S Figueroa St, Los Angeles, CA 90015", "(staples center) 1111 S Figueroa St, Los Angeles", "hello", ] } ) df # ## 1. 
Default `clean_pe_ruc` # # By default, `clean_pe_ruc` will clean ruc strings and output them in the standard format with proper separators. from dataprep.clean import clean_pe_ruc clean_pe_ruc(df, column = "ruc") # ## 2. Output formats # This section demonstrates the output parameter. # ### `standard` (default) clean_pe_ruc(df, column = "ruc", output_format="standard") # ### `compact` clean_pe_ruc(df, column = "ruc", output_format="compact") # ### `dni` clean_pe_ruc(df, column = "ruc", output_format="dni") # ## 3. `inplace` parameter # # This deletes the given column from the returned DataFrame. # A new column containing cleaned RUC strings is added with a title in the format `"{original title}_clean"`. clean_pe_ruc(df, column="ruc", inplace=True) # ## 4. `errors` parameter # ### `coerce` (default) clean_pe_ruc(df, "ruc", errors="coerce") # ### `ignore` clean_pe_ruc(df, "ruc", errors="ignore") # ## 4. `validate_pe_ruc()` # `validate_pe_ruc()` returns `True` when the input is a valid RUC. Otherwise it returns `False`. # # The input of `validate_pe_ruc()` can be a string, a Pandas DataSeries, a Dask DataSeries, a Pandas DataFrame and a dask DataFrame. # # When the input is a string, a Pandas DataSeries or a Dask DataSeries, user doesn't need to specify a column name to be validated. # # When the input is a Pandas DataFrame or a dask DataFrame, user can both specify or not specify a column name to be validated. If user specify the column name, `validate_pe_ruc()` only returns the validation result for the specified column. If user doesn't specify the column name, `validate_pe_ruc()` returns the validation result for the whole DataFrame. 
from dataprep.clean import validate_pe_ruc print(validate_pe_ruc('20512333797')) print(validate_pe_ruc('20512333798')) print(validate_pe_ruc('7542011030')) print(validate_pe_ruc('7552A10004')) print(validate_pe_ruc('8019010008')) print(validate_pe_ruc("hello")) print(validate_pe_ruc(np.nan)) print(validate_pe_ruc("NULL")) # ### Series validate_pe_ruc(df["ruc"]) # ### DataFrame + Specify Column validate_pe_ruc(df, column="ruc") # ### Only DataFrame validate_pe_ruc(df)
docs/source/user_guide/clean/clean_pe_ruc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Contoh penggunaan `morph_analyzer.py` # # Pakej yang diperlukan: `pyspellchecker` (Dapatkan di https://pypi.org/project/pyspellchecker/ jika tidak termasuk dalam sistem anda.) # # (Required package: `pyspellchecker` (Download from https://pypi.org/project/pyspellchecker/ if your system does not have it.)) import pickle with open("rootlist.pkl", "rb") as f: rootlist = pickle.load(f) import morph_analyzer as ma # ## Tanpa kamus MALINDO Morph # # (Without the MALINDO Morph dictionary) ma.morph("keberkaitananlah", rootlist) # ### Parameter `Indo` # # Parameter `Indo` mengaktifkan awalan _N-_ (cth. _N-_ + _kopi_ = _ngopi_). # # (The `Indo` parameter activates the prefix _N-_ (e.g. _N-_ + _kopi_ = _ngopi_).) ma.morph("nyampai", rootlist) ma.morph("nyampai", rootlist, Indo=True) # ### Parameter `n` # # Parameter `n` mengawal jumlah calon yang dihasilkan. Nilai lalainya ialah 5. # # (The `n` parameter controls the number of the candidates to be produced. The default value is 5.) ma.morph("mengebibkah", rootlist) ma.morph("mengebibkah", rootlist, n=15) # ## Bersama dengan kamus MALINDO Morph # # Walaupun `morph_analzser.py` boleh digunakan secara sendirian, adalah lebih realistik untuk menggunakannya bersama dengan kamus MALINDO Morph yang (kebanyakan) analisis morfologinya sudah diperiksa oleh manusia. Dalam contoh kod di bawah, `morph_analyzer.py` digunakan hanya apabila perkataan yang ingin dianalisis tidak termasuk dalam kamus MALINDO Morph. # # (With the MALINDO Morph dictionary # Although `morph_analyzer.py` can be used by itself, it is more realistic to use it with the MALINDO Morph dictionary in which most of the morphological analyses have been examined manually. 
In the code below, `morph_analyzer.py` is used only when the word to be analysed is not found in the MALINDO Morph dictionary.) # + # Buat kamus daripada MALINDO Morph with open("malindo_dic_20200917.tsv", "r", encoding="utf-8") as f: #Gunakan versi terkini MALINDO Moprh katakata = [] for l in f: items = l.strip().split("\t") if not items[0].startswith("ex-"): #bahagian yg sudah diperiksa manusia sahaja katakata.append(tuple(items[1:7])) #tanpa ID, sumber, dasar, lema kamus = dict() for kata in katakata: surface = kata[1] if not surface in kamus.keys(): kamus[surface] = [] kamus[surface].append(kata) # - def analisis(w, Indo=False, n=5): try: return kamus[w][:n] except: return list(ma.morph(w, rootlist, Indo, n)) # ### Perkataan yang ada dalam kamus MALINDO Morph # # (Words available in the MALINDO Morph dictionary) analisis("mengeposkan") analisis("mereka") analisis("mereka", n=1) # ### Perkataan yang tidak ada dalam kamus MALINDO Morph # # (Words unavailable in the MALINDO Morph dictionary) analisis("mengepobkan") analisis("mengepobkan", n=3)
contoh_penggunaan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Backpropagation Tutorial # (C) 2019 by [<NAME>](http://damir.cavar.me/) # **Version:** 0.1, November 2019 # **Download:** This and various other Jupyter notebooks are available from my [GitHub repo](https://github.com/dcavar/python-tutorial-for-ipython). # ## Introduction # For more details on Backpropagation and its use in Neural Networks see Rumelhart, Hinton, and Williams (1986a) and Rumelhart, Hinton & Williams (1986b). A detailed overview is also provided in Goodfellow, Bengio, and Courville (2016). # The ideas and initial versions of this Python-based notebook have been inspired by many open and public tutorials and articles, but in particular by these three: # - <NAME> (2015) [*A Neural Network in 11 lines of Python (Part 1)*](https://iamtrask.github.io/2015/07/12/basic-python-network/) # - <NAME> (2015) [*A Step by Step Backpropagation Example*](https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/) # - <NAME> (2018) [*Derivative of the Sigmoid function*](https://towardsdatascience.com/derivative-of-the-sigmoid-function-536880cf918e) # # A lot of code examples and discussion has been compiled here using these sources. # ### Preliminaries # This notebook uses [nbextensions](https://github.com/ipython-contrib/jupyter_contrib_nbextensions) with *python-markdown/main* enabled. These extensions might not work in Jupyter Lab, thus some variable references in the markdown cells might not display. # We will use [numpy](https://numpy.org/) in the following demo. Let us import it and assign the *np* alias to it: import numpy as np # For plots of curves and functions we will use *pyplot* from *matplotlib*. 
We will import it here: from matplotlib import pyplot as plt # ## Non-linearity Function and Derivatives # The Sigmoid function is defined as: # $$\sigma(x) = \frac{1}{1 + e^{-x}} $$ # We can specify it in Python as: def sigmoid(x): return 1 / (1 + np.exp(-x)) # We can now plot the sigmoid function for x values between -10 and 10: # %matplotlib inline x = np.arange(-10, 10, 0.2) y = sigmoid(x) fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1]) ax.plot(x, y) ax.set_xlabel("x") ax.set_ylabel("y") ax.set_title("Sigmoid") print() # In the following for Backpropagation we will make use of the Derivative of the Sigmoid function. The Derivative of Sigmoid is defined as: # $$\frac{d}{dx}\sigma(x) = \sigma(x) (1 - \sigma(x))$$ # We can derive this equation as follows. Assume that: # $$\frac{d}{dx} \sigma(x) = \frac{d}{dx} \frac{1}{1 + e^{-x}} $$ # We can invert the fraction using a negative exponent: # $$\frac{d}{dx} \sigma(x) = \frac{d}{dx} \frac{1}{1 + e^{-x}} = \frac{d}{dx} (1 + e^{-x})^{-1}$$ # We can apply the *reciprocal rule*, which is, the numerator is the derivative of the function ($g'(x)$) times -1 divided by the square of the denominator $g(x)$: # $$\frac{d}{dx} \left[ \frac{1}{g(x)} \right] = \frac{-g'(x)}{[g(x)]^2} = -g(x)^{-2} g'(x)$$ # In our Derivative of Sigmoid derivation, we can now reformulate as: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = -(1 + e^{-x})^{-2} \frac{d}{dx} (1 + e^{-x})$$ # With $\alpha$ and $\beta$ constants, the *Rule of Linearity* says that: # $$\frac{d}{dx} \left( \alpha f(x) + \beta g(x) \right) = \frac{d}{dx} \left( \alpha f(x) \right) + \frac{d}{dx} \left( \beta g(x) \right) = \alpha f'(x) + \beta g'(x)$$ # This means, using the Rule of Linearity and given that the derivative of a constant is 0, we can rewrite our equation as: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = -(1 + e^{-x})^{-2} \frac{d}{dx} (1 + e^{-x}) = -(1 + e^{-x})^{-2} \left( \frac{d}{dx}[1] + \frac{d}{dx}[e^{-x}] \right) = -(1 + e^{-x})^{-2} \left( 0 + \frac{d}{dx}[e^{-x}] 
\right) = -(1 + e^{-x})^{-2} \frac{d}{dx}[e^{-x}] $$ # The *Exponential Rule* says that: # $$\frac{d}{dx} e^{u(x)} = e^{u(x)} \frac{d}{dx} x$$ # We can thus rewrite: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = -(1 + e^{-x})^{-2} e^{-x} \frac{d}{dx}[-x] $$ # This is equivalent to: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = -(1 + e^{-x})^{-2} e^{-x} -\frac{d}{dx}[x]$$ # Given that a derivative of a variable is 1, we can rewrite as: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = -(1 + e^{-x})^{-2} e^{-x} -1 = (1 + e^{-x})^{-2} e^{-x} = \frac{e^{-x}}{(1 + e^{-x})^2}$$ # We can rewrite the derivative as: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = \frac{1 e^{-x}}{(1 + e^{-x}) (1 + e^{-x})} = \frac{1}{1 + e^{-x}} \frac{e^{-x}}{1 + e^{-x}} = \frac{1}{1 + e^{-x}} \frac{e^{-x} + 1 - 1}{1 + e^{-x}} = \frac{1}{1 + e^{-x}} \left( \frac{1 + e^{-x}}{1 + e^{-x}} - \frac{1}{1 + e^{-x}} \right)$$ # We can simplify this to: # $$\frac{d}{dx} (1 + e^{-x})^{-1} = \frac{1}{1 + e^{-x}} \left( 1 - \frac{1}{1 + e^{-x}} \right)$$ # This means that we can derive the Derivative of the Sigmoid function as: # $$\frac{d}{dx} \sigma(x) = \sigma(x) ( 1 - \sigma(x) )$$ # We can specify the Python function of the Derivative of the Sigmoid function as: def sigmoidDerivative(x): return sigmoid(x) * (1 - sigmoid(x)) # We can plot the Derivative of the Sigmoid Function as follows: # %matplotlib inline x = np.arange(-10, 10, 0.2) fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1]) y = sigmoidDerivative(x) ax.plot(x, y, color="red", label='Derivative of Sigmoid'.format(1)) y = sigmoid(x) ax.plot(x, y, color="blue", label='Sigmoid'.format(1)) fig.legend(loc='center right') ax.set_xlabel("x") ax.set_ylabel("y") ax.set_title("Derivative of the Sigmoid Function") print() # ## Forward- and Backpropagation # We will define a simple network that takes an input as defined for *X* and that generates a corresponding output as defined in *y*. 
The input array *X* is: X = np.array( [ [0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1] ] ) # The rows in $X$ are the input vectors for our training or learning phase. Each vector has 3 dimensions. # The output array *y* represents the expected output that the network is expected to learn from the input data. It is defined as a row-vector with 4 rows and 1 column: y = np.array( [0, 0, 1, 1] ).reshape(-1, 1) np.shape(y) # We will define a weight matrix *W* and initialize it with random weights: W = 2 * np.random.random((3, 1)) - 1 print(W) # In this simple example *W* is the weight matrix that connects two layers, the input (*X*) and the output layer (*O*). # The optimization or learning phase consists of a certain number of iterations that : iterations = 4000 # Let us keep track of the output error (as becomes clear below) in the following variable: error = 0.0 # Repeat for a specific number of iterations the following computations. Initially we take the entire set of training examples in *X* and process them all at the same time. This is called *full batch* training, inidicated by the dot-product between *X* and *W*. Computing *O* is the first prediction step by taking the dot-product of *X* and *W* and computing the sigmoid function over it: # + for i in range(iterations): O = sigmoid(np.dot(X, W)) O_error = y - O error = np.mean(np.abs(O_error)) if (i % 100) == 0: print("Error:", error) # Compute the delta O_delta = O_error * sigmoidDerivative(O) # update weights W += np.dot(X.T, O_delta) print("O:", O) # - # The matrix *X* has 4 rows and 3 columns. The weight matrix *W* has 3 rows and 1 column. The output will be a row vector with 4 rows and 1 column, representing the output that we want to align as close as possible to *y*. # + [markdown] variables={" error ": "0.001393528327629781", " iterations ": "4000"} # *O_error* is the difference between *y* and the initial guess in *O*. We want to see *O* to reflect *y* as closely as possible. 
After {{ iterations }} in the loop above, we see that *O* is resembling *y* very well, with an error of {{ error }}. # - # In the next step we compute the derivative of the sigmoid function for the initial guess vector. The Derivative is weighted by the error, which means that if the slope was shallow (close to or approaching 0), the guess was quite good, that is the network was confident about the output for a given input. If the slope was higher, as for example for *x = 0*, the prediction was not very good. Such bad predictions get updated significantly, while the confident predictions get updated minimally, multiplying them with some small number close to 0. # For every single weight, we # ## Adding a Layer # In the following example we will slightly change the ground truth. Compare the following definition of *y* with the definition above: y = np.array([[0], [1], [1], [0]]) # In the following network specification we introduce a second layer # + np.random.seed(1) # randomly initialize our weights with mean 0 Wh = 2 * np.random.random((3, 4)) - 1 Wo = 2 * np.random.random((4, 1)) - 1 Xt = X.T # precomputing the transform of X for the loop for i in range(80000): # Feed forward through layers X, H, and O H = sigmoid(np.dot(X, Wh)) O = sigmoid(np.dot(H, Wo)) # how much did we miss the target value? O_error = y - O error = np.mean(np.abs(O_error)) if (i % 10000) == 0: print("Error:", error) # compute the direction of the optimization for the output layer O_delta = O_error * sigmoidDerivative(O) # how much did each H value contribute to the O error (according to the weights)? H_error = O_delta.dot(Wo.T) # compute the directions of the optimization for the hidden layer H_delta = H_error * sigmoidDerivative(H) Wo += H.T.dot(O_delta) Wh += Xt.dot(H_delta) print(O) # - # ## References # - Goodfellow, Ian, <NAME>, <NAME> (2016). [Deep Learning](http://www.deeplearningbook.org/). MIT Press. # - Nielsen, <NAME>. (2015). "Chapter 2: How the backpropagation algorithm works". 
Neural Networks and Deep Learning. Determination Press. # - Rumelhart, <NAME>., <NAME>, <NAME> (1986a). "Learning representations by back-propagating errors". *Nature* 323 (6088): 533–536. [doi:10.1038/323533a0](https://doi.org/10.1038%2F323533a0). # - Rumelhart, <NAME>., <NAME>, <NAME> (1986b). "8. Learning Internal Representations by Error Propagation". In: <NAME>, <NAME> (eds.). Parallel Distributed Processing: Explorations in the Microstructure of Cognition. Volume 1: Foundations. Cambridge: MIT Press.
notebooks/Backpropagation_Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# import os module to create paths that work across operating systems
import os
# module for reading csv files
import csv

# path to the csv with one (date, profit/loss) record per month
csvpath = os.path.join("Resources", "budget_data.csv")

# create a txt file to hold the analysis
outputfile = os.path.join("Analysis", "budget_analysis.txt")

# running totals across all months
totalMonths = 0
totalBudget = 0

# month labels and month-over-month changes in profit/loss
# (a single consistent name is used here; the original declared
# `monthChange` but appended to an undefined `monthlyChanges`)
months = []
monthlyChanges = []

# +
# read the whole file inside the `with` block so the reader is never used
# after the file has been closed (the original iterated the closed reader)
with open(csvpath, newline="") as csvfile:
    # create a csv reader object
    csvreader = csv.reader(csvfile, delimiter=",")

    # first row is the header: date, profit/losses
    header = next(csvreader)

    # seed the totals and the "previous" value with the first data row;
    # profit/loss is in column 1
    firstRow = next(csvreader)
    previousBudget = float(firstRow[1])
    totalMonths += 1
    totalBudget += float(firstRow[1])

    for row in csvreader:
        totalMonths += 1
        totalBudget += float(row[1])

        # net change relative to the previous month
        netChange = float(row[1]) - previousBudget

        # add on to the list of monthly changes
        monthlyChanges.append(netChange)

        # record the month the change occurred (month label is column 0)
        months.append(row[0])

        # update the previous budget
        previousBudget = float(row[1])
# -

# calculate the average month-over-month change
averageChange = sum(monthlyChanges) / len(monthlyChanges)

# month and value of the greatest increase and decrease in profits
greatestIncrease = [months[0], monthlyChanges[0]]
greatestDecrease = [months[0], monthlyChanges[0]]

# +
# scan the monthly changes for the greatest and least change
for m in range(len(monthlyChanges)):
    if monthlyChanges[m] > greatestIncrease[1]:
        greatestIncrease[1] = monthlyChanges[m]
        greatestIncrease[0] = months[m]
    if monthlyChanges[m] < greatestDecrease[1]:
        greatestDecrease[1] = monthlyChanges[m]
        greatestDecrease[0] = months[m]

# generate the report text ("Anaylsis"/"Dncrease" typos fixed)
output = (
    f"Financial Analysis \n"
    f"------------------------- \n"
    f"Total Months: {totalMonths} \n"
    f"Total Budget: ${totalBudget} \n"
    f"Average Change: ${averageChange} \n"
    f"Greatest Increase in Profit: {greatestIncrease[0]} ({greatestIncrease[1]}) \n"
    f"Greatest Decrease in Profit: {greatestDecrease[0]} ({greatestDecrease[1]})"
)
# -

# +
# print the output
print(output)

# export the output to the output text file
with open(outputfile, "w") as textfile:
    textfile.write(output)
# -
.ipynb_checkpoints/Untitled1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Action Conditional Deep Markov Model using cartpole dataset # + from tqdm import tqdm import torch from torch import optim import torch.nn as nn import torch.nn.functional as F from torchvision import transforms, datasets from tensorboardX import SummaryWriter import numpy as np from utils import DMMDataset, imshow, postprocess from torch.utils.data import DataLoader import matplotlib.pyplot as plt # %matplotlib inline seed = 1 torch.manual_seed(seed) if torch.cuda.is_available(): device = "cuda" else: device = "cpu" # - # ## Prepare Dataset # you have to run prepare_cartpole_dataset.py or download from : # https://drive.google.com/drive/folders/1w_97RLFS--CpdUCNw1C-3yPLhceZxkO2?usp=sharing # + batch_size = 256 train_loader = DataLoader(DMMDataset(), batch_size=batch_size, shuffle=True, drop_last=True) # test_loader = DataLoader(DMMTestDataset(), batch_size=batch_size, shuffle=False, drop_last=True) _x = iter(train_loader).next() print(_x['episode_frames'][0][0:30].shape) # + imshow(postprocess(_x['episode_frames'][0][0:30])) # 0: Push cart to the left # 1:Push cart to the right print(_x['actions'][0][0:30]) # for more details about actions: https://github.com/openai/gym/blob/38a1f630dc9815a567aaf299ae5844c8f8b9a6fa/gym/envs/classic_control/cartpole.py#L37 # for more details about CartPole-v1: https://gym.openai.com/envs/CartPole-v1/ # + from pixyz.utils import print_latex from pixyz.distributions import Bernoulli, Normal, Deterministic h_dim = 32 hidden_dim = 32 z_dim = 16 t_max = 30 u_dim = 1 # - # ## Deep Markov Model # * Original paper: Structured Inference Networks for Nonlinear State Space Models (https://arxiv.org/abs/1609.09869) # * Original code: https://github.com/clinicalml/dmm # # # Prior(Transition model): $p_{\theta}(z_{t} | 
z_{t-1}, u) = \cal{N}(\mu = f_{prior_\mu}(z_{t-1}, u), \sigma^2 = f_{prior_\sigma^2}(z_{t-1}, u)$
# Generator(Emission): $p_{\theta}(x | z)=\mathscr{B}\left(x ; \lambda=g_{x}(z)\right)$
#
# RNN: $p(h) = RNN(x)$
# Inference(Combiner): $p_{\phi}(z | h, z_{t-1}, u) = \cal{N}(\mu = f_{\mu}(h, z_{t-1}, u), \sigma^2 = f_{\sigma^2}(h, z_{t-1}, u)$

# ## Define probability distributions

# NOTE(review): the dimensions h_dim, z_dim, u_dim, hidden_dim, t_max, device,
# train_loader, batch_size and helpers (print_latex, postprocess, imshow, tqdm,
# optim, nn, F, torch) are defined earlier in the notebook, outside this view.

# +
# RNN
class RNN(Deterministic):
    """
    h = RNN(x)

    Deterministic pixyz distribution: given the observed frame sequence x,
    a small CNN encodes each frame to a 32-d feature and a bidirectional GRU
    produces the hidden-state sequence h consumed by the inference network.
    """
    def __init__(self):
        super(RNN, self).__init__(var=["h"], cond_var=["x"])
        # Frame encoder: 28x28x3 image -> 32-d feature vector
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.fc1 = nn.Linear(128*7*7, 256)
        self.fc2 = nn.Linear(256, 32)
        # Bidirectional GRU over the time axis; h0 is a learned initial state
        # (2 = number of directions).
        self.rnn = nn.GRU(32, h_dim, bidirectional=True)
        self.h0 = nn.Parameter(torch.zeros(2, 1, self.rnn.hidden_size))
        self.hidden_size = self.rnn.hidden_size

    def forward(self, x):
        # Broadcast the learned initial state over the batch dimension.
        h0 = self.h0.expand(2, x.size(1), self.rnn.hidden_size).contiguous()
        x = x.reshape(-1, 3, 28, 28)  # (T*N)x3x28x28: fold time into batch for the CNN
        h = F.relu(self.conv1(x))  # Nx64x14x14
        h = F.relu(self.conv2(h))  # Nx128x7x7
        h = h.view(h.shape[0], 128*7*7)  # Nx128*7*7
        h = F.relu(self.fc1(h))  # Nx256
        h = F.relu(self.fc2(h))  # Nx32
        # NOTE(review): hard-codes sequence length 30 — assumes t_max == 30; confirm.
        h = h.reshape(30, -1, 32)  # 30xNx32: unfold time again for the GRU
        h, _ = self.rnn(h, h0)  # h: 30xNx(2*h_dim) for the bidirectional GRU
        return {"h": h}


# Emission p(x_t | z_t)
class Generator(Normal):
    """
    Given the latent z at time step t, return the parameters of the emission
    distribution p(x_t | z_t).

    NOTE(review): despite the markdown above describing a Bernoulli emission,
    this class is a pixyz Normal with fixed scale 1.0 — the decoded image is
    the Gaussian mean.
    """
    def __init__(self):
        super(Generator, self).__init__(var=["x"], cond_var=["z"])
        self.fc1 = nn.Linear(z_dim, 256)
        self.fc2 = nn.Linear(256, 128*7*7)
        self.conv1 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1)

    def forward(self, z):
        h = F.relu(self.fc1(z))
        h = F.relu(self.fc2(h))
        h = h.view(h.shape[0], 128, 7, 7)  # 128x7x7
        h = F.relu(self.conv1(h))  # 64x14x14
        generated_x = self.conv2(h)  # 3x28x28
        return {"loc": generated_x, "scale": 1.0}


class Inference(Normal):
    """
    Combiner network q(z_t | z_{t-1}, x_{t:T}, u).

    Given the previous latent z_{t-1}, the RNN hidden state h (summarizing the
    observations) and the action u, return loc/scale of the Gaussian posterior.
    """
    def __init__(self):
        super(Inference, self).__init__(var=["z"], cond_var=["h", "z_prev", "u"])
        self.fc1 = nn.Linear(z_dim+u_dim, h_dim*2)
        self.fc21 = nn.Linear(h_dim*2, z_dim)
        self.fc22 = nn.Linear(h_dim*2, z_dim)

    def forward(self, h, z_prev, u):
        feature = torch.cat((z_prev, u), 1)
        h_z = torch.tanh(self.fc1(feature))
        # Average the observation summary h with the (z_prev, u) projection —
        # the standard DMM combiner trick.  Assumes h has width h_dim*2
        # (bidirectional GRU output); confirm against RNN above.
        h = 0.5 * (h + h_z)
        return {"loc": self.fc21(h), "scale": F.softplus(self.fc22(h))}


class Prior(Normal):
    """
    Transition prior p(z_t | z_{t-1}, u): a Gaussian whose loc/scale are an
    MLP of the previous latent and the action.
    """
    def __init__(self):
        super(Prior, self).__init__(var=["z"], cond_var=["z_prev", "u"])
        self.fc1 = nn.Linear(z_dim+u_dim, hidden_dim)
        self.fc21 = nn.Linear(hidden_dim, z_dim)
        self.fc22 = nn.Linear(hidden_dim, z_dim)

    def forward(self, z_prev, u):
        feature = torch.cat((z_prev, u), 1)
        h = F.relu(self.fc1(feature))
        return {"loc": self.fc21(h), "scale": F.softplus(self.fc22(h))}


# +
prior = Prior().to(device)
encoder = Inference().to(device)
decoder = Generator().to(device)
rnn = RNN().to(device)

# Joint of transition prior and emission: used to roll out videos from scratch.
generate_from_prior = prior * decoder
print_latex(generate_from_prior)
# -

# ## Define loss

# +
from pixyz.losses import KullbackLeibler
from pixyz.losses import Expectation as E
from pixyz.losses import LogProb
from pixyz.losses import IterativeLoss

# Per-step negative ELBO: -E_q[log p(x|z)] + KL(q || p).
step_loss = - E(encoder, LogProb(decoder)) + KullbackLeibler(encoder, prior)

# IterativeLoss: https://docs.pixyz.io/en/latest/losses.html#pixyz.losses.IterativeLoss
# Unrolls step_loss over t_max steps, feeding each step's z back in as z_prev.
_loss = IterativeLoss(step_loss, max_iter=t_max, series_var=["x", "h", "u"], update_value={"z": "z_prev"})

# Expectation over the RNN's hidden states, averaged over the batch.
loss = E(rnn, _loss).mean()
# -

# +
from pixyz.models import Model

dmm = Model(loss, distributions=[rnn, encoder, decoder, prior],
            optimizer=optim.RMSprop, optimizer_params={"lr": 5e-4}, clip_grad_value=10)
print(dmm)
print_latex(dmm)
# -

# ## Sampling code

# +
def data_loop(epoch, loader, model, device, train_mode=False):
    """
    Run one pass over `loader`, training (train_mode=True) or evaluating the
    model, and return the dataset-averaged loss.
    """
    mean_loss = 0
    for data in loader:
        # Batches are dicts; comments give the shapes seen during training.
        x = data['episode_frames'].to(device)  # 256,30,3,28,28
        u = data['actions'].to(device)  # 256,30,1
        batch_size = x.size()[0]
        # Time-major layout expected by IterativeLoss / the GRU.
        x = x.transpose(0, 1)  # 30,256,3,28,28
        u = u.transpose(0, 1)  # 30,256,1
        # Initial latent for the recurrence is all zeros.
        z_prev = torch.zeros(batch_size, z_dim).to(device)
        if train_mode:
            mean_loss += model.train({'x': x, 'z_prev': z_prev, 'u': u}).item() * batch_size
        else:
            mean_loss += model.test({'x': x, 'z_prev': z_prev, 'u': u}).item() * batch_size
    mean_loss /= len(loader.dataset)
    if train_mode:
        print('Epoch: {} Train loss: {:.4f}'.format(epoch, mean_loss))
    else:
        print('Test loss: {:.4f}'.format(mean_loss))
    return mean_loss


# Fixed action sequence used when generating videos from the prior.
# NOTE(review): iterator.next() is the removed Python-2 style API — on
# current Python this should be next(iter(train_loader)); left as-is here.
_data = iter(train_loader).next()
_u = _data['actions'].to(device)  # 256,30,1
_u = _u.transpose(0, 1)  # 30,256,1

def plot_video_from_latent(batch_size):
    """
    Roll the generative model forward for t_max steps from z_prev = 0 using
    the fixed actions _u, decoding the emission mean at each step.
    Returns a batch-major tensor of generated frames.
    """
    x = []
    z_prev = torch.zeros(batch_size, z_dim).to(device)
    for step in range(t_max):
        # Sample z_t ~ p(z_t | z_{t-1}, u_t) and decode its mean image.
        samples = generate_from_prior.sample({'z_prev': z_prev, 'u': _u[step]})
        x_t = decoder.sample_mean({"z": samples["z"]})
        z_prev = samples["z"]
        x.append(x_t[None, :])
    x = torch.cat(x, dim=0).transpose(0, 1)
    return x
# -

# ## Train

epochs = 200
for epoch in tqdm(range(1, epochs + 1)):
    train_loss = data_loop(epoch, train_loader, dmm, device, train_mode=True)
    sample = plot_video_from_latent(batch_size)
    # Every 50 epochs, visualize the first generated episode and compare it
    # with a real one.
    if epoch % 50 == 0:
        plt.figure(figsize=(10,3))
        for i in range(30):
            plt.subplot(3,10,i+1)
            # NOTE(review): np.float is removed in NumPy >= 1.24; use float there.
            plt.imshow(sample[0][i].cpu().detach().numpy().astype(np.float).reshape(3,28,28).transpose(1,2,0))
        plt.show()
        imshow(postprocess(sample[0].cpu().detach()))
        imshow(postprocess(_data["episode_frames"][0]))
        print(_data["actions"][0])
tutorial/English/04-DeepMarkovModel.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R (system-wide)
#     language: r
#     metadata:
#       cocalc:
#         description: R statistical programming language
#         priority: 10
#         url: https://www.r-project.org/
#     name: ir
#     resource_dir: /ext/jupyter/kernels/ir
# ---

# # Introduction to R
#
# **Questions**
# - What is an object?
# - What is a function and how can we pass arguments to functions?
# - How can values be initially assigned to variables of different data types?
# - How can a vector be created? What are the available data types?
# - How can subsets be extracted from vectors?
# - How does R treat missing values?
# - How can we deal with missing values in R?
#
# **Objectives**
# - Assign values to objects in R.
# - Learn how to name objects.
# - Use comments to inform script.
# - Solve simple arithmetic operations in R.
# - Call functions and use arguments to change their default options.
# - Inspect the content of vectors and manipulate their content.
# - Subset and extract values from vectors.
# - Analyze vectors with missing data.
# - Define the following terms as they relate to R: object, vector, assign, call, function.

# ## Variable
#
# In R, information is stored as variables. These variables can be numerical, sets, text, dates, and can be more complex like graphs and datasets (which will be covered later)
#
# To store something as a variable, we can use either "<-" or "=". But most commonly used is the "<-", in general its good practice.
# > NOTE: "<-" and "->" are not the same thing; "->" assigns the value on the left to the name on the right.

temperature <- 27

# "<-" is an assignment operator. It assigns the value(s) on the right to the object(s) on the left. The example above creates the symbol called temperature and assigns it the numeric value of 27. Some R users would say “temperature gets 27.” temperature is now a numeric vector with one element.
Or you could say temperature is a numeric vector, and the first element is the number 27.
#
# When assigning a value to an object, R does not print anything to the console. You can force R to print the value by using parentheses OR by typing the object name

# +
#method 1
(temperature <- 27 )

#method 2
temperature
# -

# > NOTE: You can give your objects any name, but it is recommended that the names be explicit and not too long. Here are some tips for assigning values:
#
# -Do not use names of functions that already exist in R: There are some names that cannot be used because they are the names of fundamental functions in R (e.g., if, else, for — see ?Reserved for a complete list). In general, even if it’s allowed, it’s best to not use other function names (e.g., c, T, mean, data, df, weights). If in doubt, check the help to see if the name is already in use.
#
# -R is case sensitive: age is different from Age and y is different from Y.
#
# -No blank spaces or symbols other than underscores: R users get around this in a couple of ways, either through capitalization (e.g. myData) or underscores (e.g. my_data). It’s also best to avoid dots (.) within an object name as in my.dataset. There are many functions in R with dots in their names for historical reasons, but dots have a special meaning in R (for methods) and other programming languages.
#
# -Do not begin with numbers or symbols: 2x is not valid, but x2 is.
#
# -Be descriptive, but make your variable names short: It’s good practice to be descriptive with your variable names. If you’re loading in a lot of data, choosing myData or x as a name may not be as helpful as, say, ebookUsage. Finally, keep your variable names short, since you will likely be typing them in frequently.

# We can store strings and text as variables. This is done by using "". Store your first and last name as well as your favourite colour below in between the quotes.
nameFirst <- " " nameLast <- " " colour <- " " # We may include variables in arithmetic expressions. These variables need to have a value that was assigned previously by some assignment operation. R will substitute the variable name by the respective value when solving the expression that contain these variables. If you include a variable that does not exist (e.g. you mispelled the name),R will generate an error saying that it does not know the value of that variable tempFarh <- temperature * 9/5 +32 #incorrect call tempfarh # If you sometimes forget what variables you already used, you can use the ls() function to list all what is currently stored in memory. It will only give you the variable names and not the values stored. ls() # We can also change an object’s value by assigning it a new one. This overwrites the previous value without prompting you, so be careful! Also, assigning a value to one object does not change the values of other objects. # # Lets take a look at an example # + #first temperature set to 27 degrees Celsius (temperature <- 27) #convert to farhenheit (tempFarh <- temperature * 9/5 +32) #changing temperture to 88 degrees Celsius (temperature <- 88) # - # If we run tempFarh, what will we get? # - 80.6 # - 190.4 # - Error # - No idea tempFarh # To correct this, we re-run the equation for the conversion. (tempFarh <- temperature * 9/5 +32) # ## Commenting # # All programming languages allow the programmer to include comments in their code. To do this in R we use the # character. Anything to the right of the # sign and up to the end of the line is treated as a comment and will not be evaluated by R. You can start lines with comments or include them after any code on the line. # # Comments are essential to helping you remember what your code does, and explaining it to others. 
Commenting code, along with documenting how data is collected and explaining what each variable represents, is essential to reproducible research # + temperatureMorning <- 23 #morning temperature taken at 7AM temperatureAfternoon <- 33 #afternoon temperature taken at 1PM temperatureEvening <-20 #evening temperature taken at 5PM tempertureNight <- 5 #night temperature taken at 9PM tempFarh <- temperature * 9/5 +32 #convert a Celcius temp to Fahrenhait # - # RStudio makes it easy to comment or uncomment a paragraph: after selecting the lines you want to comment, press at the same time on your keyboard Ctrl + Shift + C. If you only want to comment out one line, you can put the cursor at any location of that line (i.e. no need to select the whole line), then press Ctrl + Shift + C. # + temperatureMorning <- 23 #morning temperature taken at 7AM temperatureAfternoon <- 33 #afternoon temperature taken at 1PM temperatureEvening <-20 #evening temperature taken at 5PM tempertureNight <- 5 #night temperature taken at 9PM tempFarh <- temperature * 9/5 +32 #convert a Celcius temp to Fahrenhait # - # #### EXERCISE 2.1 # # Create two variables length and width and assign them any values. Create a third variable area and give it a value based on the current values of length and width. #Exercise 2.1 length <- 2.5 width <- 3.2 area <- length * width area # What happens when you change the values of length and width with the code below? length <- 7 width <- 6 area # What happened here is that you did not update the area variable. You need to run the area equation again to reassign area to a new length and width. # ## Functions and their arguments # # R is a “functional programming language,” meaning it contains a number of functions you use to do something with your data. Functions are “canned scripts” that automate more complicated sets of commands. Many functions are predefined, or can be made available by importing R packages as we saw in the “Before We Start” lesson. 
# # Call a function on a variable by entering the function into the console, followed by parentheses and the variables. A function usually gets one or more inputs called arguments. For example, if you want to take the sum of 3 and 4, you can type in sum(3, 4). In this case, the arguments must be a number, and the return value (the output) is the sum of those numbers. An example of a function call is: # # sum(3,4) # The function is.function() will check if an argument is a function in R. If it is a function, it will print TRUE to the console. # # Functions can be nested within each other. For example, sqrt() takes the square root of the number provided in the function call. Therefore you can run sum(sqrt(9), 4) to take the sum of the square root of 9 and add it to 4. # # Typing a question mark before a function will pull the help page up in the Navigation Pane in the lower right. Type ?sum to view the help page for the sum function. You can also call help(sum). This will provide the description of the function, how it is to be used, and the arguments. # # In the case of sum(), the ellipses . . . represent an unlimited number of numeric elements. # + #?sum() # - is.function(sum) # check to see if sum() is a function sum(3, 4, 5, 6, 7) # sum takes an unlimited number (. . .) of numeric elements # Here are examples of some other frequently used functions: # + #sqrt(x) : Returns the square root of x sqrt(543) #log(x, base=y) : Takes the logarithm of x with base y; if base is not specified, returns the natural logarithm log(12.3) log(8, base=2) #sin(x) : returns sine of an angle NOTE: angles are in radians sin(0.2) # - # Lets complicate things a little. You can use functions within functions, but be careful of your brackets! When possible declare your variable externally, then call on them in the funtion. This function here uses the unit circle to find the angle. 
We are using the coordinates # # ( $\frac{\sqrt{3}}{2}$ , $\frac{1}{2}$ ) # This will return an result close to $\frac {\pi}{6}$ or 30$^{\circ}$ # # The point here is not to quiz you on the unit circle, but rather to show that it is best to break down complicated functions. # + #method 1 a=0.5 b=sqrt(3)/2 sin( a/b ) #method 2 sin( (0.5) / (sqrt(3)/2) ) # - # ## Arguments # Some functions take arguments which may either be specified by the user, or, if left out, take on a default value. However, if you want something specific, you can specify a value of your choice which will be used instead of the default. This is called passing an argument to the function. # # For example, sum() takes the argument option na.rm. If you check the help page for sum (call ?sum), you can see that na.rm requires a logical (TRUE/FALSE) value specifying whether NA values (missing data) should be removed when the argument is evaluated. # # By default, na.rm is set to FALSE, so evaluating a sum with missing values will return NA: sum(3, 4, NA) # Even though we do not see the argument here, it is operating in the background, as the NA value remains. 3 + 4 + NA is NA. # # But setting the argument na.rm to TRUE will remove the NA: sum(3, 4, NA, na.rm = TRUE) # It is very important to understand the different arguments that functions take, the values that can be added to those functions, and the default arguments. Arguments can be anything, not only TRUE or FALSE, but also other objects. Exactly what each argument means differs per function, and must be looked up in the documentation. # # It’s good practice to put the non-optional arguments first in your function call, and to specify the names of all optional arguments. If you don’t, someone reading your code might have to look up the definition of a function with unfamiliar arguments to understand what you’re doing. # ## Vectors # # A vector is the most common and basic data type in R, and is pretty much the workhorse of R. 
A vector is a sequence of elements of the same type. Vectors can only contain “homogenous” data–in other words, all data must be of the same type. The type of a vector determines what kind of analysis you can do on it. For example, you can perform mathematical operations on numeric objects, but not on character objects. # # We can assign a series of values to a vector using the c() function. c() stands for combine. If you read the help files for c() by calling help(c), you can see that it takes an unlimited . . . number of arguments. # # For example we can create a vector of checkouts for a collection of books and assign it to a new object checkouts: checkouts <- c(25, 15, 18) checkouts # A vector can also contain characters. For example, we can have a vector of the book titles (title) and authors (author): title <- c("Macbeth","Dracula","1984") # The quotes around “Macbeth”, etc. are essential here. Without the quotes R will assume there are objects called Macbeth and Dracula in the environment. As these objects don’t yet exist in R’s memory, there will be an error message. # # There are many functions that allow you to inspect the content of a vector. length() tells you how many elements are in a particular vector: length(checkouts) # print the number of values in the checkouts vector # An important feature of a vector, is that all of the elements are the same type of data. The function class() indicates the class (the type of element) of an object: class(checkouts) class(title) # Type ?str into the console to read the description of the str function. You can call str() on an R object to compactly display information about it, including the data type, the number of elements, and a printout of the first few elements. 
str(checkouts) str(title) # You can use the c() function to add other elements to your vector: author <- "Stoker" author <- c(author, "Orwell") # add to the end of the vector author <- c("Shakespeare", author) author # In the first line, we create a character vector author with a single value "Stoker". In the second line, we add the value "Orwell" to it, and save the result back into author. Then we add the value "Shakespeare" to the beginning, again saving the result back into author. # # We can do this over and over again to grow a vector, or assemble a dataset. As we program, this may be useful to add results that we are collecting or calculating. # # Use this to open a tab in the Script Pane (upper left) to view your data. This is helpful if you have a very long vector you need to browse. # # An **atomic vector** is the simplest R data type and is a linear vector of a single type. Above, we saw 2 of the 6 main atomic vector types that R uses: "character" and "numeric" (or "double"). These are the basic building blocks that all R objects are built from. The other 4 atomic vector types are: # # - "logical" for TRUE and FALSE (the boolean data type) # - "integer" for integer numbers (e.g., 2L, the L indicates to R that it’s an integer) # - "complex" to represent complex numbers with real and imaginary parts (e.g., 1 + 4i) and that’s all we’re going to say about them # - "raw" for bitstreams that we won’t discuss further # # # You can check the type of your vector using the typeof() function and inputting your vector as the argument. # # Vectors are one of the many data structures that R uses. Other important ones are lists (list), matrices (matrix), data frames (data.frame), factors (factor) and arrays (array). # #### Exercise 2.3 # # + #Exercise 3.2 # - # You’ve probably noticed that objects of different types get converted into a single, shared type within a vector. In R, we call converting objects from one class into another class coercion. 
These conversions happen according to a hierarchy, whereby some types get preferentially coerced into other types. This hierarchy is: logical < integer < numeric < complex < character < list.
#
# You can also coerce a vector to be a specific data type with as.character(), as.logical(), as.numeric(), etc. For example, to coerce a number to a character:

x <- as.character(200)
x

# We can also call class()

class(x)

# > NOTE : if we try to add a number to x, we will get an error message non-numeric argument to binary operator–in other words, x is non-numeric and cannot be added to a number.

# ## Removing Objects from the environment
#
# To remove an object from your R environment, use the rm() function. Remove multiple objects with rm(list = c("add", "objects", "here")), adding the objects in c() using quotation marks. To remove all objects, use rm(list = ls()) or click the broom icon in the Environment Pane, next to “Import Dataset.”

x <- 5
y <- 10
z <- 15
rm(x) # remove x
rm(list =c("y", "z")) # remove y and z
rm(list = ls()) # remove all objects
_episodes/Section 2.1-1.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: SageMath 9.5
#     language: sage
#     name: sagemath-9.5
# ---

# # Authentication protocols

IS_DEBUG = 1


def trace(*args, **kwargs):
    """Debug tracing: forward to print() only when the module-level IS_DEBUG flag is set."""
    # Reading a global needs no `global` declaration.
    if IS_DEBUG:
        print('[TRACE]', end=' ')
        print(*args, **kwargs)

# ----

# +
# GOST R 34.11-2012 "Streebog" hash function (256-bit digest)
from pygost.gost34112012256 import GOST34112012256

# For rendering binary digests as hex
import binascii

# Cryptographically secure random bytes (challenges, salts, keys)
from Crypto.Random import get_random_bytes
# -

# ## PAP protocol

# ![PAP Protocol](../images/PAP_1.png)

# -----

# ----

# +
def streebog_hash(*args) -> bytes:
    """Return the Streebog-256 digest of the concatenation of all byte-string arguments."""
    hasher = GOST34112012256()
    for arg in args:
        hasher.update(arg)
    return hasher.digest()


def bytes_as_hex(b: bytes) -> str:
    """Render binary data as a lowercase hex string."""
    return binascii.hexlify(b).decode()
# -

# ------

class PAPServer(object):
    """PAP server: stores password hashes and verifies plaintext passwords sent by clients."""

    def __init__(self):
        """Initialize the server with an empty user database."""
        self._db = {}  # { login: hash(password) }

    def register_user(self, login: str, password: str):
        """
        Register a user given a login and password.  Returns nothing.
        Raises ValueError if the user already exists.
        """
        trace('[PAP Server]', f'Attempt to register user with credentials {login}:{password}')
        if login in self._db:
            trace('[PAP Server]', 'User already exists')
            raise ValueError(f'User {login} is already registered')
        #
        # Store only the password hash, never the plaintext.
        #
        self._db[login] = streebog_hash(password.encode())
        trace('[PAP Server]', f'User successfully registered, password hash: {bytes_as_hex(self._db[login])}')

    def login(self, login: str, password: str) -> bool:
        """
        Authenticate a user by login and plaintext password.
        Returns True on success, False otherwise.
        """
        trace('[PAP Server]', f'Attempt to login with credentials {login}:{password}')
        if login not in self._db:
            trace('[PAP Server]', "User doesn't exist")
            return False
        #
        # User exists: hash the supplied password and compare with the stored hash.
        #
        pass_hash = streebog_hash(password.encode())
        real_hash = self._db[login]
        trace('[PAP Server]', f'Password hash: {bytes_as_hex(pass_hash)}')
        trace('[PAP Server]', f'Stored hash: {bytes_as_hex(real_hash)}')
        return pass_hash == real_hash


class PAPClient(object):
    """PAP client: holds a login/password pair and talks to a PAPServer."""

    def __init__(self, login, password):
        """Create a client with the given login and password."""
        self._login = login
        self._passwd = password
        trace('[PAP Client]', f'User with credentials {self._login}:{self._passwd} created')

    def register(self, srv: PAPServer):
        """Register this user on the given server; failures are traced, not raised."""
        try:
            srv.register_user(self._login, self._passwd)
        except Exception as e:
            trace(e)

    def login(self, srv: PAPServer):
        """Attempt to log in on the given server and report the outcome."""
        if srv.login(self._login, self._passwd):
            print('[PAP Client]', f'User {self._login} logged in successfully')
        else:
            print('[PAP Client]', f'Wrong username or password')

# ----

# Create the server
server = PAPServer()

# Create two users:
# - Alice - a valid user
# - Eve - a user trying to impersonate Alice
alice = PAPClient('Alice', 'Trust_m3_0r_n0t')
eve = PAPClient('Alice', 'try_t0_gu3$$')

# Register the valid user
alice.register(server)

# Log in with valid credentials
alice.login(server)

# Try to log in with wrong credentials
eve.login(server)

# -----

# ## CHAP protocol

# ![CHAP Protocol](../images/CHAP_1.png)

# Mapping between the description and the code:
# - the value N (N1, N2) is called the challenge.
#
# ----

class CHAPServer(object):
    """CHAP server: challenge-response authentication, password never sent over the wire."""

    def __init__(self):
        """Initialize the server with an empty user database."""
        self._db = {}  # { login: [password_bytes, pending_challenge_or_None] }

    def register_user(self, login, password):
        """
        Register a user given a login and password.  Returns nothing.
        Raises ValueError if the user already exists.
        """
        trace('[CHAP Server]', f'Attempt to register user with credentials {login}:{password}')
        if login in self._db:
            trace('[CHAP Server]', 'User already exists')
            raise ValueError(f'User {login} is already registered')
        #
        # NOTE(review): CHAP requires the shared secret itself on the server,
        # so the plaintext password is stored (unlike PAP above).
        #
        self._db[login] = [password.encode(), None]
        trace('[CHAP Server]', f'User successfully registered')

    def login(self, login, response):
        """
        Authenticate a user by login and response = hash(challenge || password).
        Returns True on success, False otherwise.
        """
        trace('[CHAP Server]', f'''Attempt to login with:
        {login = },
        response = {bytes_as_hex(response)}''')
        if login not in self._db:
            trace('[CHAP Server]', "User doesn't exist")
            return False
        #
        # User exists: take the pending challenge and clear it (single use).
        #
        challenge = self._db[login][1]
        self._db[login][1] = None
        if challenge is None:
            trace('[CHAP Server]', f'No challenge was generated for {login = }')
            return False
        #
        # Recompute the expected hash and compare with the response.
        #
        challenge_hash = streebog_hash(challenge, self._db[login][0])
        trace('[CHAP Server]', f'Calculated hash: {bytes_as_hex(challenge_hash)}')
        trace('[CHAP Server]', f'Received hash: {bytes_as_hex(response)}')
        return challenge_hash == response

    def generate_challenge(self, login):
        """
        Generate a challenge for the given login.
        Returns a random challenge on success, None otherwise.
        """
        trace('[CHAP Server]', f'Attempt to generate challege for: {login = }')
        if login not in self._db:
            trace('[CHAP Server]', "User doesn't exist")
            return None
        #
        # User exists: generate a fresh random challenge and remember it.
        #
        challenge = get_random_bytes(16)
        self._db[login][1] = challenge
        trace('[CHAP Server]', f'Generated challenge = {bytes_as_hex(challenge)}')
        return challenge


class CHAPClient(object):
    """CHAP client: answers server challenges with hash(challenge || password)."""

    def __init__(self, login, password):
        """Create a client with the given login and password."""
        self._login = login
        self._passwd = password
        trace('[CHAP Client]', f'User with credentials {self._login}:{self._passwd} created')

    def register(self, srv: CHAPServer):
        """Register this user on the given server; failures are traced, not raised."""
        try:
            srv.register_user(self._login, self._passwd)
        except Exception as e:
            trace(e)

    def login(self, srv: CHAPServer):
        """Attempt a challenge-response login on the given server and report the outcome."""
        #
        # Obtain a challenge from the server.
        #
        challenge = srv.generate_challenge(self._login)
        if challenge is None:
            print('[CHAP Client]', f'Wrong username or password')
            return
        #
        # Hash the challenge with the password and attempt to log in.
        #
        response = streebog_hash(challenge, self._passwd.encode())
        if srv.login(self._login, response):
            print('[CHAP Client]', f'User {self._login} logged in successfully')
        else:
            print('[CHAP Client]', f'Wrong username or password')

# ----

# Create the server
server = CHAPServer()

# Create two users:
# - Alice - a valid user
# - Eve - a user trying to impersonate Alice
alice = CHAPClient('Alice', 'Trust_m3_0r_n0t')
eve = CHAPClient('Alice', 'try_t0_gu3$$')

# Register the valid user
alice.register(server)

# Log in with valid credentials
alice.login(server)

# Try to log in with wrong credentials
eve.login(server)

# ----

# ## Mutual (two-way) CHAP protocol

class ModifiedCHAPServer(object):
    """Mutual CHAP server: authenticates clients and proves its own identity to them."""

    def __init__(self):
        """Initialize the server: empty user database plus the server's own credentials."""
        self._db = {}  # { login: [password_bytes, pending_challenge_or_None] }
        self._server_login = 'Alice'
        # NOTE(review): placeholder value (redacted in the original source);
        # any demo password works here.
        self._server_pass = '<PASSWORD>@'

    def register_user(self, login, password):
        """
        Register a user given a login and password.
        Returns the server's login and password (so the client can verify the server later).
        Raises ValueError if the user already exists.
        """
        trace('[Modified CHAP Server]', f'Attempt to register user with credentials {login}:{password}')
        if login in self._db:
            trace('[Modified CHAP Server]', 'User already exists')
            raise ValueError(f'User {login} is already registered')
        #
        # Store the shared secret for this user.
        #
        self._db[login] = [password.encode(), None]
        trace('[Modified CHAP Server]', f'User successfully registered')
        return self._server_login, self._server_pass

    def login(self, login, response, client_challenge):
        """
        Authenticate a user by login, response = hash(challenge || password),
        and the client's own challenge for the server.
        Returns (server_login, hash(client_challenge || server_password)) on success,
        None otherwise.
        """
        trace('[Modified CHAP Server]', f'''Attempt to login with:
        {login = },
        response = {bytes_as_hex(response)},
        client_challenge = {bytes_as_hex(client_challenge)}''')
        if login not in self._db:
            trace('[Modified CHAP Server]', "User doesn't exist")
            return None
        #
        # User exists: take the pending challenge and clear it (single use).
        #
        challenge = self._db[login][1]
        self._db[login][1] = None
        if challenge is None:
            trace('[Modified CHAP Server]', f'No challenge was generated for {login = }')
            return None
        #
        # Recompute the expected hash and compare with the response.
        #
        challenge_hash = streebog_hash(challenge, self._db[login][0])
        trace('[Modified CHAP Server]', f'Calculated hash: {bytes_as_hex(challenge_hash)}')
        trace('[Modified CHAP Server]', f'Received hash: {bytes_as_hex(response)}')
        if challenge_hash != response:
            return None
        #
        # Client verified: now answer the client's challenge to prove our own identity.
        #
        client_challenge_hash = streebog_hash(client_challenge, self._server_pass.encode())
        return self._server_login, client_challenge_hash

    def generate_challenge(self, login):
        """
        Generate a challenge for the given login.
        Returns a random challenge on success, None otherwise.
        """
        trace('[Modified CHAP Server]', f'Attempt to generate challege for: {login = }')
        if login not in self._db:
            trace('[Modified CHAP Server]', "User doesn't exist")
            return None
        #
        # User exists: generate a fresh random challenge and remember it.
        #
        challenge = get_random_bytes(16)
        self._db[login][1] = challenge
        trace('[Modified CHAP Server]', f'Generated challenge = {bytes_as_hex(challenge)}')
        return challenge


class ModifiedCHAPClient(object):
    """Mutual CHAP client: answers the server's challenge and verifies the server in return."""

    def __init__(self, login, password):
        """Create a client with the given login and password and an empty database of known servers."""
        self._login = login
        self._passwd = password
        self._servers_db = {}  # { server_login: server_password }
        trace('[Modified CHAP Client]', f'User with credentials {self._login}:{self._passwd} created')

    def register(self, srv: ModifiedCHAPServer):
        """Register on the server and remember the server's credentials for mutual auth."""
        try:
            login, password = srv.register_user(self._login, self._passwd)
            self._servers_db[login] = password
        except Exception as e:
            trace(e)

    def login(self, srv: ModifiedCHAPServer):
        """Attempt a mutual challenge-response login on the given server and report the outcome."""
        #
        # Obtain a challenge from the server.
        #
        srv_challenge = srv.generate_challenge(self._login)
        if srv_challenge is None:
            print('[Modified CHAP Client]', f'Wrong username or password')
            return
        #
        # Hash the server's challenge with our password, generate our own
        # challenge for the server, and attempt to log in.
        #
        response = streebog_hash(srv_challenge, self._passwd.encode())
        challenge = self.generate_challenge()
        srv_data = srv.login(self._login, response, challenge)
        if srv_data is not None:
            srv_login, srv_response = srv_data
            if srv_login not in self._servers_db:
                print('[Modified CHAP Client]', f'Server {srv_login} not registered')
                return
            #
            # Server is known: verify its answer to our challenge.
            #
            srv_challenge_hash = streebog_hash(challenge, self._servers_db[srv_login].encode())
            trace('[Modified CHAP Client]', f'Calculated hash: {bytes_as_hex(srv_challenge_hash)}')
            trace('[Modified CHAP Client]', f'Received hash: {bytes_as_hex(srv_response)}')
            if srv_challenge_hash == srv_response:
                print('[Modified CHAP Client]', f'User {self._login} logged in successfully')
            else:
                print('[Modified CHAP Client]', f'Server {srv_login} is not authenticated')
            return
        print('[Modified CHAP Client]', f'Wrong username or password')

    @staticmethod
    def generate_challenge():
        """Generate a random 16-byte challenge."""
        return get_random_bytes(16)

# ------

# Create the server
server = ModifiedCHAPServer()

# Create two users:
# - Alice - a valid user
# - Eve - a user trying to impersonate Alice
alice = ModifiedCHAPClient('Alice', 'Trust_m3_0r_n0t')
eve = ModifiedCHAPClient('Alice', 'try_t0_gu3$$')

# Register the valid user
alice.register(server)

# Log in with valid credentials
alice.login(server)

# Try to log in with wrong credentials
eve.login(server)

# ----

# ## S/KEY protocol

# ![S/KEY Protocol](../images/SKEY_1.png)

# Mapping between the description and the code:
# - the value R is called the salt;
# - the value K is called the key;
# - the value I is called the transaction number;
# - the value N is called the number of rounds (`SKEYClient.ROUNDS`).
#
# ----

def generate_passwords(key, salt, number_of_passwords):
    """
    Generate the S/KEY hash chain: number_of_passwords successive Streebog
    hashes starting from key || salt.  Returns the list of passwords
    (element i is hash applied i+1 times).
    """
    result = []
    intermediate = key + salt
    # FIX: iterate number_of_passwords times.  The original looped over
    # SKEYClient.ROUNDS, silently ignoring this parameter (and forward-
    # referencing a class defined later in the file).
    for _ in range(number_of_passwords):
        intermediate = streebog_hash(intermediate)
        result.append(intermediate)
    return result

# ----

class SKEYServer(object):
    """S/KEY server: one-time-password authentication with a finite hash chain per user."""

    def __init__(self, rounds):
        """Initialize the server with an empty user database and the chain length."""
        self._db = {}  # { login: [key, transaction_number, salt, passwords, current_password]}
        self._rounds = rounds

    def register_user(self, login, key):
        """
        Register a user given a login and key.
        Returns the salt generated for this client.
        Raises ValueError if the user already exists.
        """
        trace('[S/KEY Server]', f'Attempt to register user {login} with key {bytes_as_hex(key)}')
        if login in self._db:
            trace('[S/KEY Server]', 'User already exists')
            raise ValueError(f'User {login} is already registered')
        self._db[login] = [key, 1, None, [], None]
        salt = self.update_salt(login)
        trace('[S/KEY Server]', f'User successfully registered')
        return salt

    def login(self, login, nth_password):
        """
        Authenticate a user by login and the current one-time password.
        Returns 1 on success, 0 on success when the salt must be refreshed
        before the next login (chain exhausted), -1 otherwise.
        """
        # FIX: the f-string below was corrupted to '{<PASSWORD>(nth_password)}'
        # (a redaction artifact, and a syntax error); restored to bytes_as_hex
        # by parallel with every other trace call.
        trace('[S/KEY Server]', f'''Attempt to login with:
        {login = },
        nth_password = {bytes_as_hex(nth_password)}''')
        if login not in self._db:
            trace('[S/KEY Server]', "User doesn't exist")
            return -1
        #
        # If the password matches, advance the transaction counter and move to
        # the previous link of the chain; a counter overflow means the chain is
        # used up and the salt must be regenerated.
        #
        if nth_password != self._db[login][4]:
            return -1
        new_transaction_number = self._increment_transaction_number(login)
        if -1 == new_transaction_number:
            return 0
        self._db[login][4] = self._db[login][3][-new_transaction_number]
        return 1

    def get_transaction_number(self, login):
        """Return the transaction number for the given user, or None if the user is unknown."""
        trace('[S/KEY Server]', f'Attempt to get transaction number for: {login = }')
        if login not in self._db:
            trace('[S/KEY Server]', "User doesn't exist")
            return None
        return self._db[login][1]

    def update_salt(self, login):
        """
        Regenerate the salt (and password chain) for a user and reset the
        transaction number.  Assumes the user exists — no existence check is
        made, as this is only called for registered users.  Returns the new salt.
        """
        trace('[S/KEY Server]', f'Attempt to generate salt for {login = }')
        self._generate_salt_and_passwords(login)
        self._db[login][1] = 1
        trace('[S/KEY Server]', f'Salt for user {login} successfully updated')
        return self._db[login][2]

    def _generate_salt_and_passwords(self, login):
        """
        Generate a fresh salt and the full password chain for a user.
        Assumes the user exists (internal helper).  Returns the salt.
        """
        salt = get_random_bytes(16)
        self._db[login][2] = salt
        trace('[S/KEY Server]', f'Generated salt = {bytes_as_hex(salt)}')
        self._db[login][3] = generate_passwords(self._db[login][0], self._db[login][2], self._rounds)
        # The first expected password is the last link of the chain.
        self._db[login][4] = self._db[login][3][-1]
        trace('[S/KEY Server]', 'Passords generated')
        return salt

    def _increment_transaction_number(self, login):
        """
        Increment the transaction counter for a user.
        Returns the new counter value, or -1 if the chain is exhausted and the
        salt must be refreshed.
        """
        self._db[login][1] += 1
        if self._db[login][1] > self._rounds:
            trace('[S/KEY Server]', 'Passwords number exceeded')
            return -1
        return self._db[login][1]


class SKEYClient(object):
    """S/KEY client: derives the same hash chain from its key and the server-provided salt."""

    ROUNDS = 2  # length of the one-time-password chain

    def __init__(self, login, key):
        """Create a client with the given login and key; salt and passwords start empty."""
        self._login = login
        self._key = key
        self._salt = None
        self._passwords = []
        trace('[S/KEY Client]', f'User {self._login} created, key = {bytes_as_hex(key)}')

    def register(self, server: SKEYServer):
        """Register on the server and derive the password chain from the returned salt."""
        try:
            salt = server.register_user(self._login, self._key)
            self._passwords = generate_passwords(self._key, salt, SKEYClient.ROUNDS)
            trace('[S/KEY Client]', 'Passwords generated')
        except Exception as e:
            trace(e)

    def login(self, server: SKEYServer):
        """Attempt a one-time-password login, refreshing the salt when the chain runs out."""
        transaction_number = server.get_transaction_number(self._login)
        if transaction_number is None:
            print('[S/KEY Client]', f'Wrong username or password')
            return
        trace('[S/KEY Client]', f'Transaction number {transaction_number}')
        # Send the chain link matching the server's transaction number.
        login_result = server.login(self._login, self._passwords[-transaction_number])
        if login_result == 1:
            print('[S/KEY Client]', f'User {self._login} logged in successfully')
        elif login_result == 0:
            # Logged in, but the chain is exhausted: fetch a new salt and rebuild it.
            print('[S/KEY Client]', f'User {self._login} logged in successfully')
            new_salt = server.update_salt(self._login)
            self._passwords = generate_passwords(self._key, new_salt, SKEYClient.ROUNDS)
            trace('[S/KEY Client]', f'Salt updated successfully')
        else:
            print('[S/KEY Client]', f'Wrong username or password')

# ----

# Create the server
server = SKEYServer(SKEYClient.ROUNDS)

# Create a random key (a fixed one would do for a demo,
# but let it be random)
key = get_random_bytes(16)
bytes_as_hex(key)

# Create two users:
# - Alice - a valid user
# - Eve - a user trying to impersonate Alice
alice = SKEYClient('Alice', key)
eve = SKEYClient('Alice', b'Unknown key')

# Register the user
alice.register(server)

# Log in as the user
alice.login(server)

# Once more: this should be the last authentication attempt,
# triggering an automatic salt refresh
alice.login(server)

# And log in again; the salt should be different by now.
alice.login(server)

# Now try to log in as the invalid user.
# Eve does not know the key, so her passwords are generated from an
# unknown key (and the salt is unknown to her as well)
eve._passwords = generate_passwords(eve._key, b'\x00', SKEYClient.ROUNDS)
eve.login(server)
Lab 1/Lab 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Time Series Chains # # ## Forecasting Web Query Data with Anchored Time Series Chains (ATSC) # # This example is adapted from the [Web Query Volume case study](http://www.www2015.it/documents/proceedings/proceedings/p721.pdf) and utilizes the main takeaways from the [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf) research paper. # ## Getting Started # # Let's import the packages that we'll need to load, analyze, and plot the data. # + # %matplotlib inline import pandas as pd import numpy as np import stumpy from scipy.io import loadmat import matplotlib.pyplot as plt from matplotlib.patches import Rectangle, FancyArrowPatch import urllib import ssl import io import itertools # - # ## What are Time Series Chains? # # Time series chains may be informally considered as motifs that evolve or drift in some direction over time. The figure below illustrates the difference # between [time series motifs](Tutorial_1.ipynb) (left) and time series chains (right). 
# + def change_plot_size(width, height, plt): fig_size = plt.rcParams["figure.figsize"] fig_size[0] = width fig_size[1] = height plt.rcParams["figure.figsize"] = fig_size change_plot_size(20, 6, plt) # - x = np.random.rand(20) y = np.random.rand(20) n = 10 motifs_x = 0.5 * np.ones(n) + np.random.uniform(-0.05, 0.05, n) motifs_y = 0.5 * np.ones(n) + np.random.uniform(-0.05, 0.05, n) sin_x = np.linspace(0, np.pi/2, n+1) sin_y = np.sin(sin_x)/4 chains_x = 0.5 * np.ones(n+1) + 0.02 * np.arange(n+1) chains_y = 0.5 * np.ones(n+1) + sin_y fig, axes = plt.subplots(nrows=1, ncols=2) axes[0].scatter(x, y, color='lightgrey') axes[0].scatter(motifs_x, motifs_y, color='red') axes[1].scatter(x, y, color='lightgrey') axes[1].scatter(chains_x[0], chains_y[0], edgecolor='red', color='white') axes[1].scatter(chains_x[1:n], chains_y[1:n], color='red') axes[1].scatter(chains_x[n], chains_y[n], edgecolor='red', color='white', marker='*', s=200) plt.show() # Above, we are visualizing time series subsequences as points in high-dimensional space. Shown on the left is a time series motif and it can be thought of as a collection of points that approximate a platonic ideal. In contrast, depicted on the right, is a time series chain and it may be thought of as an evolving trail of points in the space. Here, the open red circle represents the first link in the chain, the anchor. Both motifs and chains have the property that each subsequence is relatively close to its nearest neighbor. However, the motif set (left) also has a relatively small diameter. In contrast, the set of points in a chain (right) has a diameter that is much larger than the mean of each member’s distance to its nearest neighbor and, moreover, the chain has the important property of <b>directionality</b>. For example, in the case of a motif, if an additional member was added to the motif set, its location will also be somewhere near the platonic ideal, but independent of the previous subsequences. 
In contrast, in the case of a chain, the location of the next member of the chain would be somewhere after the last red circle, possibly where the open red star is located. # ## A Simplified Example # # Adapted from the [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf) paper, consider the following time series: # # 47, 32, 1, 22, 2, 58, 3, 36, 4, -5, 5, 40 # # Assume that the subsequence length is 1 and the distance between two subsequences is simply the absolute difference # between them. To be clear, we are making these simple and pathological assumptions here just for the purposes of # elucidation; we are actually targeting much longer subsequence lengths and using z-normalized Euclidean distance in our # applications. To capture the directionality of a time series chain, we need to store the left and right nearest neighbor information into the left (IL) and right (IR) matrix profile indices: # # | Index | Value | Left Index (IL) | Right Index (IR) | # |-------|-------|-----------------|------------------| # | 1 | 47 | - | 12 | # | 2 | 32 | 1 | 8 | # | 3 | 1 | 2 | 5 | # | 4 | 22 | 2 | 8 | # | 5 | 2 | 3 | 7 | # | 6 | 58 | 1 | 12 | # | 7 | 3 | 5 | 9 | # | 8 | 36 | 2 | 12 | # | 9 | 4 | 7 | 11 | # | 10 | -5 | 3 | 11 | # | 11 | 5 | 9 | 12 | # | 12 | 40 | 8 | - | # # In this vertical/transposed representation, the `index` column shows the location of every subsequence in the time series, the `value` column contains the original numbers from our time series above, the `IL` column shows the left matrix profile indices, and `IR` is the right matrix profile indices. For example, `IR[2] = 8` means the right nearest neighbor of `index = 2` (which has `value = 32`) is at `index = 8` (which has `value = 36`). Similarly, `IL[3] = 2` means that the left nearest neighbor of `index = 3` (with `value = 1`) is at `index = 2` (which has `value = 32`). 
To better visualize the left/right matrix profile index, we use arrows to link every subsequence in the time series with its left and right nearest neighbors: # + nearest_neighbors = np.array([[1, 47, np.nan, 12], [2, 32, 1, 8], [3, 1, 2, 5], [4, 22, 2, 8], [5, 2, 3, 7], [6, 58, 1, 12], [7, 3, 5, 9], [8, 36, 2, 12], [9, 4, 7, 11], [10, -5, 3, 11], [11, 5, 9, 12], [12, 40, 8, np.nan]]) colors = [['C1', 'C1'], ['C2', 'C5'], ['C3', 'C5'], ['C4', 'C4'], ['C3', 'C2'], ['C5', 'C3'], ['C3', 'C2'], ['C2', 'C1'], ['C3', 'C2'], ['C6', 'C1'], ['C6', 'C2'], ['C1', 'C1']] style="Simple, tail_width=0.5, head_width=6, head_length=8" kw = dict(arrowstyle=style, connectionstyle="arc3, rad=-.5",) xs = np.arange(nearest_neighbors.shape[0]) + 1 ys = np.zeros(nearest_neighbors.shape[0]) plt.plot(xs, ys, "-o", markerfacecolor="None", markeredgecolor="None", linestyle="None") x0, x1, y0, y1 = plt.axis() plot_margin = 5.0 plt.axis((x0 - plot_margin, x1 + plot_margin, y0 - plot_margin, y1 + plot_margin)) plt.axis('off') for x, y, nearest_neighbor, color in zip(xs, ys, nearest_neighbors, colors): plt.text(x, y, str(int(nearest_neighbor[1])), color="black", fontsize=20) # Plot right matrix profile indices if not np.isnan(nearest_neighbor[3]): arrow = FancyArrowPatch((x, 0.5), (nearest_neighbor[3], 0.5), color=color[0], **kw) plt.gca().add_patch(arrow) # Plot left matrix profile indices if not np.isnan(nearest_neighbor[2]): arrow = FancyArrowPatch((x, 0.0), (nearest_neighbor[2], 0.0), color=color[1], **kw) plt.gca().add_patch(arrow) plt.show() # - # An arrow pointing from a number to its right nearest neighbor (arrows shown above the time series) can be referred to as forward arrow and an arrow pointing from a number to its left nearest neighbor (arrows shown below the time series) can be referred to as a backward arrow. 
According to the formal definition of a time series chain (see [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf) for a thorough definition and discussion), every pair of consecutive subsequences in a chain must be connected by both a forward arrow and a backward arrow. A keen eye will spot the fact that the longest chain in our simplified example is: # + nearest_neighbors = np.array([[1, 47, np.nan, np.nan], [2, 32, np.nan, np.nan], [3, 1, np.nan, 5], [4, 22, np.nan, np.nan], [5, 2, 3, 7], [6, 58, np.nan, np.nan], [7, 3, 5, 9], [8, 36, np.nan, np.nan], [9, 4, 7, 11], [10, -5, np.nan, np.nan], [11, 5, 9, np.nan], [12, 40, np.nan, np.nan]]) colors = [['C1', 'C1'], ['C2', 'C5'], ['C3', 'C5'], ['C4', 'C4'], ['C3', 'C2'], ['C5', 'C3'], ['C3', 'C2'], ['C2', 'C1'], ['C3', 'C2'], ['C6', 'C1'], ['C6', 'C2'], ['C1', 'C1']] style="Simple, tail_width=0.5, head_width=6, head_length=8" kw = dict(arrowstyle=style, connectionstyle="arc3, rad=-.5",) xs = np.arange(nearest_neighbors.shape[0]) + 1 ys = np.zeros(nearest_neighbors.shape[0]) plt.plot(xs, ys, "-o", markerfacecolor="None", markeredgecolor="None", linestyle="None") x0, x1, y0, y1 = plt.axis() plot_margin = 5.0 plt.axis((x0 - plot_margin, x1 + plot_margin, y0 - plot_margin, y1 + plot_margin)) plt.axis('off') for x, y, nearest_neighbor, color in zip(xs, ys, nearest_neighbors, colors): plt.text(x, y, str(int(nearest_neighbor[1])), color="black", fontsize=20) # Plot right matrix profile indices if not np.isnan(nearest_neighbor[3]): arrow = FancyArrowPatch((x, 0.5), (nearest_neighbor[3], 0.5), color=color[0], **kw) plt.gca().add_patch(arrow) # Plot left matrix profile indices if not np.isnan(nearest_neighbor[2]): arrow = FancyArrowPatch((x, 0.0), (nearest_neighbor[2], 0.0), color=color[1], **kw) plt.gca().add_patch(arrow) plt.show() # - # The longest extracted chain is therefore 1 ⇌ 2 ⇌ 3 ⇌ 4 ⇌ 5. 
Note that we see a gradual monotonic increase in the data but, in reality, the increase or decrease in drift can happen in arbitrarily complex ways that can be detected by the time series chains approach. The key component of drifting is that the time series must contain chains with clear directionality. # # STUMPY is capable of computing: # # 1. anchored time series chains (ATSC) - grow a chain from a user-specified anchor (i.e., specific subsequence) # 2. all-chain set (ALLC) - a set of anchored time series chains (i.e., each chain starts with a particular subsequence) that are not subsumed by another longer chain # 3. unanchored time series chain(s) - the unconditionally longest chain within a time series (there could be more than one if there were chains with the same length) # # So, what does this mean in the context of a real time series? Let's take a look at a real example from web query data! # ## Retrieve the Data # # We will be looking at a noisy dataset that is under-sampled and has a growing trend, which will perfectly illustrate the idea regarding time series chains. The data contains a decade-long GoogleTrend query volume (collected weekly from 2004-2014) for the keyword Kohl’s, an American retail chain. First, we'll download the data, extract it, and insert it into a pandas dataframe. 
# + context = ssl.SSLContext() # Ignore SSL certificate verification for simplicity url = 'https://sites.google.com/site/timeserieschain/home/Kohls_data.mat?attredirects=0&revision=1' raw_bytes = urllib.request.urlopen(url, context=context).read() data = io.BytesIO(raw_bytes) mat = loadmat(data) mdata = mat['VarName1'] mdtype = mdata.dtype df = pd.DataFrame(mdata, dtype=mdtype, columns=['volume']) df.head() # - # ## Visualizing the Data plt.plot(df['volume'], color='black') plt.xlim(0, df.shape[0]+12) color = itertools.cycle(['white', 'gainsboro']) for i, x in enumerate(range(0, df.shape[0], 52)): plt.text(x+12, 0.9, str(2004+i), color="black", fontsize=20) rect = Rectangle((x, -1), 52, 2.5, facecolor=next(color)) plt.gca().add_patch(rect) plt.show() # The raw time series above displays ten years of web query volume for the keyword "Kohl's", where each alternating white and grey vertical band represents a 52 week period starting from 2004 to 2014. As depicted, the time series features a significant but unsurprising "end-of-year holiday bump". Relating back to time series chains, we can see that the bump is generally increasing over time and so we might be able to capture this when we compute the unanchored chain. # # However, as we learned above, in order to compute any time series chains, we also need the left and right matrix profile indices. Luckily for us, according to the docstring, the `stump` function not only returns the (bidirectional) matrix profile and the matrix profile indices in the first and second columns of the NumPy array, respectively, but the third and fourth columns consists of the left matrix profile indices and the right matrix profile indices, respectively: # ?stumpy.stump # ## Computing the Left and Right Matrix Profile Indices # # So, let's go ahead and compute the matrix profile indices and we'll set the window size, `m = 20`, which is the approximate length of a "bump". 
# Window size: the approximate length of one holiday "bump" (in weeks).
m = 20
mp = stumpy.stump(df['volume'], m=m)

# ## Computing the Unanchored Chain
#
# Now, with our left and right matrix profile indices in hand, we are ready to call the all-chain set function, `allc`, which not only returns the all-chain set but, as a freebie, it also returns the unconditionally longest chain, also known as the unanchored chain. The latter of which is really what we're most interested in.

# Columns 2 and 3 of `stump`'s output are the left and right matrix
# profile indices, respectively.
all_chain_set, unanchored_chain = stumpy.allc(mp[:, 2], mp[:, 3])

# ## Visualizing the Unanchored Chain

# +
plt.plot(df['volume'], linewidth=1, color='black')
# Highlight each link of the unanchored chain on top of the raw series.
for i in range(unanchored_chain.shape[0]):
    y = df['volume'].iloc[unanchored_chain[i]:unanchored_chain[i]+m]
    x = y.index.values
    plt.plot(x, y, linewidth=3)
# Alternate white/grey year bands (52 weeks each), labelled 2004 onwards.
color = itertools.cycle(['white', 'gainsboro'])
for i, x in enumerate(range(0, df.shape[0], 52)):
    plt.text(x+12, 0.9, str(2004+i), color="black", fontsize=20)
    rect = Rectangle((x, -1), 52, 2.5, facecolor=next(color))
    plt.gca().add_patch(rect)
plt.show()
# -

plt.axis('off')
# Plot the chain links side by side, each shifted right by (m+5)*i and
# re-based to its own minimum so their shapes can be compared.
for i in range(unanchored_chain.shape[0]):
    data = df['volume'].iloc[unanchored_chain[i]:unanchored_chain[i]+m].reset_index().values
    x = data[:, 0]
    y = data[:, 1]
    # Solid line marks Thanksgiving, dash-dot marks Christmas (see the
    # interpretation in the paragraph below).
    plt.axvline(x=x[0]-x.min()+(m+5)*i + 11, alpha=0.3)
    plt.axvline(x=x[0]-x.min()+(m+5)*i + 15, alpha=0.3, linestyle='-.')
    plt.plot(x-x.min()+(m+5)*i, y-y.min(), linewidth=3)
plt.show()

# The discovered chain shows that over the decade, the bump transitions from a smooth bump covering the period between Thanksgiving (solid vertical line) and Christmas (dashed vertical line), to a more sharply focused bump centered on Thanksgiving. This seems to reflect the growing importance of "Cyber Monday", a marketing term for the Monday after Thanksgiving. The phrase was created by marketing companies to persuade consumers to shop online. The term made its debut on November 28th, 2005 in a press release entitled “Cyber Monday Quickly Becoming One of the Biggest Online Shopping Days of the Year”.
Note that this date coincides with the first glimpse of the sharpening peak in our chain. # # It also appears that we may have “missed” a few links in the chain. However, note that the data is noisy and undersampled, and the “missed” bumps are too distorted to conform with the general evolving trend. This noisy example actually illustrates the robustness of the time series chains technique. As noted before, we don't # actually need “perfect” data in order to find meaningful chains. Even if some links are badly distorted, the discovered chain will still be able to include all of the other evolving patterns. # # One final consideration is the potential use of chains to predict the future. One could leverage the evolving links within the chains in order to forecast the shape of the next bump. We refer the reader to the [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf) for further discussions on this topic. # ## Summary # # And that's it! You've just learned the basics of how to identify directional trends, also known as chains, within your data using the matrix profile indices and leveraging `allc`. # # ## Resources # # [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf) # # # [Matrix Profile VII Supplementary Materials](https://sites.google.com/site/timeserieschain/)
docs/Tutorial_Time_Series_Chains.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Exercise 10. Introduction to the hypothesis testing, one-sample tests # ## <NAME>, <NAME> # # From interval estimates to hypothesis tests # # ## What is a statistical hypothesis test? # # Let's have the following: # # - random variable X (for example men's height) # - selection from a random variable (height measurement of 30 men) # # Statistical testing of hypotheses decides the validity of statistical statement (hypothesis) based on the data obtained: # # - $H_0$ - null hypotheses # - $H_A$ - alternative hypotheses # # For example: # # $H_0$: $\mu_X = 175$ # # $H_A$: $\mu_X > 175$ # # Since this is a statistical decision, it will always be tied to some level of significance $\alpha$. We can always reach only 2 different decisions: # # - I reject $H_0$ in favor of $H_A$ # - this means that I claim that $H_0$ does not apply # - this decision is with the maximum error $\alpha$(significance level, type I error) - this means that we are able to influence the size of this error # - I don't reject $H_0$ # - this means that I claim that due to the obtained data(selection) it is not possible to reject $H_0$ # - this decision is with error $\beta$(type II error), this error is not directly controllable and depends on the type of test used # # How hypothesis tests relate to interval estimates and how the level of significance enters them will be shown in the next section. 
# ## Interval estimation and significance level # # data = readxl::read_excel("data/uvod.xlsx") head(data) # + options(repr.plot.width = 12) # width of graphs in Jupyter par(mfrow = c(1, 2)) # graph graph matrices 1x2 boxplot(data$data) hist(data$data) # + moments::skewness(data$data) # oblique moments::kurtosis(data$data) - 3 # sharpness shapiro.test(data$data)$p.value # normality test # - length(data$data) mean(data$data) sd(data$data) # We make a 95% interval estimate of the mean using a t-test: # # t.test(data$data, alternative = "two.sided", conf.level = 0.95)$conf.int # Now imagine that we want to test the hypothesis:<br>$H_0$: $\mu = 100$<br>$H_A$: $\mu \neq 100$<br>What would be the decision with respect to the calculated IO and so the significance level $\alpha = 0.05$? # # Well, confidence interval covers the value 100 with maximal error of 5% (confidence 95%). Therefore, we can say we cannot reject this hypothesis at significance value 5%. # Let's further imagine that we want to test the hypothesis:<br>$H_0$: $\mu = 105$<br>$H_A$: $\mu \neq 105$<br>What would be the decision with respect to the calculated IO and so the significance level $\alpha = 0.05$? # # Well, now its a different case: confidence interval does not cover the value 105 with maximal error of 5% (confidence 95%). Therefore, we can say we can reject this hypothesis at significance value 5%. # **What we just did is called a classic test.**<br> # # We will show you more classic tests for one-sided alternatives.<br>$H_0$: $\mu = 105$<br>$H_A$: $\mu > 105$<br> # # t.test(data$data, alternative = "greater", conf.level = 0.95)$conf.int # $H_0$: $\mu = 105$<br>$H_A$: $\mu < 105$<br> # # t.test(data$data, alternative = "less", conf.level = 0.95)$conf.int # Note that the first of these one-sided alternatives led to a "rejection" of $H_0$. This is because of the comparison of the unlikely $H_0$ with the even less likely $H_A$. 
# # #### P-values and connection with CI # # An alternative to the classical test(where we create CI) is the so-called pure significance test: # H_0: mu=105 # H_A: mu<>105 t.test(data$data, mu = 105, alternative = "two.sided") t.test(data$data, mu = 105, alternative = "two.sided")$p.value # The pure significance test results in a p-value. Based on it, we decide whether or not to reject $H_0$. # # p-value can be understood as the highest possible level of significance, such that our decision is - I do not reject. Thus, the CI/field of acceptance would contain the examined value: # + # H_0: mu=105 # H_A: mu<>105 p.hod = t.test(data$data, mu = 105, alternative = "two.sided")$p.value p.hod t.test(data$data, alternative = "two.sided", conf.level = 1 - p.hod)$conf.int # + # H_0: mu=105 # H_A: mu>105 p.hod = t.test(data$data, mu = 105, alternative = "greater")$p.value p.hod t.test(data$data, alternative = "greater", conf.level = 1 - p.hod)$conf.int # + # H_0: mu=105 # H_A: mu<105 p.hod = t.test(data$data, mu = 105, alternative = "less")$p.value p.hod t.test(data$data, alternative = "less", conf.level = 1 - p.hod)$conf.int # - # ## Overwiev of tests for one sample # # ### Position measures # # By position measures we mean the data that determines the position of the data. For data from the normal distribution we can estimate the mean value, for others the median. 
#
# #### a) student's t-test
#
# - we test the mean value
# - the data must come from a normal distribution
# - exploratory: skewness and sharpness lie in(-2,2)
# - exploratory: The QQ graph has points approximately on the line
# - exact: using a statistical test, eg Shapiro-Wilk test(shapiro.test(data))

# H_0: mu=100
# H_A: mu<>100
t.test(data$data, mu = 100, alternative = 'two.sided')$p.value

# H_0: mu=100
# H_A: mu>100
t.test(data$data, mu = 100, alternative = 'greater')$p.value

# H_0: mu=100
# H_A: mu<100
t.test(data$data, mu = 100, alternative = 'less')$p.value

# #### b) Wilcoxon test
#
# - we test the median
# - the data must come from a symmetric distribution
# - exploratory: skewness lies in(-2,2)
# - exploratory: histogram looks approximately symmetrical

# H_0: X_0.5=100
# H_A: X_0.5<>100
wilcox.test(data$data, mu = 100, alternative = 'two.sided')$p.value

# H_0: X_0.5=100
# H_A: X_0.5>100
wilcox.test(data$data, mu = 100, alternative = 'greater')$p.value

# H_0: X_0.5=100
# H_A: X_0.5<100
wilcox.test(data$data, mu = 100, alternative = 'less')$p.value

# #### c) sign test
#
# - we test the median
# - larger range selection(>10)
# - requires "BSDA" library
# - as the most robust test, it can also be used for discontinuous data

# H_0: X_0.5=100
# H_A: X_0.5<>100
BSDA::SIGN.test(data$data, md = 100, alternative = 'two.sided')$p.value

# H_0: X_0.5=100
# H_A: X_0.5>100
BSDA::SIGN.test(data$data, md = 100, alternative = 'greater')$p.value

# H_0: X_0.5=100
# H_A: X_0.5<100
BSDA::SIGN.test(data$data, md = 100, alternative = 'less')$p.value

# ### Variability measures
#
# By measures of variability we mean the data determining the dispersion/variability of the data. For data from the normal distribution, we can estimate the standard deviation.
#
# #### standard deviation test
#
# - we test the standard deviation
# - the data must come from a normal distribution
# - exploratory: skewness and kurtosis lie in(-2,2)
# - exploratory: The QQ graph has points approximately on the line
# - exact: using a statistical test, eg Shapiro-Wilk test(shapiro.test(data))
# - requires "EnvStats" package
# - NOTE: the R function compares the variance, so sigma is squared below !!!

# H_0: sigma=10
# H_A: sigma<>10
EnvStats::varTest(data$data, sigma.squared = 10*10, alternative = 'two.sided')$p.value

# H_0: sigma=10
# H_A: sigma>10
EnvStats::varTest(data$data, sigma.squared = 10*10, alternative = 'greater')$p.value

# H_0: sigma=10
# H_A: sigma<10
EnvStats::varTest(data$data, sigma.squared = 10*10, alternative = 'less')$p.value

# ## Probability
#
# #### Test of probability
#
# - We test the probability
# - We require sufficient data: $n>\frac{9}{p(1-p)}$
# - Clopper's - Pearson's estimate(binom.test)
# - does not take data as a parameter, but the number of successes and the number of observations

# +
# Simulate 100 Bernoulli trials with success probability pi = 0.3.
pi = 0.3
data_bin = runif(n = 100, min = 0, max = 1) < pi
n = length(data_bin)
x = sum(data_bin)
n
x
# -

# H_0: pi=0.2
# H_A: pi<>0.2
binom.test(x = x, n = n, p = 0.2, alternative = 'two.sided')$p.value

# H_0: pi=0.2
# H_A: pi>0.2
binom.test(x = x, n = n, p = 0.2, alternative = 'greater')$p.value

# H_0: pi=0.2
# H_A: pi<0.2
binom.test(x = x, n = n, p = 0.2, alternative = 'less')$p.value

# # Examples

library(dplyr)
library(rstatix)

#
# ## Example 1.
#
# We have a selection of 216 patients and we measured their protein serum(file testy_jednovyberove.xlsx sheet bilk_serum). Verify that the average protein serum(Albumin) of all patients of this type(population average µ) differs statistically significantly from 35 g/l.
# Reading data from xlsx file(using readxl package) albumin = readxl::read_excel("data/testy_jednovyberove.xlsx", sheet = "bilk_serum") head(albumin) colnames(albumin)="value" # Exploratory analysis boxplot(albumin$value) summary(albumin$value) length(albumin$value) # sd is rounded to 3 valid digits sd(albumin$value) # sd and position measures are rounded to the nearest thousandth # **Position measurement test** # # # + # Verification of normality - exploratory moments::skewness(albumin$value) # skew moments::kurtosis(albumin$value)-3 # sharpness options(repr.plot.width = 12) # width of graphs in Jupyter par(mfrow = c(1, 2)) # matrix of 1x2 graphs qqnorm(albumin$value) qqline(albumin$value) hist(albumin$value) # + # We will use the normality test for the final decision on data normality. # The presumption of normality is verified by the Shapir - Wilk test. # H0: Data is a selection from the normal distribution. # Ha: Data is not a selection from the normal distribution. shapiro.test(albumin$value) # p-value>0.05 ->Na hl. significance of 0.05, the assumption of normality cannot be rejected. # + # normal OK ->t.test # H0: mu=35 g/l # Ha: mu<>35 g/l t.test(albumin$value, mu=35, alternative = "two.sided") # p-value<0.05 ->at significance level of 0.05 we reject the null hypothesis # in favor of the alternative hypothesis # The mean albumin value differs statistically significantly from 35 g/l. # - # ## Example 2. # # Survival times for 100 lung cancer patients treated with the new drug are listed in the tests_jednovyberove.xlsx sheet "preziti". It is known from previous studies that the average survival of such patients without the administration of a new drug is 22.2 months. Can these data suggest that the new drug prolongs survival? 
# # # Reading data from xlsx file(using readxl package) preziti = readxl::read_excel("data/testy_jednovyberove.xlsx", sheet = "preziti") head(preziti) colnames(preziti)="value" # + # # Exploratory analysis par(mfrow = c(1, 2)) # graph matrix 1x2 boxplot(preziti$value) hist(preziti$value) # - # **Data contains outliars -> we can delete them. Or note that this is probably an exponential distribution and the outliars are not actually there(the distribution simply behaves this way.)** # # # Data contains outliars. We can list them with the help of f-ce boxplot. preziti$ID = seq(1,length(preziti$value)) outliers = preziti %>% identify_outliers(value) outliers # if we decided to remove outliers, then preziti$value_no_outliars = ifelse(preziti$ID %in% outliers$ID,NA,preziti$value) # Exploratory analysis for data without remote observations boxplot(preziti$value_no_outliars) length(na.omit(preziti$value_no_outliars)) # sd is rounded to 3 valid digits sd(preziti$value_no_outliars,na.rm=TRUE) # sd and position measurements round. to tenths # **Position measure(mean/median) test** # # # + # Verification of normality - exploratory moments::skewness(preziti$value_no_outliars,na.rm=TRUE) moments::kurtosis(preziti$value_no_outliars,na.rm=TRUE)-3 par(mfrow = c(1, 2)) # graph matrix 1x2 qqnorm(preziti$value_no_outliars) qqline(preziti$value_no_outliars) hist(preziti$value_no_outliars) # QQ - graph and history show that the choice of truth. is not a choice of standards. distribution. # Slanting and pointing corresponds to standards. distribution. # we will use the normality test. # - # We verify the assumption of normality by the Shapirs. Wilkov's test. shapiro.test(preziti$value_no_outliars) # p-value<0.05 ->at significance 0.05, we reject the assumption of normality # + # exploratory assessment of symmetry - exponential distribution - no symmetry # + # normality rejected ->symmetry rejected ->Sign. 
test # H0: median=22.2 months # Ha: median>22.2 months BSDA::SIGN.test(preziti$value_no_outliars, md=22.2, alternative="greater") # p-value>0.05 -> at significance of 0.05, the null hypothesis cannot be rejected # Median survival time is not statistically significantly greater than 22.2 months. # - median(preziti$value_no_outliars, na.rm = TRUE) # ## Example 3. # # The machine produces piston rings of a given diameter. The manufacturer states that the standard deviation of the ring diameter is 0.05 mm. To verify this information, 80 rings were randomly selected and their diameter was measured(file testy_jednovyberove.xlsx sheet krouzky). Can the results obtained be considered statistically significant in terms of improving the quality of production? # Reading data from xlsx file(using readxl package) krouzky = readxl::read_excel("data/testy_jednovyberove.xlsx", sheet = "krouzky") head(krouzky) colnames(krouzky)="value" # # Exploratory analysis boxplot(krouzky$value) # Data contains outliars. We can list them with the help of f-ce boxplot. krouzky$ID = seq(1,length(krouzky$value)) outliers = krouzky %>% identify_outliers(value) outliers # if we decided to remove outliers, then krouzky$value_no_outliars = ifelse(krouzky$ID %in% outliers$ID,NA,krouzky$value) # Exploratory analysis for data without remote observations summary(krouzky$value_no_outliars,na.rm=TRUE) boxplot(krouzky$value_no_outliars) length(na.omit(krouzky$value_no_outliars))# sd is rounded to 3 valid digits sd(krouzky$value_no_outliars,na.rm=TRUE) # sd and position measures round. per thousandths # + # Verification of normality - exploratory moments::skewness(krouzky$value_no_outliars,na.rm=TRUE) moments::kurtosis(krouzky$value_no_outliars,na.rm=TRUE)-3 par(mfrow = c(1, 2)) # matrix of 1x2 graphs qqnorm(krouzky$value_no_outliars) qqline(krouzky$value_no_outliars) hist(krouzky$value_no_outliars) # Both skew and sharpness comply with standards. distribution. 
# We will use for the final decision on data normality # - # normality test. # We verify the assumption of normality by the Shapirs. Wilkov's test. shapiro.test(krouzky$value_no_outliars) # p-value>0.05 ->Na hl. significance of 0.05 cannot be assumed norms. reject # + # variability test ->variance test # H0: sigma=0.05 mm # Ha: sigma<0.05 mm EnvStats::varTest(krouzky$value_no_outliars, sigma.squared = 0.05^2, alternative = "less") # p-value<0.05 ->At the significance level of 0.05 we reject H0 in favor of Ha # + # How to find a 95% interval standard deviation estimate? pom = EnvStats::varTest(krouzky$value_no_outliars,sigma.squared = 0.05^2, alternative = "less", conf.level=0.95) sqrt(pom$conf.int) # - # ## Example 4. # # TT states that 1% of their resistors do not meet the required criteria. 15 unsuitable resistors were found in the tested delivery of 1000 pieces. Is this result with agreement with TT's assertion or can we reject it? n = 1000 # selection range x = 15 # number of "successes" p = x/n # relative frequency(probability point estimate) p # Verification of assumptions 9/(p*(1-p)) # + # Clopper - Pearson(exact) test # H0: pi=0.01 # Ha: pi<>0.01 binom.test(x = x, n= n, p = 0.01, alternative="two.sided")
Exercise 10/T12_hypothesis_testing1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Grouping by Time # # In previous notebooks, we learned how to downsample/upsample time series data. In this notebook, we will group spans of time together to get a result. For instance, we can find out the number of up or down days for a stock within each trading month, or calculate the number of flights per day for an airline. pandas gives you the ability to group by a period of time. Let's begin by reading in our stock dataset. import pandas as pd stocks = pd.read_csv('../data/stocks/stocks10.csv', parse_dates=['date'], index_col='date') stocks.head(3) # ### Find the average closing price of Amazon for every month # If we are interested in finding the average closing price of Amazon for every month, then we need to group by month and aggregate the closing price with the mean function. # # ### Grouping column, aggregating column, and aggregating method # This procedure is very similar to how we grouped and aggregated columns in previous notebooks. The only difference is that, our grouping column will now be a datetime column with an additional specification for the amount of time. # # ### Use the `resample` method # Instead of the `groupby` method, we use a special method for grouping time together called `resample`. We must pass the `resample` method an offset alias string. The rest of the process is the exact same as the `groupby` method. We call the `agg` method and pass it a dictionary mapping the aggregating columns to the aggregating functions. # # ### `resample` syntax # # The first parameter we pass to `resample` is the [offset alias][1]. Here, we choose to group by month. 
We then chain the `agg` method and must use one of the alternative syntaxes as the pandas developers have not yet implemented column renaming for the `resample` method. # # [1]: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases stocks.resample('M').agg({'AMZN': 'mean'}).head(3) # ### Use any number of aggregation functions # Map the aggregating column to a list of aggregating functions. stocks.resample('M').agg({'AMZN': ['size', 'min', 'mean', 'max']}).head(3) # ### Group by Quarter stocks.resample('Q').agg({'AMZN': ['size', 'min', 'mean', 'max']}).head(4) # ### Label as the entire Period # Notice how the end date of both the month and day are used as the returned index labels for the time periods. We can change the index labels so that they show just the time period we are aggregating over by setting the `kind` parameter to 'period'. amzn_period = stocks.resample('Q', kind='period').agg({'AMZN': ['size', 'min', 'mean', 'max']}) amzn_period.head(4) # ## The PeriodIndex # We no longer have a DatetimeIndex. Pandas has a completely separate type of object for this called the **PeriodIndex**. The index label '2016Q1' refers to the entire period of the first quarter of 2016. Let's inspect the index to see the new type. amzn_period.index[:10] # ## The Period data type # Pandas also has a completely separate data type called a **Period** to represent **columns** of data in a DataFrmae that are specific **periods of time**. This is directly analagous to the PeriodIndex, but for DataFrame columns. Examples of a Period are the entire month of June 2014, or the entire 15 minute period from June 12, 2014 5:15 to June 12, 2014 5:30. # # ### Convert a datetime column to a Period # We can use the `to_period` available with the `dt` accessor to convert datetimes to Period data types. You must pass it an offset alias to denote the length of the time period. Let's convert the `date` column in the weather dataset to a monthly Period column . 
weather = pd.read_csv('../data/weather.csv', parse_dates=['date'])
weather.head(3)

# Let's make the conversion from datetime to period and assign the result as a new column in the DataFrame.
weather['date_period'] = weather['date'].dt.to_period('M')
weather.head(3)

# ### Why is the data type "object"?
# Unfortunately, Pandas doesn't explicitly label the Period object as such when outputting the data types. But if we inspect each individual element, you will see that they are indeed Period objects.
weather.dtypes

# Inspecting each individual element.
weather.loc[0, 'date_period']

# ### The `dt` accessor works for Period columns
#
# Even though it is technically labeled as object, pandas still has attributes and methods specific to periods.
weather['date_period'].dt.month.head(3)

# Return the span of time with the `freq` attribute.
weather['date_period'].dt.freq

# ## Anchored offsets
#
# By default, when grouping by week, pandas chooses to end the week on Sunday. Let's verify this by grouping by week and taking the resulting index label and determining its weekday name.
week_mean = stocks.resample('W').agg({'AMZN': ['size', 'min', 'mean', 'max']})
week_mean.head(3)

week_mean.index[0].day_name()

# ### Anchor by a different day
#
# You can anchor the week to any day you choose by appending a dash and then the first three letters of the day of the week. Let's anchor the week to Wednesday.
stocks.resample('W-WED').agg({'AMZN': ['size', 'min', 'mean', 'max']}).head(3)

# ### Longer intervals of time with numbers appended to offset aliases
# We can actually add more details to our offset aliases by using a number to specify an amount of that particular offset alias. For instance, **`5M`** will group in 5 month intervals.
stocks.resample('5M').agg({'AMZN': ['size', 'min', 'mean', 'max']}).head(3)

# Group by every 22 weeks anchored to Thursday.
stocks.resample('22W-THU').agg({'AMZN': ['size', 'min', 'mean', 'max']}).head(3) # ## Calling `resample` on a datetime column # The `resample` method can still work without a Datetimeindex. If there is a column that is of the datetime data type, you can use the `on` parameter to specificy that column. Let's reset the index and then call `resample` on that DataFrame. amzn_reset = stocks.reset_index() amzn_reset.head(3) # The only difference is that we specify the grouping column with the `on` parameter. The result is the exact same. amzn_reset.resample('W-WED', on='date').agg({'AMZN': ['size', 'min', 'mean', 'max']}).head(3) # ## Calling `resample` on a Series # # Above, we called `resample` on a DataFrame. We can also use it for Series. Let's select Amazon's closing price as a Series. amzn_close = stocks['AMZN'] amzn_close.head(3) # For a Series, the aggregating column is just the values. It's not necessary to use the `agg` method in order to aggregate. Instead, we can call aggregation methods directly. Here, we find the mean closing price by month. amzn_close.resample('M').mean().head() # To compute multiple aggregations, use the `agg` method and pass it a list of the aggregating functions as strings. Here we find the total number of trading days ('size'), the min, max, and mean of the closing price for every three year period. amzn_close.resample('3Y', kind='period').agg(['size', 'min', 'max', 'mean']) # ## Exercises # # Execute the following cell that reads in 20 years of Microsoft stock data and use it for the first few exercises. 
msft = pd.read_csv('../data/stocks/msft20.csv', parse_dates=['date'], index_col='date') msft.head(3) # ### Exercise 1 # <span style="color:green; font-size:16px">In which week did MSFT have the greatest number of its shares (volume) traded?</span> # ### Exercise 2 # # <span style="color:green; font-size:16px">With help from the `diff` method, find the quarter containing the most number of up days.</span> # ### Exercise 3 # # <span style="color:green; font-size:16px">Find the mean price per year along with the minimum and maximum volume.</span> # ### Exercise 4 # # <span style="color:green; font-size:16px">Use the `to_datetime` function to convert the hire date column into datetimes. Reassign this column in the `emp` DataFrame.</span> # ### Exercise 5 # # <span style="color:green; font-size:16px">Without putting `hire_date` into the index, find the mean salary based on `hire_date` over 5 year periods. Also return the number of salaries used in the mean calculation for each period.</span>
jupyter_notebooks/pandas/mastering_data_analysis/07. Time Series/03. Grouping by Time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # imports import numpy as np import pandas as pd # load data df = pd.read_excel('ia110220.xlsx') print(df.shape) df.head() df.describe().T # TODO - try to convert columns with symbols to actual ADV questions df.columns[0:100] df.columns[101:200] df.columns[201:262] df.describe(exclude=[np.number]).T # most popular cities df['Main Office City'].value_counts(normalize=True).head(15) # most popular states pop_state = df['Main Office State'].value_counts(normalize=True) pop_state pop_state.to_csv('pop_state.csv') # most popular countries df['Main Office Country'].value_counts().head(15) # Did you have $1 billion or more in assets on the last day of your most recent fiscal year? df['1O'].value_counts() # If yes, what is the approximate amount of your assets: df['1O - If yes, approx. 
amount of assets'].value_counts() # Didn't list a web address df['Website Address'].isnull().sum() # Total number of offices other than Princpal Office df['Total number of offices, other than your Principal Office and place of business'].value_counts() # Number of Employees df['5A'].value_counts() # Number of Employees that perform investment advisory functions including research df['5B(1)'].value_counts() # + compensation_labels = ['5E(1)', '5E(2)', '5E(3)', '5E(4)', '5E(5)', '5E(6)', '5E(7)', '5E(7)-Other'] compensation_arrangements = ['A percentage of assets under your management', 'Hourly charges', 'Subscription fees (for a newsletter of periodical)', 'Fixed fees (other than subscription fees)', 'Commissions', 'Performance-based fees', 'Other (specify)'] for label, arrangement in zip(compensation_labels, compensation_arrangements): print(arrangement) print(df[label].value_counts(normalize=True)) # - df['5F(1)'].value_counts() # + assets = ['5F(2)(a)', '5F(2)(b)', '5F(2)(c)'] names = ['Discretionary AUM', 'Non-Discretionary AUM', 'Total AUM'] for asset, name in zip(assets, names): print(name) print('${:,.2f}'.format(df[asset].sum())) # + accounts = ['5F(2)(d)', '5F(2)(e)', '5F(2)(f)'] names = ['Discretionary Accounts', 'Non-Discretionary Accounts', 'Total Accounts'] for account, name in zip(accounts, names): print(name) print(df[account].sum()) # - aum_by_state = df.groupby('Main Office State').agg({'5F(2)(c)': ['mean', 'min', 'max']}) aum_by_state df.groupby('Main Office State')['5F(2)(c)'].mean().sort_values(ascending=False).astype(int)
.ipynb_checkpoints/RIA-data-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Calculate cosmological distances with CCL # In this example, we will calculate various cosmological distances for an example cosmology. import numpy as np import pylab as plt import pyccl as ccl # %matplotlib inline # ### Set up a Cosmology object # `Cosmology` objects contain the parameters and metadata needed as inputs to most functions. Each `Cosmology` object has a set of cosmological parameters attached to it. In this example, we will only use the parameters of a vanilla LCDM model, but simple extensions (like curvature, neutrino mass, and w0/wa) are also supported. # # `Cosmology` objects also contain precomputed data (e.g. splines) to help speed-up certain calculations. As such, `Cosmology` objects are supposed to be immutable; you should create a new `Cosmology` object when you want to change the values of any cosmological parameters. cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67, A_s=2.1e-9, n_s=0.96) print(cosmo) # As you can see, a number of cosmological parameters have been set to default values, or derived from the input parameters. Some, like `sigma8`, have been left undefined; this is because calculating them from the input parameters is non-trivial, so this will only be done if needed (or if the user explicitly requests it). # Parameter values can be accessed from the `Cosmology` object contains, like so: print(cosmo['Omega_c']) # ### Cosmological Distances # # With a cosmology in hand, we can begin performing some calculations. We can start with the most basic measure, the comoving radial distance. z = 0.5 ccl.comoving_radial_distance(cosmo, 1/(1+z)) # Mpc # Note that all distance function calls require scale factors, not redshifts. This function can take a `numpy` array of values as well. 
# Evaluate the radial distance on a whole grid of redshifts at once.
redshift_grid = np.arange(0, 1, 0.1)
ccl.comoving_radial_distance(cosmo, 1/(1+redshift_grid))

# CCL also supports calculation of the comoving angular distance. In flat spacetime (like the cosmology we have here) it is the same as the radial distance.
ccl.comoving_angular_distance(cosmo, 1/(1+z))

# If we create a cosmology with curvature, we'll get a different result.

# +
cosmo_curved = ccl.Cosmology(Omega_k=0.1, Omega_c=0.17, Omega_b=0.045,
                             h=0.67, A_s=2.1e-9, n_s=0.96)

# All distance functions take scale factors, so convert the redshift once.
scale_factor = 1/(1+z)
chi_rad = ccl.comoving_radial_distance(cosmo_curved, scale_factor)
chi_curved = ccl.comoving_angular_distance(cosmo_curved, scale_factor)

print('Radial Dist. = %.2f Mpc \t Angular Dist. = %.2f Mpc'%(chi_rad, chi_curved))
# -

# CCL explicitly supports the calculation of the luminosity distance and the distance modulus too:
chi_lum = ccl.luminosity_distance(cosmo, scale_factor)
DM = ccl.distance_modulus(cosmo, scale_factor)
print('Luminosity Dist = %.2f Mpc \t Distance Modulus = %.2f ' % (chi_lum, DM))

# Finally, CCL supports an inverse operation, which calculates the scale factor for a given comoving distance:
ccl.scale_factor_of_chi(cosmo, 1962.96)
Distance Calculations Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # cd # !pip install --user jupyterplot #currently display has been inactivated with # # Modification: for real-time presentation of some time series values “jupyterplot" needs to be installed # the initialization is at the main last cell, it takes some time >30 sec # # Road Following - Live demo (TensorRT) # In this notebook, we will use model we trained to used Jetracer for jetbot interference script for a chosen category to run the Jetbot # # TensorRT import torch device = torch.device('cuda') # Load the TRT optimized model by executing the cell below - # ## NEED TO CHOSE CATEGORY AND PRE TRAINED MODEL # + import torch from torch2trt import TRTModule CATEGORIES = ['apex','bottle'] #CATEGORIES = ['road','bottle'] # if no Categories then activate like Jetbot trained model then: #CATEGORIES = [] model_trt = TRTModule() blob=model_trt.load_state_dict(torch.load('road_following_model_trt.pth')) #jetracer for jetbot model from training and build with TRT #model_trt.load_state_dict(torch.load('best_steering_model_xy_trt.pth')) #jetbot model from training and build TRT # - # ### Creating the Pre-Processing Function # We have now loaded our model, but there's a slight issue. The format that we trained our model doesnt exactly match the format of the camera. To do that, we need to do some preprocessing. This involves the following steps: # # 1. Convert from HWC layout to CHW layout # 2. Normalize using same parameters as we did during training (our camera provides values in [0, 255] range and training loaded images in [0, 1] range so we need to scale by 255.0 # 3. Transfer the data from CPU memory to GPU memory # 4. 
Add a batch dimension # + import torchvision.transforms as transforms import torch.nn.functional as F import cv2 import PIL.Image import numpy as np mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half() std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half() def preprocess(image): image = PIL.Image.fromarray(image) image = transforms.functional.to_tensor(image).to(device).half() image.sub_(mean[:, None, None]).div_(std[:, None, None]) return image[None, ...] # - # Awesome! We've now defined our pre-processing function which can convert images from the camera format to the neural network input format. # # Now, let's start and display our camera. You should be pretty familiar with this by now. # + from IPython.display import display import ipywidgets import traitlets from jetbot import Camera, bgr8_to_jpeg camera = Camera() # + import ipywidgets.widgets as widgets import time import IPython target_widget = widgets.Image(format='jpeg', width=224, height=224) x_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='x') y_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='horizontal', description='y') def display_xy(camera_image): image = np.copy(camera_image) x = x_slider.value y = y_slider.value x = int(x * 224 / 2 + 112) y = int(y * -224 / 2 + 112) image = cv2.circle(image, (x, y), 8, (0, 255, 0), 3) image = cv2.circle(image, (112, 224), 8, (0, 0,255), 3) image = cv2.line(image, (x,y), (112,224), (255,0,0), 3) jpeg_image = bgr8_to_jpeg(image) return jpeg_image #time.sleep(1) traitlets.dlink((camera, 'value'), (target_widget, 'value'), transform=display_xy) display(widgets.HBox([target_widget])) d2 = IPython.display.display("", display_id=2) # - # We'll also create our robot instance which we'll need to drive the motors. 
# + from jetbot import Robot robot = Robot() # - # Now, we will define sliders to control JetBot # > Note: We have initialize the slider values for best known configurations, however these might not work for your dataset, therefore please increase or decrease the sliders according to your setup and environment # # 1. Speed Control (speed_gain_slider): To start your JetBot increase ``speed_gain_slider`` # 2. Steering Gain Control (steering_gain_sloder): If you see JetBot is woblling, you need to reduce ``steering_gain_slider`` till it is smooth # 3. Steering Bias control (steering_bias_slider): If you see JetBot is biased towards extreme right or extreme left side of the track, you should control this slider till JetBot start following line or track in the center. This accounts for motor biases as well as camera offsets # # > Note: You should play around above mentioned sliders with lower speed to get smooth JetBot road following behavior. # + speed_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, description='speed gain') steering_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.09, description='steering gain') steering_dgain_slider = ipywidgets.FloatSlider(min=0.0, max=0.5, step=0.001, value=0.24, description='steering kd') steering_bias_slider = ipywidgets.FloatSlider(min=-0.3, max=0.3, step=0.01, value=0.0, description='steering bias') display(speed_gain_slider, steering_gain_slider, steering_dgain_slider, steering_bias_slider) blocked_slider = ipywidgets.FloatSlider(description='blocked', min=0.0, max=1.0, orientation='horizontal') stopduration_slider= ipywidgets.IntSlider(min=1, max=1000, step=1, value=10, description='Manu. time stop') #anti-collision stop time block_threshold= ipywidgets.FloatSlider(min=0, max=1, step=0.1, value=0.8, description='Manu. 
bl threshold') #anti-collision block probability display(ipywidgets.HBox([ blocked_slider, stopduration_slider, block_threshold])) #create new New View for Output # - # Next, let's display some sliders that will let us see what JetBot is thinking. The x and y sliders will display the predicted x, y values. # # The steering slider will display our estimated steering value. Please remember, this value isn't the actual angle of the target, but simply a value that is # nearly proportional. When the actual angle is ``0``, this will be zero, and it will increase / decrease with the actual angle. # + steering_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='steering') speed_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='horizontal', description='speed') category_widget = ipywidgets.Dropdown(options=np.array(CATEGORIES), description='category') display(y_slider, x_slider, speed_slider, steering_slider) #choose category for road or object following category_execution_widget = ipywidgets.VBox([category_widget]) display(category_execution_widget) # + state_widget = ipywidgets.ToggleButtons(options=['stop', 'live'], description='state', value='stop') prediction_widget = ipywidgets.FloatText(description='prediction') score_widgets = [] axis_categories=['x','y'] categories_number=np.array(CATEGORIES).size for category in np.array(CATEGORIES): for i in axis_categories: category_text=category+'---'+i score_widget = ipywidgets.FloatSlider(min=0.0, max=1.0, description=category_text, orientation='vertical') score_widgets.append(score_widget) live_execution_widget = ipywidgets.VBox([ ipywidgets.HBox(score_widgets), prediction_widget ]) display(live_execution_widget) # + def start_category(change): global category_index if not CATEGORIES: category_index=0 else: category_index=CATEGORIES.index(category_widget.value) return category_widget.observe(start_category, names='value') #repeated for initialization if not CATEGORIES: print("List is empty.") 
category_index=0; else: category_index=CATEGORIES.index(category_widget.value) # - # Next, we'll create a function that will get called whenever the camera's value changes. This function will do the following steps # # 1. Pre-process the camera image # 2. Execute the neural network # 3. Compute the approximate steering value # 4. Control the motors using proportional / derivative control (PD) # + import time count=0 count_stops=0 stop_time=50 #(for how many frames the bot should go backwards, see and of script) angle = 0.0 angle_last = 0.0 go_on=1 max_x=camera.width min_x=0 max_y=camera.height min_y=0 sum_bottle=0.0 def execute(change): global angle, angle_last, category_index, count, count_stops,stop_time,go_on,block_threshold,stop_time,max_x, min_x,max_y,min_y,sum_bottle count +=1 t1 = time.time() image = change['new'] xy = model_trt(preprocess(image)).detach().float().cpu().numpy().flatten() x = float(xy[2 * category_index]) y = float(xy[2 * category_index + 1] ) #normalized probability output for categories*two axis output= np.exp(xy)/sum(np.exp(xy)) if CATEGORIES: # indices = [2, 3] sum_bottle=output[indices].sum() for i, score in enumerate(list(output)): #probability slider for categories*x and y score_widgets[i].value = score category_number = output.argmax() prob_blocked=sum_bottle #pobability of second category for x and y together, blocked_slider.value = prob_blocked stop_time=stopduration_slider.value prediction_widget.value = category_number if go_on==1: if prob_blocked > block_threshold.value: #in case it recognizes bottle (prob_blocked) then stop count_stops +=1 x=0.0 #set steering zero y=0.0 #set steering zero speed_slider.value=0.0 # set speed zero or negative or turn go_on=2 #anti_collision------- else: #if prediction is not (e.g. 
bottle) then go on go_on=1 count_stops=0 x = int(max_x * (x / 2.0 + 0.5)) y = int(max_y * (y / 2.0 + 0.5)) speed_slider.value = speed_gain_slider.value # else: count_stops=count_stops+1 if count_stops<stop_time: #how many frames bot should pause x=0.0 #set steering zero y=0 #set steering zero speed_slider.value=0 # set speed zero or negative or turn else: go_on=1 count_stops=0 x_joysticklike=((x-max_x/2.0)-min_x)/(max_x-min_x) y_joysticklike=((max_y-y)-min_y)/(max_y-min_y) x_slider.value = x_joysticklike y_slider.value = y_joysticklike #--------- angle = np.arctan2(x_joysticklike, y_joysticklike) pid = angle * steering_gain_slider.value + (angle - angle_last) * steering_dgain_slider.value angle_last = angle steering_slider.value = pid + steering_bias_slider.value robot.left_motor.value = max(min(speed_slider.value + steering_slider.value, 1.0), 0.0) robot.right_motor.value = max(min(speed_slider.value - steering_slider.value, 1.0), 0.0) #--------- t2 = time.time() s = f"""{int(1/(t2-t1))} FPSS""" d2.update(IPython.display.HTML(s) ) execute({'new': camera.value}) # - # The jupyter plot shows the x and y values, x value is more variable, y value is more or less the same (y might be useful later for velocity regulation), # The initialization of jupyterplot takes >30 sec, so need to wait, once 0 FPSS appears, activate system with camera.observe below # # Cool! We've created our neural network execution function, but now we need to attach it to the camera for processing. # # We accomplish that with the observe function. # >WARNING: This code will move the robot!! Please make sure your robot has clearance and it is on Lego or Track you have collected data on. The road follower should work, but the neural network is only as good as the data it's trained on! camera.observe(execute, names='value') # Awesome! If your robot is plugged in it should now be generating new commands with each new camera frame. 
# # You can now place JetBot on Lego or Track you have collected data on and see whether it can follow track. # # If you want to stop this behavior, you can unattach this callback by executing the code below. # + import time camera.unobserve(execute, names='value') time.sleep(0.1) # add a small sleep to make sure frames have finished processing robot.stop() # - # ### Conclusion # That's it for this live demo! Hopefully you had some fun seeing your JetBot moving smoothly on track follwing the road!!! # # If your JetBot wasn't following road very well, try to spot where it fails. The beauty is that we can collect more data for these failure scenarios and the JetBot should get even better :)
CategoryRoad_Jetracer_2_Jetbot/trt_jetracer_categoryModel_for_jetbot_with_stop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Reading data from a social network survey # Let us start by downloading and installing the package import numpy as np # %matplotlib inline import matplotlib.pyplot as plt import networkx as nx import pandas as pd # Read the nodes and check the different columns via index.values G = nx.DiGraph() fn=pd.read_csv('../Lecture 2/SchoolNodes.csv',delimiter=' ',index_col=0).transpose() n_attr=len(fn.index.values) attr=fn.index.values fn.columns.values # Question 1: Check the types of the variables above and what do they contain type(n_attr) #Number of attributes for nodes in the columns type(attr) len(attr) type(G) #Class Digraph from networkx type(fn) #DataFrame from pandas fn #fn.index.values has the different rodes of the 7 attributes #fn.clolumns.values has the ids fn.index.values fn.columns.values for n in fn.columns: attr_node=dict(list(zip(attr, fn[n].values))) #print(attr," ",fn[n].values[6]) #print(n) G.add_node(n,label=fn[n].values[0],nodes=fn[n].values[1],sex=fn[n].values[2], race=fn[n].values[3],grade=fn[n].values[4],scode=fn[n].values[5],totalnoms=fn[n].values[6]) G.nodes() # Question 2: Write the grade of node 33 G.nodes[33]['grade'] for node in G.nodes(): print (node," is in grade",G.nodes[node]['grade']) fl=pd.read_csv('../Lecture 2/SchoolEdges.csv',delimiter=' ') fl.columns = list(map(str.lower, fl.columns)) fl fl.columns.values fl.index.values for L in fl.index.values: G.add_edge(fl['source'][L],fl['target'][L],weight=fl['weight'][L]) G.in_degree() deg = G.degree() to_keep = [] for node in G.nodes(): if deg[node] != 0: to_keep.append(node) else: print("Node: ",node," degree: ",deg[node]) #Create the network only with connected nodes G=G.subgraph(to_keep) print ("Nodes: ", G.nodes()) # #### Write the neighbours of each node for node 
in G.nodes(): print ("Neighbors of ", node, " are : ", list(G.neighbors(node))) # Question 3: # Check here https://networkx.github.io/documentation/networkx-1.10/reference/classes.digraph.html # The function DiGraph.predecessors() and write the nodes the links that nominated each node # for node in G.nodes(): print ("Neighbors of ", node, " are : ", list(G.predecessors(node))) #print (np.sum(list(G.predecessors(node)))) # Question 4: Write the number of nodes and edges #Number of nodes and links of G NumNodes = G.number_of_nodes() NumEdges = G.number_of_edges() print("Number of nodes: ",NumNodes) print(("Number of links: ",NumEdges)) # Question 5: Write the average in_degree, out_degree, and degree of the network # print("Average out_degree: ",np.mean(list(dict(G.out_degree()).values()))) print("Average in_degree: ",np.mean(list(dict(G.in_degree()).values()))) print("Average degree: ",np.mean(list(dict(G.degree()).values()))) # + #other options for in_degree and degree respectively # - G.number_of_edges()/G.number_of_nodes() 2.0*G.number_of_edges()/G.number_of_nodes() for node in G.nodes(): print ("Neighbors of ", node, " are : ", list(G.predecessors(node))) #print (np.sum(list(G.predecessors(node)))) av_outweightsNodes = {} for node in G.nodes(): sumw=0 lfriends = list(G.successors(node)) print ("Neighbors of ", node, " are : ", list(G.successors(node))) for i in lfriends: sumw=sumw+G[node][i]['weight'] print (node,i,G[node][i]['weight']) if G.out_degree(node)>0: print (sumw,G.out_degree(node),sumw/G.out_degree(node)) av_outweightsNodes[node]=(sumw/G.out_degree(node)) av_outweightsNodes av_inweightsNodes = {} for node in G.nodes(): sumw=0 lfriends = list(G.predecessors(node)) print ("Neighbors of ", node, " are : ", list(G.predecessors(node))) for i in lfriends: sumw=sumw+G[i][node]['weight'] print (node,i,G[i][node]['weight']) if G.in_degree(node)>0: print (sumw,G.in_degree(node),sumw/G.in_degree(node)) av_inweightsNodes[node]=(sumw/G.in_degree(node)) 
# Display the dictionary of average incoming edge weights per node.
av_inweightsNodes

# Bug fix: ``av_inweightsNodes{2}.values()`` was a SyntaxError -- Python dicts
# are indexed with square brackets.  Presumably the intent was to look up the
# average in-weight of node 2 (cf. the node-3 lookup below) -- confirm.
av_inweightsNodes[2]

type(av_inweightsNodes)

av_inweightsNodes[3]

# ## Question 6: Calculate Average Clustering Coefficient, and average shortest path in the Network
#
nx.average_clustering(G)

nx.average_shortest_path_length(G)
Lecture 3/.ipynb_checkpoints/Exercise_SocialNetworkSoln-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Challenge Notebook # ## Problem: Determine if a tree is a valid binary search tree. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # * [Solution Notebook](#Solution-Notebook) # ## Constraints # # * Can the tree have duplicates? # * Yes # * If this is called on a None input, should we raise an exception? # * Yes # * Can we assume we already have a Node class? # * Yes # * Can we assume this fits in memory? # * Yes # ## Test Cases # # <pre> # Valid: # 5 # / \ # 5 8 # / / # 4 6 # \ # 7 # # Invalid: # 5 # / \ # 5 8 # / \ / # 4 9 7 # </pre> # ## Algorithm # # Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/bst_validate/bst_validate_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
# ## Code

# +
# # %load ../bst/bst.py
class Node(object):
    """Binary-tree node holding a value plus left/right/parent links."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.parent = None

    def __repr__(self):
        return str(self.data)


class Bst(object):
    """Binary search tree; duplicates go into the left subtree."""

    def __init__(self, root=None):
        self.root = root

    def insert(self, data):
        """Insert *data* and return the newly created Node.

        Raises:
            TypeError: if data is None.
        """
        if data is None:
            raise TypeError('data cannot be None')
        if self.root is None:
            self.root = Node(data)
            return self.root
        else:
            return self._insert(self.root, data)

    def _insert(self, node, data):
        # Walk down the tree; equal keys (duplicates) are sent left.
        if node is None:
            return Node(data)
        if data <= node.data:
            if node.left is None:
                node.left = self._insert(node.left, data)
                node.left.parent = node
                return node.left
            else:
                return self._insert(node.left, data)
        else:
            if node.right is None:
                node.right = self._insert(node.right, data)
                node.right.parent = node
                return node.right
            else:
                return self._insert(node.right, data)
# -


class BstValidate(Bst):
    """Bst subclass that can check the BST invariant."""

    def _validate(self, node, upper, lower):
        # Every node must satisfy lower < node.data <= upper, where a bound
        # of None means "unbounded".  FIX: the bounds must be compared with
        # `is not None` -- the original tested truthiness (`if upper`), so a
        # bound of 0 was treated as "no bound" and invalid trees containing
        # zero-valued keys were accepted.
        if node is None:
            return True
        upper_met = node.data <= upper if upper is not None else True
        lower_met = node.data > lower if lower is not None else True
        return (upper_met and lower_met and
                self._validate(node.left, node.data, lower) and
                self._validate(node.right, upper, node.data))

    def validate(self):
        """Return True iff the tree rooted at self.root is a valid BST.

        Raises:
            TypeError: if the tree has no root.
        """
        if not self.root:
            raise TypeError
        return self._validate(self.root, None, None)

# ## Unit Test

# **The following unit test is expected to fail until you solve the challenge.**

# +
# # %load test_bst_validate.py
# FIX: rewritten with plain asserts.  The original imported `assert_equal`
# and `raises` from `nose`, an unmaintained package that no longer imports
# on modern Python versions.
class TestBstValidate(object):

    def test_bst_validate_empty(self):
        # Calling validate_bst on None must raise.  (In this challenge
        # notebook `validate_bst` is intentionally left undefined, so the
        # call raises NameError, which also satisfies the expectation.)
        try:
            validate_bst(None)
        except Exception:
            pass
        else:
            raise AssertionError('expected an exception for None input')

    def test_bst_validate(self):
        bst = BstValidate(Node(5))
        bst.insert(8)
        bst.insert(5)
        bst.insert(6)
        bst.insert(4)
        bst.insert(7)
        assert bst.validate() == True

        bst = BstValidate(Node(5))
        left = Node(5)
        right = Node(8)
        invalid = Node(20)
        bst.root.left = left
        bst.root.right = right
        bst.root.left.right = invalid
        assert bst.validate() == False

        print('Success: test_bst_validate')


def main():
    test = TestBstValidate()
    test.test_bst_validate_empty()
    test.test_bst_validate()


if __name__ == '__main__':
    main()
# -

# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/bst_validate/bst_validate_solution.ipynb) for a discussion on algorithms and code solutions.
graphs_trees/bst_validate/bst_validate_challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Inverse problems

from IPython.core.display import HTML
css_file = 'https://raw.githubusercontent.com/ngcm/training-public/master/ipython_notebook_styles/ngcmstyle.css'
HTML(url=css_file)

# So far we've looked at a variety of tests applied to *working, correct* code. All these tests have shown that the code is behaving as expected, that our mental model of the problem is being reproduced by the concrete computational model implemented.
#
# However, what if one of the tests failed? How could we go from a failing test to understanding what's wrong with the code? If we want to be formal about it, this falls into the category of [inverse problems](http://en.wikipedia.org/wiki/Inverse_problem), and in general [inverse problems are hard](http://en.wikipedia.org/wiki/Inverse_problem#Mathematical_considerations).
# ## Examples

# Let's take the n-body code again:

import numpy
from matplotlib import pyplot
# %matplotlib inline
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16

# +
# The Computer Language Benchmarks Game
# http://benchmarksgame.alioth.debian.org/
#
# originally by <NAME>
# modified by Tupteq, <NAME>, and <NAME>
# modified by <NAME>
# 2to3

import sys


def combinations(l):
    # Return every unordered pair (l[x], l[y]) with x < y.  Used below to
    # precompute the list of interacting body pairs for advance().
    result = []
    for x in range(len(l) - 1):
        ls = l[x+1:]
        for y in ls:
            result.append((l[x], y))
    return result


PI = 3.14159265358979323
SOLAR_MASS = 4 * PI * PI    # units chosen so that G * M_sun = 4*pi^2
DAYS_PER_YEAR = 365.24

# Each body is (position list, velocity list, mass); velocities are scaled
# from AU/day to AU/year via DAYS_PER_YEAR.
BODIES = {
    'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS),

    'jupiter': ([4.84143144246472090e+00,
                 -1.16032004402742839e+00,
                 -1.03622044471123109e-01],
                [1.66007664274403694e-03 * DAYS_PER_YEAR,
                 7.69901118419740425e-03 * DAYS_PER_YEAR,
                 -6.90460016972063023e-05 * DAYS_PER_YEAR],
                9.54791938424326609e-04 * SOLAR_MASS),

    'saturn': ([8.34336671824457987e+00,
                4.12479856412430479e+00,
                -4.03523417114321381e-01],
               [-2.76742510726862411e-03 * DAYS_PER_YEAR,
                4.99852801234917238e-03 * DAYS_PER_YEAR,
                2.30417297573763929e-05 * DAYS_PER_YEAR],
               2.85885980666130812e-04 * SOLAR_MASS),

    'uranus': ([1.28943695621391310e+01,
                -1.51111514016986312e+01,
                -2.23307578892655734e-01],
               [2.96460137564761618e-03 * DAYS_PER_YEAR,
                2.37847173959480950e-03 * DAYS_PER_YEAR,
                -2.96589568540237556e-05 * DAYS_PER_YEAR],
               4.36624404335156298e-05 * SOLAR_MASS),

    'neptune': ([1.53796971148509165e+01,
                 -2.59193146099879641e+01,
                 1.79258772950371181e-01],
                [2.68067772490389322e-03 * DAYS_PER_YEAR,
                 1.62824170038242295e-03 * DAYS_PER_YEAR,
                 -9.51592254519715870e-05 * DAYS_PER_YEAR],
                5.15138902046611451e-05 * SOLAR_MASS)}


SYSTEM = list(BODIES.values())
PAIRS = combinations(SYSTEM)


def advance(dt, n, bodies=SYSTEM, pairs=PAIRS):
    # Reference (correct) integrator: n steps of size dt.  For each pair of
    # bodies the velocities are updated from the mutual gravitational pull,
    # then every position is advanced with the new velocity.  The body
    # lists are mutated in place.
    for i in range(n):
        for (([x1, y1, z1], v1, m1),
             ([x2, y2, z2], v2, m2)) in pairs:
            dx = x1 - x2
            dy = y1 - y2
            dz = z1 - z2
            # dt * |r|^-3 folds the timestep into the inverse-cube distance.
            mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
            b1m = m1 * mag
            b2m = m2 * mag
            v1[0] -= dx * b2m
            v1[1] -= dy * b2m
            v1[2] -= dz * b2m
            v2[0] += dx * b1m
            v2[1] += dy * b1m
            v2[2] += dz * b1m
        for (r, [vx, vy, vz], m) in bodies:
            r[0] += dt * vx
            r[1] += dt * vy
            r[2] += dt * vz
# -

# Now, let's define a number of `advance` functions with small errors in.

def advance_dx_error(dt, n, bodies=SYSTEM, pairs=PAIRS):
    # DELIBERATELY WRONG (debugging exercise): all three components of v1
    # are updated with dx instead of dx, dy, dz.  Do not "fix" this.
    for i in range(n):
        for (([x1, y1, z1], v1, m1),
             ([x2, y2, z2], v2, m2)) in pairs:
            dx = x1 - x2
            dy = y1 - y2
            dz = z1 - z2
            mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
            b1m = m1 * mag
            b2m = m2 * mag
            v1[0] -= dx * b2m
            v1[1] -= dx * b2m
            v1[2] -= dx * b2m
            v2[0] += dx * b1m
            v2[1] += dy * b1m
            v2[2] += dz * b1m
        for (r, [vx, vy, vz], m) in bodies:
            r[0] += dt * vx
            r[1] += dt * vy
            r[2] += dt * vz


def advance_sign_error(dt, n, bodies=SYSTEM, pairs=PAIRS):
    # DELIBERATELY WRONG (debugging exercise): v1 is updated with += instead
    # of -=, so the force on body 1 has the wrong sign.  Do not "fix" this.
    for i in range(n):
        for (([x1, y1, z1], v1, m1),
             ([x2, y2, z2], v2, m2)) in pairs:
            dx = x1 - x2
            dy = y1 - y2
            dz = z1 - z2
            mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
            b1m = m1 * mag
            b2m = m2 * mag
            v1[0] += dx * b2m
            v1[1] += dy * b2m
            v1[2] += dz * b2m
            v2[0] += dx * b1m
            v2[1] += dy * b1m
            v2[2] += dz * b1m
        for (r, [vx, vy, vz], m) in bodies:
            r[0] += dt * vx
            r[1] += dt * vy
            r[2] += dt * vz


def advance_power_error(dt, n, bodies=SYSTEM, pairs=PAIRS):
    # DELIBERATELY WRONG (debugging exercise): the exponent is -0.5 instead
    # of -1.5, i.e. an inverse-distance rather than inverse-cube law.
    for i in range(n):
        for (([x1, y1, z1], v1, m1),
             ([x2, y2, z2], v2, m2)) in pairs:
            dx = x1 - x2
            dy = y1 - y2
            dz = z1 - z2
            mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-0.5))
            b1m = m1 * mag
            b2m = m2 * mag
            v1[0] -= dx * b2m
            v1[1] -= dy * b2m
            v1[2] -= dz * b2m
            v2[0] += dx * b1m
            v2[1] += dy * b1m
            v2[2] += dz * b1m
        for (r, [vx, vy, vz], m) in bodies:
            r[0] += dt * vx
            r[1] += dt * vy
            r[2] += dt * vz

# Which of our previous tests passes, and which fails? Could we go from a failing test to understanding which parts of the codes are wrong?
05-inverse-problems.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # IPython notebook for data analysis diss

# ## Imports:

# +
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from statsmodels.graphics.factorplots import interaction_plot
import matplotlib.pyplot as plt
from scipy import stats
import scipy
# -

# ## Functions needed by two-way ANOVA:

# +
def eta_squared(aov):
    """Append an 'eta_sq' effect-size column to an ANOVA table.

    Parameters
    ----------
    aov : pandas.DataFrame
        ANOVA table from statsmodels' anova_lm; the last row is assumed
        to be the residual term.

    Returns
    -------
    pandas.DataFrame
        The same table, mutated in place, with 'eta_sq' added (the
        residual row is NaN by index alignment).
    """
    # FIX: dropped the dead `aov['eta_sq'] = 'NaN'` initialization -- the
    # column was immediately overwritten by the assignment below.
    aov['eta_sq'] = aov['sum_sq'][:-1] / aov['sum_sq'].sum()
    return aov


def omega_squared(aov):
    """Append an 'omega_sq' effect-size column to an ANOVA table.

    Same contract as eta_squared(); omega-squared is the less biased
    effect-size estimate.
    """
    # FIX: positional indexing must use .iloc -- `series[-1]` on a
    # label-indexed Series is deprecated/removed in recent pandas.
    mse = aov['sum_sq'].iloc[-1] / aov['df'].iloc[-1]
    aov['omega_sq'] = (aov['sum_sq'][:-1] - (aov['df'][:-1] * mse)) / (aov['sum_sq'].sum() + mse)
    return aov
# -

# ## Function for conducting two-way ANOVA:

def two_way_anova(value, group1, group2, data=None):
    """Run a two-way ANOVA of *value* on the two factors and print it.

    Parameters
    ----------
    value, group1, group2 : str
        Column names in *data*; an interaction term group1:group2 is
        included in the model.
    data : pandas.DataFrame, optional
        Defaults to the module-level `data_all` (backward compatible with
        the original global-only version).

    Also shows a Q-Q plot of the model residuals.
    """
    if data is None:
        data = data_all
    formula = value + ' ~ C(' + group1 + ') + C(' + group2 + ') + C(' + group1 + '):C(' + group2 + ')'
    model = ols(formula, data).fit()
    aov_table = anova_lm(model, typ=2)
    eta_squared(aov_table)
    omega_squared(aov_table)
    print(aov_table)
    res = model.resid
    fig = sm.qqplot(res, line='s')
    plt.show()

# ## Function for conducting Independent-Samples t-Test

def independent_samples_t_test():
    """Welch-free two-sample t-tests of ZMF vs REST on Zeit and Score.

    Reads the module-level DataFrames `data_zmf` and `data_rest`; the
    plain columns are the Mautner test, the *_3 columns the reference
    test.  Prints the p-values of all four comparisons.
    """
    zmf_Zeit = data_zmf.Zeit
    rest_Zeit = data_rest.Zeit
    zmf_Zeit_3 = data_zmf.Zeit_3
    rest_Zeit_3 = data_rest.Zeit_3
    twosample_results_Zeit = scipy.stats.ttest_ind(zmf_Zeit, rest_Zeit)
    twosample_results_Zeit_3 = scipy.stats.ttest_ind(zmf_Zeit_3, rest_Zeit_3)

    zmf_Score = data_zmf.Score
    rest_Score = data_rest.Score
    zmf_Score_3 = data_zmf.Score_3
    rest_Score_3 = data_rest.Score_3
    twosample_results_Score = scipy.stats.ttest_ind(zmf_Score, rest_Score)
    twosample_results_Score_3 = scipy.stats.ttest_ind(zmf_Score_3, rest_Score_3)

    # FIX: the original string literal was broken across a physical line
    # break inside the quotes; rejoined using a plain tab separator.
    print('p-Value Zeit (Mautner-Test):\tp-Value Zeit (Referenz-Test):\n'
          + str(twosample_results_Zeit.pvalue) + '\t'
          + str(twosample_results_Zeit_3.pvalue))
    print('\np-Value Score (Mautner-Test):\tp-ValueScore (Referenz-Test):\n'
          + str(twosample_results_Score.pvalue) + '\t'
          + str(twosample_results_Score_3.pvalue))

# ## Reading all necessary files
#
# It is important for the analysis that the required datafiles are in the same directory and are prepared accordingly so that the notebook is able to retrieve the data.

# +
all_players = "players.csv"
rest = "rest.csv"
zmf = "zmf.csv"

data_all = pd.read_csv(all_players)
data_zmf = pd.read_csv(zmf)
data_rest = pd.read_csv(rest)
# -

# ## Result Independent Samples t-Test
#
# This test examines the difference between the ZMF and the REST in both testings.

independent_samples_t_test()

# ## Result two-way ANOVA
#
# This test examines the effect of age (divided into two groups: U15U16 and U18U19) and position (again divided into two groups: ZMF and REST) on Zeit and Score for two testings.
#
# ### Result when measured against Zeit (Mautner-Test)

two_way_anova('Zeit', 'Position2', 'Age')

# ### Result when measured against Score (Mautner-Test)

two_way_anova('Score', 'Position2', 'Age')

# ### Result when measured against Zeit (Referenz-Test)

two_way_anova('Zeit_3', 'Position2', 'Age')

# ### Result when measured against Score (Referenz-Test)

two_way_anova('Score_3', 'Position2', 'Age')
main_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## This project has two parts. # # In the first part, you will run a regression, and identify and remove the 10% of points that have the largest residual errors. Then you’ll remove those outliers from the dataset and refit the regression, just like the strategy that Sebastian suggested in the lesson videos. # # In the second part, you will get acquainted with some of the outliers in the Enron finance data, and learn if/how to remove them. # # ### Slope of Regression with Outliers # # Implementing the algorithm for improving a regression, by removing outliers. To summarize, what you'll do is fit the regression on all training points discard the 10% of points that have the largest errors between the actual y values, and the regression-predicted y values refit on the remaining points. 
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit

# NOTE(review): the imports below duplicate the ones above (harmless, but
# one of the two groups could be removed).
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit

# Enron project data (not used by the practice-outlier part below).
dictionary = pickle.load( open("../final_project/final_project_dataset_modified.pkl", "rb") )

# +
import random
import numpy
import matplotlib.pyplot as plt
import pickle
from outlier_cleaner_new import outlierCleaner # took the outlier_cleaner from another GitHub user
# -

### load up some practice data with outliers in it
ages = pickle.load( open("practice_outliers_ages.pkl", "rb") )
net_worths = pickle.load( open("practice_outliers_net_worths.pkl", "rb") )

### ages and net_worths need to be reshaped into 2D numpy arrays
### second argument of reshape command is a tuple of integers: (n_rows, n_columns)
### by convention, n_rows is the number of data points and n_columns is the number of features
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))

from sklearn.model_selection import train_test_split
# 90/10 train/test split with a fixed seed for reproducibility.
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths, test_size=0.1, random_state=42)

# +
### fill in a regression here!  Name the regression object reg so that
### the plotting code below works, and you can see what your regression looks like

from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(ages_train, net_worths_train)
# -

# ### Slope of Regression with Outliers

print ('The slope for this regression is: ', reg.coef_)

# ### Score of Regression with Outliers
# What is the score you get when using regression to make predictions with the test data?
# FIX: the original read `print ("The score ... : "), reg.score(...)` --
# a statement-level tuple, so only the label was printed and the score was
# computed but silently discarded.
print("The score when using regression to make predictions with the test data : ",
      reg.score(ages_test, net_worths_test))

# Plot the fitted line (if `reg` exists) over the raw scatter.
try:
    plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
    pass
plt.scatter(ages, net_worths)
plt.xlabel("ages")
plt.ylabel("net worths")
plt.show()

predictions = reg.predict(ages_train)

len(predictions)

# +
### identify and remove the most outlier-y points
cleaned_data = []
try:
    predictions = reg.predict(ages_train)
    # this cleaning is based on training-data (`ages_train`)
    cleaned_data = outlierCleaner( predictions, ages_train, net_worths_train )
except NameError:
    print ("your regression object doesn't exist, or isn't name reg")
    print ("can't make predictions to use in identifying outliers")
# -

# with this, 9 values were removed (translated from Dutch)
len(cleaned_data)

# +
### only run this code if cleaned_data is returning data
if len(cleaned_data) > 0:
    ages, net_worths, errors = zip(*cleaned_data)
    ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
    net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))

    ### refit your cleaned data!
    try:
        reg.fit(ages, net_worths)
        plt.plot(ages, reg.predict(ages), color="blue")
    except NameError:
        print ("you don't seem to have regression imported/created,")
        print ("   or else your regression object isn't named reg")
        print ("   either way, only draw the scatter plot of the cleaned data")
    plt.scatter(ages, net_worths)
    plt.xlabel("ages")
    plt.ylabel("net worths")
    plt.show()
else:
    print ("outlierCleaner() is returning an empty list, no refitting to be done")
# -

# ## Slope of Regression without Outliers

print ('The slope for this regression is : ', reg.coef_)

# ## Score After Cleaning

print ("The score using regression to make predictions with the test data : ", reg.score(ages_test,net_worths_test))

# # Enron Outliers

import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit

# Reads in the data (in dictionary form) and converts it into a sklearn-ready numpy array. Since there are two features being extracted from the dictionary ("salary" and "bonus"), the resulting numpy array will be of dimension N x 2, where N is the number of data points and 2 is the number of features. This is perfect input for a scatterplot; we'll use the matplotlib.pyplot module to make that plot.
### read in data dictionary, convert to numpy array
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "rb") )
features = ["salary", "bonus"]
data = featureFormat(data_dict, features)

# convert to numpy array
type(data)

# +
# Scatter every (salary, bonus) point.
for point in data:
    salary = point[0]
    bonus = point[1]
    matplotlib.pyplot.scatter( salary, bonus )

matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()
# -

# To find the value of largest bonus (outlier here)
salary_max, bonus_max = data.max(axis = 0)

bonus_max

salary_max

# Look up which key in the dictionary carries the maximum bonus.
for k in data_dict.keys():
    if bonus_max == data_dict[k]['bonus']:
        print ("Person having maximum bonus : ", k)
        break

# exercise dirk: (translated from Dutch)
for k in data_dict.keys():
    if salary_max == data_dict[k]['salary']:
        print ("Person having maximum salary : ", k)
        break

data_dict['TOTAL']

# So we are getting the __Total__ of that column as an outlier.
# The spreadsheet added up all the data points for us, and we need to take that "point" out.

# ### Removing the outlier
# (This step must have been done earlier but for future reference and study , i have repeated the steps)

data_dict.pop('TOTAL')
features = ["salary", "bonus"]
data = featureFormat(data_dict, features)

# +
# Re-plot without the spreadsheet 'TOTAL' row.
for point in data:
    salary = point[0]
    bonus = point[1]
    matplotlib.pyplot.scatter( salary, bonus )

plt.title('Scatterplot of salary and bonus')
plt.xlabel("Salary")
matplotlib.pyplot.ylabel("Bonus")
matplotlib.pyplot.show()
# -

# ## Observe : There are four more outliers

# +
# Sort rows by bonus (column 1) and keep the five largest bonuses.
data_sort = data[ data[:,1].argsort() ]
outlier_bonus = []
for i in range(-5,0):
    outlier_bonus.append( data_sort[i][1] )
outlier_bonus
# -

data_sort.shape

data_sort[-5:,:]

# ## Identifying the corresponding persons

for k in data_dict.keys():
    if data_dict[k]['bonus'] in outlier_bonus:
        print (" {} had bonus of {} . ".format(k, data_dict[k]['bonus']))

# + jupyter={"outputs_hidden": true}
# -

# Cross-filter: people with salary > $1M AND bonus >= $5M.
# NOTE(review): float(v.get(...)) relies on missing values being the string
# 'NaN' (float('NaN') parses to nan, and nan comparisons are False).
for k,v in data_dict.items():
    salary =float(v.get("salary"))
    bonus =float(v.get("bonus"))
    if(salary>1000000 and bonus>=5000000):
        print(k)
8_Outliers_Mini-Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import kendalltau, rankdata
# +
# initialize parameter settings
parameter_names = ['N','p','K','size_ratio','eta']
parameter_names_display = [r'$n$',r'$p$',r'$K$',r'$\rho$',r'$\eta$'] # for matplotlib
parameter_values = [[1000,5000,10000, 1150], [0.01,0.1,0.02,0.5,1],
                    [2, 3, 5, 4, 10, 20], [1.5, 1, 2],
                    [0, 0.05,0.1,0.15,0.2, 0.25,0.3, 0.35, 0.4, 0.45]]
parameter_dict = dict(zip(parameter_names, parameter_values))
compare_names = ['A','sns','dns','L','L_sym','BNC','BRC','SPONGE','SPONGE_sym','SSSNET']
compare_names_all = []
compare_names_all.extend(compare_names[:-1])
feature_options = ['A_reg']
feat_choice = '_'.join(feature_options)
for feat in feature_options:
    compare_names_all.append(compare_names[-1]+'_'+feat)
# NOTE(review): this assignment discards the per-feature list built just
# above -- presumably intentional to use plain method labels, but verify.
compare_names_all = compare_names
method_str = 'SpectralSSSNET'
figure_markers = ['*','P','<','s','8','+','H','|','D','>','v','^','d']
# Hyper-parameters baked into the result-file names below.
hop=2
tau = 0.5
seed_ratio = 0.1
hidden = 32
lr = 0.01
train_ratio = 0.8
test_ratio = 0.1
alpha = 0
link_sign_loss = 0
link_sign_loss_ratio = 0.1
supervised_loss_ratio = 50
triplet_loss_ratio = 0.1
seeds = [10,20,30,40,50]
# -

def SSBM_comparison_plot(K=2,p=0.1,size_ratio=1.5,N=200, eta=0.1, save=True):
    """Load saved SSBM results, compare ARI rankings against NMI rankings
    via Kendall's tau, and plot ARI (with error bars) while sweeping eta.

    Parameters are the SSBM settings used to locate result files on disk;
    `save=True` also writes the figure (with and without legend) as PDF.
    Returns the per-eta array of Kendall tau values, or None when no
    result file was found at all.
    """
    change_var_ind = -1
    change_parameter = 'eta'
    change_var_values = [0, 0.05,0.1,0.15,0.2, 0.25,0.3,0.35,0.4, 0.45]
    var_name_display = r'$\eta$'
    default_values = [N, p,K,size_ratio,eta] # initialize default values
    # extract results: one row per compared method, one column per eta value
    results_mean = np.zeros([len(compare_names_all),len(change_var_values)])
    results_std = np.zeros([len(compare_names_all),len(change_var_values)])
    nmi_mean = np.zeros([len(compare_names_all),len(change_var_values)])
    nmi_std = np.zeros([len(compare_names_all),len(change_var_values)])
    all_not_found = True
    dir_name = '../0811result_arrays/SSBM/'
    for i, var in enumerate(change_var_values):
        file_not_found = True
        updated_values = default_values.copy()
        updated_values[-1] = var
        N, p,K,size_ratio,eta = updated_values # update parameter settings
        # File names encode all hyper-parameters as 100*value integers.
        param_values = [p,eta,K,N,hop,tau,size_ratio, seed_ratio, alpha, lr, hidden,
                        triplet_loss_ratio, link_sign_loss, link_sign_loss_ratio,
                        supervised_loss_ratio]
        result_save_name = '_'.join([str(int(100*value)) for value in param_values])+'_'+feat_choice+'_'+method_str
        result_save_name += 'seeds' + '_'.join([str(value) for value in np.array(seeds).flatten()])+'.npy'
        file_name = dir_name + 'test'+ result_save_name
        if os.path.exists(file_name):
            res = np.load(file_name)
            nmi = np.load(dir_name + 'test_NMI'+ result_save_name)
            all_not_found = False
            file_not_found = False
        if not file_not_found:
            results_mean[:,i] = np.nanmean(res, axis=0)
            results_std[:,i] = np.nanstd(res, axis=0)
            nmi_mean[:,i] = np.nanmean(nmi, axis=0)
            nmi_std[:,i] = np.nanstd(nmi, axis=0)
        else:
            # Missing file: report it and mark this column as NaN.
            print(result_save_name)
            results_mean[:,i] = np.nan
            results_std[:,i] = np.nan
            nmi_mean[:,i] = np.nan
            nmi_std[:,i] = np.nan
    save_name_base = result_save_name[:-4]+'Change_{}'.format(change_parameter)+'.pdf'
    dir_name = '../comparison_plots/0811results/'
    if os.path.isdir(dir_name) == False:
        try:
            os.makedirs(dir_name)
        except FileExistsError:
            print('Folder exists!')
    if all_not_found:
        # NOTE(review): this returns None; callers do
        # kendalltau_list.extend(SSBM_comparison_plot(...)) which raises
        # TypeError on None.  Returning [] here would be safer -- confirm.
        # print("Plot empty."+save_name)
        return
    # ranking comparison: agreement between ARI-based and NMI-based rankings
    ranking_kendalltau = np.zeros([len(change_var_values)])
    for i in range(len(change_var_values)):
        ranking_kendalltau[i], _ = kendalltau(rankdata(results_mean[:,i]), rankdata(nmi_mean[:,i]))
    # NOTE(review): "KandallTau" is a typo for "KendallTau" in the output.
    print('KandallTau results are {} with mean {:.3f} and standard deviation {:.3f}.'.format(ranking_kendalltau,
          np.nanmean(ranking_kendalltau), np.nanstd(ranking_kendalltau)))
    # plot ARIs, trimming leading/trailing columns whose mean ARI is
    # degenerate (all ~0 or all ~1), keeping a 2-column margin
    results_mean_mean = results_mean.mean(0)
    ind_all = np.arange(len(change_var_values))
    valid_ind = list(set(ind_all[results_mean_mean>0.01]).intersection(set(ind_all[results_mean_mean<0.99])))
    if len(valid_ind):
        start = max(0, min(valid_ind)-2)
        end = min(len(ind_all), max(valid_ind)+2)
        results_mean = results_mean[:,start:end]
        results_std = results_std[:,start:end]
        change_var_values = change_var_values[start:end]
    plt.figure(figsize=[8,6])
    plt.rcParams.update({'font.size': 23.5})
    change_var_values = np.array(change_var_values)
    if change_var_values.max()-change_var_values.min() > 0.3:
        plt.xticks(np.arange(change_var_values.min(),change_var_values.max()+0.1,step=0.1))
    else:
        plt.xticks(np.arange(change_var_values.min(),change_var_values.max()+0.05,step=0.05))
    # Spectral baselines as plain markers, SSSNET variants as dashed lines.
    for j in range(len(compare_names)-1):
        plt.errorbar(change_var_values, results_mean[j], yerr=results_std[j],
                     label=compare_names_all[j],alpha=0.7, fmt=figure_markers[j], ls='None')
    for j in range(len(compare_names)-1,len(compare_names_all)):
        plt.errorbar(change_var_values, results_mean[j], yerr=results_std[j],
                     label=compare_names_all[j],alpha=0.8,
                     fmt=figure_markers[j%len(figure_markers)], ls='--')
    plt.xlabel(var_name_display,fontsize=22)
    # Pull the y-label closer when error bars dip well below zero.
    positive_labelpad = np.sum(results_mean-results_std<-0.1) > 0
    plt.ylabel('ARI',fontsize=22,labelpad=-15*positive_labelpad)
    plt.rcParams.update({'font.size': 12})
    title_name = 'SSBM '
    for i, def_var in enumerate(parameter_names_display):
        if i != (len(parameter_names_display) - 1):
            title_name = title_name + def_var + '={}.'.format(default_values[i])
    plt.title(title_name)
    if save:
        print('Saving figure!')
        save_name = dir_name + 'SSBM_ARI_'+save_name_base
        plt.savefig(save_name,format='pdf')
    plt.legend(loc='best',framealpha=0.0,fontsize=20)
    if save:
        print('Saving figure!')
        save_name = dir_name + 'legend_SSBM_ARI_'+save_name_base
        plt.savefig(save_name,format='pdf')
    plt.show()
    return ranking_kendalltau

# ### Comparison results

kendalltau_list = []
kendalltau_list.extend(SSBM_comparison_plot(K=5,p=0.01,eta=0.05,size_ratio=1.5,N=1000))
kendalltau_list.extend(SSBM_comparison_plot(K=5,p=0.01,eta=0.05,size_ratio=1.5,N=5000))

kendalltau_list.extend(SSBM_comparison_plot(K=5,p=0.01,eta=0.05,size_ratio=1.5,N=10000))

kendalltau_list.extend(SSBM_comparison_plot(K=5,p=0.001,eta=0.05,size_ratio=1.5,N=30000))

# initialize parameter settings for polarized SSBMs
parameter_names = ['total_n','p','num_com','size_ratio','eta']
parameter_names_display = [r'$n$',r'$p$',r'$N_{c}$',r'$\rho$',r'$\eta$'] # for matplotlib
parameter_values = [[1050,5000,10000, 1150], [0.01,0.1,0.02,0.5,1],
                    [2, 3, 5, 4, 10, 20], [1.5, 1, 2],
                    [0, 0.05,0.1,0.15,0.2, 0.25,0.3, 0.35, 0.4, 0.45]]
parameter_dict = dict(zip(parameter_names, parameter_values))


def polarized_comparison_plot(total_n, num_com, K=2, p=0.1, size_ratio=1.5, N=200, eta=0.1, save=True):
    """Polarized-SSBM analogue of SSBM_comparison_plot.

    Loads saved results for a sweep over eta, compares ARI vs NMI method
    rankings with Kendall's tau, and plots ARI with error bars.

    Parameters mirror the generation settings used in the result-file
    names (total_n nodes, num_com conflicting communities, etc.);
    `save=True` also writes the figure (with and without legend) as PDF.

    Returns
    -------
    numpy.ndarray or list
        Per-eta Kendall tau values, or [] when no result file was found
        (so callers' `.extend(...)` is always safe).
    """
    change_parameter = 'eta'
    change_var_values = [0, 0.05,0.1,0.15,0.2, 0.25,0.3,0.35,0.4, 0.45]
    var_name_display = r'$\eta$'
    default_values = [total_n, p, num_com, size_ratio, eta] # initialize default values
    # extract results: one row per compared method, one column per eta value
    results_mean = np.zeros([len(compare_names_all),len(change_var_values)])
    results_std = np.zeros([len(compare_names_all),len(change_var_values)])
    nmi_mean = np.zeros([len(compare_names_all),len(change_var_values)])
    nmi_std = np.zeros([len(compare_names_all),len(change_var_values)])
    all_not_found = True
    dir_name = '../0811result_arrays/polarized/'
    for i, var in enumerate(change_var_values):
        # FIX: `file_not_found` was never initialized inside the loop
        # (unlike in SSBM_comparison_plot), causing a NameError when the
        # very first file is missing and a stale value on later
        # iterations.  Reset it per iteration, matching the SSBM version.
        file_not_found = True
        updated_values = default_values.copy()
        updated_values[-1] = var
        total_n, p, num_com, size_ratio, eta = updated_values # update parameter settings
        # File names encode all hyper-parameters as 100*value integers.
        param_values = [total_n, num_com, p,eta,K,N,hop,tau,size_ratio, seed_ratio,
                        alpha, lr, hidden, triplet_loss_ratio, link_sign_loss,
                        link_sign_loss_ratio, supervised_loss_ratio]
        result_save_name = '_'.join([str(int(100*value)) for value in param_values])+'_'+feat_choice+'_'+method_str
        result_save_name += 'seeds' + '_'.join([str(value) for value in np.array(seeds).flatten()])+'.npy'
        file_name = dir_name + 'test'+ result_save_name
        if os.path.exists(file_name):
            res = np.load(file_name)
            nmi = np.load(dir_name + 'test_NMI'+ result_save_name)
            all_not_found = False
            file_not_found = False
        if not file_not_found:
            results_mean[:,i] = np.nanmean(res, axis=0)
            results_std[:,i] = np.nanstd(res, axis=0)
            nmi_mean[:,i] = np.nanmean(nmi, axis=0)
            nmi_std[:,i] = np.nanstd(nmi, axis=0)
        else:
            # Missing file: report it and mark this column as NaN.
            print(result_save_name)
            results_mean[:,i] = np.nan
            results_std[:,i] = np.nan
            nmi_mean[:,i] = np.nan
            nmi_std[:,i] = np.nan
    save_name_base = result_save_name[:-4]+'Change_{}'.format(change_parameter)+'.pdf'
    dir_name = '../comparison_plots/0811results/'
    if os.path.isdir(dir_name) == False:
        try:
            os.makedirs(dir_name)
        except FileExistsError:
            print('Folder exists!')
    if all_not_found:
        # FIX: returning None made callers' kendalltau_list.extend(...)
        # raise TypeError; an empty list extends to nothing instead.
        return []
    # ranking comparison: agreement between ARI-based and NMI-based rankings
    ranking_kendalltau = np.zeros([len(change_var_values)])
    for i in range(len(change_var_values)):
        ranking_kendalltau[i], _ = kendalltau(rankdata(results_mean[:,i]), rankdata(nmi_mean[:,i]))
    # FIX: corrected the "KandallTau" typo in the report string.
    print('KendallTau results are {} with mean {:.3f} and standard deviation {:.3f}.'.format(ranking_kendalltau,
          np.nanmean(ranking_kendalltau), np.nanstd(ranking_kendalltau)))
    # plot ARIs, trimming leading/trailing columns whose mean ARI is
    # degenerate (all ~0 or all ~1), keeping a 2-column margin
    results_mean_mean = results_mean.mean(0)
    ind_all = np.arange(len(change_var_values))
    valid_ind = list(set(ind_all[results_mean_mean>0.01]).intersection(set(ind_all[results_mean_mean<0.99])))
    if len(valid_ind):
        start = max(0, min(valid_ind)-2)
        end = min(len(ind_all), max(valid_ind)+2)
        results_mean = results_mean[:,start:end]
        results_std = results_std[:,start:end]
        change_var_values = change_var_values[start:end]
    plt.figure(figsize=[8,6])
    plt.rcParams.update({'font.size': 23.5})
    change_var_values = np.array(change_var_values)
    if change_var_values.max()-change_var_values.min() > 0.3:
        plt.xticks(np.arange(change_var_values.min(),change_var_values.max()+0.1,step=0.1))
    else:
        plt.xticks(np.arange(change_var_values.min(),change_var_values.max()+0.05,step=0.05))
    # Spectral baselines as plain markers, SSSNET variants as dashed lines.
    for j in range(len(compare_names)-1):
        plt.errorbar(change_var_values, results_mean[j], yerr=results_std[j],
                     label=compare_names_all[j],alpha=0.7, fmt=figure_markers[j], ls='None')
    for j in range(len(compare_names)-1,len(compare_names_all)):
        plt.errorbar(change_var_values, results_mean[j], yerr=results_std[j],
                     label=compare_names_all[j],alpha=0.8,
                     fmt=figure_markers[j%len(figure_markers)], ls='--')
    plt.xlabel(var_name_display,fontsize=22)
    plt.ylabel('ARI',fontsize=22)
    plt.rcParams.update({'font.size': 12})
    title_name = 'polarized '
    for i, def_var in enumerate(parameter_names_display):
        if i != (len(parameter_names_display) - 1):
            title_name = title_name + def_var + '={}.'.format(default_values[i])
    plt.title(title_name)
    if save:
        print('Saving figure!')
        save_name = dir_name + 'ARI_'+save_name_base
        plt.savefig(save_name,format='pdf')
    plt.legend(loc='best',framealpha=0.0,fontsize=18)
    if save:
        print('Saving figure!')
        save_name = dir_name + 'legend_polarized_ARI_'+save_name_base
        plt.savefig(save_name,format='pdf')
    plt.show()
    return ranking_kendalltau


kendalltau_list.extend(polarized_comparison_plot(total_n=1050, num_com=2,K=2,p=0.1,eta=0.05,size_ratio=1,N=200))

kendalltau_list.extend(polarized_comparison_plot(total_n=5000, num_com=3,K=2,p=0.1,eta=0.05,size_ratio=1.5,N=500))

kendalltau_list.extend(polarized_comparison_plot(total_n=5000, num_com=5,K=2,p=0.1,eta=0.05,size_ratio=1.5,N=500))

kendalltau_list.extend(polarized_comparison_plot(total_n=10000, num_com=2,K=2,p=0.01,eta=0.05,size_ratio=1.5,N=2000))

ranking_kendalltau = np.array(kendalltau_list)
print('KendallTau results are {} with mean {:.3f} and standard deviation {:.3f}.'.format(ranking_kendalltau,
      np.nanmean(ranking_kendalltau), np.nanstd(ranking_kendalltau)))

# ### Summary

# This is a sample notebook to compare results on synthetic data.
result_analysis/synthetic_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Setting Up the Sample Specific Environment
#
# In this section, we will be creating environmental variables specific to this sample.

# ## Get Global Variables

# Pull in the shared helpers (set_key, envPath, ...) from the common folder.
import sys
sys.path.append('../common')
from env_variables import *

# ## Set Environmental Variables Specific to This Sample
#
# In the following sections, we will be creating a VM to act as our IoT Edge device. The following cell will set the type of VM that will be created.
#
# Verify that the VM type is available in your region. You may view this page for a full list of [VMs by region](https://azure.microsoft.com/en-us/global-infrastructure/services/?regions=non-regional,us-east,us-east-2,us-central,us-north-central,us-south-central,us-west-central,us-west,us-west-2&products=virtual-machines).
#
# For this sample, we will be using a Standard_DS3_v2 (CPU tier) VM.

vm_type = "Standard_DS3_v2" #CPU tier VM
# set_key persists the value into the shared .env file at envPath.
tempVar = set_key(envPath, "VM_TYPE", vm_type)

# The following cell will set a sample folder path absolute to the root folder of the repository.

lvaSamplePath = "utilities/video-analysis/notebooks/customvision"
tempVar = set_key(envPath, "LVA_SAMPLE_PATH", lvaSamplePath)

# In later sections, we will be creating a Docker container image for our inference solution. The following cell will set the name of the Docker image to be used later.

containerImageName = "lvaextension:customvision"
tempVar = set_key(envPath, "CONTAINER_IMAGE_NAME", containerImageName)

# The following cell will set the folder to which debug files will be outputted in the IoT Edge device. The default location for debug files is `/tmp` folder in your IoT Edge device. If you want debug files to be sent elsewhere, you can change the value of the `debugOutputFolder` variable below.

debugOutputFolder = "/tmp"
tempVar = set_key(envPath, "DEBUG_OUTPUT_FOLDER", debugOutputFolder)

# The following cell will set the name of the media graph file to be used in this sample. We provide a variety of sample media graph files in the **live-video-analytics/MediaGraph** folder. To learn more about media graphs, [read our documentation here](https://docs.microsoft.com/en-us/azure/media-services/live-video-analytics-edge/media-graph-concept).

topologyFile = "motion-with-httpExtension/2.0/topology.json"
tempVar = set_key(envPath, "TOPOLOGY_FILE", topologyFile)

# The following cell will extract the name of our sample media graph topology file.

# +
import json
import os.path

# Read the topology JSON shipped with the repo and record its "name" field.
with open(os.path.join("../../../../MediaGraph/topologies/", topologyFile)) as f:
    data = json.load(f)
    topologyName = data["name"]
    tempVar = set_key(envPath, "TOPOLOGY_NAME", topologyName)
# -

# The following cell will set the name of the media graph instance that will be used in later sections. With LVA, you may set more than one topology instance, so be sure to give each instance a unique name.

graphInstanceName = "Sample-Graph-Instance"
tempVar = set_key(envPath, "GRAPH_INSTANCE_NAME", graphInstanceName)

# The following cell will set the media graph parameters specific to this sample.

# +
# Address of the RTSP camera stream source
rtspUrl = "rtsp://rtspsim:554/media/truck_video.mkv"

# Sensitivity of the motion detector low|medium|high
motionSensitivity = "medium"

# The address of the inference server
httpAIServerAddress = "http://lvaExtension/score"

# Name associated to the sink output
hubSinkOutputName = "inferences"

# Image file formats. Supported formats are jpeg, bmp and png
imageEncoding = "jpeg"

# preserveAspectRatio | pad
imageScaleMode = "pad"
# -

# The following cell will create parameters in JSon format to be used while deploying the media graph.

# +
# Assemble the graph-instance payload expected by the LVA 2.0 API and
# persist it for the deployment notebooks.
mediaGraphTopologyParameters = {
    "@apiVersion": "2.0",
    "name": graphInstanceName,
    "properties": {
        "topologyName": topologyName,
        "parameters": [
            {
                "name": "rtspUrl",
                "value": rtspUrl
            },
            {
                "name": "motionSensitivity",
                "value": motionSensitivity
            },
            {
                "name": "httpAIServerAddress",
                "value": httpAIServerAddress
            },
            {
                "name": "hubSinkOutputName",
                "value": hubSinkOutputName
            },
            {
                "name": "imageEncoding",
                "value": imageEncoding
            },
            {
                "name": "imageScaleMode",
                "value": imageScaleMode
            }
        ]
    }
}

with open("../common/.media_graph_topology_parameters.json", "w") as f:
    json.dump(mediaGraphTopologyParameters, f, indent=4)
utilities/video-analysis/notebooks/customvision/setup_specific_environment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import tensorflow as tf
from cnn_train import cnn_graph
from captcha_gen import gen_captcha_text_and_image
from util import vec2text, convert2gray
from util import CAPTCHA_LIST, CAPTCHA_WIDTH, CAPTCHA_HEIGHT, CAPTCHA_LEN
import matplotlib.pyplot as plt


def captcha2text(image_list, height=CAPTCHA_HEIGHT, width=CAPTCHA_WIDTH):
    '''
    Convert captcha images to their predicted text labels.
    NOTE: uses TF1-style placeholders/sessions (tf.placeholder/tf.Session),
    so this requires TensorFlow 1.x (or tf.compat.v1).
    :param image_list: list of flattened grayscale images, each of length height*width
    :param height: captcha image height in pixels
    :param width: captcha image width in pixels
    :return: list of predicted text strings, one per input image
    '''
    x = tf.placeholder(tf.float32, [None, height * width])
    keep_prob = tf.placeholder(tf.float32)
    y_conv = cnn_graph(x, keep_prob, (height, width))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # BUG FIX: the original called
        #   saver.restore(sess, saver.restore(sess, tf.train.latest_checkpoint('.')))
        # — the inner restore() returns None, so the outer call received None
        # as the checkpoint path and crashed. Restore exactly once.
        saver.restore(sess, tf.train.latest_checkpoint('.'))
        # Reshape logits to (batch, CAPTCHA_LEN, alphabet) and take the argmax
        # over the alphabet axis to get one character index per position.
        predict = tf.argmax(tf.reshape(y_conv, [-1, CAPTCHA_LEN, len(CAPTCHA_LIST)]), 2)
        vector_list = sess.run(predict, feed_dict={x: image_list, keep_prob: 1})
        vector_list = vector_list.tolist()
        text_list = [vec2text(vector) for vector in vector_list]
        return text_list


if __name__ == '__main__':
    # Generate one random captcha, show it in color then in grayscale.
    text, image = gen_captcha_text_and_image()
    plt.figure('color')
    plt.imshow(image, cmap='gray')
    plt.axis('off')
    plt.show()
    image = convert2gray(image)
    #image = image.flatten() / 255
    plt.imshow(image, cmap='gray')
    plt.axis('off')
    plt.show()
    #pre_text = captcha2text([image])
    #print('Label:', text, ' Predict:', pre_text)
# -

# NOTE(review): the original notebook contained a second, byte-identical copy
# of the cell above (same imports, same function, same __main__ block); the
# duplicate has been removed.

print(tf.__version__)

import captcha
print(captcha.__version__)
AI/exp/04_Captcha/draft/cnn_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chrismarkella/California-Housing-Prices/blob/master/Requirements.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

import sys
sys.version

# Keep only the "X.Y.Z" part of sys.version (it is followed by build info).
version = sys.version.split(' ')[0]
version

version.split('.')


def _version_tuple(version_string):
    """Parse the leading numeric components of a dotted version string.

    Returns a tuple of ints suitable for comparison, e.g. '3.8.3' -> (3, 8, 3).
    Stops at the first component that does not start with a digit, so
    pre-release suffixes such as '1.0rc1' do not raise.
    """
    components = []
    for piece in version_string.split('.'):
        digits = ''
        for ch in piece:
            if ch.isdigit():
                digits += ch
            else:
                break
        if not digits:
            break
        components.append(int(digits))
    return tuple(components)


tuple(int(_) for _ in version.split('.'))

condition = _version_tuple(version) >= (3, 5, 10)
condition

# Message fixed: typo "verison" corrected and the stated minimum now matches
# the (3, 5, 10) check above.
assert condition, 'Python version is too low. Required version is at least 3.5.10'

condition2 = _version_tuple(version) >= (3, 7)
print(f'condition2: {condition2}')

assert condition2, 'Python version is too low. Required version is at least 3.7'

import sklearn
sklearn.__version__

# BUG FIX: the original compared version *strings*
# (sklearn.__version__ >= '0.20'), which orders lexically — e.g. '0.9' sorts
# after '0.20' even though 0.20 is the newer release and '0.100' would sort
# before '0.20'. Compare numeric tuples instead.
assert _version_tuple(sklearn.__version__) >= (0, 20), 'Sklearn version is too low. Required version is at least 0.20'
Requirements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The Basics # NumPy’s main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of non-negative integers. In NumPy dimensions are called axes. # # For example, the coordinates of a point in 3D space `[1, 2, 1]` has one axis. That axis has 3 elements in it, so we say it has a length of 3. In the example pictured below, the array has 2 axes. The first axis has a length of 2, the second axis has a length of 3. [[ 1., 0., 0.], [ 0., 1., 2.]] # NumPy’s array class is called `ndarray`. It is also known by the alias `array`. Note that `numpy.array` is not the same as the Standard Python Library class `array.array`, which only handles one-dimensional arrays and offers less functionality. The more important attributes of an `ndarray` object are: import numpy as np # + my_array = np.arange(15).reshape(3, 5) print(my_array) # - print(type(my_array)) # # ndarray.ndim # the number of axes (dimensions) of the array. print(my_array.ndim) # # ndarray.shape # the dimensions of the array. This is a tuple of integers indicating the size of the array in each dimension. For a matrix with n rows and m columns, `shape` will be `(n,m)`. The length of the `shape` tuple is therefore the number of axes, `ndim`. print(my_array.shape) # # ndarray.size # the total number of elements of the array. This is equal to the product of the elements of `shape`. print(my_array.size) # # ndarray.dtype # an object describing the type of the elements in the array. One can create or specify dtype’s using standard Python types. Additionally NumPy provides types of its own. numpy.int32, numpy.int16, and numpy.float64 are some examples. 
print(my_array.dtype.name) # # ndarray.itemsize # the size in bytes of each element of the array. For example, an array of elements of type `float64` has `itemsize` 8 (=64/8), while one of type `complex32` has `itemsize` 4 (=32/8). It is equivalent to `ndarray.dtype.itemsize`. print(my_array.itemsize) # # ndarray.data # the buffer containing the actual elements of the array. Normally, we won’t need to use this attribute because we will access the elements in an array using indexing facilities. print(my_array.data) # # Array Creation # There are several ways to create arrays. # # For example, you can create an array from a regular Python list or tuple using the `array` function. The type of the resulting array is deduced from the type of the elements in the sequences. # + my_int_array = np.array([2,3,4]) print(my_int_array) print(my_int_array.dtype) # + my_float_array = np.array([1.2, 3.5, 5.1]) print(my_float_array) print(my_float_array.dtype) # - # > A frequent error consists in calling `array` with multiple arguments, rather than providing a single sequence as an argument. # + # wrong_array = np.array(1,2,3,4) # WRONG right_array = np.array([1,2,3,4]) # RIGHT # - # > `array` transforms sequences of sequences into two-dimensional arrays, sequences of sequences of sequences into three-dimensional arrays, and so on. # + my_array = np.array([(1.5,2,3), (4,5,6)]) print(my_array) # - # > The type of the array can also be explicitly specified at creation time: # + my_array = np.array( [ [1,2], [3,4] ], dtype=complex ) print(my_array) print(my_array.dtype) # - # ## np.zeros(), np.ones(), np.empty() # Often, the elements of an array are originally unknown, but its size is known. Hence, NumPy offers several functions to create arrays with initial placeholder content. These minimize the necessity of growing arrays, an expensive operation. 
# # > The function `zeros` creates an array full of zeros, the function `ones` creates an array full of ones, and the function `empty` creates an array whose initial content is random and depends on the state of the memory. By default, the dtype of the created array is float64. # + my_zeros_array = np.zeros((3, 4)) print(my_zeros_array) # + my_ones_array = np.ones((2,3,4), dtype=np.int16) # dtype can also be specified print(my_ones_array) # + my_empty_array = np.empty((2,3)) # uninitialized print(my_empty_array) # - # ## np.arange() # To create sequences of numbers, NumPy provides the `arange` function which is analogous to the Python built-in `range`, but returns an array. # + my_array = np.arange(10, 30) print(my_array) # + my_array = np.arange(0, 2, 0.3) # it accepts float arguments print(my_array) # - # ## np.linspace() # When `arange` is used with floating point arguments, it is generally not possible to predict the number of elements obtained, due to the finite floating point precision. For this reason, it is usually better to use the function `linspace` that receives as an argument the number of elements that we want, instead of the step: my_array = np.linspace(0, 2, 9) # 9 numbers from 0 to 2 np.linspace() print(my_array) # + my_array = np.linspace( 0, 2*np.pi, 100 ) # useful to evaluate function at lots of points f = np.sin(my_array) print(f) # - # # Printing Arrays # When you print an array, NumPy displays it in a similar way to nested lists, but with the following layout: # # * the last axis is printed from left to right, # # * the second-to-last is printed from top to bottom, # # * the rest are also printed from top to bottom, with each slice separated from the next by an empty line. # # One-dimensional arrays are then printed as rows, bidimensionals as matrices and tridimensionals as lists of matrices. 
# + my_array = np.arange(6) # 1d array print(my_array) # + my_array = np.arange(12).reshape(4,3) # 2d array print(my_array) # + my_array = np.arange(24).reshape(2,3,4) # 3d array print(my_array) # - # > If an array is too large to be printed, NumPy automatically skips the central part of the array and only prints the corners: # + my_array = np.arange(10000).reshape(100,100) print(my_array) # - # > To disable this behaviour and force NumPy to print the entire array, you can change the printing options using `set_printoptions`. # + import sys np.set_printoptions(threshold=sys.maxsize) # sys module should be imported my_array = np.arange(10000).reshape(100,100) print(my_array) # - # # Basic Operations # Arithmetic operators on arrays apply elementwise. A new array is created and filled with the result. # + my_array00 = np.array([[20,30,40,50], [60,70,80,90]]) my_array01 = np.arange(1,9).reshape((2,4)) print(my_array00, my_array01, sep='\n\n') # + sum_result = my_array00 + my_array01 print('sum is:', sum_result,'\n', sep='\n') # + minus_result = my_array00 - my_array01 print('minus is:', minus_result, sep='\n') # + double_result = my_array00 * 2 print('double is:', double_result,'\n', sep='\n') # + sin_result = np.sin(my_array00) print('sin is:', sin_result,'\n', sep='\n') # + comparison_result = my_array00 > 30 print('comparsion is:', comparison_result, sep='\n') # - # > Unlike in many matrix languages, the product operator `*` operates elementwise in NumPy arrays. 
The matrix product can be performed using the `@` operator (in python >=3.5) or the `dot` function or method: # + my_array00 = np.array([[1,1], [0,1]]) my_array01 = np.array([[2,0], [3,4]]) # + elementwise_product_result = my_array00 * my_array01 # elementwise product print('elementwise product:', elementwise_product_result,'\n', sep='\n') # + matrix_product_result00 = my_array00 @ my_array01 # matrix product print('matrix product:', matrix_product_result00,'\n', sep='\n') # + matrix_product_result01 = my_array00.dot(my_array01) # another matrix product print('matrix product:', matrix_product_result01, sep='\n') # - # > Some operations, such as `+=` and `*=`, act in place to modify an existing array rather than create a new one. # + my_array00 = np.ones((2,3), dtype=float) my_array01 = np.random.randint(10,size=(2,3)) print('my_array00 dtype is: ',my_array00.dtype,'\n') print('my_array01 dtype is: ',my_array01.dtype,'\n') print(my_array00,my_array01, sep='\n\n') # + my_array00 += my_array01 print('\nmy_array00 is:', my_array00, sep='\n') # - # > When operating with arrays of different types, the type of the resulting array corresponds to the more general or precise one (a behavior known as upcasting). # + my_array01 += my_array00 # b is not automatically converted to integer type print('my_array01 is:', my_array00, sep='\n') # - # ## unary operations # Many unary operations, such as computing the sum of all the elements in the array, are implemented as methods of the `ndarray` class. 
# + my_array = np.random.random((2,3)) print(my_array) # + sum_of_array_elements = my_array.sum() print('sum of array elements is {}'.format(sum_of_array_elements)) # + min_of_array_elements = my_array.min() print('min of array elements is {}'.format(min_of_array_elements)) # + max_of_array_elements = my_array.max() print('max of array elements is {}'.format(max_of_array_elements)) # - # > By default, these operations apply to the array as though it were a list of numbers, regardless of its shape. However, by specifying the axis parameter you can apply an operation along the specified `axis` of an array: # + my_array = np.arange(12).reshape(3,4) print(my_array) # + columns_sum = my_array.sum(axis=0) # sum of each column print('columns sum is: {}'.format(columns_sum)) # + row_cumulative_sum = my_array.cumsum(axis=1) # cumulative sum along each row print('cumulative sum along each row:\n {}'.format(row_cumulative_sum)) # - # # Universal Functions # NumPy provides familiar mathematical functions such as sin, cos, and exp. In NumPy, these are called “universal functions”(`ufunc`). Within NumPy, these functions operate elementwise on an array, producing an array as output. # + my_array00 = np.arange(5) print('array00 is: ', my_array00,'\n') my_array01 = np.array([2., -1., 4., 3., 6.]) print('array01 is: ', my_array01,'\n') # + my_array_exp = np.exp(my_array00) print('array exp is: ', my_array_exp,'\n') # + my_array_sqrt = np.sqrt(my_array00) print('array sqrt is: ', my_array_sqrt,'\n') # + array_add = np.add(my_array00, my_array01) print('sum of arrays is: ', array_add,'\n') # - # # Indexing, Slicing and Iterating # # **One-dimensional** arrays can be indexed, sliced and iterated over, much like lists and other Python sequences. 
# + my_array = np.arange(10)**3 print(my_array) # - print(my_array[3]) print(my_array[2:5]) # + my_array[:6:2] = 1000 print(my_array) # - print(my_array[::-1]) for i in my_array: print(i) # **Multidimensional** arrays can have one index per axis. These indices are given in a tuple separated by commas: def f(x,y): return 10*x+y # + my_array = np.fromfunction(f,(5,4),dtype=int) print(my_array) # - print(my_array[0,2]) print(my_array[0:5, 1]) # each row in the second column of b print(my_array[ : ,1]) # equivalent to the previous example print(my_array[1:3, : ]) # each column in the second and third row of b # > When fewer indices are provided than the number of axes, the missing indices are considered complete slices: print(my_array[-1]) # the last row. Equivalent to b[-1,:] # The expression within brackets in `b[i]` is treated as an `i` followed by as many instances of `:` as needed to represent the remaining axes. NumPy also allows you to write this using dots as `b[i,...]`. # # The dots (`...`) represent as many colons as needed to produce a complete indexing tuple. For example, if x is an array with 5 axes, then # # * `x[1,2,...]` is equivalent to `x[1,2,:,:,:]`, # # * `x[...,3]` to `x[:,:,:,:,3]` and # # * `x[4,...,5,:]` to `x[4,:,:,5,:]`. 
# + my_array = np.array( [[[ 0, 1, 2], # a 3D array (two stacked 2D arrays) [ 10, 12, 13]], [[100,101,102], [110,112,113]]]) print(my_array) # - print(my_array.shape) print(my_array[1,...]) # same as c[1,:,:] or c[1] print(my_array[...,2]) # same as c[:,:,2] # **Iterating** over multidimensional arrays is done with respect to the first axis: my_array = np.arange(12).reshape(3,4) for row in my_array: print(row) # However, if one wants to perform an operation on each element in the array, one can use the `flat` attribute which is an iterator over all the elements of the array: for i in my_array.flat: print(i) # # Shape Manipulation # ## Changing the shape of an array # An array has a shape given by the number of elements along each axis: # + my_array = np.arange(12).reshape(3,4) print(my_array) # - print(my_array.shape) # The shape of an array can be changed with various commands. Note that the following three commands all return a modified array, but do not change the original array: # + my_flated_array = my_array.ravel() # returns the array, flattened print(my_flated_array) # + my_reshaped_array = my_array.reshape(6,2) # returns the array with a modified shape print(my_reshaped_array) # + my_transposed_array = my_array.T # returns the array, transposed print(my_transposed_array) # - print('shape of orginal array is: ',my_array.shape) print('shape of transposed array is: ',my_transposed_array.shape) # The order of the elements in the array resulting from ravel() is normally “C-style”, that is, the rightmost index “changes the fastest”, so the element after a[0,0] is a[0,1]. If the array is reshaped to some other shape, again the array is treated as “C-style”. NumPy normally creates arrays stored in this order, so ravel() will usually not need to copy its argument, but if the array was made by taking slices of another array or created with unusual options, it may need to be copied. 
# The functions ravel() and reshape() can also be instructed, using an
# optional argument, to use FORTRAN-style arrays, in which the leftmost index
# changes the fastest.

# The `reshape` function returns its argument with a modified shape, whereas
# the `ndarray.resize` method modifies the array itself:

# +
my_array = np.arange(12).reshape(3,4)
print(my_array)
# -

# If a dimension is given as -1 in a reshaping operation, the other dimensions
# are automatically calculated:

# +
my_array = my_array.reshape(4,-1)
print(my_array)

# +
# reshape returns a *new* array and the result is discarded here, so
# my_array still has shape (4, 3) when printed.
my_array.reshape(2,6)
print(my_array)

# +
# resize, by contrast, modifies my_array in place.
my_array.resize(2,6)
print(my_array)
# -

# # Stacking together different arrays
# Several arrays can be stacked together along different axes:

# +
my_array00 = np.arange(0,4).reshape(2,2)
print(my_array00)

# +
my_array01 = np.arange(4,8).reshape(2,2)
print(my_array01)

# +
vstacked_array = np.vstack((my_array00,my_array01))
print(vstacked_array)

# +
hstacked_array = np.hstack((my_array00,my_array01))
print(hstacked_array)
# -

# # Copies and Views
# When operating and manipulating arrays, their data is sometimes copied into
# a new array and sometimes not. This is often a source of confusion for
# beginners. There are three cases:

# ## No Copy at All
# Simple assignments make no copy of objects or their data.

my_array00 = np.array([[ 0, 1, 2, 3],
                       [ 4, 5, 6, 7],
                       [ 8, 9, 10, 11]])
my_array01 = my_array00 # no new object is created

print(my_array01 is my_array00) # True: my_array00 and my_array01 are two names for the same ndarray object

# ## View or Shallow Copy
# Different array objects can share the same data. The `view` method creates a
# new array object that looks at the same data.

# +
my_array02 = my_array00.view()
print(my_array02)
# -

print(my_array02 is my_array00) # False: the view is a new array object, not another name for my_array00

my_array02.base is my_array00 # True: my_array02 is a view of the data owned by my_array00

print(my_array02.flags.owndata) # False: the view does not own its data

# +
my_array02 = my_array02.reshape((2, 6)) # my_array00's shape doesn't change
print('shape of my_array00 is: ', my_array00.shape)
print('shape of my_array02 is: ', my_array02.shape)

# +
# Writing through my_array00 is visible through the view — the data is shared.
my_array00[0,0] = 1000
print(my_array02)
# -

# ## Deep Copy
# The copy method makes a complete copy of the array and its data.

# +
my_array03 = my_array00.copy()
print(my_array03)
# -

print(my_array03 is my_array00) # False: copy() returns a brand-new ndarray object

my_array03.base is my_array00 # False: a deep copy does not share data with my_array00

# +
# Modifying the copy leaves the original untouched.
my_array03[0,0] = 0
print(my_array00,'\n')
print(my_array03)
# -

# Sometimes copy should be called after slicing if the original array is not
# required anymore. For example, suppose a is a huge intermediate result and
# the final result b only contains a small fraction of a, a deep copy should
# be made when constructing b with slicing:

my_array00 = np.arange(int(1e8))
my_array01 = my_array00[:100].copy()
del my_array00 # the memory of ``my_array00`` can be released.

# > If b = a[:100] is used instead, a is referenced by b and will persist in
# memory even if del a is executed.

# https://github.com/njiix/py4ds
Week04/Numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Get country icons

import requests
from bs4 import BeautifulSoup

# Scrape the country-code table; each <td> cell holds a code/name/icon entry.
url = 'https://www.nationsonline.org/oneworld/country_code_list.htm'
page = requests.get(url)
bsoup = BeautifulSoup(page.content, 'html5lib')
bsoup.find_all('td')

# +
# BUG FIX: the original cell used the undefined name `request` (instead of
# `requests`), contained a syntax error in the payload ("'topn': 5 .,"), gave
# the URL without a scheme, and left a second payload dict as a dangling
# expression. The two fragments are merged into one well-formed request.
#
# NOTE(review): 'wbes2474' looks like an internal host name — confirm the
# scheme/port before running. The payload contains a nested dict
# ('topic_percentage'), which cannot be form-encoded via `data=`, so it is
# sent as JSON; confirm the API expects a JSON body.
payload = {
    'model_id': 'WB',
    'corpus_id': 'ALL_50',
    'topn': 5,
    'topic_percentage': {0: 0.25, 8: 0.1},
}
requests.post('http://wbes2474:8910/api/lda_docs_by_topic_composition', json=payload)
notebooks/archive/SCRIPTS/countries/country_metadata.ipynb