markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
**LSTM for Regression Using the Window Method**
# Window-method LSTM: each sample is `look_back` consecutive observations,
# presented as `look_back` features observed at a single time step.
dataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')

# Rescale into [0, 1] so the LSTM trains stably.
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

# Chronological split: first 67% train, remainder test.
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

# Supervised framing: X = values t..t+look_back-1, Y = value at t+look_back.
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# Keras LSTMs expect [samples, time steps, features]; here the window is
# one time step with look_back features.
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

# A single 4-unit LSTM layer feeding one linear output neuron.
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0)

# Predict on both splits.
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

# Undo the 0-1 scaling so errors are in passenger units.
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# Report RMSE on each split.
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))

# Align train predictions with their position in the original series.
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict

# Align test predictions likewise, offset past the training window.
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

# Plot the true series against both prediction overlays.
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
**LSTM for Regression with Time Steps**
# Time-step LSTM: the same 3-value window, but presented as 3 time steps
# of a single feature instead of 3 features at one step.
dataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')

# Scale to [0, 1].
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

# Chronological 67/33 split.
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

# Supervised framing: predict t+1 from the previous look_back values.
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# [samples, time steps, features] with look_back steps of 1 feature.
trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))

# Single 4-unit LSTM plus a linear readout.
model = Sequential()
model.add(LSTM(4, input_shape=(look_back, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0)

# Predict, then undo the scaling so scores are in passenger units.
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# RMSE per split.
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))

# Overlay arrays aligned with the original series for plotting.
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

# Baseline vs. predictions.
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
**LSTM with Memory Between Batches**
# Stateful LSTM: the cell state persists across batches within a pass and
# is cleared manually between passes, giving the network memory over the
# whole (unshuffled) training sequence.
dataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')

# Scale to [0, 1].
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

# Chronological 67/33 split.
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

# X = look_back consecutive values, Y = the next value.
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# [samples, time steps, features].
trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))

# Stateful layers need a fixed batch size baked into the input shape.
batch_size = 1
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# One epoch at a time so state can be reset between passes; shuffle=False
# keeps samples in temporal order.
for i in range(100):
    model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
    model.reset_states()

# Predict (state reset between the two splits), then invert the scaling.
trainPredict = model.predict(trainX, batch_size=batch_size)
model.reset_states()
testPredict = model.predict(testX, batch_size=batch_size)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# RMSE per split.
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))

# Overlays aligned with the original series.
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

# Baseline vs. predictions.
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
**Stacked LSTMs with Memory Between Batches**
# Two stacked stateful LSTMs; the first returns its full sequence so the
# second has a time dimension to consume.
dataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')

# Scale to [0, 1].
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

# Chronological 67/33 split.
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

# X = look_back consecutive values, Y = the next value.
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# [samples, time steps, features].
trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))

# Stateful layers require a fixed batch size in the input shape.
batch_size = 1
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# Manual epoch loop (with a tqdm progress bar) so state can be reset
# between passes; shuffle=False preserves temporal order.
for i in tqdm(range(100)):
    model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
    model.reset_states()

# Predict (state reset between the two splits), then invert the scaling.
trainPredict = model.predict(trainX, batch_size=batch_size)
model.reset_states()
testPredict = model.predict(testX, batch_size=batch_size)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# RMSE per split.
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))

# Overlays aligned with the original series.
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

# Baseline vs. predictions.
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
**Time series prediction of TESLA closing stock price**
# Importing libraries import numpy import matplotlib.pyplot as plt from pandas import read_csv import math from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error ! pip install nsepy from nsepy import get_history from datetime import date from tqdm import tqdm # convert an array of values into a dataset matrix def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back), 0] dataX.append(a) dataY.append(dataset[i + look_back, 0]) return numpy.array(dataX), numpy.array(dataY) # load the dataset dataframe = pandas.read_csv('TSLA.csv', usecols=[4], engine='python') dataset = dataframe.values dataset = dataset.astype('float32') # normalize the dataset scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) # split into train and test sets train_size = int(len(dataset) * 0.67) test_size = len(dataset) - train_size train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] # reshape into X=t and Y=t+1 look_back = 3 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # reshape input to be [samples, time steps, features] trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1)) # create and fit the LSTM network batch_size = 1 model = Sequential() model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True)) model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True)) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') for i in tqdm(range(300)): model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=0, shuffle=False) model.reset_states() # make predictions trainPredict = model.predict(trainX, 
batch_size=batch_size) model.reset_states() testPredict = model.predict(testX, batch_size=batch_size) # invert predictions trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) # calculate root mean squared error trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0])) print('Test Score: %.2f RMSE' % (testScore)) # shift train predictions for plotting trainPredictPlot = numpy.empty_like(dataset) trainPredictPlot[:, :] = numpy.nan trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict # shift test predictions for plotting testPredictPlot = numpy.empty_like(dataset) testPredictPlot[:, :] = numpy.nan testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict # plot baseline and predictions plt.plot(scaler.inverse_transform(dataset)) plt.plot(trainPredictPlot) plt.plot(testPredictPlot) plt.show()
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
The YUSAG Football Model by Matt Robinson, matthew.robinson@yale.edu, Yale Undergraduate Sports Analytics Group This notebook introduces the model we at the Yale Undergraduate Sports Analytics Group (YUSAG) use for our college football rankings. This specific notebook details our FBS rankings at the beginning of the 2017 season.
import numpy as np import pandas as pd import math
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Let's start by reading in the NCAA FBS football data from 2013-2016:
# Read the four FBS result files (2013-2016) and stack them into one frame.
df_1 = pd.read_csv('NCAA_FBS_Results_2013_.csv')
df_2 = pd.read_csv('NCAA_FBS_Results_2014_.csv')
df_3 = pd.read_csv('NCAA_FBS_Results_2015_.csv')
df_4 = pd.read_csv('NCAA_FBS_Results_2016_.csv')
df = pd.concat([df_1, df_2, df_3, df_4], ignore_index=True)
df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
As you can see, the `OT` column has some `NaN` values that we will replace with 0.
# Replace the NaNs (notably in the OT column for non-overtime games) with 0.
df = df.fillna(0)
df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
I'm also going to make some weights for when we run our linear regression. I have found that using the factorial of the difference between the year and 2012 seems to work decently well. Clearly, the most recent seasons are weighted quite heavily in this scheme.
# Recency weights: factorial(year - 2012), so recent seasons dominate the
# weighted regression (2013 -> 1, 2014 -> 2, 2015 -> 6, 2016 -> 24).
df['weights'] = (df['year'] - 2012).apply(math.factorial)
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
And now, we also are going to make a `scorediff` column that we can use in our linear regression.
# Regression target: margin of victory from the team's perspective.
df['scorediff'] = df['teamscore'] - df['oppscore']
df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Since we need numerical values for the linear regression algorithm, I am going to replace the locations with what seem like reasonable numbers:* Visiting = -1* Neutral = 0* Home = 1The reason we picked these exact numbers will become clearer in a little bit.
# Encode game location numerically: visiting -1, neutral 0, home +1.
df['location'] = df['location'].replace({'V': -1, 'N': 0, 'H': 1})
df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
The way our linear regression model works is a little tricky to code up in scikit-learn. It's much easier to do in R, but then you don't have a full understanding of what's happening when we make the model.In simplest terms, our model predicts the score differential (`scorediff`) of each game based on three things: the strength of the `team`, the strength of the `opponent`, and the `location`.You'll notice that the `team` and `opponent` features are categorical, and thus are not currently ripe for use with linear regression. However, we can use what is called 'one hot encoding' in order to transform these features into a usable form. One hot encoding works by taking the `team` feature, for example, and transforming it into many features such as `team_Yale` and `team_Harvard`. This `team_Yale` feature will usually equal zero, except when the team is actually Yale, then `team_Yale` will equal 1. In this way, it's a binary encoding (which is actually very useful for us as we'll see later).One can use `sklearn.preprocessing.OneHotEncoder` for this task, but I am going to use Pandas instead:
# One-hot encode the categorical team/opponent columns with pandas dummies
# (R's lm() does this automatically; in python we do it explicitly).
team_dummies = pd.get_dummies(df.team, prefix='team')
opponent_dummies = pd.get_dummies(df.opponent, prefix='opponent')
df = pd.concat([df, team_dummies, opponent_dummies], axis=1)
df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Now let's make our training data, so that we can construct the model. At this point, I am going to use all the available data to train the model, using our predetermined hyperparameters. This way, the model is ready to make predictions for the 2017 season.
# Features: location plus the one-hot team/opponent columns; drop the
# identifiers, raw scores, weights and the target itself.
X = df.drop(['year','month','day','team','opponent','teamscore','oppscore','D1','OT','weights','scorediff'], axis=1)
y = df['scorediff']
weights = df['weights']
X.head()
y.head()
weights.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Now let's train the linear regression model. You'll notice that I'm actually using ridge regression (adds an l2 penalty with alpha = 1.0) because that prevents the model from overfitting and also limits the values of the coefficients to be more interpretable. If I did not add this penalty, the coefficients would be huge.
# Ridge (l2-penalised) regression keeps the per-team coefficients bounded
# and interpretable; fit with the recency weights computed earlier.
from sklearn.linear_model import Ridge

ridge_reg = Ridge()
ridge_reg.fit(X, y, sample_weight=weights)

# Training-set R^2.
r_squared = ridge_reg.score(X, y, sample_weight=weights)
print('R^2 on the training data:')
print(r_squared)
R^2 on the training data: 0.495412735743
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Now that the model is trained, we can use it to provide our rankings. Note that in this model, a team's ranking is simply defined as its linear regression coefficient, which we call the YUSAG coefficient. When predicting a game's score differential on a neutral field, the predicted score differential (`scorediff`) is just the difference in YUSAG coefficients. The reason this works is the binary encoding we did earlier. More details below on how it actually worksOk, so you may have noticed that every game in our dataframe is actually duplicated, just with the `team` and `opponent` variables switched. This may have seemed like a mistake but it is actually useful for making the model more interpretable. When we run the model, we get a coefficient for the `team_Yale` variable, which we call the YUSAG coefficient, and a coefficient for the `opponent_Yale` variable. Since we allow every game to be repeated, these variables end up just being negatives of each other. So let's think about what we are doing when we predict the score differential for the Harvard-Penn game with `team` = Harvard and `opponent` = Penn.In our model, the coefficients are as follows:- team_Harvard_coef = 7.78- opponent_Harvard_coef = -7.78- team_Penn_coef = 6.68- opponent_Penn_coef = -6.68when we go to use the model for this game, it looks like this:`scorediff` = (location_coef $*$ `location`) + (team_Harvard_coef $*$ `team_Harvard`) + (opponent_Harvard_coef $*$ `opponent_Harvard`) + (team_Penn_coef $*$ `team_Penn`) + (opponent_Penn_coef $*$ `opponent_Penn`) + (team_Yale_coef $*$ `team_Yale`) + (opponent_Yale_coef $*$ `opponent_Yale`) + $\cdots$where the $\cdots$ represent data for many other teams, which will all just equal $0$.To put numbers in for the variables, the model looks like this:`scorediff` = (location_coef $*$ $0$) + (team_Harvard_coef $*$ $1$) + (opponent_Harvard_coef $*$ $0$) + (team_Penn_coef $*$ $0$) + (opponent_Penn_coef $*$ $1$) + (team_Yale_coef $*$ $0$) + (opponent_Yale_coef 
$*$ $0$) + $\cdots$Which is just:`scorediff` = (location_coef $*$ $0$) + (7.78 $*$ $1$) + (-6.68 $*$ $1$) = $7.78 - 6.68$ = Harvard_YUSAG_coef - Penn_YUSAG_coefThus showing how the difference in YUSAG coefficients is the same as the predicted score differential. Furthermore, the higher YUSAG coefficient a team has, the better they are.Lastly, if the Harvard-Penn game was to be home at Harvard, we would just add the location_coef:`scorediff` = (location_coef $*$ $1$) + (team_Harvard_coef $*$ $1$) + (opponent_Penn_coef $*$ $1$) = $1.77 + 7.78 - 6.68$ = Location_coef + Harvard_YUSAG_coef - Penn_YUSAG_coef
# Pair every feature name with its fitted coefficient.
coef_data = list(zip(X.columns, ridge_reg.coef_))
coef_df = pd.DataFrame(coef_data, columns=['feature', 'feature_coef'])
coef_df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Let's get only the team variables, so that it is a proper ranking
# Keep only team_ coefficients: drop the opponent_ mirrors, then the
# location row, and sort descending so row order is the ranking.
team_df = coef_df[~coef_df['feature'].str.contains("opponent")]
team_df = team_df.iloc[1:]  # first remaining row is the location coefficient
team_df.head()
ranked_team_df = team_df.sort_values(['feature_coef'], ascending=False)
ranked_team_df = ranked_team_df.reset_index(drop=True)
ranked_team_df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
I'm going to change the names of the columns and remove the 'team_' part of every string:
# Rename the columns to team / YUSAG_coef and strip the dummy prefix.
ranked_team_df.rename(columns={'feature': 'team', 'feature_coef': 'YUSAG_coef'}, inplace=True)
ranked_team_df['team'] = ranked_team_df['team'].str.replace('team_', '')
ranked_team_df.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Lastly, I'm just going to shift the index to start at 1, so that it corresponds to the ranking.
# Shift to a 1-based index so the index doubles as the rank, then persist.
ranked_team_df.index = ranked_team_df.index + 1
ranked_team_df.to_csv("FBS_power_rankings.csv")
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Additional stuff: Testing the modelThis section is mostly about how one could test the performance of the model or how one could choose appropriate hyperparameters. Creating a new dataframeFirst let's take the original dataframe and sort it by date, so that the order of games in the dataframe matches the order the games were played.
# Chronological order, so walk-forward testing sees games as played.
df_dated = df.sort_values(['year', 'month', 'day'], ascending=[True, True, True])
df_dated = df_dated.reset_index(drop=True)
df_dated.head()
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Let's first make a dataframe with training data (the first three years of results)
# Initial training pool: the first three seasons (2013-2015).
thirteen_df = df_dated.loc[df_dated['year'] == 2013]
fourteen_df = df_dated.loc[df_dated['year'] == 2014]
fifteen_df = df_dated.loc[df_dated['year'] == 2015]
train_df = pd.concat([thirteen_df, fourteen_df, fifteen_df], ignore_index=True)
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Now let's make an initial testing dataframe with the data from this past year.
# Held-out pool: the most recent season(s).
sixteen_df = df_dated.loc[df_dated['year'] == 2016]
seventeen_df = df_dated.loc[df_dated['year'] == 2017]
test_df = pd.concat([sixteen_df, seventeen_df], ignore_index=True)
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
I am now going to set up a testing/validation scheme for the model. It works like this:First I start off where my training data is all games from 2012-2015. Using the model trained on this data, I then predict games from the first week of the 2016 season and look at the results.Next, I add that first week's worth of games to the training data, and now I train on all 2012-2015 results plus the first week from 2016. After training the model on this data, I then test on the second week of games. I then add that week's games to the training data and repeat the same procedure week after week.In this way, I am never testing on a result that I have trained on. Though, it should be noted that I have also used this as a validation scheme, so I have technically done some sloppy 'data snooping' and this is not a great predictor of my generalization error.
def train_test_model(train_df, test_df):
    """Fit a weighted ridge model on train_df and evaluate on test_df.

    Returns (train R^2, winner-call accuracy, RMSE, correct calls,
    total squared error) for the test games.
    """
    # make the training data
    X_train = train_df.drop(['year','month','day','team','opponent','teamscore','oppscore','D1','OT','weights','scorediff'], axis=1)
    y_train = train_df['scorediff']
    weights_train = train_df['weights']
    # train the model
    ridge_reg = Ridge()
    ridge_reg.fit(X_train, y_train, weights_train)
    fit = ridge_reg.score(X_train, y_train, sample_weight=weights_train)
    print('R^2 on the training data:')
    print(fit)
    # get the test data
    X_test = test_df.drop(['year','month','day','team','opponent','teamscore','oppscore','D1','OT','weights','scorediff'], axis=1)
    y_test = test_df['scorediff']
    # a game is "called" correctly when predicted and actual margins agree in sign
    compare_data = list(zip(ridge_reg.predict(X_test), y_test))
    right_count = 0
    for predicted, actual in compare_data:
        if predicted >= 0 and actual >= 0:
            right_count += 1
        elif predicted <= 0 and actual <= 0:
            right_count += 1
    accuracy = right_count / len(compare_data)
    print('accuracy on this weeks games')
    print(accuracy)
    total_squared_error = 0.0
    for predicted, actual in compare_data:
        total_squared_error += (predicted - actual) ** 2
    RMSE = (total_squared_error / float(len(compare_data))) ** 0.5
    print('RMSE on this weeks games:')
    print(RMSE)
    return fit, accuracy, RMSE, right_count, total_squared_error


# Walk-forward evaluation: train on everything seen so far, test one
# game-day at a time, then fold that day into the training pool.
base_df = train_df
new_indices = []
# BUG FIX: the original "hashed" a date as year+month+day (an int sum), so
# distinct dates with equal sums (e.g. Apr 3 vs Mar 4 of the same year)
# were merged into one test group, and the initial sentinel 2018 silently
# relied on the first game falling on a date summing to 2018.
# A (year, month, day) tuple is collision-free and needs no sentinel.
last_date_hash = None
fit_list = []
accuracy_list = []
RMSE_list = []
total_squared_error = 0
total_right_count = 0
for index, row in test_df.iterrows():
    date_hash = (row['year'], row['month'], row['day'])
    if last_date_hash is None:
        last_date_hash = date_hash
    if date_hash != last_date_hash:
        last_date_hash = date_hash
        test_week = test_df.iloc[new_indices]
        fit, accuracy, RMSE, correct_calls, squared_error = train_test_model(base_df, test_week)
        fit_list.append(fit)
        accuracy_list.append(accuracy)
        RMSE_list.append(RMSE)
        total_squared_error += squared_error
        total_right_count += correct_calls
        base_df = pd.concat([base_df, test_week], ignore_index=True)
        new_indices = [index]
    else:
        new_indices.append(index)

# BUG FIX: the original loop never scored the final date group, although
# those games were still counted in the denominators below; evaluate the
# leftover indices so the totals are consistent.
if new_indices:
    test_week = test_df.iloc[new_indices]
    fit, accuracy, RMSE, correct_calls, squared_error = train_test_model(base_df, test_week)
    fit_list.append(fit)
    accuracy_list.append(accuracy)
    RMSE_list.append(RMSE)
    total_squared_error += squared_error
    total_right_count += correct_calls

# fraction of held-out games whose winner was called correctly
total_accuracy = total_right_count / test_df.shape[0]
total_accuracy
# overall root mean squared error of the predicted score differentials
overall_RMSE = (total_squared_error / test_df.shape[0]) ** 0.5
overall_RMSE
_____no_output_____
MIT
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
Using fmriprep [fmriprep](https://fmriprep.readthedocs.io/en/stable/) is a package developed by the Poldrack lab to do the minimal preprocessing of fMRI data required. It covers brain extraction, motion correction, field unwarping, and registration. It uses a combination of well-known software packages (e.g., FSL, SPM, ANTS, AFNI) and selects the 'best' implementation of each preprocessing step.Once installed, `fmriprep` can be invoked from the command line. We can even run it inside this notebook! The following command should work after you remove the 'hashtag' ``. However, running fmriprep takes quite some time (we included the hashtag to prevent you from accidentally running it). You'll most likely want to run it in parallel on a computing cluster.
#!fmriprep \ # --ignore slicetiming \ # --ignore fieldmaps \ # --output-space template \ # --template MNI152NLin2009cAsym \ # --template-resampling-grid 2mm \ # --fs-no-reconall \ # --fs-license-file \ # ../license.txt \ # ../data/ds000030 ../data/ds000030/derivatives/fmriprep participant
_____no_output_____
CC-BY-4.0
code/01_preprocessing.ipynb
maxim-belkin/SDC-BIDS-fMRI
Скачайте данные в формате csv, выберите из таблицы данные по России, начиная с 3 марта 2020 г. (в этот момент впервые стало больше 2 заболевших). В качестве целевой переменной возьмём число случаев заболевания (столбцы total_cases и new_cases); для упрощения обработки можно заменить в столбце new_cases все нули на единицы. Для единообразия давайте зафиксируем тренировочный набор в виде первых 50 отсчётов (дней), начиная с 3 марта; остальные данные можно использовать в качестве тестового набора (и он даже будет увеличиваться по мере выполнения задания).- Постройте графики целевых переменных. Вы увидите, что число заболевших растёт очень быстро, на первый взгляд экспоненциально. Для первого подхода к снаряду давайте это и используем.- Используя линейную регрессию, обучите модель с экспоненциальным ростом числа заболевших: y ~ exp(линейная функция от x), где x — номер текущего дня.- Найдите апостериорное распределение параметров этой модели для достаточно широкого априорного распределения. Требующееся для этого значение дисперсии шума в данных оцените, исходя из вашей же максимальной апостериорной модели (это фактически первый шаг эмпирического Байеса).- Посэмплируйте много разных экспонент, постройте графики. Сколько, исходя из этих сэмплов, предсказывается случаев коронавируса в России к 1 мая? к 1 июня? к 1 сентября? Постройте предсказательные распределения (можно эмпирически, исходя из данных сэмплирования).Предсказания экспоненциальной модели наверняка получились грустными. Но это, конечно, чересчур пессимистично — экспоненциальный рост в природе никак не может продолжаться вечно. Кривая общего числа заболевших во время эпидемии в реальности имеет сигмоидальный вид: после начальной фазы экспоненциального роста неизбежно происходит насыщение. В качестве конкретной формы такой сигмоиды давайте возьмём форму функции распределения для гауссиана. Естественно, в нашем случае сигмоида стремится не к единице, т.е. 
константа перед интегралом может быть произвольной (и её можно внести в экспоненту), а в экспоненте под интегралом может быть произвольная квадратичная функция от t.- Предложите способ обучать параметры такой сигмоидальной функции при помощи линейной регрессии.- Обучите эти параметры на датасете случаев коронавируса в России. Найдите апостериорное распределение параметров этой модели для достаточно широкого априорного распределения. Требующееся для этого значение дисперсии шума в данных оцените, исходя из вашей же максимальной апостериорной модели.- Посэмплируйте много разных сигмоид из апостериорного распределения, постройте графики. Сколько, исходя из этих сэмплов, будет всего случаев коронавируса в России? Постройте эмпирическое предсказательное распределение, нарисуйте графики. Каков ваш прогноз числа случаев коронавируса в пессимистичном сценарии (90-й процентиль в выборке числа случаев)? В оптимистичном сценарии (10-й процентиль)?Бонус: проведите такой же анализ для других стран (здесь придётся руками подобрать дни начала моделирования — коронавирус приходил в разные страны в разное время). Насколько разные параметры получаются? Можно ли разделить страны на кластеры (хотя бы чисто визуально) в зависимости от этих параметров?[Эта часть задания не оценивается, здесь нет правильных и неправильных ответов, но буду рад узнать, что вы думаете]Что вы поняли из этого упражнения? Что можно сказать про коронавирус по итогам такого моделирования? Как принять решение, например, о том, нужно ли вводить карантин?
# Imports for the assignment: dates, data handling, linear algebra,
# the sklearn regression base classes and plotting.
from datetime import datetime
import pandas as pd
import numpy as np
import scipy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# Notebook magic: render figures inline, with a wide default size.
%matplotlib inline
plt.rcParams['figure.figsize'] = 16, 6
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
Загрузка и предобработка данных
# загрузим данные df = pd.read_csv('full_data.csv') df = df[(df['location'] == 'Russia') & (df['date'] >= '2020-03-03')].reset_index(drop=True) df.loc[df['new_cases'] == 0, 'new_cases'] = 1 df['day'] = df.index start_day = datetime.strptime('2020-03-03', '%Y-%m-%d') may_first = datetime.strptime('2020-05-01', '%Y-%m-%d') june_first = datetime.strptime('2020-06-01', '%Y-%m-%d') sept_first = datetime.strptime('2020-09-01', '%Y-%m-%d') year_end = datetime.strptime('2020-12-31', '%Y-%m-%d') till_may = (may_first - start_day).days till_june = (june_first - start_day).days till_sept = (sept_first - start_day).days till_year_end = (year_end - start_day).days
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
Разделим на трейн и тест
# Fix the training window at the first TRAIN_DAYS rows; everything after
# becomes the (growing) test set. 60(!) days are used instead of 50 because
# the resulting fits come out more reasonable.
TRAIN_DAYS = 60
train, test = df.iloc[:TRAIN_DAYS], df.iloc[TRAIN_DAYS:]
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
Код для байесовской регрессии
class BayesLR(BaseEstimator, TransformerMixin):
    """Bayesian linear regression with a Gaussian prior on the weights.

    Model: y = w^T x + eps with eps ~ N(0, noise^2) and prior
    w ~ N(mu, sigma). `fit` performs the conjugate Gaussian update of the
    posterior mean/covariance; an intercept column is added internally.
    """

    def __init__(self, mu, sigma, noise=None):
        # Prior mean and covariance of the weight vector (intercept included).
        self.mu = mu
        self.sigma = sigma
        # Known noise std; if None it is estimated from the data in `fit`.
        # BUG FIX: the original wrote `self.noise = None`, silently
        # discarding any user-supplied noise level.
        self.noise = noise

    def _estimate_noise(self, X, y):
        # Empirical-Bayes step: take the noise std as the residual std of
        # the ordinary least-squares fit (MAP under a flat prior).
        return np.std(y - X.dot(np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)))

    def _add_intercept(self, X):
        # Prepend a column of ones so the intercept is part of w.
        return np.hstack((np.ones((len(X), 1)), X))

    def fit(self, X, y):
        """Update the posterior over the weights.

        X: (n_samples, n_features)
        y: (n_samples, )
        """
        X = self._add_intercept(X)
        if self.noise is None:
            self.noise = self._estimate_noise(X, y)
        beta = 1 / self.noise ** 2  # noise precision
        mu_prev = self.mu
        sigma_prev = self.sigma
        # Conjugate Gaussian posterior update (Bishop, PRML eqs. 3.50-3.51).
        self.sigma = np.linalg.inv(np.linalg.inv(sigma_prev) + beta * np.dot(X.T, X))
        self.mu = np.dot(self.sigma,
                         np.dot(np.linalg.inv(sigma_prev), mu_prev) + beta * np.dot(X.T, y))
        return self

    def predict(self, X):
        """Predict with the posterior-mean weights."""
        X = self._add_intercept(X)
        return X.dot(self.mu)

    def sample_w(self, n_samples=1000):
        """Draw weight vectors from the current posterior."""
        return np.random.multivariate_normal(self.mu, self.sigma, n_samples)

    def sample(self, X, n_samples=1000):
        """Draw predictive curves; one column per sampled weight vector."""
        X = self._add_intercept(X)
        w = self.sample_w(n_samples)
        return X.dot(w.T)


def plot_sampled(sampled, true=None):
    # Plot each sampled curve as a thin black line.
    # NOTE(review): `true` is unused; kept for interface compatibility.
    for i in range(sampled.shape[1]):
        plt.plot(sampled[:, i], 'k-', lw=.4)
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
Часть 1: моделирование экспонентной 1.1 Графики
# Plot both targets over the training window: cumulative total cases and
# daily new cases.
plt.plot(train['total_cases'], label='общее число зараженных')
plt.plot(train['new_cases'], label='количество новых случаев за день')
plt.title('Графики целевых переменных')
plt.legend();
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
1.2 Линейная регрессия y ~ exp(wX) Чтобы построить линейную регрессию для такого случая, прологарифмируем целевую переменную (общее количество зараженных).
# Exponential model: log(total_cases) is linear in the day number, so we fit
# a Bayesian linear regression on the log-target.
X_tr = train[['day']].values
y_tr = np.log(train['total_cases'].values)
X_te = test[['day']].values
y_te = np.log(test['total_cases'].values)
X_full = np.arange(till_year_end + 1).reshape(-1, 1)  # every day until year end

# Choose a wide (weakly informative) prior over (intercept, slope).
mu_prior = np.array([0, 0])
sigma_prior = 100 * np.array([[1, 0], [0, 1]])
bayes_lr = BayesLR(mu_prior, sigma_prior)
bayes_lr.fit(X_tr, y_tr)
print(bayes_lr.mu)
print(bayes_lr.sigma)

# Sample model parameters from the posterior and inspect their marginals.
w = bayes_lr.sample_w(n_samples=10000)
fig, ax = plt.subplots(1, 2)
ax[0].hist(w[:, 0], bins=100)
ax[0].set_title('Распределение свободного члена')
ax[1].hist(w[:, 1], bins=100)
ax[1].set_title('Распределение коэффициента наклона')
plt.show()
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
1.3 Предсказания
# Семплируем экспоненты для трейна sampled_train = np.exp(bayes_lr.sample(X_tr)) plot_sampled(sampled_train) plt.plot(np.exp(y_tr), color='red', label='Реальное число зараженных') plt.legend() plt.title('Предсказания для трейна'); # Посемплируем экспоненты для теста sampled_test = np.exp(bayes_lr.sample(X_te, n_samples=10000)) # Делаем предсказания preds_full = np.exp(bayes_lr.predict(X_full)) plot_sampled(sampled_test) plt.plot(np.exp(y_te), color='red', label='Реальное число зараженных') plt.legend() plt.title('Предсказания для теста'); print(f'1 мая: {preds_full[till_may] / 1_000_000:.4f} млн зараженных') print(f'1 июня: {preds_full[till_june] / 1_000_000:.4f} млн зараженных') print(f'1 сентября: {preds_full[till_sept] / 1_000_000:.4f} млн зараженных')
1 мая: 0.3274 млн зараженных 1 июня: 99.7141 млн зараженных 1 сентября: 2342098539.3834 млн зараженных
MIT
homework2.ipynb
x-sile/made_ml
Получается, что к 1 июня 2/3 России вымрет, не очень реалистично.
# Посемплируем экспоненты на будущее sampled_full = np.exp(bayes_lr.sample(X_full, n_samples=10000)) fig, ax = plt.subplots(2, 2, figsize=(16, 10)) ax[0][0].hist(sampled_full[till_may], bins=50) ax[0][0].set_title('Предсказательное распределение количества зараженных к маю') ax[0][1].hist(sampled_full[till_june], bins=50) ax[0][1].set_title('Предсказательное распределение количества зараженных к июню') ax[1][0].hist(sampled_full[till_sept], bins=50) ax[1][0].set_title('Предсказательное распределение количества зараженных к сентябрю') ax[1][1].hist(sampled_test.mean(0), bins=30) ax[1][1].set_title('Распределение среднего числа зараженных для тестовой выборки') plt.show()
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
Вывод: моделирование экспонентой - это шляпа =) Часть 2: моделирование сигмоидой 2.1 Как такое обучать Справа у нас интеграл - можем взять производную, а затем прологарифмировать, в итоге получим: $\ln(\Delta y) = w_2 x^2 + w_1 x + w_0$. Другими словами, мы можем замоделировать количество новых случаев заражения с помощью плотности нормального распределения. В качестве функции в экспоненте возьмём квадратичную функцию от дня. 2.2 Обучаем
# Функция для приведения наших предсказаний приростов к общему числу зараженных def to_total(preds): return 2 + np.cumsum(np.exp(preds), axis=0) X_tr = np.hstack([X_tr, X_tr ** 2]) y_tr = np.log(train['new_cases'].values) X_te = np.hstack([X_te, X_te ** 2]) y_te = np.log(test['new_cases'].values) X_full = np.hstack([X_full, X_full ** 2]) # Выберем uninformative prior mu_prior = np.array([0, 0, 0]) sigma_prior = 1000 * np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) bayes_lr = BayesLR(mu_prior, sigma_prior) bayes_lr.fit(X_tr, y_tr) print(bayes_lr.mu) print(bayes_lr.sigma) # Семплируем параметры модели w = bayes_lr.sample_w(n_samples=10000) fig, ax = plt.subplots(1, 3) ax[0].hist(w[:, 0], bins=100) ax[0].set_title('Распределение свободного члена') ax[1].hist(w[:, 1], bins=100) ax[1].set_title('Распределение коэффициента при X') ax[2].hist(w[:, 2], bins=100) ax[2].set_title('Распределение коэффициента при X^2') plt.show()
_____no_output_____
MIT
homework2.ipynb
x-sile/made_ml
2.3 Предсказываем
# Sample sigmoid (cumulative) curves for the training window.
sampled_train = to_total(bayes_lr.sample(X_tr))
plot_sampled(sampled_train)
plt.plot(to_total(y_tr), color='red', label='Реальное число зараженных')
plt.legend()
plt.title('Предсказания для трейна');

# Sample sigmoid curves for the test window.
sampled_test = to_total(bayes_lr.sample(X_te))

# Posterior-mean prediction until the end of the year.
preds_full = to_total(bayes_lr.predict(X_full))
plt.plot(preds_full)
plt.plot(to_total(np.hstack([y_tr, y_te])), color='red', label='Реальное известное число зараженных')
plt.legend()
plt.title('Среднее наших предсказаний по числу зараженных до конца года');

plot_sampled(sampled_test)
plt.plot(to_total(y_te), color='red', label='Реальное число зараженных')
plt.legend()
plt.title('Предсказания для теста');

# Point forecasts at the reference dates (in millions of cases).
print(f'1 мая: {preds_full[till_may] / 1_000_000:.4f} млн зараженных')
print(f'1 июня: {preds_full[till_june] / 1_000_000:.4f} млн зараженных')
print(f'1 сентября: {preds_full[till_sept] / 1_000_000:.4f} млн зараженных')

# A small sample of sigmoid curves until the year end, for visual inspection.
sampled_full = to_total(bayes_lr.sample(X_full, n_samples=100))
plot_sampled(sampled_full)
plt.ylim(0, 1_000_000)
plt.title('Предсказания до конца года');

# A larger sample for the empirical predictive distributions; histograms are
# clipped at SHOW_THR so heavy right tails do not dominate the plots.
sampled_full = to_total(bayes_lr.sample(X_full, n_samples=10000))
fig, ax = plt.subplots(3, 2, figsize=(16, 16))
SHOW_THR = 3_000_000
ax[0][0].hist(sampled_full[till_may], bins=50)
ax[0][0].set_title('Предсказательное распределение количества зараженных к маю')
ax[0][1].hist(sampled_full[till_june][sampled_full[till_june] < SHOW_THR], bins=50)
ax[0][1].set_title('Предсказательное распределение количества зараженных к июню')
ax[1][0].hist(sampled_full[till_sept][sampled_full[till_sept] < SHOW_THR], bins=50)
ax[1][0].set_title('Предсказательное распределение количества зараженных к сентябрю')
ax[1][1].hist(sampled_full[-1][sampled_full[-1] < SHOW_THR], bins=50)
ax[1][1].set_title('Предсказательное распределение количества зараженных к концу года')
ax[2][0].hist(sampled_test.mean(0), bins=30)
ax[2][0].set_title('Распределение среднего числа зараженных для тестовой выборки')
ax[2][1].hist(sampled_full.mean(0)[sampled_full.mean(0) < SHOW_THR], bins=30)
ax[2][1].set_title('Распределение среднего числа зараженных до конца года')
plt.show()

# 10th / 90th percentiles of the total case count at the end of the year.
print(f'Оптимистичный прогноз к концу года: {int(np.quantile(sampled_full[-1], 0.1)) / 1_000_000:.4f} млн человек')
print(f'Пессимистичный прогноз к концу года: {int(np.quantile(sampled_full[-1], 0.9)) / 1_000_000:.4f} млн человек')
Оптимистичный прогноз к концу года: 0.2409 млн человек Пессимистичный прогноз к концу года: 1.3295 млн человек
MIT
homework2.ipynb
x-sile/made_ml
SVM
import pandas as pd
from sklearn import svm, metrics
from sklearn.model_selection import train_test_split

# Load the WESAD chest EDA feature table (adjust the path to your dataset).
# Raw string avoids '\d' / '\w' being parsed as (invalid) escape sequences.
wesad_eda = pd.read_csv(r'D:\data\wesad-chest-combined-classification-eda.csv')
wesad_eda.columns

# Full column list of the dataset (kept for reference).
original_column_list = ['MEAN', 'MAX', 'MIN', 'RANGE', 'KURT', 'SKEW', 'MEAN_1ST_GRAD', 'STD_1ST_GRAD',
                        'MEAN_2ND_GRAD', 'STD_2ND_GRAD', 'ALSC', 'INSC', 'APSC', 'RMSC', 'subject id',
                        'MEAN_LOG', 'INSC_LOG', 'APSC_LOG', 'RMSC_LOG', 'RANGE_LOG', 'ALSC_LOG', 'MIN_LOG',
                        'MEAN_1ST_GRAD_LOG', 'MEAN_2ND_GRAD_LOG', 'MIN_LOG_LOG', 'MEAN_1ST_GRAD_LOG_LOG',
                        'MEAN_2ND_GRAD_LOG_LOG', 'APSC_LOG_LOG', 'ALSC_LOG_LOG', 'APSC_BOXCOX', 'RMSC_BOXCOX',
                        'RANGE_BOXCOX', 'MEAN_YEO_JONSON', 'SKEW_YEO_JONSON', 'KURT_YEO_JONSON',
                        'APSC_YEO_JONSON', 'MIN_YEO_JONSON', 'MAX_YEO_JONSON', 'MEAN_1ST_GRAD_YEO_JONSON',
                        'RMSC_YEO_JONSON', 'STD_1ST_GRAD_YEO_JONSON', 'RANGE_SQRT', 'RMSC_SQUARED',
                        'MEAN_2ND_GRAD_CUBE', 'INSC_APSC', 'condition', 'SSSQ class', 'SSSQ Label',
                        'condition label']
# Numeric feature columns only (ids and label columns removed).
original_column_list_withoutString = ['MEAN', 'MAX', 'MIN', 'RANGE', 'KURT', 'SKEW', 'MEAN_1ST_GRAD',
                                     'STD_1ST_GRAD', 'MEAN_2ND_GRAD', 'STD_2ND_GRAD', 'ALSC', 'INSC', 'APSC',
                                     'RMSC', 'MEAN_LOG', 'INSC_LOG', 'APSC_LOG', 'RMSC_LOG', 'RANGE_LOG',
                                     'ALSC_LOG', 'MIN_LOG', 'MEAN_1ST_GRAD_LOG', 'MEAN_2ND_GRAD_LOG',
                                     'MIN_LOG_LOG', 'MEAN_1ST_GRAD_LOG_LOG', 'MEAN_2ND_GRAD_LOG_LOG',
                                     'APSC_LOG_LOG', 'ALSC_LOG_LOG', 'APSC_BOXCOX', 'RMSC_BOXCOX',
                                     'RANGE_BOXCOX', 'MEAN_YEO_JONSON', 'SKEW_YEO_JONSON', 'KURT_YEO_JONSON',
                                     'APSC_YEO_JONSON', 'MIN_YEO_JONSON', 'MAX_YEO_JONSON',
                                     'MEAN_1ST_GRAD_YEO_JONSON', 'RMSC_YEO_JONSON', 'STD_1ST_GRAD_YEO_JONSON',
                                     'RANGE_SQRT', 'RMSC_SQUARED', 'MEAN_2ND_GRAD_CUBE', 'INSC_APSC']
# A smaller hand-picked subset (kept for reference; not used below).
selected_colum_list = ['MEAN', 'MAX', 'MIN', 'RANGE', 'KURT', 'SKEW', 'MEAN_1ST_GRAD', 'STD_1ST_GRAD',
                       'MEAN_2ND_GRAD', 'STD_2ND_GRAD', 'ALSC', 'INSC', 'APSC', 'RMSC', 'subject id',
                       'MEAN_LOG', 'INSC_LOG', 'APSC_LOG', 'RMSC_LOG', 'RANGE_LOG', 'ALSC_LOG', 'MIN_LOG']

stress_data = wesad_eda[original_column_list_withoutString]
stress_label = wesad_eda['condition label']
stress_data

train_data, test_data, train_label, test_label = train_test_split(stress_data, stress_label)

# Project the features onto 2 principal components before fitting the SVM.
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(train_data)
X_t_train = pca.transform(train_data)
X_t_test = pca.transform(test_data)

model = svm.SVC()
model.fit(X_t_train, train_label)
predict = model.predict(X_t_test)
acc_score = metrics.accuracy_score(test_label, predict)
print(acc_score)

import pickle
# `sklearn.externals.joblib` was removed in scikit-learn >= 0.23; fall back
# to it only on old installations.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
saved_model = pickle.dumps(model)
joblib.dump(model, 'SVMmodel1.pkl')
model_from_pickle = joblib.load('SVMmodel1.pkl')
# BUG FIX: the model was trained on the 2-component PCA projection, so the
# reloaded model must also predict on X_t_test — predicting on the raw
# multi-feature test_data raises a feature-dimension mismatch.
predict = model_from_pickle.predict(X_t_test)
acc_score = metrics.accuracy_score(test_label, predict)
print(acc_score)
0.9998672250025533
MIT
SVM/SVM_wesad_eda.ipynb
aiLocsRnD/classification
Time series analysis on AWS*Chapter 1 - Time series analysis overview* Initializations---
# Notebook-only cell: install the third-party packages used in this chapter.
!pip install --quiet tqdm kaggle tsia ruptures
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Imports
import matplotlib.colors as mpl_colors import matplotlib.dates as mdates import matplotlib.ticker as ticker import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import ruptures as rpt import sys import tsia import warnings import zipfile from matplotlib import gridspec from sklearn.preprocessing import normalize from tqdm import tqdm from urllib.request import urlretrieve
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Parameters
# Directories for raw downloads and processed datasets.
RAW_DATA = os.path.join('..', 'Data', 'raw')
DATA = os.path.join('..', 'Data')
warnings.filterwarnings("ignore")
os.makedirs(RAW_DATA, exist_ok=True)

%matplotlib inline
# plt.style.use('Solarize_Light2')
plt.style.use('fivethirtyeight')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']

# High-DPI, small-font defaults so compact figures stay readable.
plt.rcParams['figure.dpi'] = 300
plt.rcParams['lines.linewidth'] = 0.3
plt.rcParams['axes.titlesize'] = 6
plt.rcParams['axes.labelsize'] = 6
plt.rcParams['xtick.labelsize'] = 4.5
plt.rcParams['ytick.labelsize'] = 4.5
plt.rcParams['grid.linewidth'] = 0.2
plt.rcParams['legend.fontsize'] = 5
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Helper functions
def progress_report_hook(count, block_size, total_size):
    """urlretrieve reporthook: print cumulative MB every 500 blocks."""
    if count % 500 != 0:
        return
    downloaded_mb = int(count * block_size // 1048576)
    sys.stdout.write("\r{} MB downloaded".format(downloaded_mb))
    sys.stdout.flush()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Downloading datasets **Dataset 1:** Household energy consumption
# Dataset 1: household electricity consumption (UCI LD2011_2014 archive).
ORIGINAL_DATA = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip'
ARCHIVE_PATH = os.path.join(RAW_DATA, 'energy-consumption.zip')
FILE_NAME = 'energy-consumption.csv'
FILE_PATH = os.path.join(DATA, 'energy', FILE_NAME)
FILE_DIR = os.path.dirname(FILE_PATH)

if not os.path.isfile(FILE_PATH):
    print("Downloading dataset (258MB), can take a few minutes depending on your connection")
    urlretrieve(ORIGINAL_DATA, ARCHIVE_PATH, reporthook=progress_report_hook)

    os.makedirs(os.path.join(DATA, 'energy'), exist_ok=True)
    print("\nExtracting data archive")
    zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')
    zip_ref.extractall(FILE_DIR + '/')
    zip_ref.close()
    # Drop macOS zip metadata and rename the extracted file.
    !rm -Rf $FILE_DIR/__MACOSX
    !mv $FILE_DIR/LD2011_2014.txt $FILE_PATH
else:
    print("File found, skipping download")
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**Dataset 2:** Nasa Turbofan remaining useful lifetime
# Dataset 2: NASA turbofan remaining-useful-life (C-MAPSS), FD001 subset.
# Check that all three FD001 files are already present.
ok = True
ok = ok and os.path.exists(os.path.join(DATA, 'turbofan', 'train_FD001.txt'))
ok = ok and os.path.exists(os.path.join(DATA, 'turbofan', 'test_FD001.txt'))
ok = ok and os.path.exists(os.path.join(DATA, 'turbofan', 'RUL_FD001.txt'))

if (ok):
    print("File found, skipping download")
else:
    print('Some datasets are missing, create working directories and download original dataset from the NASA repository.')

    # Making sure the directory already exists:
    os.makedirs(os.path.join(DATA, 'turbofan'), exist_ok=True)

    # Download the dataset from the NASA repository, unzip it and set
    # aside the first training file to work on:
    !wget https://ti.arc.nasa.gov/c/6/ --output-document=$RAW_DATA/CMAPSSData.zip
    !unzip $RAW_DATA/CMAPSSData.zip -d $RAW_DATA
    !cp $RAW_DATA/train_FD001.txt $DATA/turbofan/train_FD001.txt
    !cp $RAW_DATA/test_FD001.txt $DATA/turbofan/test_FD001.txt
    !cp $RAW_DATA/RUL_FD001.txt $DATA/turbofan/RUL_FD001.txt
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**Dataset 3:** Human heartbeat
# Dataset 3: human heartbeat (ECG200 from timeseriesclassification.com).
ECG_DATA_SOURCE = 'http://www.timeseriesclassification.com/Downloads/ECG200.zip'
ARCHIVE_PATH = os.path.join(RAW_DATA, 'ECG200.zip')
FILE_NAME = 'ecg.csv'
FILE_PATH = os.path.join(DATA, 'ecg', FILE_NAME)
FILE_DIR = os.path.dirname(FILE_PATH)

if not os.path.isfile(FILE_PATH):
    urlretrieve(ECG_DATA_SOURCE, ARCHIVE_PATH)
    os.makedirs(os.path.join(DATA, 'ecg'), exist_ok=True)
    print("\nExtracting data archive")
    zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')
    zip_ref.extractall(FILE_DIR + '/')
    zip_ref.close()
    # Keep only the training split, under a friendlier name.
    !mv $DATA/ecg/ECG200_TRAIN.txt $FILE_PATH
else:
    print("File found, skipping download")
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**Dataset 4:** Industrial pump dataTo download this dataset from Kaggle, you will need to have an account and create a token that you install on your machine. You can follow [**this link**](https://www.kaggle.com/docs/api) to get started with the Kaggle API. Once generated, make sure your Kaggle token is stored in the `~/.kaggle/kaggle.json` file, or the next cells will issue an error. In some cases, you may still have an error while using this location. Try moving your token in this location instead: `~/kaggle/kaggle.json` (note the absence of the `.` in the folder name).To get a Kaggle token, go to kaggle.com and create an account. Then navigate to **My account** and scroll down to the API section. There, click the **Create new API token** button:
# Dataset 4: industrial pump sensor data (Kaggle; requires a kaggle.json token).
FILE_NAME = 'pump-sensor-data.zip'
ARCHIVE_PATH = os.path.join(RAW_DATA, FILE_NAME)
FILE_PATH = os.path.join(DATA, 'pump', 'sensor.csv')
FILE_DIR = os.path.dirname(FILE_PATH)

if not os.path.isfile(FILE_PATH):
    if not os.path.exists('/home/ec2-user/.kaggle/kaggle.json'):
        # Prepare the token directory, then stop so the user can add a token.
        os.makedirs('/home/ec2-user/.kaggle/', exist_ok=True)
        raise Exception('The kaggle.json token was not found.\nCreating the /home/ec2-user/.kaggle/ directory: put your kaggle.json file there once you have generated it from the Kaggle website')
    else:
        print('The kaggle.json token file was found: making sure it is not readable by other users on this system.')
        !chmod 600 /home/ec2-user/.kaggle/kaggle.json

    os.makedirs(os.path.join(DATA, 'pump'), exist_ok=True)
    !kaggle datasets download -d nphantawee/pump-sensor-data -p $RAW_DATA

    print("\nExtracting data archive")
    zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')
    zip_ref.extractall(FILE_DIR + '/')
    zip_ref.close()
else:
    print("File found, skipping download")
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**Dataset 5:** London household energy consumption with weather data
# Dataset 5: London smart-meter energy consumption with weather (Kaggle).
FILE_NAME = 'smart-meters-in-london.zip'
ARCHIVE_PATH = os.path.join(RAW_DATA, FILE_NAME)
FILE_PATH = os.path.join(DATA, 'energy-london', 'smart-meters-in-london.zip')
FILE_DIR = os.path.dirname(FILE_PATH)

# Checks if the data were already downloaded:
if os.path.exists(os.path.join(DATA, 'energy-london', 'acorn_details.csv')):
    print("File found, skipping download")
else:
    # Downloading and unzipping datasets from Kaggle:
    print("Downloading dataset (2.26G), can take a few minutes depending on your connection")
    os.makedirs(os.path.join(DATA, 'energy-london'), exist_ok=True)
    !kaggle datasets download -d jeanmidev/smart-meters-in-london -p $RAW_DATA

    print('Unzipping files...')
    zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')
    zip_ref.extractall(FILE_DIR + '/')
    zip_ref.close()
    # Remove the nested archives and flatten the doubled directory levels.
    !rm $DATA/energy-london/*zip
    !rm $DATA/energy-london/*gz
    !mv $DATA/energy-london/halfhourly_dataset/halfhourly_dataset/* $DATA/energy-london/halfhourly_dataset
    !rm -Rf $DATA/energy-london/halfhourly_dataset/halfhourly_dataset
    !mv $DATA/energy-london/daily_dataset/daily_dataset/* $DATA/energy-london/daily_dataset
    !rm -Rf $DATA/energy-london/daily_dataset/daily_dataset
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Dataset visualization--- **1.** Household energy consumption
%%time
# Load the household energy file (';'-separated, ',' as decimal mark) and
# index it by timestamp, then plot one household's series.
FILE_PATH = os.path.join(DATA, 'energy', 'energy-consumption.csv')
energy_df = pd.read_csv(FILE_PATH, sep=';', decimal=',')
energy_df = energy_df.rename(columns={'Unnamed: 0': 'Timestamp'})
energy_df['Timestamp'] = pd.to_datetime(energy_df['Timestamp'])
energy_df = energy_df.set_index('Timestamp')
energy_df.iloc[100000:, 1:5].head()

fig = plt.figure(figsize=(5, 1.876))
plt.plot(energy_df['MT_002'])
plt.title('Energy consumption for household MT_002')
plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**2.** NASA Turbofan data
# Load the FD001 training file (space-separated, trailing empty columns
# dropped) and attach readable column names.
FILE_PATH = os.path.join(DATA, 'turbofan', 'train_FD001.txt')
turbofan_df = pd.read_csv(FILE_PATH, header=None, sep=' ')
turbofan_df.dropna(axis='columns', how='all', inplace=True)
print('Shape:', turbofan_df.shape)
turbofan_df.head(5)

columns = [
    'unit_number',
    'cycle',
    'setting_1',
    'setting_2',
    'setting_3',
] + ['sensor_{}'.format(s) for s in range(1,22)]
turbofan_df.columns = columns
turbofan_df.head()

# Add a RUL column and group the data by unit_number:
turbofan_df['rul'] = 0
grouped_data = turbofan_df.groupby(by='unit_number')

# Loops through each unit number to get the lifecycle counts:
# (RUL = total cycles for the unit minus the current cycle.)
for unit, rul in enumerate(grouped_data.count()['cycle']):
    current_df = turbofan_df[turbofan_df['unit_number'] == (unit+1)].copy()
    current_df['rul'] = rul - current_df['cycle']
    turbofan_df[turbofan_df['unit_number'] == (unit+1)] = current_df

# Show a subset of columns for unit 1, highlighting the RUL column.
df = turbofan_df.iloc[:, [0,1,2,3,4,5,6,25,26]].copy()
df = df[df['unit_number'] == 1]

def highlight_cols(s):
    # Styler callback: constant background for the highlighted column.
    return f'background-color: rgba(0, 143, 213, 0.3)'

df.head(10).style.applymap(highlight_cols, subset=['rul'])
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**3.** ECG Data
# Load the ECG200 training split: column 0 is the class label
# (-1 = ischemia, 1 = normal), the rest is the heartbeat waveform.
FILE_PATH = os.path.join(DATA, 'ecg', 'ecg.csv')
ecg_df = pd.read_csv(FILE_PATH, header=None, sep=' ')
print('Shape:', ecg_df.shape)
ecg_df.head()

plt.rcParams['lines.linewidth'] = 0.7
fig = plt.figure(figsize=(5,2))
# Only label the first curve of each class so the legend has two entries.
label_normal = False
label_ischemia = False
for i in range(0,100):
    label = ecg_df.iloc[i, 0]
    if (label == -1):
        color = colors[1]
        if label_ischemia:
            plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5, linestyle='--', linewidth=0.5)
        else:
            plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5, label='Ischemia', linestyle='--')
            label_ischemia = True
    else:
        color = colors[0]
        if label_normal:
            plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5)
        else:
            plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5, label='Normal')
            label_normal = True

plt.title('Human heartbeat activity')
plt.legend(loc='upper right', ncol=2)
plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**4.** Industrial pump data
# Load the pump sensor data; recode machine_status so that any non-normal
# state (BROKEN / RECOVERING) is 1 and NORMAL is NaN (invisible when plotted).
FILE_PATH = os.path.join(DATA, 'pump', 'sensor.csv')
pump_df = pd.read_csv(FILE_PATH, sep=',')
pump_df.drop(columns={'Unnamed: 0'}, inplace=True)
pump_df['timestamp'] = pd.to_datetime(pump_df['timestamp'], format='%Y-%m-%d %H:%M:%S')
pump_df = pump_df.set_index('timestamp')
pump_df['machine_status'].replace(to_replace='NORMAL', value=np.nan, inplace=True)
pump_df['machine_status'].replace(to_replace='BROKEN', value=1, inplace=True)
pump_df['machine_status'].replace(to_replace='RECOVERING', value=1, inplace=True)
print('Shape:', pump_df.shape)
pump_df.head()

# Downsampled view (5-day means of the first 10 sensors) reused later in the
# file-structure exploration section.
file_structure_df = pump_df.iloc[:, 0:10].resample('5D').mean()

plt.rcParams['hatch.linewidth'] = 0.5
plt.rcParams['lines.linewidth'] = 0.5
fig = plt.figure(figsize=(5,1))
ax1 = fig.add_subplot(1,1,1)
plot1 = ax1.plot(pump_df['sensor_00'], label='Healthy pump')
# Overlay the broken/recovering periods as a hatched band on a twin axis.
ax2 = ax1.twinx()
plot2 = ax2.fill_between(
    x=pump_df.index,
    y1=0.0,
    y2=pump_df['machine_status'],
    color=colors[1],
    linewidth=0.0,
    edgecolor='#000000',
    alpha=0.5,
    hatch="//////",
    label='Broken pump'
)
ax2.grid(False)
ax2.set_yticks([])
labels = [plot1[0].get_label(), plot2.get_label()]
plt.legend(handles=[plot1[0], plot2], labels=labels, loc='lower center', ncol=2, bbox_to_anchor=(0.5, -.4))
plt.title('Industrial pump sensor data')
plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
**5.** London household energy consumption with weather data We want to filter out households that are subject to the dToU tariff and keep only the ones with a known ACORN (i.e. not in the ACORN-U group): this will allow us to better model future analysis by adding the Acorn detail informations (which by definitions, won't be available for the ACORN-U group).
# Keep only standard-tariff (non-dToU) households belonging to the ACORN-E
# group; their Acorn details are available for later modelling.
household_filename = os.path.join(DATA, 'energy-london', 'informations_households.csv')
household_df = pd.read_csv(household_filename)
std_acorn_e = (household_df['stdorToU'] == 'Std') & (household_df['Acorn'] == 'ACORN-E')
household_df = household_df[std_acorn_e]
print(household_df.shape)
household_df.head()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Associating households with their energy consumption dataEach household (with an ID starting by `MACxxxxx` in the table above) has its consumption data stored in a block file name `block_xx`. This file is also available from the `informations_households.csv` file extracted above. We have the association between `household_id` and `block_file`: we can open each of them and keep the consumption for the households of interest. All these data will be concatenated into an `energy_df` dataframe:
%%time
# Build (or reload) the half-hourly consumption table for the selected
# households, align hourly weather data to it, and preview one household.
household_ids = household_df['LCLid'].tolist()
consumption_file = os.path.join(DATA, 'energy-london', 'hourly_consumption.csv')
# Minimum number of half-hour datapoints to keep a household
# (one full year at 48 half-hours per day).
min_data_points = ((pd.to_datetime('2020-12-31') - pd.to_datetime('2020-01-01')).days + 1)*24*2

if os.path.exists(consumption_file):
    print('Half-hourly consumption file already exists, loading from disk...')
    energy_df = pd.read_csv(consumption_file)
    energy_df['timestamp'] = pd.to_datetime(energy_df['timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
    print('Done.')
else:
    print('Half-hourly consumption file not found. We need to generate it.')
    # We now have the block number we can use to open the right file:
    energy_df = pd.DataFrame()
    target_block_files = household_df['file'].unique().tolist()
    print('- {} block files to process: '.format(len(target_block_files)), end='')
    df_list = []
    for block_file in tqdm(target_block_files):
        # Reads the current block file:
        current_filename = os.path.join(DATA, 'energy-london', 'halfhourly_dataset', '{}.csv'.format(block_file))
        df = pd.read_csv(current_filename)

        # Set readable column names and adjust data types:
        df.columns = ['household_id', 'timestamp', 'energy']
        df = df.replace(to_replace='Null', value=0.0)
        df['energy'] = df['energy'].astype(np.float64)
        df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S.%f')

        # We filter on the households sampled earlier:
        df_list.append(df[df['household_id'].isin(household_ids)].reset_index(drop=True))

    # Concatenate with the main dataframe:
    energy_df = pd.concat(df_list, axis='index', ignore_index=True)

    # Drop households with less than a year of half-hourly datapoints.
    datapoints = energy_df.groupby(by='household_id').count()
    datapoints = datapoints[datapoints['timestamp'] < min_data_points]
    hhid_to_remove = datapoints.index.tolist()
    energy_df = energy_df[~energy_df['household_id'].isin(hhid_to_remove)]

    # Let's save this dataset to disk, we will use it from now on:
    print('Saving file to disk... ', end='')
    energy_df.to_csv(consumption_file, index=False)
    print('Done.')

# Restrict the weather data to the consumption time range.
start = np.min(energy_df['timestamp'])
end = np.max(energy_df['timestamp'])
weather_filename = os.path.join(DATA, 'energy-london', 'weather_hourly_darksky.csv')
weather_df = pd.read_csv(weather_filename)
weather_df['time'] = pd.to_datetime(weather_df['time'], format='%Y-%m-%d %H:%M:%S')
weather_df = weather_df.drop(columns=['precipType', 'icon', 'summary'])
weather_df = weather_df.sort_values(by='time')
weather_df = weather_df.set_index('time')
weather_df = weather_df[start:end]

# Let's make sure we have one datapoint per hour to match
# the frequency used for the household energy consumption data:
weather_df = weather_df.resample(rule='1H').mean() # This will generate NaN values timestamp missing data
weather_df = weather_df.interpolate(method='linear') # This will fill the missing values with the average
print(weather_df.shape)
weather_df

# Multi-index (household, timestamp) makes per-household slicing easy.
energy_df = energy_df.set_index(['household_id', 'timestamp'])
energy_df

# Plot two weeks of one household's consumption against temperature.
hhid = household_ids[2]
hh_energy = energy_df.loc[hhid, :]
start = '2012-07-01'
end = '2012-07-15'
fig = plt.figure(figsize=(5,1))
ax1 = fig.add_subplot(1,1,1)
plot2 = ax1.fill_between(
    x=weather_df.loc[start:end, 'temperature'].index,
    y1=0.0,
    y2=weather_df.loc[start:end, 'temperature'],
    color=colors[1],
    linewidth=0.0,
    edgecolor='#000000',
    alpha=0.25,
    hatch="//////",
    label='Temperature'
)
ax1.set_ylim((0,40))
ax1.grid(False)
ax2 = ax1.twinx()
# White underlay first, then the thin colored line on top for readability.
ax2.plot(hh_energy[start:end], label='Energy consumption', linewidth=2, color='#FFFFFF', alpha=0.5)
plot1 = ax2.plot(hh_energy[start:end], label='Energy consumption', linewidth=0.7)
ax2.set_title(f'Energy consumption for household {hhid}')
labels = [plot1[0].get_label(), plot2.get_label()]
plt.legend(handles=[plot1[0], plot2], labels=labels, loc='upper left', fontsize=3, ncol=2)
plt.show()

# Sample of the ACORN socio-demographic details table.
acorn_filename = os.path.join(DATA, 'energy-london', 'acorn_details.csv')
acorn_df = pd.read_csv(acorn_filename, encoding='ISO-8859-1')
acorn_df = acorn_df.sample(10).loc[:, ['MAIN CATEGORIES', 'CATEGORIES', 'REFERENCE', 'ACORN-A', 'ACORN-B', 'ACORN-E']]
acorn_df
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
File structure exploration---
from IPython.display import display_html

def display_multiple_dataframe(*args, max_rows=None, max_cols=None):
    """Render several dataframes side by side in the notebook output.

    :param args: dataframes to display, left to right
    :param max_rows: forwarded to DataFrame.to_html (None = all rows)
    :param max_cols: forwarded to DataFrame.to_html (None = all columns)
    """
    html_str = ''
    for df in args:
        html_str += df.to_html(max_cols=max_cols, max_rows=max_rows)
    # display:inline lets the tables flow horizontally instead of stacking.
    display_html(html_str.replace('table','table style="display:inline"'), raw=True)

# One table per sensor:
display_multiple_dataframe(
    file_structure_df[['sensor_00']],
    file_structure_df[['sensor_01']],
    file_structure_df[['sensor_03']],
    max_rows=10, max_cols=None
)
# One table per month, all sensors:
display_multiple_dataframe(
    file_structure_df.loc['2018-04', :].head(6),
    file_structure_df.loc['2018-05', :].head(6),
    file_structure_df.loc['2018-06', :].head(6),
    max_rows=None, max_cols=2
)
# One table per month for a single sensor:
display_multiple_dataframe(
    file_structure_df.loc['2018-04', ['sensor_00']].head(6),
    file_structure_df.loc['2018-05', ['sensor_00']].head(6),
    file_structure_df.loc['2018-06', ['sensor_00']].head(6),
    max_rows=10, max_cols=None
)
display_multiple_dataframe(
    file_structure_df.loc['2018-04', ['sensor_01']].head(6),
    file_structure_df.loc['2018-05', ['sensor_01']].head(6),
    file_structure_df.loc['2018-06', ['sensor_01']].head(6),
    max_rows=10, max_cols=None
)
print('.\n.\n.')
display_multiple_dataframe(
    file_structure_df.loc['2018-04', ['sensor_09']].head(6),
    file_structure_df.loc['2018-05', ['sensor_09']].head(6),
    file_structure_df.loc['2018-06', ['sensor_09']].head(6),
    max_rows=10, max_cols=None
)

# Three sensors resampled at different frequencies, then outer-merged on the
# timestamp index to illustrate how misaligned series combine (NaN gaps):
df1 = pump_df.iloc[:, [0]].resample('5D').mean()
df2 = pump_df.iloc[:, [1]].resample('2D').mean()
df3 = pump_df.iloc[:, [2]].resample('7D').mean()
display_multiple_dataframe(
    df1.head(10), df2.head(10), df3.head(10),
    pd.merge(pd.merge(df1, df2, left_index=True, right_index=True, how='outer'), df3, left_index=True, right_index=True, how='outer').head(10),
    max_rows=None, max_cols=None
)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 10)
pd.merge(pd.merge(df1, df2, left_index=True, right_index=True, how='outer'), df3, left_index=True, right_index=True, how='outer').head(10)

plt.figure(figsize=(5,1))
for i in range(len(colors)):
    # BUGFIX: the legend previously used label=colors[i], which printed raw
    # hex color codes; label each curve with its sensor name instead.
    plt.plot(file_structure_df[f'sensor_0{i}'], linewidth=2, alpha=0.5, label=f'sensor_0{i}')
plt.legend()
plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Visualization---
# Two sensors on twin y-axes (their value ranges differ):
fig = plt.figure(figsize=(5,1))
ax1 = fig.add_subplot(1,1,1)
ax2 = ax1.twinx()
plot_sensor_0 = ax1.plot(pump_df['sensor_00'], label='Sensor 0', color=colors[0], linewidth=1, alpha=0.8)
plot_sensor_1 = ax2.plot(pump_df['sensor_01'], label='Sensor 1', color=colors[1], linewidth=1, alpha=0.8)
ax2.grid(False)
plt.title('Pump sensor values (2 sensors)')
plt.legend(handles=[plot_sensor_0[0], plot_sensor_1[0]], ncol=2, loc='lower right')
plt.show()

# Sanitize the first 15 sensors (inf -> NaN -> 0.0) before normalizing:
reduced_pump_df = pump_df.loc[:, 'sensor_00':'sensor_14']
reduced_pump_df = reduced_pump_df.replace([np.inf, -np.inf], np.nan)
reduced_pump_df = reduced_pump_df.fillna(0.0)
reduced_pump_df = reduced_pump_df.astype(np.float32)
scaled_pump_df = pd.DataFrame(normalize(reduced_pump_df), index=reduced_pump_df.index, columns=reduced_pump_df.columns)
scaled_pump_df

fig = plt.figure(figsize=(5,1))
for i in range(0,15):
    plt.plot(scaled_pump_df.iloc[:, i], alpha=0.6)
plt.title('Pump sensor values (15 sensors)')
plt.show()

# Full sensor set, sanitized the same way:
pump_df2 = pump_df.copy()
pump_df2 = pump_df2.replace([np.inf, -np.inf], np.nan)
pump_df2 = pump_df2.fillna(0.0)
pump_df2 = pump_df2.astype(np.float32)
pump_description = pump_df2.describe().T
# A signal whose min equals its max carries no information; drop it.
constant_signals = pump_description[pump_description['min'] == pump_description['max']].index.tolist()
pump_df2 = pump_df2.drop(columns=constant_signals)
features = pump_df2.columns.tolist()

def hex_to_rgb(hex_color):
    """
    Converts a color string in hexadecimal format to RGB format.

    PARAMS
    ======
        hex_color: string
            A string describing the color to convert from hexadecimal.
            It can include the leading # character or not

    RETURNS
    =======
        rgb_color: tuple
            Each color component of the returned tuple will be a float
            value between 0.0 and 1.0
    """
    hex_color = hex_color.lstrip('#')
    # Each pair of hex digits is one channel, scaled to [0.0, 1.0]:
    rgb_color = tuple(int(hex_color[i:i+2], base=16) / 255.0 for i in [0, 2, 4])
    return rgb_color

def plot_timeseries_strip_chart(binned_timeseries, signal_list, fig_width=12, signal_height=0.15, dates=None, day_interval=7):
    """Render a matrix of discretized signals as a colored strip chart.

    :param binned_timeseries: 2D array, one row per signal, values in 3 bins
    :param signal_list: labels for the y-axis, one per row
    :param fig_width: figure width in inches
    :param signal_height: vertical size allotted to each signal strip
    :param dates: optional datetimes for the x-axis extent/labels
    :param day_interval: NOTE -- accepted but not used by this implementation
        (the x-axis uses a MonthLocator below)
    """
    # Build a suitable colormap (red / amber / green, one color per bin):
    colors_list = [
        hex_to_rgb('#DC322F'),
        hex_to_rgb('#B58900'),
        hex_to_rgb('#2AA198')
    ]
    cm = mpl_colors.LinearSegmentedColormap.from_list('RdAmGr', colors_list, N=len(colors_list))
    fig = plt.figure(figsize=(fig_width, signal_height * binned_timeseries.shape[0]))
    ax = fig.add_subplot(1,1,1)
    # Devising the extent of the actual plot (pad half a step on each side):
    if dates is not None:
        dnum = mdates.date2num(dates)
        start = dnum[0] - (dnum[1]-dnum[0])/2.
        stop = dnum[-1] + (dnum[1]-dnum[0])/2.
        extent = [start, stop, 0, signal_height * (binned_timeseries.shape[0])]
    else:
        extent = None
    # Plot the matrix:
    im = ax.imshow(binned_timeseries, extent=extent, aspect="auto", cmap=cm, origin='lower')
    # Adjusting the x-axis if we provide dates:
    if dates is not None:
        ax.xaxis.set_major_locator(mdates.MonthLocator())
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        # NOTE(review): `tick.label` is deprecated in recent matplotlib
        # releases (use tick.label1) -- confirm the pinned version.
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(4)
            tick.label.set_rotation(60)
            tick.label.set_fontweight('bold')
        ax.tick_params(axis='x', which='major', pad=7, labelcolor='#000000')
        plt.xticks(ha='right')
    # Adjusting the y-axis (one tick per signal strip):
    ax.yaxis.set_major_locator(ticker.MultipleLocator(signal_height))
    ax.set_yticklabels(signal_list, verticalalignment='bottom', fontsize=4)
    ax.set_yticks(np.arange(len(signal_list)) * signal_height)
    plt.grid()
    return ax

from IPython.display import display, Markdown, Latex
# Build a list of dataframes, one per sensor:
df_list = []
for f in features[:1]:
    df_list.append(pump_df2[[f]])
# Discretize each signal in 3 bins:
array = tsia.markov.discretize_multivariate(df_list)

fig = plt.figure(figsize=(5.5, 0.6))
plt.plot(pump_df2['sensor_00'], linewidth=0.7, alpha=0.6)
plt.title('Line plot of the pump sensor 0')
plt.show()
display(Markdown('<img src="arrow.png" align="left" style="padding-left: 730px"/>'))
# Plot the strip chart:
ax = plot_timeseries_strip_chart(
    array,
    signal_list=features[:1],
    fig_width=5.21,
    signal_height=0.2,
    dates=df_list[0].index.to_pydatetime(),
    day_interval=2
)
ax.set_title('Strip chart of the pump sensor 0');

# Build a list of dataframes, one per sensor:
df_list = []
for f in features:
    df_list.append(pump_df2[[f]])
# Discretize each signal in 3 bins:
array = tsia.markov.discretize_multivariate(df_list)
# Plot the strip chart:
fig = plot_timeseries_strip_chart(
    array,
    signal_list=features,
    fig_width=5.5,
    signal_height=0.1,
    dates=df_list[0].index.to_pydatetime(),
    day_interval=2
)
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Recurrence plot
from pyts.image import RecurrencePlot
from pyts.image import GramianAngularField
from pyts.image import MarkovTransitionField

# The three series compared throughout this section:
hhid = household_ids[2]
hh_energy = energy_df.loc[hhid, :]
pump_extract_df = pump_df.iloc[:800, 0].copy()

# --- Recurrence plots -------------------------------------------------------
rp = RecurrencePlot(threshold='point', percentage=30)
weather_rp = rp.fit_transform(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1))
energy_rp = rp.fit_transform(hh_energy['2012-07-01':'2012-07-15'].values.reshape(1, -1))
pump_rp = rp.fit_transform(pump_extract_df.values.reshape(1, -1))

fig = plt.figure(figsize=(5.5, 2.4))
gs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)
# Pump sensor 0:
ax = fig.add_subplot(gs[0])
ax.plot(pump_extract_df, label='Pump sensor 0')
ax.set_title(f'Pump sensor 0')
ax = fig.add_subplot(gs[1])
ax.imshow(pump_rp[0], cmap='binary', origin='lower')
ax.axis('off')
# Energy consumption line plot and recurrence plot:
ax = fig.add_subplot(gs[2])
plot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])
ax.set_title(f'Energy consumption for household {hhid}')
ax = fig.add_subplot(gs[3])
ax.imshow(energy_rp[0], cmap='binary', origin='lower')
ax.axis('off')
# Daily temperature line plot and recurrence plot:
ax = fig.add_subplot(gs[4])
start = '2012-07-01'
end = '2012-07-15'
ax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])
ax.set_title(f'Daily temperature')
ax = fig.add_subplot(gs[5])
ax.imshow(weather_rp[0], cmap='binary', origin='lower')
ax.axis('off')
plt.show()

# --- Gramian angular summation fields ---------------------------------------
hhid = household_ids[2]
hh_energy = energy_df.loc[hhid, :]
pump_extract_df = pump_df.iloc[:800, 0].copy()
gaf = GramianAngularField(image_size=48, method='summation')
weather_gasf = gaf.fit_transform(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1))
energy_gasf = gaf.fit_transform(hh_energy['2012-07-01':'2012-07-15'].values.reshape(1, -1))
pump_gasf = gaf.fit_transform(pump_extract_df.values.reshape(1, -1))

fig = plt.figure(figsize=(5.5, 2.4))
gs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)
# Pump sensor 0:
ax = fig.add_subplot(gs[0])
ax.plot(pump_extract_df, label='Pump sensor 0')
ax.set_title(f'Pump sensor 0')
ax = fig.add_subplot(gs[1])
ax.imshow(pump_gasf[0], cmap='RdBu_r', origin='lower')
ax.axis('off')
# Energy consumption line plot and GASF image:
ax = fig.add_subplot(gs[2])
plot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])
ax.set_title(f'Energy consumption for household {hhid}')
ax = fig.add_subplot(gs[3])
ax.imshow(energy_gasf[0], cmap='RdBu_r', origin='lower')
ax.axis('off')
# Daily temperature line plot and GASF image:
ax = fig.add_subplot(gs[4])
start = '2012-07-01'
end = '2012-07-15'
ax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])
ax.set_title(f'Daily temperature')
ax = fig.add_subplot(gs[5])
ax.imshow(weather_gasf[0], cmap='RdBu_r', origin='lower')
ax.axis('off')
plt.show()

# --- Markov transition fields -----------------------------------------------
mtf = MarkovTransitionField(image_size=48)
weather_mtf = mtf.fit_transform(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1))
energy_mtf = mtf.fit_transform(hh_energy['2012-07-01':'2012-07-15'].values.reshape(1, -1))
pump_mtf = mtf.fit_transform(pump_extract_df.values.reshape(1, -1))

fig = plt.figure(figsize=(5.5, 2.4))
gs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)
# Pump sensor 0:
ax = fig.add_subplot(gs[0])
ax.plot(pump_extract_df, label='Pump sensor 0')
ax.set_title(f'Pump sensor 0')
ax = fig.add_subplot(gs[1])
ax.imshow(pump_mtf[0], cmap='RdBu_r', origin='lower')
ax.axis('off')
# Energy consumption line plot and MTF image:
ax = fig.add_subplot(gs[2])
plot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])
ax.set_title(f'Energy consumption for household {hhid}')
ax = fig.add_subplot(gs[3])
ax.imshow(energy_mtf[0], cmap='RdBu_r', origin='lower')
ax.axis('off')
# Daily temperature line plot and MTF image:
ax = fig.add_subplot(gs[4])
start = '2012-07-01'
end = '2012-07-15'
ax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])
ax.set_title(f'Daily temperature')
ax = fig.add_subplot(gs[5])
ax.imshow(weather_mtf[0], cmap='RdBu_r', origin='lower')
ax.axis('off')
plt.show()

# --- Network graphs built from the Markov transition fields -----------------
import matplotlib
import matplotlib.cm as cm
import networkx as nx
import community

def compute_network_graph(markov_field):
    """Build a networkx graph from an MTF, plus drawing options that encode
    the graph's community structure (modularity) in node and edge colors.

    :param markov_field: array of shape (1, n, n) as returned by
        MarkovTransitionField.fit_transform
    :returns: (G, options) ready for nx.draw_networkx(G, **options, ...)
    """
    G = nx.from_numpy_matrix(markov_field[0])
    # Uncover the communities in the current graph:
    communities = community.best_partition(G)
    nb_communities = len(pd.Series(communities).unique())
    cmap = 'autumn'
    # Compute node colors and edge colors for the modularity encoding
    # (each edge takes the color of its target node's community):
    edge_colors = [matplotlib.colors.to_hex(cm.get_cmap(cmap)(communities.get(v)/(nb_communities - 1))) for u,v in G.edges()]
    node_colors = [communities.get(node) for node in G.nodes()]
    # (A per-node clustering-based size was computed here before but never
    # used -- the options below pin node_size to a constant.)
    # Builds the options set to draw the network graph in the "modularity" configuration:
    options = {
        'node_size': 10,
        'edge_color': edge_colors,
        'node_color': node_colors,
        'linewidths': 0,
        'width': 0.1,
        'alpha': 0.6,
        'with_labels': False,
        'cmap': cmap
    }
    return G, options

fig = plt.figure(figsize=(5.5, 2.4))
gs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)
# Pump sensor 0:
ax = fig.add_subplot(gs[0])
ax.plot(pump_extract_df, label='Pump sensor 0')
ax.set_title(f'Pump sensor 0')
ax = fig.add_subplot(gs[1])
# BUGFIX: this panel previously rendered compute_network_graph(weather_mtf),
# duplicating the temperature graph; the pump panel must use pump_mtf.
G, options = compute_network_graph(pump_mtf)
nx.draw_networkx(G, **options, pos=nx.spring_layout(G), ax=ax)
ax.axis('off')
# Energy consumption line plot and network graph:
ax = fig.add_subplot(gs[2])
plot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])
ax.set_title(f'Energy consumption for household {hhid}')
ax = fig.add_subplot(gs[3])
G, options = compute_network_graph(energy_mtf)
nx.draw_networkx(G, **options, pos=nx.spring_layout(G), ax=ax)
ax.axis('off')
# Daily temperature line plot and network graph:
ax = fig.add_subplot(gs[4])
start = '2012-07-01'
end = '2012-07-15'
ax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])
ax.set_title(f'Daily temperature')
ax = fig.add_subplot(gs[5])
G, options = compute_network_graph(weather_mtf)
nx.draw_networkx(G, **options, pos=nx.spring_layout(G), ax=ax)
ax.axis('off')
plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Symbolic representation---
from pyts.bag_of_words import BagOfWords

# SAX-style bag-of-words: each 30-sample window becomes a 5-letter word.
window_size, word_size = 30, 5
bow = BagOfWords(window_size=window_size, word_size=word_size, window_step=window_size, numerosity_reduction=False)
X = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1)
X_bow = bow.transform(X)
time_index = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].index
len(X_bow[0].replace(' ', ''))

# Plot the considered subseries (one color per non-overlapping window):
plt.figure(figsize=(5, 2))
splits_series = np.linspace(0, X.shape[1], 1 + X.shape[1] // window_size, dtype='int64')
for start, end in zip(splits_series[:-1], np.clip(splits_series[1:] + 1, 0, X.shape[1])):
    plt.plot(np.arange(start, end), X[0, start:end], 'o-', linewidth=0.5, ms=0.1)
# Plot the corresponding letters, centered on the segment they summarize:
splits_letters = np.linspace(0, X.shape[1], 1 + word_size * X.shape[1] // window_size)
splits_letters = ((splits_letters[:-1] + splits_letters[1:]) / 2)
splits_letters = splits_letters.astype('int64')
for i, (x, text) in enumerate(zip(splits_letters, X_bow[0].replace(' ', ''))):
    # i // 5 groups the letters of one word under one color:
    t = plt.text(x, X[0, x], text, color="C{}".format(i // 5), fontsize=3.5)
    t.set_bbox(dict(facecolor='#FFFFFF', alpha=0.5, edgecolor="C{}".format(i // 5), boxstyle='round4'))
plt.title('Bag-of-words representation for weather temperature')
plt.tight_layout()
plt.show()

# WEASEL: supervised word-extraction features for the ECG dataset.
from pyts.transformation import WEASEL
from sklearn.preprocessing import LabelEncoder
X_train = ecg_df.iloc[:, 1:].values
y_train = ecg_df.iloc[:, 0]
y_train = LabelEncoder().fit_transform(y_train)
weasel = WEASEL(word_size=3, n_bins=3, window_sizes=[10, 25], sparse=False)
X_weasel = weasel.fit_transform(X_train, y_train)
vocabulary_length = len(weasel.vocabulary_)
# Grouped bar chart: word counts per class, one bar pair per vocabulary word.
plt.figure(figsize=(5,1.5))
width = 0.4
x = np.arange(vocabulary_length) - width / 2
for i in range(len(X_weasel[y_train == 0])):
    # Only the first series of each class carries the legend label:
    if i == 0:
        plt.bar(x, X_weasel[y_train == 0][i], width=width, alpha=0.25, color=colors[1], label='Time series for Ischemia')
    else:
        plt.bar(x, X_weasel[y_train == 0][i], width=width, alpha=0.25, color=colors[1])
for i in range(len(X_weasel[y_train == 1])):
    if i == 0:
        plt.bar(x+width, X_weasel[y_train == 1][i], width=width, alpha=0.25, color=colors[0], label='Time series for Normal heartbeat')
    else:
        plt.bar(x+width, X_weasel[y_train == 1][i], width=width, alpha=0.25, color=colors[0])
plt.xticks(
    np.arange(vocabulary_length),
    np.vectorize(weasel.vocabulary_.get)(np.arange(X_weasel[0].size)),
    fontsize=2,
    rotation=60
)
plt.legend(loc='upper right')
plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Statistics---
plt.rcParams['xtick.labelsize'] = 3 import statsmodels.api as sm fig = plt.figure(figsize=(5.5, 3)) gs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[1,1], hspace=0.8) # Pump ax = fig.add_subplot(gs[0]) ax.plot(pump_extract_df, label='Pump sensor 0') ax.set_title(f'Pump sensor 0') ax.tick_params(axis='x', which='both', labelbottom=False) ax = fig.add_subplot(gs[1]) sm.graphics.tsa.plot_acf(pump_extract_df.values.squeeze(), ax=ax, markersize=1, title='') ax.set_ylim(-1.2, 1.2) ax.tick_params(axis='x', which='major', labelsize=4) # Energy consumption ax = fig.add_subplot(gs[2]) ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1]) ax.set_title(f'Energy consumption for household {hhid}') ax.tick_params(axis='x', which='both', labelbottom=False) ax = fig.add_subplot(gs[3]) sm.graphics.tsa.plot_acf(hh_energy['2012-07-01':'2012-07-15'].values.squeeze(), ax=ax, markersize=1, title='') ax.set_ylim(-0.3, 0.3) ax.tick_params(axis='x', which='major', labelsize=4) # Daily temperature: ax = fig.add_subplot(gs[4]) start = '2012-07-01' end = '2012-07-15' ax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2]) ax.set_title(f'Daily temperature') ax.tick_params(axis='x', which='both', labelbottom=False) ax = fig.add_subplot(gs[5]) sm.graphics.tsa.plot_acf(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.squeeze(), ax=ax, markersize=1, title='') ax.set_ylim(-1.2, 1.2) ax.tick_params(axis='x', which='major', labelsize=4) plt.show() from statsmodels.tsa.seasonal import STL endog = endog.resample('30T').mean() plt.rcParams['lines.markersize'] = 1 title = f'Energy consumption for household {hhid}' endog = hh_energy['2012-07-01':'2012-07-15'] endog.columns = [title] endog = endog[title] stl = STL(endog, period=48) res = stl.fit() fig = res.plot() fig = plt.gcf() fig.set_size_inches(5.5, 4) plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
Binary segmentation---
signal = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.squeeze() algo = rpt.Binseg(model='l2').fit(signal) my_bkps = algo.predict(n_bkps=3) my_bkps = [0] + my_bkps my_bkps fig = plt.figure(figsize=(5.5,1)) start = '2012-07-01' end = '2012-07-15' plt.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color='#FFFFFF', linewidth=1.2, alpha=0.8) plt.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2], linewidth=0.7) plt.title(f'Daily temperature') plt.xticks(rotation=60, fontsize=4) weather_index = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].index for index, bkps in enumerate(my_bkps[:-1]): x1 = weather_index[my_bkps[index]] x2 = weather_index[np.clip(my_bkps[index+1], 0, len(weather_index)-1)] plt.axvspan(x1, x2, color=colors[index % 5], alpha=0.2) plt.title('Daily temperature segmentation') plt.show()
_____no_output_____
MIT
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
MOHID visualisation tools
from IPython.display import HTML HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> <form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''') import matplotlib.pyplot as plt import xarray as xr import numpy as np import cmocean %matplotlib inline
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
How to Parse time into datetime64 string format
from datetime import datetime, timedelta from dateutil.parser import parse def to_datetime64(time): """Convert string to string in datetime64[s] format :arg time: string :return datetime64: str in datetime64[s] format """ time = parse(time) # parse to datetime format # now just take care of formatting year, month, day, hour, minute, second = str(time.year), str(time.month), str(time.day), str(time.hour), str(time.minute), str(time.second) if len(month) < 2: month = '0' + month if len(day) < 2: day = '0' + day if len(hour) < 2: hour = '0' + hour if len(minute) < 2: minute = '0' + minute if len(second) < 2: second = '0' + second datetime64 = '{}-{}-{}T{}:{}:{}'.format(year, month, day, hour, minute, second) return datetime64
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Usage:
to_datetime64('1 Jan 2016')
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Generate heat maps of vertical velocities Getting depth slices
# load a profile sog2015 = xr.open_dataset('Vertical_velocity_profiles/sog2015.nc') sog2015 # slice by layer index sog2015.vovecrtz.isel(depthw = slice(0,11)) # slice explicitly by layer depth # print depth with corresponding index for i in zip(range(40), sog2015.depthw.values): print(i) sog2015.vovecrtz.sel(depthw = slice(0.0, 10.003407))
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Getting time slices using parsing
# this is where to_datetime64 comes in handy # getting the first week in january sog2015.sel(time_counter = slice(to_datetime64('1 jan 2015'), to_datetime64('7 jan 2015')))
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Slicing by time and depth at the same time
slice_example = sog2015.vovecrtz.sel(time_counter = slice(to_datetime64('1 jan 2015'), to_datetime64('7 jan 2015'))).isel(depthw = slice(0,11)) slice_example
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Plotting the slice
slice_example.T.plot(cmap = 'RdBu') # transposed to have depth on y axis. cmap specified as RdBu. plt.gca().invert_yaxis()
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Extracting the data you just visualised
a_slice.data()
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Plotting the trend of the depth of maximum vertical change
def find_bottom(array): """Find the bottom depth layer index :arg array: one dimesional array (profile at giventime stamp) :returns bottom: int, 1 + index of sea floor layer """ i=-1 for value in np.flip(array): if value != 0: bottom = 39-i return bottom else: i=i+1 def max_delta(depths, truncated_array): """return raw plot data for depth of maximum delta """ # time is axis 0, depth is axis 1 difference = np.abs(np.diff(truncated_array, axis=1)) data = (depths[np.argmax(difference, axis=1)]) return data, difference depths = sog2015.depthw.values array = sog2015.vovecrtz.sel(time_counter = slice(convert_timestamp('1 Jan 2015'), convert_timestamp('7 Jan 2015'))) bottom_index = find_bottom(array[0].values) truncated_array = array.isel(depthw = slice(0,bottom_index)).values times = array.time_counter.values delta, difference = max_delta(depths,truncated_array) fig = plt.figure(figsize=(10,5)) plt.plot(times, delta) plt.xlim(times[0], times[-1]) plt.ylim(depths[0], depths[-1]) plt.hlines(depths[bottom_index-1], times[0], times[-1], label = 'sea floor') plt.hlines(depths[0:bottom_index], times[0], times[-1], linewidth = 0.25, label='layer depths') plt.gca().invert_yaxis() plt.ylabel('layer depth (m)') plt.title('Timeseries of depth of maximum chnage in vertical velocity') plt.legend()
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Salinity profiles with shaded range region
import seaborn as sns palette = sns.color_palette("Reds", n_colors = 14) sal_sog2015 = xr.open_dataset('salinity_profiles/salinity_sog2015.nc') A = sal_sog2015.sel(time_counter = slice(to_datetime64('1 Jan 2015'),to_datetime64('8 Jan 2015'))) fig = plt.figure(figsize = (10,10)) ax = plt.subplot(111) depths = A.deptht.values.T #bottom = find_bottom(A.isel(time_counter= 0).vosaline.values) bottom = 11 try: for i in range(14): plt.plot(A.vosaline.isel(time_counter = 12*i).values[0: bottom],depths[0: bottom], label = A.time_counter.values[12*i], color = palette[i]) except IndexError: pass # find the fill_between values low, high = np.min(A.vosaline.values,axis = 0)[0: bottom], np.max(A.vosaline.values, axis=0)[0:bottom] mean = np.average(A.vosaline.values,axis = 0)[0: bottom] stddev = np.std(A.vosaline.values,axis = 0)[0: bottom] plt.plot(mean,depths[0: bottom], 'k--',label = 'Average Salinity') plt.fill_betweenx(depths[0:bottom],low, high, facecolor = 'lightgray', label = 'Range') plt.fill_betweenx(depths[0:bottom], mean-stddev, mean+stddev,facecolor = 'deepskyblue', label = '1 Std. Dev') ax.set_ylim(depths[0], depths[bottom-1]) plt.gca().invert_yaxis() plt.legend(loc='lower left') plt.ylabel('Ocean Depth [m]') plt.xlabel('Salinity [g kg-1]') plt.title('Salinity profiles over a week, showing profile every 12th hour')
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Heat maps of Salinity
salinity_slice = sal_sog2015.sel(time_counter=slice(to_datetime64('1 Jan 2015'), to_datetime64('7 jan 2015'))) salinity_slice.vosaline.T.plot(cmap = cmocean.cm.haline) plt.gca().invert_yaxis()
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
Difference between surface and bottom salinity
salinity_slice = sal_sog2015.sel(time_counter=slice(to_datetime64('1 Jan 2015'), to_datetime64('7 jan 2015'))) bottom = find_bottom(sal_sog2015.vosaline.isel(time_counter=0).values) # plot the difference between the surface and bottom salinity diff = salinity_slice.isel(deptht = 0) - salinity_slice.isel(deptht = bottom-1) diff.vosaline.plot() plt.title('(Surface Salinity - Bottom Salinity) [g.cm-3]') plt.ylabel('(Surface - Bottom Salinity) [g kg-1]') depths = sal_sog2015.deptht.values array = sal_sog2015.vosaline.sel(time_counter = slice(convert_timestamp('1 Jan 2015'), convert_timestamp('7 Jan 2015'))) bottom_index = find_bottom(array[0].values) truncated_array = array.isel(deptht = slice(0,bottom_index)).values times = array.time_counter.values delta, difference = max_delta(depths,truncated_array) fig = plt.figure(figsize=(10,5)) plt.plot(times, delta) plt.xlim(times[0], times[-1]) plt.ylim(depths[0], depths[-1]) plt.hlines(depths[bottom_index-1], times[0], times[-1], label = 'sea floor') plt.hlines(depths[0:bottom_index], times[0], times[-1], linewidth = 0.25, label='layer depths') plt.gca().invert_yaxis() plt.ylabel('layer depth (m)') plt.title('Timeseries of Halocline depth') plt.legend()
_____no_output_____
Apache-2.0
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
ML Lab 3 Neural NetworksIn the following exercise class we explore how to design and train neural networks in various ways. Prerequisites:In order to follow the exercises you need to:1. Activate your conda environment from last week via: `source activate <env-name>` 2. Install tensorflow (https://www.tensorflow.org) via: `pip install tensorflow` (CPU-only)3. Install keras (provides high level wrapper for tensorflow) (https://keras.io) via: `pip install keras` Exercise 1: Create a 2 layer network that acts as an XOR gate using numpy.XOR is a fundamental logic gate that outputs a one whenever there is an odd parity of ones in its input and zero otherwise. For two inputs this can be thought of as an exclusive or operation and the associated boolean function is fully characterized by the following truth table.| X | Y | XOR(X,Y) ||---|---|----------|| 0 | 0 | 0 || 0 | 1 | 1 || 1 | 0 | 1 || 1 | 1 | 0 |The function of an XOR gate can also be understood as a classification problem on $v \in \{0,1\}^2$ and we can think about designing a classifier acting as an XOR gate. It turns out that this problem is not solvable by any single layer perceptron (https://en.wikipedia.org/wiki/Perceptron) because the set of points $\{(0,0), (0,1), (1,0), (1,1)\}$ is not linearly separable.**Design a two layer perceptron using basic numpy matrix operations that implements an XOR Gate on two inputs. Think about the flow of information and accordingly set the weight values by hand.** Data
import numpy as np def generate_xor_data(): X = [(i,j) for i in [0,1] for j in [0,1]] y = [int(np.logical_xor(x[0], x[1])) for x in X] return X, y print(generate_xor_data())
([(0, 0), (0, 1), (1, 0), (1, 1)], [0, 1, 1, 0])
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
HintsA single layer in a multilayer perceptron can be described by the equation $y = f(\vec{b} + W\vec{x})$ with $f$ the logistic function, a smooth and differentiable version of the step function, and defined as $f(z) = \frac{1}{1+e^{-z}}$. $\vec{b}$ is the so called bias, a constant offset vector and $W$ is the weight matrix. However, since we set the weights by hand feel free to use hard thresholding instead of using the logistic function. Write down the equation for a two layer MLP and implement it with numpy. For documentation see https://docs.scipy.org/doc/numpy-1.13.0/reference/
""" Implement your solution here. """
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Solution | X | Y | AND(NOT X, Y) | AND(X,NOT Y) | OR[AND(NOT X, Y), AND(X, NOT Y)]| XOR(X,Y) ||---|---|---------------|--------------|---------------------------------|----------|| 0 | 0 | 0 | 0 | 0 | 0 || 0 | 1 | 1 | 0 | 1 | 1 || 1 | 0 | 0 | 1 | 1 | 1 || 1 | 1 | 0 | 0 | 0 | 0 |Implement XOR as a combination of 2 AND Gates and 1 OR gate where each neuron in the network acts as one of these gates.
""" Definitions: Input = np.array([X,Y]) 0 if value < 0.5 1 if value >= 0.5 """ def threshold(vector): return (vector>=0.5).astype(float) def mlp(x, W0, W1, b0, b1, f): x0 = f(np.dot(W0, x) + b0) x1 = f(np.dot(W1, x0) + b1) return x1 # AND(NOT X, Y) w_andnotxy = np.array([-1.0, 1.0]) # AND(X, NOT Y) w_andxnoty = np.array([1.0, -1.0]) # W0 weight matrix: W0 = np.vstack([w_andnotxy, w_andxnoty]) # OR(X,Y) w_or = np.array([1., 1.]) W1 = w_or # No biases needed b0 = np.array([0.0,0.0]) b1 = 0.0 print("Input", "Output", "XOR") xx,yy = generate_xor_data() for x,y in zip(xx, yy): print(x, int(mlp(x, W0, W1, b0, b1, threshold))," ", y)
Input Output XOR (0, 0) 0 0 (0, 1) 1 1 (1, 0) 1 1 (1, 1) 0 0
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Exercise 2: Use Keras to design, train and evaluate a neural network that can classify points on a 2D plane. Data generator
import numpy as np import matplotlib.pyplot as plt def generate_spiral_data(n_points, noise=1.0): n = np.sqrt(np.random.rand(n_points,1)) * 780 * (2*np.pi)/360 d1x = -np.cos(n)*n + np.random.rand(n_points,1) * noise d1y = np.sin(n)*n + np.random.rand(n_points,1) * noise return (np.vstack((np.hstack((d1x,d1y)),np.hstack((-d1x,-d1y)))), np.hstack((np.zeros(n_points),np.ones(n_points))))
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Training data
X_train, y_train = generate_spiral_data(1000) plt.title('Training set') plt.plot(X_train[y_train==0,0], X_train[y_train==0,1], '.', label='Class 1') plt.plot(X_train[y_train==1,0], X_train[y_train==1,1], '.', label='Class 2') plt.legend() plt.show()
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Test data
X_test, y_test = generate_spiral_data(1000) plt.title('Test set') plt.plot(X_test[y_test==0,0], X_test[y_test==0,1], '.', label='Class 1') plt.plot(X_test[y_test==1,0], X_test[y_test==1,1], '.', label='Class 2') plt.legend() plt.show()
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
2.1. Design and train your modelThe current model performs badly, try to find a more advanced architecture that is able to solve the classification problem. Read the following code snippet and understand the involved functions. Vary width and depth of the network and play around with activation functions, loss functions and optimizers to achieve a better result. Read up on parameters and functions for sequential models at https://keras.io/getting-started/sequential-model-guide/.
from keras.models import Sequential
from keras.layers import Dense

"""
Replace the following model with yours and try to achieve better
classification performance.
"""
# Deliberately weak baseline: a single 12-unit tanh hidden layer.
bad_model = Sequential()
bad_model.add(Dense(12, input_dim=2, activation='tanh'))
bad_model.add(Dense(1, activation='sigmoid'))

# NOTE(review): mean squared error on a sigmoid output is a poor fit for
# binary classification (binary cross-entropy is standard) — part of why
# this model underperforms.
bad_model.compile(loss='mean_squared_error',
                  optimizer='SGD',  # SGD = Stochastic Gradient Descent
                  metrics=['accuracy'])

# Train the model (silent: verbose=0)
bad_model.fit(X_train, y_train, epochs=150, batch_size=10, verbose=0)
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Predict
# Threshold the sigmoid scores at 0.5 via rounding; flatten (N, 1) -> (N,).
raw_scores = bad_model.predict(X_test)
bad_prediction = np.round(raw_scores[:, 0])
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Visualize
# Side-by-side comparison: ground-truth labels vs. the bad model's predictions.
plt.subplot(1,2,1)
plt.title('Test set')
plt.plot(X_test[y_test==0,0], X_test[y_test==0,1], '.')
plt.plot(X_test[y_test==1,0], X_test[y_test==1,1], '.')

plt.subplot(1,2,2)
plt.title('Bad model classification')
# Color points by the model's predicted class rather than the true one.
plt.plot(X_test[bad_prediction==0,0], X_test[bad_prediction==0,1], '.')
plt.plot(X_test[bad_prediction==1,0], X_test[bad_prediction==1,1], '.')
plt.show()
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
2.2. Visualize the decision boundary of your model.
""" Implement your solution here. """
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Solution Model design and training
from keras.layers import Dense, Dropout  # NOTE(review): Dropout imported but unused

# Deeper network: three 64-unit ReLU hidden layers can carve the spiral shape.
good_model = Sequential()
good_model.add(Dense(64, input_dim=2, activation='relu'))
good_model.add(Dense(64, activation='relu'))
good_model.add(Dense(64, activation='relu'))
good_model.add(Dense(1, activation='sigmoid'))

# Binary cross-entropy is the appropriate loss for a sigmoid classifier.
good_model.compile(loss='binary_crossentropy',
                   optimizer='rmsprop',
                   metrics=['accuracy'])

good_model.fit(X_train, y_train, epochs=150, batch_size=10, verbose=0)
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Prediction
# Collapse the (N, 1) sigmoid output to a flat vector, then round to {0, 1}.
probs = good_model.predict(X_test).ravel()
good_prediction = np.round(probs)
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Visualization Performance
# Side-by-side comparison: ground-truth labels vs. the good model's predictions.
plt.subplot(1,2,1)
plt.title('Test set')
plt.plot(X_test[y_test==0,0], X_test[y_test==0,1], '.')
plt.plot(X_test[y_test==1,0], X_test[y_test==1,1], '.')

plt.subplot(1,2,2)
plt.title('Good model classification')
# Color points by the model's predicted class rather than the true one.
plt.plot(X_test[good_prediction==0,0], X_test[good_prediction==0,1], '.')
plt.plot(X_test[good_prediction==1,0], X_test[good_prediction==1,1], '.')
plt.show()
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Decision boundary
# Generate grid: line = np.linspace(-15,15) xx, yy = np.meshgrid(line,line) grid = np.stack((xx,yy)) # Reshape to fit model input size: grid = grid.T.reshape(-1,2) # Predict: good_prediction = good_model.predict(grid) bad_prediction = bad_model.predict(grid) # Reshape to grid for visualization: plt.title("Good Decision Boundary") good_prediction = good_prediction.T[0].reshape(len(line),len(line)) plt.contourf(xx,yy,good_prediction) plt.show() plt.title("Bad Decision Boundary") bad_prediction = bad_prediction.T[0].reshape(len(line),len(line)) plt.contourf(xx,yy,bad_prediction) plt.show()
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Design, train and test a neural network that is able to classify MNIST digits using Keras. Data
from keras.datasets import mnist

# Downloads/caches MNIST on first call.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
"""
Returns:
    2 tuples:
        x_train, x_test: uint8 array of grayscale image data with shape (num_samples, 28, 28).
        y_train, y_test: uint8 array of digit labels (integers in range 0-9) with shape (num_samples,).
"""

# Show example data: the first four training digits.
plt.subplot(1,4,1)
plt.imshow(x_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(1,4,2)
plt.imshow(x_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(1,4,3)
plt.imshow(x_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(1,4,4)
plt.imshow(x_train[3], cmap=plt.get_cmap('gray'))
plt.show()

""" Implement your solution here. """
_____no_output_____
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Solution
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D

"""
We need to add a channel dimension to the image input.
"""
# (N, 28, 28) -> (N, 28, 28, 1): Conv2D expects a trailing channel axis.
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)

"""
Train the image using 32-bit floats normalized between 0 and 1 for numerical stability.
"""
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

input_shape = (x_train.shape[1], x_train.shape[2], 1)

"""
Output should be a 10 dimensional 1-hot vector, not just an integer denoting
the digit. This is due to our use of softmax to "squish" network output for
classification.
"""
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

"""
We construct a CNN with 2 convolution layers and use max-pooling between each
convolution layer; we finish with two dense layers for classification.
"""
cnn_model = Sequential()
cnn_model.add(Conv2D(filters=32, kernel_size=(3,3), activation='relu', input_shape=input_shape))
cnn_model.add(MaxPooling2D(pool_size=(2, 2)))
cnn_model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
cnn_model.add(MaxPooling2D(pool_size=(2, 2)))
cnn_model.add(Flatten())
cnn_model.add(Dense(64, activation='relu'))
cnn_model.add(Dense(10, activation='softmax'))  # softmax for classification

cnn_model.compile(loss='categorical_crossentropy',
                  optimizer='adagrad',  # adaptive optimizer (still similar to SGD)
                  metrics=['accuracy'])

"""Train the CNN model and evaluate test accuracy."""
# NOTE(review): passing the test set as validation_data reports generalization
# per epoch but must not be used to tune hyperparameters.
cnn_model.fit(x_train, y_train,
              batch_size=128,
              epochs=10,
              verbose=1,
              validation_data=(x_test, y_test))  # never actually validate using test data!
# Final evaluation on the held-out test set; score = [loss, accuracy].
score = cnn_model.evaluate(x_test, y_test, verbose=0)
print('MNIST test set accuracy:', score[1])

"""Visualize some test data and network output."""
y_predict = cnn_model.predict(x_test, verbose=0)
# Vectorized argmax over the class axis replaces the original per-row
# Python loop ([np.argmax(y_predict[i]) for i in range(...)]).
y_predict_digits = np.argmax(y_predict, axis=1)

# Show the first four test digits next to the printed predictions below.
plt.subplot(1,4,1)
plt.imshow(x_test[0,:,:,0], cmap=plt.get_cmap('gray'))
plt.subplot(1,4,2)
plt.imshow(x_test[1,:,:,0], cmap=plt.get_cmap('gray'))
plt.subplot(1,4,3)
plt.imshow(x_test[2,:,:,0], cmap=plt.get_cmap('gray'))
plt.subplot(1,4,4)
plt.imshow(x_test[3,:,:,0], cmap=plt.get_cmap('gray'))
plt.show()

print("CNN predictions: {0}, {1}, {2}, {3}".format(y_predict_digits[0], y_predict_digits[1], y_predict_digits[2], y_predict_digits[3]))
Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 38s 630us/step - loss: 0.1783 - acc: 0.9452 - val_loss: 0.0650 - val_acc: 0.9800 Epoch 2/10 60000/60000 [==============================] - 38s 636us/step - loss: 0.0683 - acc: 0.9798 - val_loss: 0.0501 - val_acc: 0.9847 Epoch 3/10 60000/60000 [==============================] - 36s 597us/step - loss: 0.0536 - acc: 0.9844 - val_loss: 0.0448 - val_acc: 0.9855 Epoch 4/10 60000/60000 [==============================] - 50s 839us/step - loss: 0.0457 - acc: 0.9867 - val_loss: 0.0391 - val_acc: 0.9873 Epoch 5/10 60000/60000 [==============================] - 40s 668us/step - loss: 0.0407 - acc: 0.9878 - val_loss: 0.0392 - val_acc: 0.9876 Epoch 6/10 60000/60000 [==============================] - 35s 586us/step - loss: 0.0366 - acc: 0.9888 - val_loss: 0.0391 - val_acc: 0.9869 Epoch 7/10 60000/60000 [==============================] - 38s 640us/step - loss: 0.0333 - acc: 0.9903 - val_loss: 0.0364 - val_acc: 0.9883 Epoch 8/10 60000/60000 [==============================] - 39s 645us/step - loss: 0.0310 - acc: 0.9910 - val_loss: 0.0345 - val_acc: 0.9876 Epoch 9/10 60000/60000 [==============================] - 38s 629us/step - loss: 0.0288 - acc: 0.9913 - val_loss: 0.0325 - val_acc: 0.9898 Epoch 10/10 60000/60000 [==============================] - 35s 579us/step - loss: 0.0266 - acc: 0.9923 - val_loss: 0.0318 - val_acc: 0.9889 MNIST test set accuracy: 0.9889
Apache-2.0
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
Linearly Weighted Moving Average https://www.investopedia.com/terms/l/linearlyweightedmovingaverage.asp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings("ignore")

# fix_yahoo_finance is used to fetch data
import fix_yahoo_finance as yf
yf.pdr_override()

# input
symbol = 'AAPL'
start = '2018-08-01'
end = '2019-01-01'

# Read data
df = yf.download(symbol,start,end)

# View Columns
df.head()


def linear_weight_moving_average(close, n):
    """Linearly weighted moving average (LWMA) over an n-bar window.

    The most recent bar in each window gets weight n, the oldest weight 1,
    normalized by the weight sum n*(n+1)/2.

    Parameters
    ----------
    close : positionally indexable price sequence (e.g. a pandas Series)
    n : window length in bars

    Returns
    -------
    list of floats, same length as ``close``; the first n-1 entries are NaN
    because no full window exists yet.
    """
    weights = np.arange(n) + 1          # [1, 2, ..., n]; hoisted out of the loop
    denom = weights.sum()               # n*(n+1)/2
    lwma = [np.nan] * (n - 1)
    for i in range(n - 1, len(close)):
        # Window ends at (and includes) bar i. The original slice
        # close[i-n:i] excluded the current bar, lagging the average by one.
        window = np.asarray(close[i - n + 1 : i + 1])
        lwma.append((window * weights).sum() / denom)
    return lwma


df['LWMA'] = linear_weight_moving_average(df['Adj Close'], 5)
df.head(10)

fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(2, 1, 1)
ax1.plot(df['Adj Close'])
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')

ax2 = plt.subplot(2, 1, 2)
ax2.plot(df['LWMA'], label='Linearly Weighted Moving Average', color='red')
#ax2.axhline(y=0, color='blue', linestyle='--')
#ax2.axhline(y=0.5, color='darkblue')
#ax2.axhline(y=-0.5, color='darkblue')
ax2.grid()
ax2.set_ylabel('Linearly Weighted Moving Average')
ax2.set_xlabel('Date')
ax2.legend(loc='best')
_____no_output_____
BSD-3-Clause
src/reference/Python_Stock/Technical_Indicators/Linear_Weighted_Moving_Average.ipynb
sumukshashidhar/toreda
Candlestick with Linearly Weighted Moving Average
from matplotlib import dates as mdates
import datetime as dt  # NOTE(review): imported but unused in this cell

# Work on a copy so the original df keeps its DatetimeIndex.
dfc = df.copy()
# Green volume bars where the bar closed above its open.
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = pd.to_datetime(dfc['Date'])
# candlestick_ohlc needs matplotlib date numbers, not Timestamps.
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()

from mpl_finance import candlestick_ohlc

fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(2, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
# Overlay volume on a secondary y-axis, scaled down so it hugs the bottom.
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')

ax2 = plt.subplot(2, 1, 2)
ax2.plot(df['LWMA'], label='Linearly Weighted Moving Average', color='red')
ax2.grid()
ax2.set_ylabel('Linearly Weighted Moving Average')
ax2.set_xlabel('Date')
ax2.legend(loc='best')
_____no_output_____
BSD-3-Clause
src/reference/Python_Stock/Technical_Indicators/Linear_Weighted_Moving_Average.ipynb
sumukshashidhar/toreda
data acquisition / processing homework 2> I pledge my Honor that I have abided by the Stevens Honor System. - Joshua Schmidt 2/27/21 Problem 1a. For a stationary AR(1) time series x(t), x(t) is uncorrelated to x(t-l) for l>=2. This is false. For AR(1), $x(t) = a_0 + a_1 \cdot x(t - 1) + \epsilon_t$. In this expression, $x(t)$ is correlated to $x(t - 1)$, with a value of $a_1$. $x(t - 1)$ can be expanded to $a_0 + a_1 \cdot x(t - 2) + \epsilon_{t - 1}$, giving a correlation of $a_1^2$ with $x(t - 2)$. Subsequent members of the series can be expanded in the same way, for any value of l. Therefore, for all values of l>=2, $x(t)$ is correlated to $x(t-l)$. b. For a stationary MA(1) time series x(t), you will observe a coefficient cliff after time lag l>=1 in the ACF plot. This is true. In the ACF plot, there is a sharp drop in the coefficients after lag l=1. This is because the noise terms are uncorrelated and carry no information beyond one lag, so ACF(k) = 0 for lags k > 1. Problem 2 Find the best predictive model for each of the time series, using the techniques in the lecture.
# imports import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.arima.model import ARIMA q2_data = pd.read_csv('./q2.csv', header=None) print('question 2 samples:') q2_data.head() q2_plot = sns.lineplot(data=q2_data) q2_plot.set_title('q2 data') q2_plot.set(xlabel='count', ylabel='value') plt.show() # graph looks stationary, not much variance plot_acf(q2_data, title='q2 acf') plt.show() plot_pacf(q2_data, title='q2 pacf', zero=False) plt.show()
_____no_output_____
MIT
assignments/hw2/hw2.ipynb
jschmidtnj/ee627
Looking at these plots, the ACF quickly converges towards 0 (like a cliff), but the PACF takes a lag of 9 before finally converging towards 0 (it is gradual). Therefore, the best predictive model of this time series is most likely a pure MA model with a small moving-average order (an MA(4) is fitted below).
# Fit a pure moving-average model: ARIMA(p=0, d=0, q=4) == MA(4).
# NOTE(review): the markdown above suggested an MA order around 2; the
# fitted order here is 4 — confirm which was intended.
q2_model = ARIMA(q2_data, order=(0, 0, 4))
q2_model_fit = q2_model.fit()
q2_model_fit.summary()

# Residual diagnostics: a good fit leaves no significant ACF/PACF spikes.
q2_residuals = pd.DataFrame(q2_model_fit.resid)
plot_acf(q2_residuals, title='q2 residuals acf')
plt.show()
plot_pacf(q2_residuals, title='q2 residuals pacf', zero=False)
plt.show()

# Load the question-3 series; the CSV has no header row.
q3_data = pd.read_csv('./q3.csv', header=None)
print('question 3 samples:')
q3_data.head()

q3_plot = sns.lineplot(data=q3_data)
q3_plot.set_title('q3 data')
q3_plot.set(xlabel='count', ylabel='value')
plt.show()
# graph does not look stationary

plot_acf(q3_data, title='q3 acf')
plt.show()
plot_pacf(q3_data, title='q3 pacf', zero=False)
plt.show()
_____no_output_____
MIT
assignments/hw2/hw2.ipynb
jschmidtnj/ee627
Looking at these plots, the ACF does not converge to 0, but instead decays slowly, while the PACF quickly converges towards 0 (like a cliff). A slowly decaying ACF combined with a sharp PACF cutoff suggests a genuine autoregressive component rather than a statistical fluke, and the non-stationary look of the series suggests differencing is needed — hence the ARIMA(3, 1, 2) fit below.
# ARIMA(3, 1, 2): first-difference the non-stationary series (d=1),
# then fit AR order 3 and MA order 2.
q3_model = ARIMA(q3_data, order=(3, 1, 2))
q3_model_fit = q3_model.fit()
q3_model_fit.summary()

# Residual diagnostics: remaining spikes would indicate a misspecified order.
q3_residuals = pd.DataFrame(q3_model_fit.resid)
plot_acf(q3_residuals, title='q3 residuals acf')
plt.show()
plot_pacf(q3_residuals, title='q3 residuals pacf', zero=False)
plt.show()
_____no_output_____
MIT
assignments/hw2/hw2.ipynb
jschmidtnj/ee627
Initiate the vissim instance
# COM-Server import win32com.client as com import igraph import qgrid from VISSIM_helpers import VissimRoadNet from os.path import abspath, join, exists import os from shutil import copyfile import pandas as pd import math from pythoncom import com_error
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Add autocompletion for VISSIM COM Object
from IPython.utils.generics import complete_object


@complete_object.register(com.DispatchBaseClass)
def complete_dispatch_base_class(obj, prev_completions):
    """Offer a COM object's OLE property names as IPython tab completions."""
    try:
        readable = obj._prop_map_get_
        writable = obj._prop_map_put_
    except AttributeError:
        # Not a generated dispatch class — fall back to default completions.
        return None
    return list(set(readable) | set(writable)) + prev_completions
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Start Vissim and load constants
# Start (or attach to) VISSIM through its registered COM class; EnsureDispatch
# generates early-bound wrappers so constants become available.
Vissim = com.gencache.EnsureDispatch("Vissim.Vissim")
# COM constants (e.g. enum values) exposed by the generated type library.
from win32com.client import constants as c
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Setting the parameters used for simulation
# DTA Parameters
DTA_Parameters = {
    'EvalInt': 600,  # seconds
    'ScaleTotVol': False,
    'ScaleTotVolPerc': 1,
    'CostFile': 'costs.bew',
    'ChkEdgOnReadingCostFile': True,
    'PathFile': 'paths.weg',
    'ChkEdgOnReadingPathFile': True,
    'CreateArchiveFiles': True,
    'VehClasses': '',
}

# Simulation parameters
Sim_Parameters = {
    'NumRuns': 1,
    'RandSeedIncr': 0,
    'UseMaxSimSpeed': True,
    'SimBreakAt': 600,
    'NumCores': 8,
}

FileName = abspath(r"..\SO sim files\Vol100per.inpx")
WorkingFolder = abspath(r"..\SO sim files")


def current_period():
    """Return the index of the DTA evaluation interval the simulation is in."""
    elapsed = Vissim.Simulation.SimulationSecond
    return int(math.ceil(elapsed / DTA_Parameters['EvalInt']))
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Resetting edge and path cost files
# Paths with backslashes must be raw strings: '\S', '\c' and '\p' are invalid
# escape sequences in ordinary literals (DeprecationWarning in Python 3).
# The raw form produces the exact same path bytes.
default_cost_file = abspath(r'..\SO sim files\costs_020.bew')
defualt_path_file = abspath(r'..\SO sim files\paths_020.weg')


def _reset_from_default(default_file, current_file):
    """Replace *current_file* with a fresh copy of *default_file*."""
    if exists(current_file):
        os.remove(current_file)
    copyfile(default_file, current_file)


# Restore the working cost and path files from their pristine defaults so
# every run starts from the same assignment state.
current_cost_file = abspath(join(WorkingFolder, DTA_Parameters['CostFile']))
_reset_from_default(default_cost_file, current_cost_file)

current_path_file = abspath(join(WorkingFolder, DTA_Parameters['PathFile']))
_reset_from_default(defualt_path_file, current_path_file)
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Load the test network
# Load the test network (.inpx) into the running VISSIM instance.
Vissim.LoadNet(FileName)
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Read dynamic assignment network
vis_net = Vissim.Net
# Load the dynamic-assignment path file so existing paths are available,
# then build our igraph-based wrapper around the VISSIM network.
vis_net.Paths.ReadDynAssignPathFile()
network_graph = VissimRoadNet(vis_net)
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Check if dynamic assignment graph has changed
# Sanity check: the freshly built graph's edge list must match the pickled
# reference; a mismatch means the VISSIM network changed since it was saved.
# NOTE(review): assert statements are stripped under `python -O`.
ref_edge_list = pd.read_pickle("edges_attr.pkl.gz")
assert (network_graph.visedges['ToNode'] == ref_edge_list['ToNode']).all()
network_graph.save(join(WorkingFolder, "network_graph.pkl.gz"), format="picklez")
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
We start by opening the network to be tested and adjust its settings
DynamicAssignment = Vissim.Net.DynamicAssignment
Simulation = Vissim.Net.Simulation

# Push every configured DTA and simulation attribute into VISSIM via COM.
for com_obj, params in ((DynamicAssignment, DTA_Parameters),
                        (Simulation, Sim_Parameters)):
    for att_name, att_value in params.items():
        com_obj.SetAttValue(att_name, att_value)
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Run first DTA period as usual
# QuickMode disables vehicle animation to speed up the run.
Vissim.Graphics.CurrentNetworkWindow.SetAttValue("QuickMode", 1)
Simulation.RunSingleStep()
# Let VISSIM's built-in assignment handle routing for period 1, while keeping
# our graph's edge-volume bookkeeping in sync each step.
while current_period() < 2:
    network_graph.update_volume(vis_net)
    Simulation.RunSingleStep()
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Run simulation with custom route assignment
# Custom assignment loop: each step, refresh edge weights and assign every
# newly departed vehicle a path between its origin/destination parking lots.
bad_paths = []  # (node_path, edge_path) pairs that VISSIM rejected over COM
while True:
    network_graph.update_weights(vis_net)
    new_vehs = vis_net.Vehicles.GetDeparted()
    for veh in new_vehs:
        origin_lot = int(veh.AttValue('OrigParkLot'))
        destination_lot = int(veh.AttValue('DestParkLot'))
        node_paths, edge_paths = network_graph.parking_lot_routes(origin_lot, destination_lot)
        try:
            # VISSIM expects the node sequence as strings; use the best path.
            vis_path = vis_net.Paths.AddPath(origin_lot, destination_lot, [str(node) for node in node_paths[0]])
            veh.AssignPath(vis_path)
        except com_error:
            # Keep rejected paths for post-mortem inspection.
            bad_paths.append((node_paths[0], edge_paths[0]))
    network_graph.update_volume(vis_net)
    # Stop custom routing shortly before 4500 simulated seconds.
    if Vissim.Simulation.SimulationSecond > 4499:
        break
    Vissim.Simulation.RunSingleStep()
Vissim.Simulation.RunContinuous()

# NOTE(review): everything below appears to be leftover interactive debugging.
# It reuses loop variables (origin_lot, destination_lot, veh, node_paths,
# edge_paths) from the *last* iteration above, and re-imports com_error after
# it was already used — verify whether these cells are still needed.
vis_net.Paths.AddPath(origin_lot, destination_lot, [str(node) for node in node_paths[0]])
veh.AttValue('No')
from pythoncom import com_error
node_paths[0]
# Total weight of the computed path (assumes 1-based edge ids — TODO confirm
# against VissimRoadNet).
edge_weights = network_graph.es[[ed - 1 for ed in edge_paths[0]]]['weight']
print(sum(edge_weights))
pd.DataFrame(list(zip(edge_paths[0], edge_weights)), columns=['edge', 'edge_weights'])
# Total weight of the path the vehicle actually follows, for comparison.
edges = [int(ed) for ed in veh.Path.AttValue('EdgeSeq').split(',')]
edge_weights = network_graph.es[[ed - 1 for ed in edges]]['weight']
print(sum(edge_weights))
pd.DataFrame(list(zip(edges, edge_weights)), columns=['edge', 'edge_weights'])
Vissim.Simulation.RunContinuous()
Vissim.Exit()
_____no_output_____
Apache-2.0
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
Some more on ```spaCy``` and ```pandas``` First we want to import some of the packages we need.
import os
import spacy

# Remember we need to initialise spaCy: load the small English pipeline
# (must be installed separately via `python -m spacy download en_core_web_sm`).
nlp = spacy.load("en_core_web_sm")
_____no_output_____
MIT
notebooks/session4_inclass_rdkm.ipynb
agnesbn/cds-language