markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Lecture de la véritable réponse
AUS_response = pd.read_csv('AUS_test_Rain_tomorrow.csv', index_col=['Date'], parse_dates=True) y_true = (AUS_response['RainTomorrow'] == 'Yes').astype(int)
_____no_output_____
MIT
projet2/projet2-solution.ipynb
tiombo/Projet-Final
Calcul du log-loss
log_loss(y_true, y_test_pred_proba_best)
_____no_output_____
MIT
projet2/projet2-solution.ipynb
tiombo/Projet-Final
Aire sous la courbe
print(f'AUC = {roc_auc_score(y_true, y_test_pred_proba_best)}')
AUC = 0.8737077575165082
MIT
projet2/projet2-solution.ipynb
tiombo/Projet-Final
Courbe ROC
fpr_rf, tpr_rf, thresholds = roc_curve(y_true, y_test_pred_proba_best) fig = plt.figure(4, figsize=(6, 6)) plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr_rf, tpr_rf, label='Meilleur modรจle') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.show()
_____no_output_____
MIT
projet2/projet2-solution.ipynb
tiombo/Projet-Final
Score F1
f1_score(y_true, y_test_pred_proba_best > selected_threshold) # Attention, utiliser le seuil trouvรฉ par validation croisรฉe !
_____no_output_____
MIT
projet2/projet2-solution.ipynb
tiombo/Projet-Final
Matrice de confusion
fig = plt.figure(3, figsize=(6, 6)) cnf_matrix = confusion_matrix(y_true, y_test_pred_proba_best > selected_threshold) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=['0','1'], title='Matrice de confusion') # Accuracy (26094+7500)/(26094+7500+7045+2018)
_____no_output_____
MIT
projet2/projet2-solution.ipynb
tiombo/Projet-Final
**Data Pre Processing**
DATA_DIR = "Beijing-Pollution-DataSet/" from pandas import read_csv from datetime import datetime from random import randint def select_month(sequences, n_samples=250): X, y = list(), list() rand_hour = randint(0, 24) rand_day = randint(0, 7) for i in range(0, n_samples): start_ix = rand_hour + rand_day*24 + 672 * i # 168 : Week hours! idxs = [] for j in range(0, 4): if j <=2: idx = start_ix + (j * 168) # Add different weeks idxs.append(idx) if j == 3: # Target idy = start_ix + (j * 168) seq_x = sequences[idxs, :] seq_y = sequences[idy, 0] y.append(seq_y) X.append(seq_x) return X, y # split a multivariate sequence into samples def split_sequences(sequences, n_steps, n_samples=12000, start_from=0): X, y = list(), list() for i in range(start_from, (start_from + n_samples)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the dataset # if end_ix > len(sequences): # break # gather input and output parts of the pattern seq_x = sequences[i:end_ix, :] seq_y = sequences[end_ix, 0] y.append(seq_y) X.append(seq_x) return array(X), array(y) # load dataset DATA_DIR = "Beijing-Pollution-DataSet/" data = np.load(DATA_DIR + 'polution_dataSet.npy') scaled_data = data x, y = select_month(data, n_samples=65) print("X shape => ", np.array(x).shape) print("y shape => ", np.array(y).shape) x = np.array(x) y = np.array(y) dataset = data train_X, train_y = x[0:50], y[0:50] #split_sequences(dataset, n_timesteps, n_samples=15000, start_from=0) valid_X, valid_y = x[50:], y[50:] #split_sequences(dataset, n_timesteps, n_samples=3000, start_from=15000) test_loader_X = torch.utils.data.DataLoader(dataset=(train_X), batch_size=20, shuffle=False) # train_X = torch.tensor(train_X, dtype=torch.float32) # train_y = torch.tensor(train_y, dtype=torch.float32) print("Train X Shape :=> ", train_X.shape) print("Train Y Shape :=> ", train_y.shape) print("####################################") print("Test X Shape :=> ", valid_X.shape) print("Test Y Shape :=> ", valid_y.shape) 
class RNN(torch.nn.Module): def __init__(self, n_features=8, n_output=1, seq_length=6, n_hidden_layers=233, n_layers=1): super(RNN, self).__init__() self.n_features = n_features self.seq_len = seq_length self.n_output = n_output self.n_hidden = n_hidden_layers # number of hidden states self.n_layers = n_layers # number of LSTM layers (stacked) # define RNN with specified parameters # bath_first means that the first dim of the input and output will be the batch_size self.rnn = nn.RNN(input_size=self.n_features, hidden_size=self.n_hidden, num_layers=self.n_layers, batch_first=True) # last, fully connected layer self.l_linear = torch.nn.Linear(self.n_hidden*self.seq_len, self.n_output) def forward(self, x, hidden): # hidden_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_() # cell_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_() batch_size = x.size(0) rnn_out, hidden = self.rnn(x, hidden) # print(rnn_out.shape) rnn_out = rnn_out.contiguous().view(batch_size, -1) # lstm_out(with batch_first = True) is # (batch_size,seq_len,num_directions * hidden_size) # for following linear layer we want to keep batch_size dimension and merge rest # .contiguous() -> solves tensor compatibility error # x = lstm_out.contiguous().view(batch_size, -1) out = self.l_linear(rnn_out) return out, hidden torch.manual_seed(13) model = RNN(n_features=8, n_output=1, seq_length=3, n_hidden_layers=233, n_layers=1) criterion = nn.MSELoss() optimizer = torch.optim.Adagrad(model.parameters(), lr=0.001) model = model#.to(device) criterion = criterion#.to(device) for p in model.parameters(): print(p.numel()) import time start_time = time.time() hidden = None hidden_test = None epochs = 200 model.train() batch_size = 5 running_loss_history = [] val_running_loss_history = [] for epoch in range(epochs): running_loss = 0.0 val_running_loss = 0.0 model.train() for b in range(0, len(train_X), batch_size): inpt = train_X[b:b+batch_size, :, :] target = 
train_y[b:b+batch_size] # print("Input Shape :=> ", inpt.shape) x_batch = torch.tensor(inpt, dtype=torch.float32) y_batch = torch.tensor(target, dtype=torch.float32) output, hidden = model(x_batch, hidden) hidden = hidden.data loss = criterion(output.view(-1), y_batch) running_loss += loss.item() loss.backward() optimizer.step() optimizer.zero_grad() else: with torch.no_grad(): # it will temprerorerly set all the required grad flags to be false model.eval() for b in range(0, len(valid_X), batch_size): inpt = valid_X[b:b+batch_size, :, :] target = valid_y[b:b+batch_size] x_batch_test = torch.tensor(inpt, dtype=torch.float32) y_batch_test = torch.tensor(target, dtype=torch.float32) # model.init_hidden(x_batch_test.size(0)) output_test, hidden_test = model(x_batch_test, hidden_test) hidden_test = hidden_test.data loss_valid = criterion(output_test.view(-1), y_batch_test) val_running_loss += loss_valid.item() val_epoch_loss = val_running_loss / len(valid_X) val_running_loss_history.append(val_epoch_loss) epoch_loss = running_loss / len(train_X) running_loss_history.append(epoch_loss) print('step : ' , epoch , ' Train loss : ' , epoch_loss, ', Valid Loss : => ', val_epoch_loss) print("***->>>-----------------------------------------------<<<-***") total_time = time.time() - start_time print("===========================================================") print("*********************************************************") print("The total Training Time is Equal with ==> : {0} Sec.".format(total_time)) print("*********************************************************") print("===========================================================") f, ax = plt.subplots(1, 1, figsize=(10, 7)) plt.title("Train & Valid Loss - RNN", fontsize=18) plt.xlabel("Epoch") plt.ylabel("Loss") plt.plot(running_loss_history, label='Train') plt.plot(val_running_loss_history, label='Test') # pyplot.plot(history.history['val_loss'], label='test') plt.legend() plt.show() test_x, test_y = x[50:], y[50:] 
model.eval() test_x = torch.tensor(test_x, dtype=torch.float32) test_y = torch.tensor(test_y, dtype=torch.float32) res, hid = model(test_x, None) loss_test = criterion(res.view(-1), test_y) future = 100 window_size = 11 # preds = dataset[15000:15100, 0].tolist() # print(len(preds)) # print(preds) # for i in range (future): # # seq = torch.FloatTensor(preds[-window_size:]) # with torch.no_grad(): # # seq = torch.tensor(seq, dtype=torch.float32).view(1, 11, 8) # # model.hidden = (torch.zeros(1, 1, model.hidden_size), # # torch.zeros(1, 1, model.hidden_size)) # preds.append(model(seq)) # print(preds[11:]) fig = plt.figure(figsize=(20, 7)) plt.title("Beijing Polution Prediction - RNN", fontsize=18) plt.ylabel('Polution') plt.xlabel('Num data') plt.grid(True) plt.autoscale(axis='x', tight=True) fig.autofmt_xdate() plt.plot(test_y, label="Real") plt.plot(res.detach().numpy(), label="Prediction") plt.legend() plt.show() test_x, test_y = x[50:], y[50:] model.eval() test_running_loss = 0 with torch.no_grad(): # it will temprerorerly set all the required grad flags to be false model.eval() for b in range(0, len(test_x), batch_size): inpt = test_x[b:b+batch_size, :, :] target = test_y[b:b+batch_size] x_batch_test = torch.tensor(inpt, dtype=torch.float32) y_batch_test = torch.tensor(target, dtype=torch.float32) # model.init_hidden(x_batch_test.size(0)) output_test, hidden_test = model(x_batch_test, hidden_test) hidden_test = hidden_test.data loss_test = criterion(output_test.view(-1), y_batch_test) test_running_loss += loss_test.item() test_epoch_loss = test_running_loss / len(test_x) print("##########################################################") print(">>>>---------------------------------------------------<<<<") print(">>>>----------***************************--------------<<<<") print("**** Test Loss :==>>> ", test_epoch_loss) print(">>>>----------***************************--------------<<<<") print(">>>>---------------------------------------------------<<<<") 
print("##########################################################")
_____no_output_____
MIT
Q1 PartD/Q1 PartD - Monthly/MiniProj_RNN_ADAgrad_MSE_Q1_PartD_Pytorch_Monthly.ipynb
parhamzm/Beijing-Weather-Prediction
Preprocessing
raw_data.describe(include='all') data = raw_data.drop(['Model'],axis=1) data.describe(include='all') # dropped the Model category because it is irrelevant to increasing the accuracy of my model. data.isnull().sum() #this shows the missing values in my dataset data_no_mv = data.dropna(axis=0) #I drop the missing values here, which is acceptable because it is less than 5% of the observations. data_no_mv.describe(include='all')
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
PDFs Here I check the Probability Distribution Functions (PDF) of the Independent Variables Price, Year, Mileage, and Engine Volume to identify and weed out the Outliers. They can adversely affect the accuracy of my Regression model because a Regression attempts to draw a line closest to all the data; including the Outliers might inflate/deflate my model.
sns.distplot(data_no_mv['Price']) q = data_no_mv['Price'].quantile(0.99) data_1 = data_no_mv[data_no_mv['Price']<q] data_1.describe(include='all') # I decided to exclude the observations in the 99th percentile and above to get rid of the Outliers. sns.distplot(data_1['Price']) # Now the Price variable only includes observations up to the 98th percentile and has much fewer Outliers. sns.distplot(data_no_mv['Mileage']) q = data_1['Mileage'].quantile(0.99) data_2 = data_1[data_1['Mileage']<q] # Similar to the Price variable, I decided to exclude the observations in the 99th percentile and beyond to remove the Outliers. sns.distplot(data_2['Mileage']) sns.distplot(data_no_mv['EngineV']) # The PDF looks unusual compared to the previous two. data_3 = data_2[data_2['EngineV']<6.6] # After research, I found out that the normal interval of the Engine Volume falls between 06. to 6.5. # The observations beyond 6.5 are mostly 99.99 - a variable that was used in the past to label missing values. It is a bad idea to label missing values in this manner now. # I decided to remove such observations as they are Outliers. sns.distplot(data_3['EngineV']) sns.distplot(data_no_mv['Year']) # Most cars are newer but there are a few vintage cars in the variable. q = data_3['Year'].quantile(0.01) data_4 = data_3[data_3['Year']>q] # I decided to remove the 1st percentile and keep the rest sns.distplot(data_4['Year']) data_cleaned = data_4.reset_index(drop=True) #I reset the index to completely forget the old index. data_cleaned.describe(include='all') # This excludes ~250 problematic observations that could've hindered the accuracy of my model if left unchecked.
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
Checking the OLS assumptions Distribution
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize =(15,3)) ax1.scatter(data_cleaned['Year'],data_cleaned['Price']) ax1.set_title('Price and Year') ax2.scatter(data_cleaned['EngineV'],data_cleaned['Price']) ax2.set_title('Price and EngineV') ax3.scatter(data_cleaned['Mileage'],data_cleaned['Price']) ax3.set_title('Price and Mileage') plt.show() # These are not linear regressions and shows that I should first transform one or more variables to run the Regression. sns.distplot(data_cleaned['Price']) #Here I check the distribution of the dependent variable Price. log_price = np.log(data_cleaned['Price']) # Here I used the log transformation to fix heteroscedasticity and remove outliers from the variable Price. data_cleaned['log_price'] = log_price data_cleaned f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize =(15,3)) ax1.scatter(data_cleaned['Year'],data_cleaned['log_price']) ax1.set_title('Log Price and Year') ax2.scatter(data_cleaned['EngineV'],data_cleaned['log_price']) ax2.set_title('Log Price and EngineV') ax3.scatter(data_cleaned['Mileage'],data_cleaned['log_price']) ax3.set_title('Log Price and Mileage') plt.show() # After performing the log transformation on Price, the PDFs now show a linear regression line. data_cleaned = data_cleaned.drop(['Price'],axis=1) # Here I dropped the variable Price and replaced it with log_Price because the former has no statistical significance to my model.
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
Multicollinearity
data_cleaned.columns.values from statsmodels.stats.outliers_influence import variance_inflation_factor variables = data_cleaned[['Mileage','Year','EngineV']] vif = pd.DataFrame() vif["VIF"] = [variance_inflation_factor(variables.values, i) for i in range(variables.shape[1])] vif["features"] = variables.columns # Through Statsmodels, I used the Variance Inflation Factor here to check for multicollinearity in my variables. # While I expect multicollinearity in my data, I wanted to check the variables the introduce unnacceptable correlation to my model; they have high VIFs. vif data_no_multicollinearity = data_cleaned.drop(['Year'],axis=1) # Dropped 'Year' because it has an unacceptably high VIF and is therefore a feature that introduces correlation in my data data_with_dummies = pd.get_dummies(data_no_multicollinearity, drop_first=True) # This identifies categorical variables and creates dummies automatically to avoid multicollinearity in my Model data_with_dummies.head() data_with_dummies.columns.values cols = ['log_price', 'Mileage', 'EngineV', 'Brand_BMW', 'Brand_Mercedes-Benz', 'Brand_Mitsubishi', 'Brand_Renault', 'Brand_Toyota', 'Brand_Volkswagen', 'Body_hatch', 'Body_other', 'Body_sedan', 'Body_vagon', 'Body_van', 'Engine Type_Gas', 'Engine Type_Other', 'Engine Type_Petrol', 'Registration_yes'] data_preprocessed = data_with_dummies[cols] data_preprocessed.head() # Here I arranged the data into a table.
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
Training my Regression Model
targets = data_preprocessed['log_price'] inputs = data_preprocessed.drop(['log_price'], axis=1) # I removed log_price in the inputs to exclude the transformed dependent variable from my inputs. from sklearn.preprocessing import StandardScaler scaler = StandardScaler () scaler.fit(inputs) inputs_scaled = scaler.transform(inputs) # This standardizes my inputs; in other words, it subtractrs the mean and divide by the standard deviation from each observation. from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(inputs_scaled, targets, test_size=0.2, random_state=365) # I did this to avoid overfitting my model to my data. # The default setting of the train-test split is 75-25, but here I chose 80-20. # I used 'random_state' to ensure that I get the same random shuffle every time I split my data. reg = LinearRegression() reg.fit(x_train, y_train) y_hat = reg.predict(x_train) plt.scatter(y_train, y_hat) plt.xlabel('Targets(y_train)', size=20) plt.ylabel('Predictions(y_hat)', size=20) plt.xlim(6,13) plt.ylim(6,13) plt.show()
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
Using Residuals to check the Model
sns.distplot(y_train-y_hat) plt.title("Residuals PDF", size=20) #to check whether the Residuals is normally distributed and the variability of the outcome reg.score(x_train, y_train) reg.intercept_ # The intercept or bias calibrates the model: without it, each feature will be off the mark. reg.coef_ reg_summary=pd.DataFrame(inputs.columns.values, columns=['Features']) reg_summary['Weights']=reg.coef_ reg_summary # A feature with a coefficient of 0 means that it has no significance to the model. #to know the categorical variables of my features data_cleaned['Brand'].unique() data_cleaned['Body'].unique() data_cleaned['Engine Type'].unique() data_cleaned['Registration'].unique()
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
Testing my Model
y_hat_test = reg.predict(x_test) plt.scatter(y_test, y_hat_test, alpha=0.2) plt.xlabel('Targets(y_test)', size=20) plt.ylabel('Predictions(y_hat_test)', size=20) plt.xlim(6, 13) plt.ylim(6, 13) plt.show() df_pf = pd.DataFrame(np.exp(y_hat_test), columns=['Prediction']) #this returns the exponential of Y Hat Test and removes the log. df_pf.head() y_test = y_test.reset_index(drop=True) y_test.head() df_pf['Target'] = np.exp(y_test) df_pf df_pf['Residual'] = df_pf['Target'] - df_pf['Prediction'] df_pf['Difference%'] = np.absolute(df_pf['Residual']/df_pf['Target']*100) df_pf pd.options.display.max_rows = 999 pd.set_option('display.float_format', lambda x: '%2f' % x) df_pf.sort_values(by=['Difference%']) # This table shows the difference in percetage of the prediction and the target using the test data. # I included the Residuals because examining them as the same as examining the heart of the alogirthm.
_____no_output_____
Apache-2.0
Linear Regression w sklearn Case Exercise.ipynb
jovemmanuelre/Practical-Case-Example-Regression-with-sklearn
Simple Sentiment Analysis Pipeline. Here we train a simple 2-layer neural network for sentiment analysis. - Model: 2 Fully Connected layer NN (PyTorch) - Dataset: Sentiment Analysis - Embedding: spacy en_core_web_lg (mean aggregated embeddings of the text). Install Requirements from [repository](https://github.com/AnzorGozalishvili/active_learning_playground)
!wget https://raw.githubusercontent.com/AnzorGozalishvili/active_learning_playground/main/requirements.txt !pip install -r requirements.txt !rm requirements.txt !pip install spacy-sentence-bert==0.1.2
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Imports
# system import os import sys # data and models import numpy as np import pandas as pd import scipy # utilities import random import re import datetime # text embeddings import spacy import spacy_sentence_bert # scikit-learn stuff import sklearn from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score, roc_auc_score, precision_score, recall_score # PyTorch stuff import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim # visualization import matplotlib.pyplot as plt from tqdm import tqdm # dataset retrieval from io import BytesIO from zipfile import ZipFile from urllib.request import urlopen
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Set Random Seeds. For reproducibility we set several random seeds, as recommended by PyTorch. ([See here](https://pytorch.org/docs/stable/notes/randomness.html))
random.seed(hash("setting random seeds") % 2**32 - 1) np.random.seed(hash("improves reproducibility") % 2**32 - 1) torch.manual_seed(hash("PyTorch") % 2**32 - 1) RANDOM_SEED = 42
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Dataset. Let's download the dataset from the given [url](https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip), then take a look at the samples. Retrieve dataset
def get_dataset(): resp = urlopen("https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip") zipfile = ZipFile(BytesIO(resp.read())) lines = list() for line in zipfile.open('SMSSpamCollection').readlines(): lines.append(line.decode('utf-8')) data = pd.DataFrame(data=lines) new = data[0].str.split("\t", n = 1, expand = True) data["text"]= new[1] data["label"]= new[0] data.drop(columns=[0], inplace = True) return data dataset = get_dataset()
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Explore Samples
dataset.head() dataset.shape
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Generate Train/Test splits and move forward. We see the imbalance in the target variable
dataset.label.value_counts()
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
We have duplicated records
dataset.duplicated().sum()
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
remove these duplicates
dataset.drop_duplicates(inplace=True) dataset.reset_index(drop=True, inplace=True)
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
split into train/test splits with an 80/20 ratio
train, test = train_test_split(dataset, test_size=0.2, random_state=RANDOM_SEED)
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Store these sets into dataset directory
DATASET_NAME = "SMSSpamCollection" if not os.path.exists('data'): os.mkdir('data') if not os.path.exists(f'data/{DATASET_NAME}'): os.mkdir(f'data/{DATASET_NAME}') train.to_csv(f'data/{DATASET_NAME}/train.csv') test.to_csv(f'data/{DATASET_NAME}/test.csv')
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Load again and continue
train = pd.read_csv(f'data/{DATASET_NAME}/train.csv', index_col=0) test = pd.read_csv(f'data/{DATASET_NAME}/test.csv', index_col=0) train.shape, test.shape train.head(2)
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Generate Embeddings. We use spacy embeddings to vectorize our samples
class Vectorizer: """Generates text embedding using deep learning model""" def __init__(self, *args, **kwargs): self.model = spacy_sentence_bert.load_model(kwargs.get('model', 'en_paraphrase_distilroberta_base_v1')) def __call__(self, text): if not text: text = "" return self.model(text).vector vectorizer = Vectorizer() EMBEDDING_DIM = vectorizer('sample text for embedding').shape[0]; EMBEDDING_DIM train['vector'] = train.text.apply(vectorizer).apply(lambda x: x.tolist()) test['vector'] = test.text.apply(vectorizer).apply(lambda x: x.tolist()) DATASET_NAME = "SMSSpamCollection" if not os.path.exists('data'): os.mkdir('data') if not os.path.exists(f'data/{DATASET_NAME}'): os.mkdir(f'data/{DATASET_NAME}') train.to_csv(f'data/{DATASET_NAME}/train_vectorized.csv') test.to_csv(f'data/{DATASET_NAME}/test_vectorized.csv') train = pd.read_csv(f'data/{DATASET_NAME}/train_vectorized.csv', index_col=0) test = pd.read_csv(f'data/{DATASET_NAME}/test_vectorized.csv', index_col=0) train['vector'] = train.vector.apply(eval) test['vector'] = test.vector.apply(eval)
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
PyTorch ML Pipeline Model. The example model is taken from [here](https://github.com/rmunro/pytorch_active_learning/blob/master/active_learning_basics.py)
class MLP(nn.Module): """Simple 2 Layer Fully Connected NN (MLP)""" def __init__(self, num_labels, emb_dim): super(MLP, self).__init__() # Define model with one hidden layer with 128 neurons self.linear1 = nn.Linear(emb_dim, 128) self.linear2 = nn.Linear(128, num_labels) def forward(self, vector): hidden1 = self.linear1(vector).clamp(min=0) # ReLU output = self.linear2(hidden1) return F.log_softmax(output, dim=1) MLP(num_labels=2, emb_dim=EMBEDDING_DIM) train.sample() torch.Tensor(train.vector.iloc[:10].values.tolist()) class Trainer: """Trains PyTorch model on training data and also evaluated""" def __init__(self, *args, **kwargs): self.model = kwargs.get('model', MLP(num_labels=2, emb_dim=EMBEDDING_DIM)) self.loss_function = kwargs.get('loss_function', nn.NLLLoss()) self.optimizer = kwargs.get('optimizer', optim.SGD(self.model.parameters(), lr=0.01)) self.label_to_idx = kwargs.get('label_to_idx', {'ham': 0, 'spam': 1}) self.idx_to_label = {v:k for k,v in self.label_to_idx.items()} self.batch_size = kwargs.get('batch_size', 64) self.losses = [] def train(self, training_data, test_data, epochs): for epoch in range(epochs): print(f'Epoch: {str(epoch)}') shuffled_training_data = training_data.sample(frac=1.0, random_state=RANDOM_SEED + epoch) for batch_idx, start_idx in enumerate(range(0, len(shuffled_training_data), self.batch_size)): vecs = torch.Tensor( shuffled_training_data.vector.iloc[start_idx:start_idx+self.batch_size].tolist() ) targets = torch.LongTensor( shuffled_training_data.label.iloc[start_idx:start_idx+self.batch_size].apply(lambda x: self.label_to_idx[x]).tolist() ) self.model.zero_grad() log_probs = self.model(vecs) loss = self.loss_function(log_probs, targets) loss.backward() self.optimizer.step() self.losses.append(loss.item()) print(f"\tBatch: {batch_idx}\tLoss: {self.losses[-1]}") eval_results = self.evaluate(test_data) print(f"Evaluation Results: {repr(eval_results)}") # save model to path that is alphanumeric and includes number of items and 
accuracies in filename timestamp = re.sub('\.[0-9]*','_',str(datetime.datetime.now())).replace(" ", "_").replace("-", "").replace(":","") f1_score = str(eval_results['f1']) model_path = "models/"+timestamp+f1_score+".params" if not os.path.exists('models'): os.mkdir('models') torch.save(self.model.state_dict(), model_path) return model_path def evaluate(self, dataset): targets = [] preds = [] probs = [] with torch.no_grad(): for idx, row in dataset.iterrows(): vec = torch.Tensor(row.vector).view(1, -1) target = self.label_to_idx[row.label] logits = self.model(vec) prob = np.exp(logits.cpu().data.numpy()[0]).tolist() pred = np.argmax(prob) probs.append(prob[1]) preds.append(pred) targets.append(target) results = { "f1": round(f1_score(targets, preds, pos_label=1), 3), "precision": round(precision_score(targets, preds, pos_label=1), 3), "recall": round(recall_score(targets, preds, pos_label=1), 3), "roc_auc": round(roc_auc_score(targets, probs, labels=list(self.label_to_idx.keys())), 3), } return results def plot_loss(self): plt.figure(figsize=(10, 6)) plt.plot(self.losses) plt.show() LABEL_TO_IDX = {item:idx for idx, item in enumerate(sorted(train.label.unique().tolist()))}; LABEL_TO_IDX mlp = MLP(num_labels=2, emb_dim=EMBEDDING_DIM) trainer = Trainer( **{ "model": mlp, "loss_function": nn.NLLLoss(), "optimizer": optim.SGD(mlp.parameters(), lr=0.01), "label_to_idx": LABEL_TO_IDX, "batch_size": 256, } ) trainer.train(training_data=train, test_data=test, epochs=10) trainer.plot_loss()
_____no_output_____
MIT
notebooks/regular_sentiment_analysis_pipeline.ipynb
AnzorGozalishvili/active_learning_playground
Bias Removal Climate models can have biases relative to different verification datasets. Commonly, biases are removed by postprocessing before verification of forecasting skill. `climpred` provides convenience functions to do so.
import climpred import xarray as xr import matplotlib.pyplot as plt from climpred import HindcastEnsemble hind = climpred.tutorial.load_dataset('CESM-DP-SST') # CESM-DPLE hindcast ensemble output. obs = climpred.tutorial.load_dataset('ERSST') # observations hind["lead"].attrs["units"] = "years"
_____no_output_____
MIT
docs/source/bias_removal.ipynb
andersy005/climpred
We begin by removing a mean climatology for the observations, since `CESM-DPLE` generates its anomalies over this same time period.
obs = obs - obs.sel(time=slice('1964', '2014')).mean('time') hindcast = HindcastEnsemble(hind) hindcast = hindcast.add_observations(obs) hindcast.plot()
/Users/rileybrady/Desktop/dev/climpred/climpred/utils.py:141: UserWarning: Assuming annual resolution due to numeric inits. Change init to a datetime if it is another resolution. "Assuming annual resolution due to numeric inits. "
MIT
docs/source/bias_removal.ipynb
andersy005/climpred
The warming of the `observations` is similar to `initialized`. Mean bias removal: typically, bias depends on lead time and should therefore also be removed as a function of lead time.
bias = hindcast.verify(metric='bias', comparison='e2o', dim=[], alignment='same_verifs') bias.SST.plot()
_____no_output_____
MIT
docs/source/bias_removal.ipynb
andersy005/climpred
Against `observations`, there is a small cold bias in the 1980 and 1990 initialization years and a warm bias before and after.
# lead-time dependant mean bias over all initializations is quite small but negative mean_bias = bias.mean('init') mean_bias.SST.plot()
_____no_output_____
MIT
docs/source/bias_removal.ipynb
andersy005/climpred
Cross Validation. To remove the mean bias quickly, the mean bias over all initializations is subtracted. For formally correct bias removal with cross validation, the given initialization is left out when subtracting the mean bias. `climpred` wraps these functions in `HindcastEnsemble.remove_bias(how='mean', cross_validate={bool})`.
hindcast.remove_bias(how='mean', cross_validate=True, alignment='same_verifs').plot() plt.title('hindcast lead timeseries removed for unconditional mean bias') plt.show()
_____no_output_____
MIT
docs/source/bias_removal.ipynb
andersy005/climpred
Skill Distance-based accuracy metrics like (`mse`,`rmse`,`nrmse`,...) are sensitive to mean bias removal. Correlations like (`pearson_r`, `spearman_r`) are insensitive to bias correction.
metric='rmse' hindcast.verify(metric=metric, comparison='e2o', dim='init', alignment='same_verifs')['SST'].plot(label='no bias correction') hindcast.remove_bias(cross_validate=False, alignment='same_verifs') \ .verify(metric=metric, comparison='e2o', dim='init', alignment='same_verifs').SST.plot(label='bias correction without cross validation') hindcast.remove_bias(cross_validate=True, alignment='same_verifs') \ .verify(metric=metric, comparison='e2o', dim='init', alignment='same_verifs').SST.plot(label='formally correct bias correction with cross validation') plt.legend() plt.title(f"{metric.upper()} SST evaluated against observations") plt.show()
_____no_output_____
MIT
docs/source/bias_removal.ipynb
andersy005/climpred
ๆฉŸๆขฐๅญฆ็ฟ’ใƒฉใ‚คใƒ–ใƒฉใƒชๆฉŸๆขฐๅญฆ็ฟ’ใƒฉใ‚คใƒ–ใƒฉใƒชใ€KerasใจPyTorchใฎใ‚ณใƒผใƒ‰ใ‚’็ดนไป‹ใ—ใพใ™ใ€‚ ไปŠๅ›žใฏใ‚ณใƒผใƒ‰ใฎ่ฉณใ—ใ„่งฃ่ชฌใฏ่กŒใ„ใพใ›ใ‚“ใŒใ€ๅฎŸ่ฃ…ใฎๅคงใพใ‹ใชๆตใ‚Œใ‚’ๆŠŠๆกใ—ใพใ—ใ‚‡ใ†ใ€‚ โ— Kerasใฎใ‚ณใƒผใƒ‰ไปฅไธ‹ใฎใ‚ณใƒผใƒ‰ใฏใ€Kerasใซใ‚ˆใ‚‹ใ‚ทใƒณใƒ—ใƒซใชใƒ‹ใƒฅใƒผใƒฉใƒซใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใฎๅฎŸ่ฃ…ใงใ™ใ€‚ Irisใฎๅ„่Šฑใ‚’ใ€SetosaใจVersicolorใซๅˆ†้กžใ—ใพใ™ใ€‚ ไปฅไธ‹ใฎใ‚ณใƒผใƒ‰ใงใฏใ€`Sequential`ใงใƒขใƒ‡ใƒซใ‚’ไฝœใ‚Šใ€ๅฑคใ‚„ๆดปๆ€งๅŒ–้–ขๆ•ฐใ‚’่ฟฝๅŠ ใ—ใฆใ„ใพใ™ใ€‚
import numpy as np
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# Binary classification of Iris Setosa vs. Versicolor from two sepal features.
iris = datasets.load_iris()
iris_data = iris.data
sl_data = iris_data[:100, 0]  # Setosa and Versicolor: sepal length
sw_data = iris_data[:100, 1]  # Setosa and Versicolor: sepal width

# Center each feature on zero by subtracting its mean.
sl_ave = np.average(sl_data)
sl_data -= sl_ave
sw_ave = np.average(sw_data)
sw_data -= sw_ave

# Pair the features and collect the matching labels.
input_data = []
correct_data = []
for i in range(100):
    input_data.append([sl_data[i], sw_data[i]])
    correct_data.append([iris.target[i]])

# Split into training and test sets.
input_data = np.array(input_data)
correct_data = np.array(correct_data)
x_train, x_test, t_train, t_test = train_test_split(input_data, correct_data)

# ------ Keras model ------
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(2, input_dim=2))  # input: 2 features, hidden layer: 2 units
model.add(Activation("sigmoid"))
model.add(Dense(1))               # output layer: 1 unit
model.add(Activation("sigmoid"))
model.compile(optimizer=SGD(lr=0.3), loss="mean_squared_error", metrics=["accuracy"])

# Train, then report test accuracy.
model.fit(x_train, t_train, epochs=32, batch_size=1)
loss, accuracy = model.evaluate(x_test, t_test)
print("ๆญฃ่งฃ็އ: " + str(accuracy*100) + "%")
_____no_output_____
MIT
section_7/ml_libraries.ipynb
kiyoshion/minna-ai
โ— PyTorchใฎใ‚ณใƒผใƒ‰ไปฅไธ‹ใฎใ‚ณใƒผใƒ‰ใฏใ€PyTorchใ‚ˆใ‚‹ใ‚ทใƒณใƒ—ใƒซใชใƒ‹ใƒฅใƒผใƒฉใƒซใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใฎๅฎŸ่ฃ…ใงใ™ใ€‚ Irisใฎๅ„่Šฑใ‚’ใ€SetosaใจVersicolorใซๅˆ†้กžใ—ใพใ™ใ€‚ ไปฅไธ‹ใฎใ‚ณใƒผใƒ‰ใงใฏใ€KerasใจๅŒๆง˜ใซ`Sequential`ใงใƒขใƒ‡ใƒซใ‚’ไฝœใ‚Šใ€ๅฑคใ‚„ๆดปๆ€งๅŒ–้–ขๆ•ฐใ‚’ไธฆในใฆใ„ใพใ™ใ€‚ PyTorchใงใฏใ€ๅ…ฅๅŠ›ใ‚„ๆญฃ่งฃใ‚’Tensorๅฝขๅผใฎใƒ‡ใƒผใ‚ฟใซๅค‰ๆ›ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚
import numpy as np
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# Binary classification of Iris Setosa vs. Versicolor from two sepal features.
iris = datasets.load_iris()
iris_data = iris.data
sl_data = iris_data[:100, 0]  # Setosa and Versicolor: sepal length
sw_data = iris_data[:100, 1]  # Setosa and Versicolor: sepal width

# Center each feature on zero by subtracting its mean.
sl_ave = np.average(sl_data)
sl_data -= sl_ave
sw_ave = np.average(sw_data)
sw_data -= sw_ave

# Pair the features and collect the matching labels.
input_data = []
correct_data = []
for i in range(100):
    input_data.append([sl_data[i], sw_data[i]])
    correct_data.append([iris.target[i]])

# Split into training and test sets.
input_data = np.array(input_data)
correct_data = np.array(correct_data)
x_train, x_test, t_train, t_test = train_test_split(input_data, correct_data)

# ------ PyTorch model ------
import torch
from torch import nn
from torch import optim

# PyTorch expects inputs and targets as Tensors.
x_train = torch.tensor(x_train, dtype=torch.float32)
t_train = torch.tensor(t_train, dtype=torch.float32)
x_test = torch.tensor(x_test, dtype=torch.float32)
t_test = torch.tensor(t_test, dtype=torch.float32)

net = nn.Sequential(
    nn.Linear(2, 2),  # input: 2 features, hidden layer: 2 units
    nn.Sigmoid(),
    nn.Linear(2, 1),  # output layer: 1 unit
    nn.Sigmoid()
)

loss_fnc = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.3)

# Train for 1000 epochs of full-batch gradient descent.
for i in range(1000):
    optimizer.zero_grad()

    # Forward pass on both splits (test loss is monitored only).
    y_train = net(x_train)
    y_test = net(x_test)

    # Compute losses.
    loss_train = loss_fnc(y_train, t_train)
    loss_test = loss_fnc(y_test, t_test)

    # Backward pass and parameter update.
    loss_train.backward()
    optimizer.step()

    if i % 100 == 0:
        print("Epoch:", i, "Loss_Train:", loss_train.item(), "Loss_Test:", loss_test.item())

# Threshold the sigmoid output at 0.5 and report test accuracy.
y_test = net(x_test)
count = ((y_test.detach().numpy()>0.5) == (t_test.detach().numpy()==1.0)).sum().item()
print("ๆญฃ่งฃ็އ: " + str(count/len(y_test)*100) + "%")
_____no_output_____
MIT
section_7/ml_libraries.ipynb
kiyoshion/minna-ai
Transfer Learning Template
%load_ext autoreload %autoreload 2 %matplotlib inline import os, json, sys, time, random import numpy as np import torch from torch.optim import Adam from easydict import EasyDict import matplotlib.pyplot as plt from steves_models.steves_ptn import Steves_Prototypical_Network from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper from steves_utils.iterable_aggregator import Iterable_Aggregator from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig from steves_utils.torch_sequential_builder import build_sequential from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path) from steves_utils.PTN.utils import independent_accuracy_assesment from torch.utils.data import DataLoader from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory from steves_utils.ptn_do_report import ( get_loss_curve, get_results_table, get_parameters_table, get_domain_accuracies, ) from steves_utils.transforms import get_chained_transform
_____no_output_____
MIT
experiments/tl_2/cores_wisig-oracle.run2.framed/trials/1/trial.ipynb
stevester94/csc500-notebooks
Allowed Parameters. These are allowed parameters, not defaults. Each of these values needs to be present in the injected parameters (the notebook will raise an exception if they are not present). Papermill uses the cell tag "parameters" to inject the real parameters below this cell. Enable tags to see what I mean.
required_parameters = { "experiment_name", "lr", "device", "seed", "dataset_seed", "n_shot", "n_query", "n_way", "train_k_factor", "val_k_factor", "test_k_factor", "n_epoch", "patience", "criteria_for_best", "x_net", "datasets", "torch_default_dtype", "NUM_LOGS_PER_EPOCH", "BEST_MODEL_PATH", } from steves_utils.CORES.utils import ( ALL_NODES, ALL_NODES_MINIMUM_1000_EXAMPLES, ALL_DAYS ) from steves_utils.ORACLE.utils_v2 import ( ALL_DISTANCES_FEET_NARROWED, ALL_RUNS, ALL_SERIAL_NUMBERS, ) standalone_parameters = {} standalone_parameters["experiment_name"] = "STANDALONE PTN" standalone_parameters["lr"] = 0.001 standalone_parameters["device"] = "cuda" standalone_parameters["seed"] = 1337 standalone_parameters["dataset_seed"] = 1337 standalone_parameters["n_way"] = 8 standalone_parameters["n_shot"] = 3 standalone_parameters["n_query"] = 2 standalone_parameters["train_k_factor"] = 1 standalone_parameters["val_k_factor"] = 2 standalone_parameters["test_k_factor"] = 2 standalone_parameters["n_epoch"] = 50 standalone_parameters["patience"] = 10 standalone_parameters["criteria_for_best"] = "source_loss" standalone_parameters["datasets"] = [ { "labels": ALL_SERIAL_NUMBERS, "domains": ALL_DISTANCES_FEET_NARROWED, "num_examples_per_domain_per_label": 100, "pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"), "source_or_target_dataset": "source", "x_transforms": ["unit_mag", "minus_two"], "episode_transforms": [], "domain_prefix": "ORACLE_" }, { "labels": ALL_NODES, "domains": ALL_DAYS, "num_examples_per_domain_per_label": 100, "pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"), "source_or_target_dataset": "target", "x_transforms": ["unit_power", "times_zero"], "episode_transforms": [], "domain_prefix": "CORES_" } ] standalone_parameters["torch_default_dtype"] = "torch.float32" standalone_parameters["x_net"] = [ {"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}}, {"class": 
"Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":256}}, {"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features":256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}}, ] # Parameters relevant to results # These parameters will basically never need to change standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10 standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth" # Parameters parameters = { "experiment_name": "cores+wisig -> oracle.run2.framed", "device": "cuda", "lr": 0.001, "seed": 1337, "dataset_seed": 1337, "n_shot": 3, "n_query": 2, "train_k_factor": 3, "val_k_factor": 2, "test_k_factor": 2, "torch_default_dtype": "torch.float32", "n_epoch": 50, "patience": 3, "criteria_for_best": "target_loss", "x_net": [ {"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}}, { "class": "Conv2d", "kargs": { "in_channels": 1, "out_channels": 256, "kernel_size": [1, 7], "bias": False, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 256}}, { "class": "Conv2d", "kargs": { "in_channels": 256, "out_channels": 80, "kernel_size": [2, 7], "bias": True, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}}, {"class": "ReLU", "kargs": 
{"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features": 256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}}, ], "NUM_LOGS_PER_EPOCH": 10, "BEST_MODEL_PATH": "./best_model.pth", "n_way": 16, "datasets": [ { "labels": [ "1-10.", "1-11.", "1-15.", "1-16.", "1-17.", "1-18.", "1-19.", "10-4.", "10-7.", "11-1.", "11-14.", "11-17.", "11-20.", "11-7.", "13-20.", "13-8.", "14-10.", "14-11.", "14-14.", "14-7.", "15-1.", "15-20.", "16-1.", "16-16.", "17-10.", "17-11.", "17-2.", "19-1.", "19-16.", "19-19.", "19-20.", "19-3.", "2-10.", "2-11.", "2-17.", "2-18.", "2-20.", "2-3.", "2-4.", "2-5.", "2-6.", "2-7.", "2-8.", "3-13.", "3-18.", "3-3.", "4-1.", "4-10.", "4-11.", "4-19.", "5-5.", "6-15.", "7-10.", "7-14.", "8-18.", "8-20.", "8-3.", "8-8.", ], "domains": [1, 2, 3, 4, 5], "num_examples_per_domain_per_label": 100, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl", "source_or_target_dataset": "source", "x_transforms": ["unit_mag"], "episode_transforms": [], "domain_prefix": "C_A_", }, { "labels": [ "1-10", "1-12", "1-14", "1-16", "1-18", "1-19", "1-8", "10-11", "10-17", "10-4", "10-7", "11-1", "11-10", "11-19", "11-20", "11-4", "11-7", "12-19", "12-20", "12-7", "13-14", "13-18", "13-19", "13-20", "13-3", "13-7", "14-10", "14-11", "14-12", "14-13", "14-14", "14-19", "14-20", "14-7", "14-8", "14-9", "15-1", "15-19", "15-6", "16-1", "16-16", "16-19", "16-20", "17-10", "17-11", "18-1", "18-10", "18-11", "18-12", "18-13", "18-14", "18-15", "18-16", "18-17", "18-19", "18-2", "18-20", "18-4", "18-5", "18-7", "18-8", "18-9", "19-1", "19-10", "19-11", "19-12", "19-13", "19-14", "19-15", "19-19", "19-2", "19-20", "19-3", "19-4", "19-6", "19-7", "19-8", "19-9", "2-1", "2-13", "2-15", "2-3", "2-4", "2-5", "2-6", "2-7", "2-8", "20-1", "20-12", "20-14", "20-15", "20-16", "20-18", "20-19", "20-20", "20-3", "20-4", "20-5", "20-7", "20-8", "3-1", "3-13", "3-18", "3-2", "3-8", "4-1", "4-10", "4-11", "5-1", "5-5", 
"6-1", "6-15", "6-6", "7-10", "7-11", "7-12", "7-13", "7-14", "7-7", "7-8", "7-9", "8-1", "8-13", "8-14", "8-18", "8-20", "8-3", "8-8", "9-1", "9-7", ], "domains": [1, 2, 3, 4], "num_examples_per_domain_per_label": 100, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/wisig.node3-19.stratified_ds.2022A.pkl", "source_or_target_dataset": "source", "x_transforms": ["unit_mag"], "episode_transforms": [], "domain_prefix": "W_A_", }, { "labels": [ "3123D52", "3123D65", "3123D79", "3123D80", "3123D54", "3123D70", "3123D7B", "3123D89", "3123D58", "3123D76", "3123D7D", "3123EFE", "3123D64", "3123D78", "3123D7E", "3124E4A", ], "domains": [32, 38, 8, 44, 14, 50, 20, 26], "num_examples_per_domain_per_label": 2000, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl", "source_or_target_dataset": "target", "x_transforms": ["unit_mag"], "episode_transforms": [], "domain_prefix": "ORACLE.run2_", }, ], } # Set this to True if you want to run this template directly STANDALONE = False if STANDALONE: print("parameters not injected, running with standalone_parameters") parameters = standalone_parameters if not 'parameters' in locals() and not 'parameters' in globals(): raise Exception("Parameter injection failed") #Use an easy dict for all the parameters p = EasyDict(parameters) supplied_keys = set(p.keys()) if supplied_keys != required_parameters: print("Parameters are incorrect") if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters)) if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys)) raise RuntimeError("Parameters are incorrect") ################################### # Set the RNGs and make it all deterministic ################################### np.random.seed(p.seed) random.seed(p.seed) torch.manual_seed(p.seed) torch.use_deterministic_algorithms(True) ########################################### # 
The stratified datasets honor this ########################################### torch.set_default_dtype(eval(p.torch_default_dtype)) ################################### # Build the network(s) # Note: It's critical to do this AFTER setting the RNG ################################### x_net = build_sequential(p.x_net) start_time_secs = time.time() p.domains_source = [] p.domains_target = [] train_original_source = [] val_original_source = [] test_original_source = [] train_original_target = [] val_original_target = [] test_original_target = [] # global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag # global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag def add_dataset( labels, domains, pickle_path, x_transforms, episode_transforms, domain_prefix, num_examples_per_domain_per_label, source_or_target_dataset:str, iterator_seed=p.seed, dataset_seed=p.dataset_seed, n_shot=p.n_shot, n_way=p.n_way, n_query=p.n_query, train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor), ): if x_transforms == []: x_transform = None else: x_transform = get_chained_transform(x_transforms) if episode_transforms == []: episode_transform = None else: raise Exception("episode_transforms not implemented") episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1]) eaf = Episodic_Accessor_Factory( labels=labels, domains=domains, num_examples_per_domain_per_label=num_examples_per_domain_per_label, iterator_seed=iterator_seed, dataset_seed=dataset_seed, n_shot=n_shot, n_way=n_way, n_query=n_query, train_val_test_k_factors=train_val_test_k_factors, pickle_path=pickle_path, x_transform_func=x_transform, ) train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test() train = Lazy_Iterable_Wrapper(train, episode_transform) val = Lazy_Iterable_Wrapper(val, episode_transform) test = Lazy_Iterable_Wrapper(test, episode_transform) if 
source_or_target_dataset=="source": train_original_source.append(train) val_original_source.append(val) test_original_source.append(test) p.domains_source.extend( [domain_prefix + str(u) for u in domains] ) elif source_or_target_dataset=="target": train_original_target.append(train) val_original_target.append(val) test_original_target.append(test) p.domains_target.extend( [domain_prefix + str(u) for u in domains] ) else: raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}") for ds in p.datasets: add_dataset(**ds) # from steves_utils.CORES.utils import ( # ALL_NODES, # ALL_NODES_MINIMUM_1000_EXAMPLES, # ALL_DAYS # ) # add_dataset( # labels=ALL_NODES, # domains = ALL_DAYS, # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"cores_{u}" # ) # from steves_utils.ORACLE.utils_v2 import ( # ALL_DISTANCES_FEET, # ALL_RUNS, # ALL_SERIAL_NUMBERS, # ) # add_dataset( # labels=ALL_SERIAL_NUMBERS, # domains = list(set(ALL_DISTANCES_FEET) - {2,62}), # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"), # source_or_target_dataset="source", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"oracle1_{u}" # ) # from steves_utils.ORACLE.utils_v2 import ( # ALL_DISTANCES_FEET, # ALL_RUNS, # ALL_SERIAL_NUMBERS, # ) # add_dataset( # labels=ALL_SERIAL_NUMBERS, # domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}), # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"), # source_or_target_dataset="source", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"oracle2_{u}" # ) # add_dataset( # labels=list(range(19)), # domains = [0,1,2], # 
num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"met_{u}" # ) # # from steves_utils.wisig.utils import ( # # ALL_NODES_MINIMUM_100_EXAMPLES, # # ALL_NODES_MINIMUM_500_EXAMPLES, # # ALL_NODES_MINIMUM_1000_EXAMPLES, # # ALL_DAYS # # ) # import steves_utils.wisig.utils as wisig # add_dataset( # labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES, # domains = wisig.ALL_DAYS, # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"wisig_{u}" # ) ################################### # Build the dataset ################################### train_original_source = Iterable_Aggregator(train_original_source, p.seed) val_original_source = Iterable_Aggregator(val_original_source, p.seed) test_original_source = Iterable_Aggregator(test_original_source, p.seed) train_original_target = Iterable_Aggregator(train_original_target, p.seed) val_original_target = Iterable_Aggregator(val_original_target, p.seed) test_original_target = Iterable_Aggregator(test_original_target, p.seed) # For CNN We only use X and Y. And we only train on the source. # Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. 
Finally wrap them in a dataloader transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda) val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda) test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda) train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda) val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda) test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda) datasets = EasyDict({ "source": { "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source}, "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source} }, "target": { "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target}, "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target} }, }) from steves_utils.transforms import get_average_magnitude, get_average_power print(set([u for u,_ in val_original_source])) print(set([u for u,_ in val_original_target])) s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source)) print(s_x) # for ds in [ # train_processed_source, # val_processed_source, # test_processed_source, # train_processed_target, # val_processed_target, # test_processed_target # ]: # for s_x, s_y, q_x, q_y, _ in ds: # for X in (s_x, q_x): # for x in X: # assert np.isclose(get_average_magnitude(x.numpy()), 1.0) # assert np.isclose(get_average_power(x.numpy()), 1.0) ################################### # Build the model ################################### model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256)) optimizer = Adam(params=model.parameters(), lr=p.lr) ################################### # train 
################################### jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device) jig.train( train_iterable=datasets.source.processed.train, source_val_iterable=datasets.source.processed.val, target_val_iterable=datasets.target.processed.val, num_epochs=p.n_epoch, num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH, patience=p.patience, optimizer=optimizer, criteria_for_best=p.criteria_for_best, ) total_experiment_time_secs = time.time() - start_time_secs ################################### # Evaluate the model ################################### source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test) target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test) source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val) target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val) history = jig.get_history() total_epochs_trained = len(history["epoch_indices"]) val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val)) confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl) per_domain_accuracy = per_domain_accuracy_from_confusion(confusion) # Add a key to per_domain_accuracy for if it was a source domain for domain, accuracy in per_domain_accuracy.items(): per_domain_accuracy[domain] = { "accuracy": accuracy, "source?": domain in p.domains_source } # Do an independent accuracy assesment JUST TO BE SURE! 
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device) # _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device) # _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device) # _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device) # assert(_source_test_label_accuracy == source_test_label_accuracy) # assert(_target_test_label_accuracy == target_test_label_accuracy) # assert(_source_val_label_accuracy == source_val_label_accuracy) # assert(_target_val_label_accuracy == target_val_label_accuracy) experiment = { "experiment_name": p.experiment_name, "parameters": dict(p), "results": { "source_test_label_accuracy": source_test_label_accuracy, "source_test_label_loss": source_test_label_loss, "target_test_label_accuracy": target_test_label_accuracy, "target_test_label_loss": target_test_label_loss, "source_val_label_accuracy": source_val_label_accuracy, "source_val_label_loss": source_val_label_loss, "target_val_label_accuracy": target_val_label_accuracy, "target_val_label_loss": target_val_label_loss, "total_epochs_trained": total_epochs_trained, "total_experiment_time_secs": total_experiment_time_secs, "confusion": confusion, "per_domain_accuracy": per_domain_accuracy, }, "history": history, "dataset_metrics": get_dataset_metrics(datasets, "ptn"), } ax = get_loss_curve(experiment) plt.show() get_results_table(experiment) get_domain_accuracies(experiment) print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"]) print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"]) json.dumps(experiment)
_____no_output_____
MIT
experiments/tl_2/cores_wisig-oracle.run2.framed/trials/1/trial.ipynb
stevester94/csc500-notebooks
Supplementary
def show_results_dist_1_3(frames, title, y_axe_name, dims=(18, 12), save=False,
                          file_name='trash', legend_size=13):
    """Draw three side-by-side log-x lineplots (one per frame) of distance
    computations vs. error, optionally saving the figure as a PDF.

    NOTE(review): loop-body extent reconstructed from collapsed source —
    confirm `plt.setp` belongs inside the per-panel loop.
    """
    size = len(frames)
    a4_dims = dims
    fig, axs = plt.subplots(1, 3, figsize=a4_dims)
    for i in range(3):
        sns.lineplot(x="Error = 1 - Recall@1", y=y_axe_name, hue="algorithm",
                     markers=True, style="algorithm", dashes=False,
                     data=frames[i], ax=axs[i], linewidth=2, ms=10)
        axs[i].set_title(title[i], size='20')
        lx = axs[i].get_xlabel()
        ly = axs[i].get_ylabel()
        axs[i].set_xlabel(lx, fontsize=20)
        axs[i].set_ylabel(ly, fontsize=20)
        axs[i].set_xscale('log')
        # The first panel covers a wider error range, so it gets an extra tick.
        if i == 0:
            axs[i].set_xticks([0.001, 0.01, .1])
        else:
            axs[i].set_xticks([0.01, 0.1])
        axs[i].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        axs[i].tick_params(axis='both', which='both', labelsize=15)
        if i > 0:
            axs[i].set_ylabel('')
        plt.setp(axs[i].get_legend().get_texts(), fontsize=legend_size)
    if save:
        fig.savefig(file_name + ".pdf", bbox_inches='tight')


# --- Panel set 1: optimal number of KL dimensions (d = 4 / 8 / 16) ---
y_axe = 7
y_axe_name = "dist calc"
model_names = [['kNN', 'kNN + Kl + llf 4', 'kNN + Kl + llf 8',
                'kNN + Kl + llf 16', 'kNN + Kl + llf 32']]
num_show = [0, 1, 2, 3, 4]

num_exper = [6]
num_models = [5]
file_names = [path + '5_nlt.txt']
df_kl_4 = get_df(num_models, num_exper, file_names, model_names, y_axe,
                 y_axe_name, title="trash", num_show=num_show)

num_exper = [6]
num_models = [5]
file_names = [path + '9_nlt.txt']
df_kl_8 = get_df(num_models, num_exper, file_names, model_names, y_axe,
                 y_axe_name, title="trash", num_show=num_show)

num_exper = [6]
num_models = [5]
file_names = [path + '17_nlt.txt']
df_kl_16 = get_df(num_models, num_exper, file_names, model_names, y_axe,
                  y_axe_name, title="trash", num_show=num_show)

frames = [df_kl_4, df_kl_8, df_kl_16]
show_results_dist_1_3(frames, ['d = 4', 'd = 8', 'd = 16'], y_axe_name,
                      dims=(20, 6), save=False,
                      file_name='suppl_figure_optimal_kl_number')

# --- Panel set 2: KL variant comparison (d = 4 / 8 / 16) ---
path_end = '_llt.txt'
y_axe = 7
y_axe_name = "dist calc"
model_names = [['kNN', 'thrNN', 'kNN + Kl-dist + llf', 'kNN + Kl-rank + llf',
                'kNN + Kl-rank sample + llf']]
num_show = [0, 1, 2, 3, 4]

num_exper = [6]
num_models = [5]
file_names = [path + '5' + path_end]
df_kl_4 = get_df(num_models, num_exper, file_names, model_names, y_axe,
                 y_axe_name, title="trash", num_show=num_show)

num_exper = [6]
num_models = [5]
file_names = [path + '9' + path_end]
df_kl_8 = get_df(num_models, num_exper, file_names, model_names, y_axe,
                 y_axe_name, title="trash", num_show=num_show)

num_exper = [6]
num_models = [5]
file_names = [path + '17' + path_end]
df_kl_16 = get_df(num_models, num_exper, file_names, model_names, y_axe,
                  y_axe_name, title="trash", num_show=num_show)

frames = [df_kl_4, df_kl_8, df_kl_16]
show_results_dist_1_3(frames, ['d = 4', 'd = 8', 'd = 16'], y_axe_name,
                      dims=(20, 6), save=False,
                      file_name='suppl_figure_optimal_kl_type', legend_size=10)

# --- Distribution of distances to the nearest neighbour, per dimension ---
path_start = "~/Desktop/results/distr_to_1_"
path_end = ".txt"
a4_dims = (7, 3)
fig, ax = plt.subplots(figsize=a4_dims)
ax.set_yticks([])

file_name = path_start + "sift" + path_end
file_name = os.path.expanduser(file_name)
distr = np.genfromtxt(file_name)
sns.distplot(distr, label="SIFT")

file_name = path_start + "d_9" + path_end
file_name = os.path.expanduser(file_name)
distr = np.genfromtxt(file_name)
sns.distplot(distr, label="d=8")

file_name = path_start + "d_17" + path_end
file_name = os.path.expanduser(file_name)
distr = np.genfromtxt(file_name)
sns.distplot(distr, label="d=16")

file_name = path_start + "d_33" + path_end
file_name = os.path.expanduser(file_name)
distr = np.genfromtxt(file_name)
sns.distplot(distr, label="d=32")

file_name = path_start + "d_65" + path_end
file_name = os.path.expanduser(file_name)
distr = np.genfromtxt(file_name)
sns.distplot(distr, label="d=64")

plt.legend()
fig.savefig("suppl_dist_disrt.pdf", bbox_inches='tight')
_____no_output_____
MIT
results/draw_results.ipynb
Shekhale/gbnns_theory
y = 0.14766146x + 12.10283073
# Coefficient of determination (R^2) of the fitted linear model ("accuracy").
lr.score(x, y)
_____no_output_____
Apache-2.0
autompg_regression-Copy1.ipynb
power3247/test_machinlearning
์‚ฌ์ดํ‚ท ๋Ÿฐ ๋ชจ๋ธ๋ง์„ ํ†ตํ•ด์„œ ๋‘ ๋ฐ์ดํ„ฐ๋ฅผ ์ฃผ์–ด์ฃผ๊ณ  ์„ ํ˜•๋ชจ๋ธ์„ ๋งŒ๋“ ๊ฒƒ
# Predict the target for a single input value of 99 and display it.
y_predicted = lr.predict([[99]])
y_predicted
_____no_output_____
Apache-2.0
autompg_regression-Copy1.ipynb
power3247/test_machinlearning
1. ์ •๋ณด๋‹จ๊ณ„: ์ˆ˜์ง‘ ๊ฐ€๊ณต- ๋ฌธ์ œ ๋ฐ์ดํ„ฐ ํ™•์ธ ๋ฐ ์ฒ˜๋ฆฌ2. ๊ต์œก ๋‹จ๊ณ„: ๋จธ์‹  ๋Œ€์ƒ- ์ปฌ๋Ÿผ์„ ํƒ- ๋ชจ๋ธ ์„ ํƒ- ๊ต์œก- ์ •ํ™•๋„ ํ™•์ธ3. ์„œ๋น„์Šค ๋‹จ๊ณ„:๊ณ ๊ฐ์‘๋Œ€ X์˜ ๊ฐ’์„ ๋Š˜๋ฆด์ˆ˜ ์žˆ๋‹ค
from sklearn.model_selection import train_test_split

# Hold out a test split and refit the linear model on the training portion.
X_train, X_test, Y_train, Y_test = train_test_split(x, y)
X_train, X_test, Y_train, Y_test
lr.fit(X_train, Y_train)
lr.coef_, lr.intercept_  # slope(s) and intercept
lr.score(X_train, Y_train)
lr.score(X_test, Y_test)
lr.predict([[3504.0, 8]])
lr.predict([[2790.0, 4]])

# Persist the fitted model and load it back.
# Fix: use `with` so the file handles are closed (the original passed bare
# `open(...)` results to pickle and never closed them).
import pickle
with open('./saves/autompg_lr.pkl', 'wb') as f:
    pickle.dump(lr, f)
with open('./saves/autompg_lr.pkl', 'rb') as f:
    abc = pickle.load(f)
_____no_output_____
Apache-2.0
autompg_regression-Copy1.ipynb
power3247/test_machinlearning
Fig.3 - Comparisons of MDR's AUC across 12 different sets Change plot's default size and font
# Global matplotlib configuration for all Fig.3 panels.
import matplotlib.pyplot as plt

# Default figure size for every subsequent figure (inches).
plt.rcParams['figure.figsize'] = [20, 12]
rc = {"font.family" : "sans-serif",
      "font.style" : "normal",
      "mathtext.fontset" : "dejavusans"}
plt.rcParams.update(rc)
# Prefer Myriad Pro when installed, falling back to the stock sans-serif list.
plt.rcParams["font.sans-serif"] = ["Myriad Pro"] + plt.rcParams["font.sans-serif"]

# Project-local constants: raw-data column names and columns to discard.
from constant import HEADER_NAME, COLUMNS_TO_DROP
import pandas as pd

# Shared font sizes for panel titles and axis labels.
TITLE_FONTSIZE = 20
XLABEL_FONTSIZE = 18
_____no_output_____
MIT
mdr_analysis/3Fig-version0-AUC_IQR_across_therapy_and_sets-separate.ipynb
bonilab/malariaibm-generation-of-MDR-mutants
PfPR = 0.1%
# Fig.3 panel, PfPR = 0.1%: IQR of AUC for the most-dangerous triple and
# double mutants under MFT (m), cycling (c) and adaptive-cycling (a)
# strategies.  Grid: 5 genotype/therapy rows x 3 drug-coverage columns.
fig, axes = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
fig.patch.set_facecolor('white')
fig.suptitle('PfPR = 0.1%', y=0.93, fontweight='bold', fontsize=TITLE_FONTSIZE)

# plot IQR of AUC for most-dangerous-triple and -double
from plotter import fig3_dangerous_triple_or_double_AUC_IQR

# a = simulation set number, b = drug coverage (%), idx = column index.
for (a,b,idx) in zip([1,5,9],[20,40,60],[0,1,2]): # set#, coverage, index#
    file_path_cyc = 'raw_data/set%s_c/monthly/set%sc_' % (a,a) +'%smonthly_data_0.txt'
    file_path_mft = 'raw_data/set%s_m/monthly/set%sm_' % (a,a) + '%smonthly_data_0.txt'
    file_path_adpcyc = 'raw_data/set%s_ac/monthly/set%sac_' % (a,a) + '%smonthly_data_0.txt'
    dflist_cyc = []
    dflist_mft = []
    dflist_adpcyc = []
    # Load the 100 stochastic replicates for each treatment strategy.
    for i in range(1,101):
        dflist_cyc.append(pd.read_csv(file_path_cyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_mft.append(pd.read_csv(file_path_mft % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_adpcyc.append(pd.read_csv(file_path_adpcyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
    axes[0][idx].set_title('%s%% coverage' % b, fontsize=XLABEL_FONTSIZE)
    # Rows: the two most-dangerous triple mutants, then the
    # DHA-PPQ / ASAQ / AL double-resistant genotypes.
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[0][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='TYY..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[1][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='KNF..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[2][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='DHA-PPQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[3][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='ASAQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[4][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='AL', option='double')

# Enlarge the y tick labels of the left-most column only (rows share y axes).
for c in range(5):
    for tick in axes[c][0].yaxis.get_major_ticks():
        tick.label.set_fontsize(XLABEL_FONTSIZE)

axes[0][0].set_xlim(-1, 22) # left col
axes[0][1].set_xlim(-90/22, 90) # mid col

# Invisible full-figure axis used only to carry the shared x-label.
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# add common x- and y-labels
plt.xlabel('AUC', fontsize=XLABEL_FONTSIZE)
_____no_output_____
MIT
mdr_analysis/3Fig-version0-AUC_IQR_across_therapy_and_sets-separate.ipynb
bonilab/malariaibm-generation-of-MDR-mutants
PfPR = 1%
# Fig.3 panel, PfPR = 1%: same layout as the 0.1% panel but reading
# simulation sets 2, 6 and 10 (20/40/60% drug coverage).
fig, axes = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
fig.patch.set_facecolor('white')
fig.suptitle('PfPR = 1%', y=0.93, fontweight='bold', fontsize=TITLE_FONTSIZE)

# plot IQR of AUC for most-dangerous-triple and -double
from plotter import fig3_dangerous_triple_or_double_AUC_IQR

# a = simulation set number, b = drug coverage (%), idx = column index.
for (a,b,idx) in zip([2,6,10],[20,40,60],[0,1,2]): # set#, coverage, index#
    file_path_cyc = 'raw_data/set%s_c/monthly/set%sc_' % (a,a) +'%smonthly_data_0.txt'
    file_path_mft = 'raw_data/set%s_m/monthly/set%sm_' % (a,a) + '%smonthly_data_0.txt'
    file_path_adpcyc = 'raw_data/set%s_ac/monthly/set%sac_' % (a,a) + '%smonthly_data_0.txt'
    dflist_cyc = []
    dflist_mft = []
    dflist_adpcyc = []
    # Load the 100 stochastic replicates for each treatment strategy.
    for i in range(1,101):
        dflist_cyc.append(pd.read_csv(file_path_cyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_mft.append(pd.read_csv(file_path_mft % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_adpcyc.append(pd.read_csv(file_path_adpcyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
    axes[0][idx].set_title('%s%% coverage' % b, fontsize=XLABEL_FONTSIZE)
    # Rows: the two most-dangerous triple mutants, then the
    # DHA-PPQ / ASAQ / AL double-resistant genotypes.
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[0][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='TYY..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[1][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='KNF..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[2][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='DHA-PPQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[3][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='ASAQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[4][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='AL', option='double')

# Enlarge the y tick labels of the left-most column only (rows share y axes).
for c in range(5):
    for tick in axes[c][0].yaxis.get_major_ticks():
        tick.label.set_fontsize(XLABEL_FONTSIZE)

# Invisible full-figure axis used only to carry the shared x-label.
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# add common x- and y-labels
plt.xlabel('AUC', fontsize=XLABEL_FONTSIZE)
_____no_output_____
MIT
mdr_analysis/3Fig-version0-AUC_IQR_across_therapy_and_sets-separate.ipynb
bonilab/malariaibm-generation-of-MDR-mutants
PfPR = 5%
# Fig.3 panel, PfPR = 5%: same layout, reading simulation sets 3, 7 and 11.
fig, axes = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
fig.patch.set_facecolor('white')
fig.suptitle('PfPR = 5%', y=0.93, fontweight='bold', fontsize=TITLE_FONTSIZE)

# plot IQR of AUC for most-dangerous-triple and -double
from plotter import fig3_dangerous_triple_or_double_AUC_IQR

# a = simulation set number, b = drug coverage (%), idx = column index.
for (a,b,idx) in zip([3,7,11],[20,40,60],[0,1,2]): # set#, coverage, index#
    file_path_cyc = 'raw_data/set%s_c/monthly/set%sc_' % (a,a) +'%smonthly_data_0.txt'
    file_path_mft = 'raw_data/set%s_m/monthly/set%sm_' % (a,a) + '%smonthly_data_0.txt'
    file_path_adpcyc = 'raw_data/set%s_ac/monthly/set%sac_' % (a,a) + '%smonthly_data_0.txt'
    dflist_cyc = []
    dflist_mft = []
    dflist_adpcyc = []
    # Load the 100 stochastic replicates for each treatment strategy.
    for i in range(1,101):
        dflist_cyc.append(pd.read_csv(file_path_cyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_mft.append(pd.read_csv(file_path_mft % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_adpcyc.append(pd.read_csv(file_path_adpcyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
    axes[0][idx].set_title('%s%% coverage' % b, fontsize=XLABEL_FONTSIZE)
    # Rows: the two most-dangerous triple mutants, then the
    # DHA-PPQ / ASAQ / AL double-resistant genotypes.
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[0][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='TYY..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[1][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='KNF..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[2][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='DHA-PPQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[3][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='ASAQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[4][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='AL', option='double')

# Enlarge the y tick labels of the left-most column only (rows share y axes).
for c in range(5):
    for tick in axes[c][0].yaxis.get_major_ticks():
        tick.label.set_fontsize(XLABEL_FONTSIZE)

# Invisible full-figure axis used only to carry the shared x-label.
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# add common x- and y-labels
plt.xlabel('AUC', fontsize=XLABEL_FONTSIZE)
_____no_output_____
MIT
mdr_analysis/3Fig-version0-AUC_IQR_across_therapy_and_sets-separate.ipynb
bonilab/malariaibm-generation-of-MDR-mutants
PfPR = 20%
# Fig.3 panel, PfPR = 20%: same layout, reading simulation sets 4, 8 and 12.
fig, axes = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
fig.patch.set_facecolor('white')
fig.suptitle('PfPR = 20%', y=0.93, fontweight='bold', fontsize=TITLE_FONTSIZE)

# plot IQR of AUC for most-dangerous-triple and -double
from plotter import fig3_dangerous_triple_or_double_AUC_IQR

# a = simulation set number, b = drug coverage (%), idx = column index.
for (a,b,idx) in zip([4,8,12],[20,40,60],[0,1,2]): # set#, coverage, index#
    file_path_cyc = 'raw_data/set%s_c/monthly/set%sc_' % (a,a) +'%smonthly_data_0.txt'
    file_path_mft = 'raw_data/set%s_m/monthly/set%sm_' % (a,a) + '%smonthly_data_0.txt'
    file_path_adpcyc = 'raw_data/set%s_ac/monthly/set%sac_' % (a,a) + '%smonthly_data_0.txt'
    dflist_cyc = []
    dflist_mft = []
    dflist_adpcyc = []
    # Load the 100 stochastic replicates for each treatment strategy.
    for i in range(1,101):
        dflist_cyc.append(pd.read_csv(file_path_cyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_mft.append(pd.read_csv(file_path_mft % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
        dflist_adpcyc.append(pd.read_csv(file_path_adpcyc % i, index_col=False, \
                            names=HEADER_NAME, sep='\t').drop(columns=COLUMNS_TO_DROP))
    axes[0][idx].set_title('%s%% coverage' % b, fontsize=XLABEL_FONTSIZE)
    # Rows: the two most-dangerous triple mutants, then the
    # DHA-PPQ / ASAQ / AL double-resistant genotypes.
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[0][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='TYY..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[1][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='KNF..Y2.', option='triple')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[2][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='DHA-PPQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[3][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='ASAQ', option='double')
    fig3_dangerous_triple_or_double_AUC_IQR(ax=axes[4][idx], m=dflist_mft, c=dflist_cyc, a=dflist_adpcyc, pattern='AL', option='double')

# Enlarge the y tick labels of the left-most column only (rows share y axes).
for c in range(5):
    for tick in axes[c][0].yaxis.get_major_ticks():
        tick.label.set_fontsize(XLABEL_FONTSIZE)

# Invisible full-figure axis used only to carry the shared x-label.
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# add common x- and y-labels
plt.xlabel('AUC', fontsize=XLABEL_FONTSIZE)
_____no_output_____
MIT
mdr_analysis/3Fig-version0-AUC_IQR_across_therapy_and_sets-separate.ipynb
bonilab/malariaibm-generation-of-MDR-mutants
Image ClassificationIn this project, you'll classify images from the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images. Get the DataRun the following cell to download the [CIFAR-10 dataset for python](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz).
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import problem_unittests as tests import tarfile cifar10_dataset_folder_path = 'cifar-10-batches-py' # Use Floyd's cifar-10 dataset if present floyd_cifar10_location = '/input/cifar-10/python.tar.gz' if isfile(floyd_cifar10_location): tar_gz_path = floyd_cifar10_location else: tar_gz_path = 'cifar-10-python.tar.gz' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(tar_gz_path): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar: urlretrieve( 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', tar_gz_path, pbar.hook) if not isdir(cifar10_dataset_folder_path): with tarfile.open(tar_gz_path) as tar: tar.extractall() tar.close() tests.test_folder_path(cifar10_dataset_folder_path)
CIFAR-10 Dataset: 171MB [01:58, 1.44MB/s]
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Explore the DataThe dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc.. Each batch contains the labels and images that are one of the following:* airplane* automobile* bird* cat* deer* dog* frog* horse* ship* truckUnderstanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). The `sample_id` is the id for a image and label pair in the batch.Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np

# Explore the dataset
# batch_id selects one of the 5 training batches (1-5); sample_id selects
# a single image/label pair within that batch.
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
Stats of batch 1: Samples: 10000 Label Counts: {0: 1005, 1: 974, 2: 1032, 3: 1016, 4: 999, 5: 937, 6: 1030, 7: 1001, 8: 1025, 9: 981} First 20 Labels: [6, 9, 9, 4, 1, 1, 2, 7, 8, 3, 4, 7, 7, 2, 9, 9, 9, 3, 2, 6] Example of Image 5: Image - Min Value: 0 Max Value: 252 Image - Shape: (32, 32, 3) Label - Label Id: 1 Name: automobile
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Implement Preprocess Functions NormalizeIn the cell below, implement the `normalize` function to take in image data, `x`, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as `x`.
def normalize(x):
    """
    Normalize a list of sample image data in the range of 0 to 1
    : x: List of image data.  The image shape is (32, 32, 3)
    : return: Numpy array of normalize data
    """
    # Use np.min/np.max instead of shadowing the `min`/`max` builtins.
    lo = np.min(x)
    hi = np.max(x)
    span = hi - lo
    if span == 0:
        # Constant-valued input: avoid a division by zero and map all
        # pixels to 0.0 (the original returned NaN/inf here).
        return np.zeros_like(x, dtype=np.float64)
    return (x - lo) / span


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_normalize(normalize)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
One-hot encodeJust like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the `one_hot_encode` function. The input, `x`, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to `one_hot_encode`. Make sure to save the map of encodings outside the function.Hint: Don't reinvent the wheel.
# Scratch demo of the one-hot idea on a small label array.
x = np.array([6, 1, 5])
np.array([[1 if i == y else 0 for i in range(10)] for y in x])

def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List of sample Labels
    : return: Numpy array of one-hot encoded labels
    """
    # Row-index an identity matrix: row `label` of eye(10) is exactly that
    # label's one-hot vector.  Vectorized — much faster than the nested
    # Python comprehension, with identical output (int dtype).
    return np.eye(10, dtype=int)[np.asarray(x)]


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_one_hot_encode(one_hot_encode)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Randomize DataAs you saw from exploring the data above, the order of the samples is randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset. Preprocess all the data and save itRunning the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.
""" DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
_____no_output_____
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
""" DON'T MODIFY ANYTHING IN THIS CELL """ import pickle import problem_unittests as tests import helper # Load the Preprocessed Validation data valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
_____no_output_____
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Build the networkFor the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.>**Note:** If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup.>However, if you would like to get the most out of this course, try to solve all the problems _without_ using anything from the TF Layers packages. You **can** still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the `conv2d` class, [tf.layers.conv2d](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d), you would want to use the TF Neural Network version of `conv2d`, [tf.nn.conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d). Let's begin! InputThe neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions* Implement `neural_net_image_input` * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) * Set the shape using `image_shape` with batch size set to `None`. 
* Name the TensorFlow placeholder "x" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).* Implement `neural_net_label_input` * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) * Set the shape using `n_classes` with batch size set to `None`. * Name the TensorFlow placeholder "y" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).* Implement `neural_net_keep_prob_input` * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) for dropout keep probability. * Name the TensorFlow placeholder "keep_prob" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).These names will be used at the end of the project to load your saved model.Note: `None` for shapes in TensorFlow allow for a dynamic size.
import tensorflow as tf

def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of the images
    : return: Tensor for image input.
    """
    # Leading None lets the batch dimension vary at run time.
    return tf.placeholder(tf.float32, shape=(None, *image_shape), name="x")


def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: Tensor for label input.
    """
    return tf.placeholder(tf.float32, shape=(None, n_classes), name="y")


def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: Tensor for keep probability.
    """
    # shape=None leaves the placeholder's shape unconstrained (scalar feed).
    return tf.placeholder(tf.float32, shape=None, name="keep_prob")


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
Image Input Tests Passed. Label Input Tests Passed. Keep Prob Tests Passed.
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Convolution and Max Pooling LayerConvolution layers have a lot of success with images. For this code cell, you should implement the function `conv2d_maxpool` to apply convolution then max pooling:* Create the weight and bias using `conv_ksize`, `conv_num_outputs` and the shape of `x_tensor`.* Apply a convolution to `x_tensor` using weight and `conv_strides`. * We recommend you use same padding, but you're welcome to use any padding.* Add bias* Add a nonlinear activation to the convolution.* Apply Max Pooling using `pool_ksize` and `pool_strides`. * We recommend you use same padding, but you're welcome to use any padding.**Note:** You **can't** use [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) for **this** layer, but you can still use TensorFlow's [Neural Network](https://www.tensorflow.org/api_docs/python/tf/nn) package. You may still use the shortcut option for all the **other** layers.
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
    """
    Apply convolution then max pooling to x_tensor
    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernal size 2-D Tuple for the convolutional layer
                       (a plain int is also accepted and treated as square)
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernel size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
    # Backward-compatible convenience: callers sometimes pass an int kernel size.
    if isinstance(conv_ksize, int):
        conv_ksize = (conv_ksize, conv_ksize)

    in_depth = x_tensor.get_shape().as_list()[-1]
    # BUG FIX: a conv filter must be shaped [k_h, k_w, in_depth, out_depth].
    # The original built it from the input's full H x W, silently ignoring
    # the requested conv_ksize.
    W = tf.Variable(tf.truncated_normal(
        [conv_ksize[0], conv_ksize[1], in_depth, conv_num_outputs], stddev=0.05))
    b = tf.Variable(tf.zeros(conv_num_outputs))

    # Convolution (SAME padding keeps the spatial size), bias, nonlinearity.
    x = tf.nn.conv2d(x_tensor, W,
                     strides=(1, conv_strides[0], conv_strides[1], 1),
                     padding="SAME")
    x = tf.nn.bias_add(x, b)
    x = tf.nn.relu(x)

    # Max pooling over spatial dimensions only.
    x = tf.nn.max_pool(x,
                       ksize=(1, pool_ksize[0], pool_ksize[1], 1),
                       strides=(1, pool_strides[0], pool_strides[1], 1),
                       padding="SAME")
    return x


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_con_pool(conv2d_maxpool)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Flatten LayerImplement the `flatten` function to change the dimension of `x_tensor` from a 4-D tensor to a 2-D tensor. The output should be the shape (*Batch Size*, *Flattened Image Size*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.
import numpy as np

def flatten(x_tensor):
    """
    Flatten x_tensor to (Batch Size, Flattened Image Size)
    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
    : return: A tensor of size (Batch Size, Flattened Image Size).
    """
    # Multiply out every non-batch dimension; -1 keeps the batch size dynamic.
    non_batch_dims = x_tensor.get_shape().as_list()[1:]
    flat_size = np.prod(non_batch_dims)
    return tf.reshape(x_tensor, shape=(-1, flat_size))


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_flatten(flatten)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Fully-Connected LayerImplement the `fully_conn` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.
def fully_conn(x_tensor, num_outputs):
    """
    Apply a fully connected layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
    # Weight matrix maps (in_features -> num_outputs); bias starts at zero.
    in_features = x_tensor.get_shape().as_list()[1]
    weights = tf.Variable(tf.truncated_normal([in_features, num_outputs], stddev=.05))
    biases = tf.Variable(tf.zeros(num_outputs))
    dense = tf.add(tf.matmul(x_tensor, weights), biases)
    # ReLU nonlinearity on the affine output.
    return tf.nn.relu(dense)


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_fully_conn(fully_conn)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Output LayerImplement the `output` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.**Note:** Activation, softmax, or cross entropy should **not** be applied to this.
def output(x_tensor, num_outputs): """ Apply a output layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ x_size = x_tensor.get_shape().as_list()[1:] W = tf.Variable(tf.truncated_normal(x_size + [num_outputs], stddev=.05)) b = tf.Variable(tf.zeros(num_outputs)) x = tf.add(tf.matmul(x_tensor, W), b) return x """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_output(output)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Create Convolutional ModelImplement the function `conv_net` to create a convolutional neural network model. The function takes in a batch of images, `x`, and outputs logits. Use the layers you created above to create this model:* Apply 1, 2, or 3 Convolution and Max Pool layers* Apply a Flatten Layer* Apply 1, 2, or 3 Fully Connected Layers* Apply an Output Layer* Return the output* Apply [TensorFlow's Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout) to one or more layers in the model using `keep_prob`.
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model
    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that hold dropout keep probability.
    : return: Tensor that represents logits
    """
    # Convolution + 2x2 max-pool (SAME padding), 64 feature maps.
    x = conv2d_maxpool(x, conv_num_outputs=64, conv_ksize=(2, 2),
                       conv_strides=(1, 1), pool_ksize=(2, 2), pool_strides=(2, 2))
    # BUG FIX: tf.layers.dropout interprets its 2nd argument as the DROP
    # rate (and only drops in training mode), so passing keep_prob inverted
    # the intent.  tf.nn.dropout takes the keep probability directly.
    x = tf.nn.dropout(x, keep_prob)

    # Flatten to (batch, features) for the dense layers.
    x = flatten(x)

    # Hidden fully-connected layer with dropout.
    x = fully_conn(x, 512)
    x = tf.nn.dropout(x, keep_prob)

    # Final linear layer: one logit per CIFAR-10 class.
    return output(x, 10)


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""

##############################
## Build the Neural Network ##
##############################

# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()

# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()

# Model
logits = conv_net(x, keep_prob)

# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')

# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

tests.test_conv_net(conv_net)
Neural Network Built!
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Train the Neural Network Single OptimizationImplement the function `train_neural_network` to do a single optimization. The optimization should use `optimizer` to optimize in `session` with a `feed_dict` of the following:* `x` for image input* `y` for labels* `keep_prob` for keep probability for dropoutThis function will be called for each batch, so `tf.global_variables_initializer()` has already been called.Note: Nothing needs to be returned. This function is only optimizing the neural network.
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
    # Feed the module-level placeholders x / y / keep_prob.  Nothing is
    # returned — the call exists purely for the optimizer's side effect.
    feed = {
        x: feature_batch,
        y: label_batch,
        keep_prob: keep_probability,
    }
    session.run(optimizer, feed_dict=feed)


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)
Tests Passed
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Show StatsImplement the function `print_stats` to print loss and validation accuracy. Use the global variables `valid_features` and `valid_labels` to calculate validation accuracy. Use a keep probability of `1.0` to calculate the loss and validation accuracy.
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy.

    Evaluates `cost` and `accuracy` on the current training batch and on the
    full validation set (module-level `valid_features` / `valid_labels`),
    always with dropout disabled (keep_prob = 1.0).

    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
    # One session.run evaluates both tensors in a single forward pass.
    loss, acc = session.run([cost, accuracy],
                            feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.})
    print("Training Loss= " + \
          "{:.6f}".format(loss) + ", Training Accuracy= " + \
          "{:.5f}".format(acc))
    valid_loss, valid_acc = session.run([cost, accuracy],
                                        feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.})
    print("Validation Loss= " + \
          "{:.6f}".format(valid_loss) + ", Validation Accuracy= " + \
          "{:.5f}".format(valid_acc))
_____no_output_____
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
HyperparametersTune the following parameters:* Set `epochs` to the number of iterations until the network stops learning or start overfitting* Set `batch_size` to the highest number that your machine has memory for. Most people set them to common sizes of memory: * 64 * 128 * 256 * ...* Set `keep_probability` to the probability of keeping a node using dropout
# TODO: Tune Parameters epochs = 10 batch_size = 256 keep_probability = .5
_____no_output_____
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Train on a Single CIFAR-10 BatchInstead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Sanity-check loop: train only on CIFAR-10 batch 1 for `epochs` epochs,
# printing train/validation stats once per epoch.
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(epochs):
        batch_i = 1
        for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
            train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
        # Stats are computed on the last mini-batch of the epoch plus the validation set.
        print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
        print_stats(sess, batch_features, batch_labels, cost, accuracy)
Checking the Training on a Single Batch... Epoch 1, CIFAR-10 Batch 1: Training Loss= 2.303360, Training Accuracy= 0.07500 Validation Loss= 2.302445, Validation Accuracy= 0.10020 Epoch 2, CIFAR-10 Batch 1: Training Loss= 2.300095, Training Accuracy= 0.07500 Validation Loss= 2.300672, Validation Accuracy= 0.10680 Epoch 3, CIFAR-10 Batch 1: Training Loss= 2.133212, Training Accuracy= 0.20000 Validation Loss= 2.013359, Validation Accuracy= 0.25460 Epoch 4, CIFAR-10 Batch 1: Training Loss= 1.994526, Training Accuracy= 0.42500 Validation Loss= 1.930314, Validation Accuracy= 0.30360 Epoch 5, CIFAR-10 Batch 1: Training Loss= 1.805106, Training Accuracy= 0.47500 Validation Loss= 1.899843, Validation Accuracy= 0.31120 Epoch 6, CIFAR-10 Batch 1: Training Loss= 1.622841, Training Accuracy= 0.50000 Validation Loss= 1.794162, Validation Accuracy= 0.35400 Epoch 7, CIFAR-10 Batch 1: Training Loss= 1.484576, Training Accuracy= 0.55000 Validation Loss= 1.768973, Validation Accuracy= 0.36840 Epoch 8, CIFAR-10 Batch 1: Training Loss= 1.343377, Training Accuracy= 0.55000 Validation Loss= 1.755146, Validation Accuracy= 0.37300 Epoch 9, CIFAR-10 Batch 1: Training Loss= 1.148018, Training Accuracy= 0.62500 Validation Loss= 1.719376, Validation Accuracy= 0.38340 Epoch 10, CIFAR-10 Batch 1: Training Loss= 0.982874, Training Accuracy= 0.67500 Validation Loss= 1.727221, Validation Accuracy= 0.38440
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Fully Train the ModelNow that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification'

print('Training...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    # Training cycle: each epoch iterates over all five CIFAR-10 training batches.
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
            print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)

    # Save Model so the test cell can restore the checkpoint from disk.
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)
Training... Epoch 1, CIFAR-10 Batch 1: Training Loss= 2.267260, Training Accuracy= 0.10000 Validation Loss= 2.247928, Validation Accuracy= 0.15120 Epoch 1, CIFAR-10 Batch 2: Training Loss= 1.995992, Training Accuracy= 0.17500 Validation Loss= 2.067119, Validation Accuracy= 0.22640 Epoch 1, CIFAR-10 Batch 3: Training Loss= 1.601563, Training Accuracy= 0.42500 Validation Loss= 1.950456, Validation Accuracy= 0.29940 Epoch 1, CIFAR-10 Batch 4: Training Loss= 1.685398, Training Accuracy= 0.37500 Validation Loss= 1.832309, Validation Accuracy= 0.33260 Epoch 1, CIFAR-10 Batch 5: Training Loss= 1.742794, Training Accuracy= 0.42500 Validation Loss= 1.772525, Validation Accuracy= 0.36480 Epoch 2, CIFAR-10 Batch 1: Training Loss= 1.930166, Training Accuracy= 0.42500 Validation Loss= 1.730577, Validation Accuracy= 0.38260 Epoch 2, CIFAR-10 Batch 2: Training Loss= 1.677821, Training Accuracy= 0.52500 Validation Loss= 1.696393, Validation Accuracy= 0.38680 Epoch 2, CIFAR-10 Batch 3: Training Loss= 1.347411, Training Accuracy= 0.52500 Validation Loss= 1.672744, Validation Accuracy= 0.39920 Epoch 2, CIFAR-10 Batch 4: Training Loss= 1.295316, Training Accuracy= 0.62500 Validation Loss= 1.630986, Validation Accuracy= 0.41060 Epoch 2, CIFAR-10 Batch 5: Training Loss= 1.535737, Training Accuracy= 0.47500 Validation Loss= 1.588744, Validation Accuracy= 0.42880 Epoch 3, CIFAR-10 Batch 1: Training Loss= 1.647690, Training Accuracy= 0.47500 Validation Loss= 1.609808, Validation Accuracy= 0.42340 Epoch 3, CIFAR-10 Batch 2: Training Loss= 1.431329, Training Accuracy= 0.52500 Validation Loss= 1.584422, Validation Accuracy= 0.43340 Epoch 3, CIFAR-10 Batch 3: Training Loss= 1.112353, Training Accuracy= 0.62500 Validation Loss= 1.557316, Validation Accuracy= 0.43240 Epoch 3, CIFAR-10 Batch 4: Training Loss= 1.119438, Training Accuracy= 0.65000 Validation Loss= 1.552788, Validation Accuracy= 0.44160 Epoch 3, CIFAR-10 Batch 5: Training Loss= 1.367179, Training Accuracy= 0.50000 Validation Loss= 
1.503110, Validation Accuracy= 0.46300 Epoch 4, CIFAR-10 Batch 1: Training Loss= 1.455486, Training Accuracy= 0.57500 Validation Loss= 1.554065, Validation Accuracy= 0.44300 Epoch 4, CIFAR-10 Batch 2: Training Loss= 1.224739, Training Accuracy= 0.52500 Validation Loss= 1.517005, Validation Accuracy= 0.46140 Epoch 4, CIFAR-10 Batch 3: Training Loss= 0.871683, Training Accuracy= 0.82500 Validation Loss= 1.502632, Validation Accuracy= 0.45320 Epoch 4, CIFAR-10 Batch 4: Training Loss= 0.915391, Training Accuracy= 0.72500 Validation Loss= 1.488573, Validation Accuracy= 0.46680 Epoch 4, CIFAR-10 Batch 5: Training Loss= 1.179544, Training Accuracy= 0.57500 Validation Loss= 1.464614, Validation Accuracy= 0.47320 Epoch 5, CIFAR-10 Batch 1: Training Loss= 1.337982, Training Accuracy= 0.60000 Validation Loss= 1.505418, Validation Accuracy= 0.46500 Epoch 5, CIFAR-10 Batch 2: Training Loss= 1.041483, Training Accuracy= 0.60000 Validation Loss= 1.465458, Validation Accuracy= 0.47860 Epoch 5, CIFAR-10 Batch 3: Training Loss= 0.739452, Training Accuracy= 0.80000 Validation Loss= 1.513790, Validation Accuracy= 0.44840 Epoch 5, CIFAR-10 Batch 4: Training Loss= 0.770327, Training Accuracy= 0.72500 Validation Loss= 1.493302, Validation Accuracy= 0.46580 Epoch 5, CIFAR-10 Batch 5: Training Loss= 1.037692, Training Accuracy= 0.67500 Validation Loss= 1.450228, Validation Accuracy= 0.48040 Epoch 6, CIFAR-10 Batch 1: Training Loss= 1.211084, Training Accuracy= 0.70000 Validation Loss= 1.447853, Validation Accuracy= 0.47460 Epoch 6, CIFAR-10 Batch 2: Training Loss= 0.880263, Training Accuracy= 0.72500 Validation Loss= 1.458044, Validation Accuracy= 0.47740 Epoch 6, CIFAR-10 Batch 3: Training Loss= 0.578820, Training Accuracy= 0.87500 Validation Loss= 1.495665, Validation Accuracy= 0.45800 Epoch 6, CIFAR-10 Batch 4: Training Loss= 0.660608, Training Accuracy= 0.80000 Validation Loss= 1.465039, Validation Accuracy= 0.46980 Epoch 6, CIFAR-10 Batch 5: Training Loss= 0.921411, Training Accuracy= 
0.72500 Validation Loss= 1.453837, Validation Accuracy= 0.47860 Epoch 7, CIFAR-10 Batch 1: Training Loss= 1.094060, Training Accuracy= 0.70000 Validation Loss= 1.461889, Validation Accuracy= 0.48060 Epoch 7, CIFAR-10 Batch 2: Training Loss= 0.741231, Training Accuracy= 0.82500 Validation Loss= 1.472825, Validation Accuracy= 0.47680 Epoch 7, CIFAR-10 Batch 3: Training Loss= 0.513557, Training Accuracy= 0.95000 Validation Loss= 1.468261, Validation Accuracy= 0.47640 Epoch 7, CIFAR-10 Batch 4: Training Loss= 0.579966, Training Accuracy= 0.82500 Validation Loss= 1.440210, Validation Accuracy= 0.48640 Epoch 7, CIFAR-10 Batch 5: Training Loss= 0.747553, Training Accuracy= 0.82500 Validation Loss= 1.499854, Validation Accuracy= 0.46560 Epoch 8, CIFAR-10 Batch 1: Training Loss= 0.951779, Training Accuracy= 0.70000 Validation Loss= 1.496742, Validation Accuracy= 0.48260 Epoch 8, CIFAR-10 Batch 2: Training Loss= 0.597912, Training Accuracy= 0.87500 Validation Loss= 1.450022, Validation Accuracy= 0.49020 Epoch 8, CIFAR-10 Batch 3: Training Loss= 0.429645, Training Accuracy= 1.00000 Validation Loss= 1.456745, Validation Accuracy= 0.49120 Epoch 8, CIFAR-10 Batch 4: Training Loss= 0.523796, Training Accuracy= 0.85000 Validation Loss= 1.464786, Validation Accuracy= 0.48740 Epoch 8, CIFAR-10 Batch 5: Training Loss= 0.572838, Training Accuracy= 0.85000 Validation Loss= 1.513322, Validation Accuracy= 0.47360 Epoch 9, CIFAR-10 Batch 1: Training Loss= 0.810197, Training Accuracy= 0.80000 Validation Loss= 1.480253, Validation Accuracy= 0.50260 Epoch 9, CIFAR-10 Batch 2: Training Loss= 0.554052, Training Accuracy= 0.87500 Validation Loss= 1.442528, Validation Accuracy= 0.49260 Epoch 9, CIFAR-10 Batch 3: Training Loss= 0.419015, Training Accuracy= 0.97500 Validation Loss= 1.507362, Validation Accuracy= 0.48060 Epoch 9, CIFAR-10 Batch 4: Training Loss= 0.425726, Training Accuracy= 0.95000 Validation Loss= 1.479631, Validation Accuracy= 0.49280 Epoch 9, CIFAR-10 Batch 5: Training Loss= 
0.464258, Training Accuracy= 0.90000 Validation Loss= 1.496848, Validation Accuracy= 0.48580 Epoch 10, CIFAR-10 Batch 1: Training Loss= 0.678040, Training Accuracy= 0.77500 Validation Loss= 1.521347, Validation Accuracy= 0.48420 Epoch 10, CIFAR-10 Batch 2: Training Loss= 0.463171, Training Accuracy= 0.95000 Validation Loss= 1.451705, Validation Accuracy= 0.49820 Epoch 10, CIFAR-10 Batch 3: Training Loss= 0.335313, Training Accuracy= 0.97500 Validation Loss= 1.495026, Validation Accuracy= 0.49000 Epoch 10, CIFAR-10 Batch 4: Training Loss= 0.352970, Training Accuracy= 0.97500 Validation Loss= 1.478027, Validation Accuracy= 0.49940 Epoch 10, CIFAR-10 Batch 5: Training Loss= 0.415313, Training Accuracy= 0.97500 Validation Loss= 1.469447, Validation Accuracy= 0.50540
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
CheckpointThe model has been saved to disk. Test ModelTest your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import tensorflow as tf
import pickle
import helper
import random

# Set batch size if not already set
try:
    if batch_size:
        pass
except NameError:
    batch_size = 64

save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3

def test_model():
    """
    Test the saved model against the test dataset
    """
    test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
    loaded_graph = tf.Graph()

    with tf.Session(graph=loaded_graph) as sess:
        # Load model: restore the checkpoint written by the training cell.
        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)

        # Get Tensors from loaded model by the names assigned at graph-build time.
        loaded_x = loaded_graph.get_tensor_by_name('x:0')
        loaded_y = loaded_graph.get_tensor_by_name('y:0')
        loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
        loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')

        # Get accuracy in batches for memory limitations
        test_batch_acc_total = 0
        test_batch_count = 0

        for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
            test_batch_acc_total += sess.run(
                loaded_acc,
                feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
            test_batch_count += 1

        print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))

        # Print Random Samples: top-3 softmax predictions for a few random test images.
        random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
        random_test_predictions = sess.run(
            tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
            feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
        helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)


test_model()
Testing Accuracy: 0.5068359375
MIT
image-classification/dlnd_image_classification.ipynb
mdiaz236/DeepLearningFoundations
Least-squares technique References - Statistics in geography: https://archive.org/details/statisticsingeog0000ebdo/ Imports
from functools import partial

import numpy as np
from scipy.stats import multivariate_normal, t
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ipywidgets import interact, IntSlider

# Short alias used by the least-squares code below.
inv = np.linalg.inv

df = pd.read_csv('regression_data.csv')
df.head(3)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Population 0.5 and 0.2 are NOT the population parameters. Although we used them to generate the population, the population parameters can be different from them.
def get_y(x):
    """Evaluate the noisy linear model y = 0.5*x + 0.2 plus unit-variance Gaussian noise."""
    signal = x * 0.5 + 0.2
    return signal + np.random.normal(size=len(signal))


# Generate the synthetic population on a fixed seed so it is reproducible.
np.random.seed(52)
xs = np.linspace(0, 10, 10000)
ys = get_y(xs)

# Shuffle xs and ys with the *same* seed so corresponding (x, y) pairs stay aligned.
for paired in (xs, ys):
    np.random.seed(32)
    np.random.shuffle(paired)

plt.scatter(xs, ys, s=5)
plt.show()
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Design matrices
# Design matrix with a bias column of ones: PHI = [x | 1].
column = xs.reshape(-1, 1)
PHI = np.hstack([column, np.ones(column.shape)])

# Targets as a column vector.
T = ys.reshape(-1, 1)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Normal equation with regularization
def regularized_least_squares(PHI, T, regularizer=0):
    """
    Solve (ridge-)regularized least squares in closed form.

    Computes W = (Phi^T Phi + lambda * I)^{-1} Phi^T T, the minimizer of
    ||Phi W - T||^2 + lambda * ||W||^2.

    :param PHI: (n, 2) design matrix whose second column is the bias of ones
    :param T: (n, 1) column vector of targets
    :param regularizer: non-negative ridge coefficient lambda (0 = plain OLS)
    :return: dict with float entries 'slope' (weight of column 0) and
             'intercept' (weight of column 1)
    """
    assert PHI.shape[0] == T.shape[0]
    # Use np.linalg.inv directly instead of the notebook-level `inv` alias so
    # this cell no longer depends on the import cell having defined it.
    pseudo_inv = np.linalg.inv(PHI.T @ PHI + np.eye(PHI.shape[1]) * regularizer)
    assert pseudo_inv.shape[0] == pseudo_inv.shape[1]
    W = pseudo_inv @ PHI.T @ T
    return {'slope': float(W[0]), 'intercept': float(W[1])}
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Sampling distributions Population parameters
# Fit the entire synthetic population to obtain the "population" regression
# line that sample-based estimates are compared against.
pop_params = regularized_least_squares(PHI, T)
pop_slope, pop_intercept = pop_params['slope'], pop_params['intercept']
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Sample statistics Verify that the sampling distributions of both regression coefficients are normal.
n = 10  # sample size
num_samps = 1000


def sample(PHI, T, n):
    """Draw n rows (uniformly, with replacement) from the design matrix and targets."""
    rows = np.random.randint(PHI.shape[0], size=n)
    return PHI[rows], T[rows]


# Fit one regression per random sample and record both coefficients.
samp_slopes, samp_intercepts = [], []
for _ in range(num_samps):
    phi_draw, t_draw = sample(PHI, T, n)
    draw_fit = regularized_least_squares(phi_draw, t_draw)
    samp_slopes.append(draw_fit['slope'])
    samp_intercepts.append(draw_fit['intercept'])

np.std(samp_slopes), np.std(samp_intercepts)

# KDE of each coefficient's sampling distribution, side by side.
fig = plt.figure(figsize=(12, 4))

fig.add_subplot(121)
sns.kdeplot(samp_slopes)
plt.title('Sample distribution of sample slopes')

fig.add_subplot(122)
sns.kdeplot(samp_intercepts)
plt.title('Sample distribution of sample intercepts')

plt.show()
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Note that the two normal distributions above are correlated. This means that we need to be careful when plotting the 95% CI for the regression line, because we can't just plot the regression line with the highest slope and the highest intercept and the regression line with the lowest slope and the lowest intercept.
sns.jointplot(samp_slopes, samp_intercepts, s=5) plt.show()
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Confidence interval **Caution.** The following computation of confidence intervals does not apply to regularized least squares. Sample one sample
# Draw one larger sample (n = 500) to build confidence intervals from.
n = 500
PHI_samp, T_samp = sample(PHI, T, n)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Compute sample statistics
# Point estimates of slope and intercept from this single sample.
learned_param = regularized_least_squares(PHI_samp, T_samp)
samp_slope, samp_intercept = learned_param['slope'], learned_param['intercept']
samp_slope, samp_intercept
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Compute standard errors of sample statisticsStandard error is the estimate of the standard deviation of the sampling distribution. $$\hat\sigma = \sqrt{\frac{\text{Sum of all squared residuals}}{\text{Degrees of freedom}}}$$ Standard error for slope:$$\text{SE}(\hat\beta_1)=\hat\sigma \sqrt{\frac{1}{(n-1)s_X^2}}$$ Standard error for intercept:$$\text{SE}(\hat\beta_0)=\hat\sigma \sqrt{\frac{1}{n} + \frac{\bar X^2}{(n-1)s_X^2}}$$ where $\bar X$ is the sample mean of the $X$'s and $s_X^2$ is the sample variance of the $X$'s.
# Residual standard error: sigma_hat = sqrt(SSR / (n - 2)); two degrees of
# freedom are lost to the two estimated coefficients.
preds = samp_slope * PHI_samp[:, 0] + samp_intercept
sum_of_squared_residuals = np.sum((T_samp.reshape(-1) - preds) ** 2)
samp_sigma_y_give_x = np.sqrt(sum_of_squared_residuals / (n - 2))
samp_sigma_y_give_x

samp_mean = np.mean(PHI_samp[:, 0])
# BUG FIX: the SE formulas use the *sample* variance s_X^2 (ddof=1), so that
# (n - 1) * s_X^2 equals sum((x - x_bar)^2).  np.var defaults to ddof=0
# (population variance), which understates that quantity by a factor (n-1)/n.
samp_var = np.var(PHI_samp[:, 0], ddof=1)

# SE(slope)     = sigma_hat * sqrt(1 / ((n-1) * s_X^2))
# SE(intercept) = sigma_hat * sqrt(1/n + x_bar^2 / ((n-1) * s_X^2))
SE_slope = samp_sigma_y_give_x * np.sqrt(1 / ((n - 1) * samp_var))
SE_intercept = samp_sigma_y_give_x * np.sqrt(1 / n + samp_mean ** 2 / ((n - 1) * samp_var))
SE_slope, SE_intercept
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Compute confidence intervals for sample statistics
# 95% CI for each coefficient using the normal approximation
# (z = 1.96; with n = 500 the t quantile is essentially identical).
slope_lower, slope_upper = samp_slope - 1.96 * SE_slope, samp_slope + 1.96 * SE_slope
slope_lower, slope_upper

intercept_lower, intercept_upper = samp_intercept - 1.96 * SE_intercept, samp_intercept + 1.96 * SE_intercept
intercept_lower, intercept_upper
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Compute confidence interval for regression line Bootstrapped solution Use a 2-d Gaussian to model the joint distribution between bootstrapped sample slopes and bootstrapped sample intercepts. **Fixed.** `samp_slopes` and `samp_intercepts` used in the cell below are not bootstrapped; they are directly sampled from the population. Next time, add the bootstrapped version. Using `samp_slopes` and `samp_intercepts` still has its value, though; it shows the population regression line lies right in the middle of all sample regression lines. Remember that, whenever you use bootstrapping to estimate the variance / covariance of the sampling distribution of some statistic, there might be an equation that you can use from statistical theory.
# Bootstrap: refit the regression on `num_resamples` resamples drawn with
# replacement (same size as the original sample) and keep both coefficients.
num_resamples = 10000

resample_slopes, resample_intercepts = [], []
for _ in range(num_resamples):
    phi_boot, t_boot = sample(PHI_samp, T_samp, n=len(PHI_samp))
    boot_fit = regularized_least_squares(phi_boot, t_boot)
    resample_slopes.append(boot_fit['slope'])
    resample_intercepts.append(boot_fit['intercept'])
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
**Fixed.** The following steps might improve the results, but I don't think they are part of the standard practice.
# means = [np.mean(resample_slopes), np.mean(resample_intercepts)] # cov = np.cov(resample_slopes, resample_intercepts) # model = multivariate_normal(mean=means, cov=cov)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Sample 5000 (slope, intercept) pairs from the Gaussian.
# num_pairs_sampled = 10000 # pairs = model.rvs(num_pairs_sampled)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Scatter samples, plot regression lines and CI.
plt.figure(figsize=(20, 10))

plt.scatter(PHI_samp[:,0], T_samp.reshape(-1), s=20)  # sample

# NOTE(review): this reassigns the global `xs` (previously the shuffled
# population x values) to a plotting grid.
granularity = 1000
xs = np.linspace(0, 10, granularity)
plt.plot(xs, samp_slope * xs + samp_intercept, label='Sample')  # sample regression line
plt.plot(xs, pop_slope * xs + pop_intercept, '--', color='black', label='Population')  # population regression line

# Evaluate every bootstrap regression line on the same x grid...
lines = np.zeros((num_resamples, granularity))
for i, (slope, intercept) in enumerate(zip(resample_slopes, resample_intercepts)):
    lines[i] = slope * xs + intercept

# ...then take pointwise percentiles across bootstrap lines to form CI bands.
confidence_level = 95
uppers_95 = np.percentile(lines, confidence_level + (100 - confidence_level) / 2, axis=0)
lowers_95 = np.percentile(lines, (100 - confidence_level) / 2, axis=0)

confidence_level = 99
uppers_99 = np.percentile(lines, confidence_level + (100 - confidence_level) / 2, axis=0)
lowers_99 = np.percentile(lines, (100 - confidence_level) / 2, axis=0)

plt.fill_between(xs, lowers_95, uppers_95, color='grey', alpha=0.7, label='95% CI')
plt.plot(xs, uppers_99, color='grey', label='99% CI')
plt.plot(xs, lowers_99, color='grey')

plt.legend()
plt.show()
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Analytic solution **Reference.** Page 97, Statistics in Geography: A Practical Approach, David Ebdon, 1987. For a particular value $x_0$ of the independent variable $x$, the standard error used for its confidence interval is given by: $$\sqrt{\frac{\sum e^{2}}{n-2}\left[\frac{1}{n}+\frac{\left(x_{0}-\bar{x}\right)^{2}}{\sum x^{2}-n \bar{x}^{2}}\right]}$$ where- $\sum e^2$ is the sum of squares of residuals from regression,- $x$ is the independent variable,- $\bar{x}$ is the sample mean of the independent variables.
# Analytic CI band (Ebdon 1987, p. 97): note that
# sum(x^2) - n * x_bar^2 == sum((x - x_bar)^2).
sum_of_squared_xs = np.sum(PHI_samp[:,0] ** 2)

# Pointwise standard error of the fitted line at each grid value in `xs`.
SEs = np.sqrt(
    (sum_of_squared_residuals / (n - 2)) * (1 / n + (xs - samp_mean) ** 2 / (sum_of_squared_xs - n * samp_mean ** 2))
)

# t quantiles with n - 2 degrees of freedom for the 95% and 99% bands.
t_97dot5 = t.ppf(0.975, df=n-2)
t_99dot5 = t.ppf(0.995, df=n-2)

yhats = samp_slope * xs + samp_intercept

uppers_95 = yhats + t_97dot5 * SEs
lowers_95 = yhats - t_97dot5 * SEs
uppers_99 = yhats + t_99dot5 * SEs
lowers_99 = yhats - t_99dot5 * SEs

plt.figure(figsize=(20, 10))

plt.scatter(PHI_samp[:,0], T_samp.reshape(-1), s=20)  # sample

# NOTE(review): `xs` is reassigned here to the same grid it already holds
# from the previous plotting cell; the SEs above assume this grid.
granularity = 1000
xs = np.linspace(0, 10, granularity)
plt.plot(xs, samp_slope * xs + samp_intercept, label='Sample')  # sample regression line
plt.plot(xs, pop_slope * xs + pop_intercept, '--', color='black', label='Population')  # population regression line

plt.fill_between(xs, lowers_95, uppers_95, color='grey', alpha=0.7, label='95% CI')
plt.plot(xs, uppers_99, color='grey', label='99% CI')
plt.plot(xs, lowers_99, color='grey')

plt.legend()
plt.show()
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Regularized least squares
def plot_regression_line(PHI, T, regularizer):
    """Scatter the data and overlay the ridge-regression fit for `regularizer`."""
    plt.scatter(PHI[:, 0], T, s=5)
    params = regularized_least_squares(PHI, T, regularizer)
    # Two endpoints are enough to draw a straight line across the data range.
    endpoints = np.linspace(PHI[:, 0].min(), PHI[:, 0].max(), 2)
    fitted = params['slope'] * endpoints + params['intercept']
    plt.plot(endpoints, fitted, color='orange')
    plt.ylim(-3, 10)
    plt.show()


plot_regression_line(PHI, T, regularizer=20)


def plot_regression_line_wrapper(regularizer, num_points):
    """Interact helper: fit and plot using only the first `num_points` rows."""
    plot_regression_line(PHI[:num_points], T[:num_points], regularizer)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
Yes! The effect of regularization does change with the size of the dataset.
# Interactive exploration: the regularizer's effect grows as the dataset
# shrinks, since lambda is not scaled by the number of points.
_ = interact(
    plot_regression_line_wrapper,
    regularizer=IntSlider(min=0, max=10000, value=5000, continuous_update=False),
    num_points=IntSlider(min=2, max=1000, value=1000, continuous_update=False)
)
_____no_output_____
MIT
c3_single_layer_networks/least_square_method/least_square_method_demo.ipynb
zhihanyang2022/bishop1995_notes
1. ์ •๊ทœ ํ‘œํ˜„์‹* ์ถœ์ฒ˜ : ์„œ์  "์žก์•„๋ผ! ํ…์ŠคํŠธ ๋งˆ์ด๋‹ with ํŒŒ์ด์ฌ"* ๋ฌธ์ž์—ด์ด ์ฃผ์–ด์ง„ ๊ทœ์น™์— ์ผ์น˜ํ•˜๋Š” ์ง€, ์ผ์น˜ํ•˜์ง€ ์•Š๋Š”์ง€ ํŒ๋‹จํ•  ์ˆ˜ ์žˆ๋‹ค. ์ •๊ทœ ํ‘œํ˜„์‹์„ ์ด์šฉํ•˜์—ฌ ํŠน์ • ํŒจํ„ด์„ ์ง€๋‹ˆ๋‹ˆ ๋ฌธ์ž์—ด์„ ์ฐพ์„ ์ˆ˜ ์žˆ์–ด ํ…์ŠคํŠธ ๋ฐ์ดํ„ฐ ์‚ฌ์ „ ์ฒ˜๋ฆฌ ๋ฐ ํฌ๋กค๋ง์—์„œ ์ฃผ๋กœ ์“ฐ์ž„
string = '๊ธฐ์ƒ์ฒญ์€ ์Šˆํผ์ปดํ“จํ„ฐ๋„ ์„œ์šธ์ง€์—ญ์˜ ์ง‘์ค‘ํ˜ธ์šฐ๋ฅผ ์ œ๋Œ€๋กœ ์˜ˆ์ธกํ•˜์ง€ ๋ชปํ–ˆ๋‹ค๊ณ  ์„ค๋ช…ํ–ˆ์Šต๋‹ˆ๋‹ค. ์™œ ์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ–ˆ\ ์Šต๋‹ˆ๋‹ค. ์™œ ์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ–ˆ๋Š”์ง€ ์ž์„ธํžˆ ๋ถ„์„ํ•ด ์˜ˆ์ธก ํ”„๋กœ๊ทธ๋žจ์„ ๋ณด์™„ํ•ด์•ผ ํ•  ๋Œ€๋ชฉ์ž…๋‹ˆ๋‹ค. ๊ด€์ธก ๋ถ„์•ผ๋Š” ๊ฐœ์„ ๋  ์—ฌ์ง€๊ฐ€\ ์žˆ์Šต๋‹ˆ๋‹ค. ์ง€๊ธˆ ๋ณด์‹œ๋Š” ์™ผ์ชฝ ์‚ฌ์ง„์ด ํ˜„์žฌ ์ฒœ๋ฆฌ์•ˆ ์œ„์„ฑ์ด ์ดฌ์˜ํ•œ ๊ฒƒ์ด๊ณ  ์˜ค๋ฅธ์ชฝ์ด ์˜ฌํ•ด ๋ง ์˜์•„ ์˜ฌ๋ฆด ์ฒœ๋ฆฌ์•ˆ 2Aํ˜ธ๊ฐ€ \ ์ดฌ์˜ํ•œ ์˜์ƒ์ž…๋‹ˆ๋‹ค. ์˜ค๋ฅธ์ชฝ์ด ์™ผ์ชฝ๋ณด๋‹ค ํƒœํ’์˜ ๋ˆˆ์ด ์ข€ ๋” ๋šœ๋ ทํ•˜๊ณ  ์ฃผ๋ณ€ ๊ตฌ๋ฆ„๋„ ๋” ์ž˜ ๋ณด์ด์ฃ . ์ƒˆ ์œ„์„ฑ์„ ํ†ตํ•ด ํƒœํ’\ ๊ตฌ๋ฆ„ ๋“ฑ์˜ ์›€์ง์ž„์„ ์ƒ์„ธํžˆ ๋ถ„์„ํ•˜๋ฉด ์ข€ ๋” ์ •ํ™•ํ•œ ์˜ˆ์ธก์„ ํ•  ์ˆ˜ ์žˆ์ง€ ์•Š์„๊นŒ ๊ธฐ๋Œ€ํ•ด ๋ด…๋‹ˆ๋‹ค. ์ •๊ตฌํฌ ๊ธฐ์ž(koohee@sbs.co.kr)' string import re re.sub("\([A-Za-z0-9\._+]+@[A-Za-z]+\.(com|org|edu|net|co.kr)\)", "", string)
_____no_output_____
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
* \([A-Za-z0-9\._+]+ : ์ด๋ฉ”์ผ ์ฃผ์†Œ๊ฐ€ ๊ด„ํ˜ธ๋กœ ์‹œ์ž‘ํ•˜์—ฌ \(ํŠน์ˆ˜๋ฌธ์ž๋ฅผ ์›๋ž˜ ์˜๋ฏธ๋Œ€๋กœ ์“ฐ๊ฒŒ ํ•จ)์™€ (๋กœ ์‹œ์ž‘ ๋Œ€๊ด„ํ˜ธ[ ] ์•ˆ์— ์ด๋ฉ”์ผ ์ฃผ์†Œ์˜ ํŒจํ„ด์„ ์ž…๋ ฅ(์ž…๋ ฅํ•œ ๊ฒƒ ์ค‘ ์•„๋ฌด๊ฑฐ๋‚˜) A-Z = ์•ŒํŒŒ๋ฒณ ๋Œ€๋ฌธ์ž, a-z = ์•ŒํŒŒ๋ฒณ ์†Œ๋ฌธ์ž, 0-9 = ์ˆซ์ž, ._+ = .๋‚˜ _๋‚˜ + ๋งˆ์ง€๋ง‰ +๋Š” ๋ฐ”๋กœ ์•ž์— ์žˆ๋Š” ๊ฒƒ์ด ์ตœ์†Œ ํ•œ๋ฒˆ ์ด์ƒ ๋‚˜์™€์•ผ ํ•œ๋‹ค๋Š” ์˜๋ฏธ * @ : ์ด๋ฉ”์ผ ์ฃผ์†Œ ๋‹ค์Œ์— @ * [A-Za-z]+ : ๋„๋ฉ”์ธ ์ฃผ์†Œ์— ํ•ด๋‹นํ•˜๋Š” ์•ŒํŒŒ๋ฒณ ๋Œ€๋ฌธ์ž๋‚˜ ์†Œ๋ฌธ์ž * \. : ๋„๋ฉ”์ธ ์ฃผ์†Œ ๋‹ค์Œ์˜ . * (com|org|edu|net|co.kr)\) : |๋Š” or์กฐ๊ฑด, ๋„๋ฉ”์ธ ์ฃผ์†Œ ๋งˆ์นจํ‘œ ๋‹ค์Œ์˜ ํŒจํ„ด ๋งˆ์ง€๋ง‰ )๊นŒ์ง€ ์ฐพ์Œ ํŒŒ์ด์ฌ ์ •๊ทœํ‘œํ˜„์‹ ๊ธฐํ˜ธ ์„ค๋ช…* '*' : ๋ฐ”๋กœ ์•ž ๋ฌธ์ž, ํ‘œํ˜„์‹์ด 0๋ฒˆ ์ด์ƒ ๋ฐ˜๋ณต* '+' : ๋ฐ”๋กœ ์•ž ๋ฌธ์ž, ํ‘œํ˜„์‹์ด 1๋ฒˆ ์ด์ƒ ๋ฐ˜๋ณต* '[]' : ๋Œ€๊ด„ํ˜ธ ์•ˆ์˜ ๋ฌธ์ž ์ค‘ ํ•˜๋‚˜* '()' : ๊ด„ํ˜ธ์•ˆ์˜ ์ •๊ทœ์‹์„ ๊ทธ๋ฃน์œผ๋กœ ๋งŒ๋“ฌ* '.' : ์–ด๋–ค ํ˜•ํƒœ๋“  ๋ฌธ์ž 1์ž* '^' : ๋ฐ”๋กœ ๋’ค ๋ฌธ์ž, ํ‘œํ˜„์‹์ด ๋ฌธ์ž์—ด ๋งจ ์•ž์— ๋‚˜ํƒ€๋‚จ* '$' : ๋ฐ”๋กœ ์•ž ๋ฌธ์ž, ํ‘œํ˜„์‹์ด ๋ฌธ์ž์—ด ๋งจ ๋’ค์— ๋‚˜ํƒ€๋‚จ* '{m}' : ๋ฐ”๋กœ ์•ž ๋ฌธ์ž, ํ‘œํ˜„์‹์ด mํšŒ ๋ฐ˜๋ณต* '{m,n}' : ๋ฐ”๋กœ ์•ž ๋ฌธ์ž, ํ‘œํ˜„์‹์ด m๋ฒˆ ์ด์ƒ, n๋ฒˆ ์ดํ•˜ ๋‚˜ํƒ€๋‚จ* '|' : |๋กœ ๋ถ„๋ฆฌ๋œ ๋ฌธ์ž, ๋ฌธ์ž์—ด, ํ‘œํ˜„์‹ ์ค‘ ํ•˜๋‚˜๊ฐ€ ๋‚˜ํƒ€๋‚จ(or์กฐ๊ฑด)* '[^]' : ๋Œ€๊ด„ํ˜ธ ์•ˆ์— ์žˆ๋Š” ๋ฌธ์ž๋ฅผ ์ œ์™ธํ•œ ๋ฌธ์ž๊ฐ€ ๋‚˜ํƒ€๋‚จ
# a๋ฌธ์ž๊ฐ€ 1๋ฒˆ ์ด์ƒ ๋‚˜์˜ค๊ณ  b ๋ฌธ์ž๊ฐ€ 0๋ฒˆ ์ด์ƒ ๋‚˜์˜ค๋Š” ๋ฌธ์ž์—ด ์ฐพ๊ธฐ r = re.compile("a+b*") r.findall("aaaa, cc, bbbb, aabbb") # ๋Œ€๊ด„ํ˜ธ๋ฅผ ์ด์šฉํ•ด ๋Œ€๋ฌธ์ž๋กœ ๊ตฌ์„ฑ๋œ ๋ฌธ์ž์—ด ์ฐพ๊ธฐ r = re.compile("[A-Z]+") r.findall("HOME, home") # ^์™€ .์„ ์ด์šฉํ•˜์—ฌ ๋งจ ์•ž์— a๊ฐ€ ์˜ค๊ณ  ๊ทธ ๋‹ค์Œ์— ์–ด๋– ํ•œ ํ˜•ํƒœ๋“  2๊ฐœ์˜ ๋ฌธ์ž๊ฐ€ ์˜ค๋Š” ๋ฌธ์ž์—ด ์ฐพ๊ธฐ r = re.compile("^a..") r.findall("abc, cba") # ์ค‘๊ด„ํ˜ธ ํ‘œํ˜„์‹ {m,n}์„ ์ด์š”ํ•˜์—ฌ ํ•ด๋‹น ๋ฌธ์ž์—ด์ด m๋ฒˆ ์ด์ƒ n๋ฒˆ ์ดํ•˜ ๋‚˜ํƒ€๋‚˜๋Š” ํŒจํ„ด ์ฐพ๊ธฐ r = re.compile("a{2,3}b{2,3}") r.findall("aabb, aaabb, ab, aab") # compile ๋ฉ”์„œ๋“œ์— ์ •๊ทœ ํ‘œํ˜„์‹ ํŒจํ„ด ์ง€์ •, search๋กœ ์ •๊ทœ ํ‘œํ˜„์‹ ํŒจํ„ด๊ณผ ์ผ์น˜ํ•˜๋Š” ๋ฌธ์ž์—ด์˜ ์œ„์น˜ ์ฐพ๊ธฐ # group์„ ํ†ตํ•ด ํŒจํ„ด๊ณผ ์ผ์น˜ํ•˜๋Š” ๋ฌธ์ž๋“ค์„ ๊ทธ๋ฃนํ•‘ํ•˜์—ฌ ์ถ”์ถœ p = re.compile(".+:") m = p.search("http://google.com") m.group() # sub : ์ •๊ทœ ํ‘œํ˜„์‹๊ณผ ์ผ์น˜ํ•˜๋Š” ๋ถ€๋ถ„์„ ๋‹ค๋ฅธ ๋ฌธ์ž๋กœ ์น˜ํ™˜ p = re.compile("(๋‚ด|๋‚˜์˜|๋‚ด๊บผ)") p.sub("๊ทธ์˜", "๋‚˜์˜ ๋ฌผ๊ฑด์— ์†๋Œ€์ง€ ๋งˆ์‹œ์˜ค.")
_____no_output_____
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
2. ์ „์ฒ˜๋ฆฌ ๋Œ€์†Œ๋ฌธ์ž ํ†ต์ผ
# Demonstrate simple case normalization on a string.
s = 'Hello World'
lowered = s.lower()
uppered = s.upper()
print(lowered)
print(uppered)
hello world HELLO WORLD
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
์ˆซ์ž, ๋ฌธ์žฅ๋ถ€ํ˜ธ, ํŠน์ˆ˜๋ฌธ์ž ์ œ๊ฑฐ
# ์ˆซ์ž ์ œ๊ฑฐ p = re.compile("[0-9]+") p.sub("", '์„œ์šธ ๋ถ€๋™์‚ฐ ๊ฐ€๊ฒฉ์ด ์˜ฌํ•ด ๋“ค์–ด ํ‰๊ท  30% ์ƒ์Šนํ–ˆ์Šต๋‹ˆ๋‹ค.') # ๋ฌธ์žฅ๋ถ€ํ˜ธ, ํŠน์ˆ˜๋ฌธ์ž ์ œ๊ฑฐ p = re.compile("\W+") p.sub(" ", "โ˜…์„œ์šธ ๋ถ€๋™์‚ฐ ๊ฐ€๊ฒฉ์ด ์˜ฌํ•ด ๋“ค์–ด ํ‰๊ท  30% ์ƒ์Šนํ–ˆ์Šต๋‹ˆ๋‹ค.") s = p.sub(" ", "์ฃผ์ œ_1: ๊ฑด๊ฐ•ํ•œ ๋ฌผ๊ณผ ๊ฑด๊ฐ•ํ•œ ์ •์‹ !") s p = re.compile("_") p.sub(" ", s)
_____no_output_____
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
๋ถˆ์šฉ์–ด ์ œ๊ฑฐ
words_Korean = ['์ถ”์„','์—ฐํœด','๋ฏผ์กฑ','๋Œ€์ด๋™','์‹œ์ž‘','๋Š˜์–ด','๊ตํ†ต๋Ÿ‰','๊ตํ†ต์‚ฌ๊ณ ','ํŠนํžˆ','์ž๋™์ฐจ','๊ณ ์žฅ', '์ƒ๋‹น์ˆ˜','์ฐจ์ง€','๋‚˜ํƒ€','๊ฒƒ','๊ธฐ์ž'] stopwords = ['๊ฐ€๋‹ค','๋Š˜์–ด','๋‚˜ํƒ€','๊ฒƒ','๊ธฐ์ž'] [i for i in words_Korean if i not in stopwords] from nltk.corpus import stopwords # ๊ทธ๋ƒฅํ•˜๋ฉด LookupError ๋ฐœ์ƒํ•˜๋ฏ€๋กœ ๋‹ค์šด๋กœ๋“œ๊ฐ€ ํ•„์š”ํ•จ # import nltk # nltk.download() or nltk.download('stopwords') words_English = ['chief','justice','roberts',',','president','carter',',','president','clinton',',', 'president','bush',',','president','obama',',','fellow','americans','and','people', 'of','the','world',',','thank','you','.'] [w for w in words_English if not w in stopwords.words('english')]
_____no_output_____
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
๊ฐ™์€ ์–ด๊ทผ ๋™์ผํ™”(stemming)
# Stemming: reduce inflected words to a common stem with three NLTK stemmers.
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
ps_stemmer = PorterStemmer()
new_text = 'It is important to be immersed while you are pythoning with python.\
All pythoners have pothoned poorly at least once.'
words = word_tokenize(new_text)
# Porter stemmer: rule-based, conservative suffix stripping.
for w in words:
    print(ps_stemmer.stem(w), end=' ')

# Lancaster stemmer: more aggressive than Porter.
from nltk.stem.lancaster import LancasterStemmer
LS_stemmer = LancasterStemmer()
for w in words:
    print(LS_stemmer.stem(w), end=' ')

# RegexpStemmer: strip a specific, user-supplied substring from every token.
from nltk.stem.regexp import RegexpStemmer
RS_stemmer = RegexpStemmer('python')
for w in words:
    print(RS_stemmer.stem(w), end=' ')
It is important to be immersed while you are ing with .All ers have pothoned poorly at least once .
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
N-gram* n๋ฒˆ ์—ฐ์ด์–ด ๋“ฑ์žฅํ•˜๋Š” ๋‹จ์–ด๋“ค์˜ ์—ฐ์‡„* ๋‘ ๋ฒˆ : ๋ฐ”์ด๊ทธ๋žจ, ์„ธ ๋ฒˆ : ํŠธ๋ผ์ด๊ทธ๋žจ(ํŠธ๋ผ์ด๊ทธ๋žจ ์ด์ƒ์€ ๋ณดํŽธ์ ์œผ๋กœ ํ™œ์šฉํ•˜์ง€ ์•Š์Œ)
# N-grams: slide a window of n consecutive tokens over the sentence.
from nltk import ngrams
sentence = 'Chief Justice Roberts, Preskdent Carter, President Clinton, President Bush, President Obama, \
fellow Americans and people of the world, thank you. We, the citizens of America are now joined in a great \
national effort to rebuild our country and restore its promise for all of our people. Together, we will \
determine the course of America and the world for many, many years to come. We will face challenges. We \
will confront hardships, but we will get the job done.'
# n=2 produces bigrams (pairs of adjacent whitespace-separated tokens).
grams = ngrams(sentence.split(), 2)
for gram in grams:
    print(gram, end=' ')
('Chief', 'Justice') ('Justice', 'Roberts,') ('Roberts,', 'Preskdent') ('Preskdent', 'Carter,') ('Carter,', 'President') ('President', 'Clinton,') ('Clinton,', 'President') ('President', 'Bush,') ('Bush,', 'President') ('President', 'Obama,') ('Obama,', 'fellow') ('fellow', 'Americans') ('Americans', 'and') ('and', 'people') ('people', 'of') ('of', 'the') ('the', 'world,') ('world,', 'thank') ('thank', 'you.') ('you.', 'We,') ('We,', 'the') ('the', 'citizens') ('citizens', 'of') ('of', 'America') ('America', 'are') ('are', 'now') ('now', 'joined') ('joined', 'in') ('in', 'a') ('a', 'great') ('great', 'national') ('national', 'effort') ('effort', 'to') ('to', 'rebuild') ('rebuild', 'our') ('our', 'country') ('country', 'and') ('and', 'restore') ('restore', 'its') ('its', 'promise') ('promise', 'for') ('for', 'all') ('all', 'of') ('of', 'our') ('our', 'people.') ('people.', 'Together,') ('Together,', 'we') ('we', 'will') ('will', 'determine') ('determine', 'the') ('the', 'course') ('course', 'of') ('of', 'America') ('America', 'and') ('and', 'the') ('the', 'world') ('world', 'for') ('for', 'many,') ('many,', 'many') ('many', 'years') ('years', 'to') ('to', 'come.') ('come.', 'We') ('We', 'will') ('will', 'face') ('face', 'challenges.') ('challenges.', 'We') ('We', 'will') ('will', 'confront') ('confront', 'hardships,') ('hardships,', 'but') ('but', 'we') ('we', 'will') ('will', 'get') ('get', 'the') ('the', 'job') ('job', 'done.')
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
ํ’ˆ์‚ฌ ๋ถ„์„
# Part-of-speech analysis with three KoNLPy Korean taggers and NLTK for English.
from konlpy.tag import Hannanum
hannanum = Hannanum()
# morphs: morphemes only; nouns: nouns only; pos: (morpheme, tag) pairs
# (ntags selects the size of the tagset).
print(hannanum.morphs("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
print(hannanum.nouns("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
print(hannanum.pos("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค.", ntags=9))

from konlpy.tag import Kkma
kkma = Kkma()
print(kkma.morphs("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
print(kkma.nouns("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
print(kkma.pos("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))

from konlpy.tag import Twitter
twitter = Twitter()
print(twitter.morphs("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
print(twitter.nouns("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
print(twitter.pos("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))
# phrases: extract candidate noun phrases.
print(twitter.phrases("์นœ์ฒ™๋“ค์ด ๋ชจ์ธ ์ด๋ฒˆ ์ถ”์„ ์ฐจ๋ก€์ƒ์—์„œ๋Š” ๋‹จ์—ฐ '์ง‘๊ฐ’'์ด ํ™”์ œ์— ์˜ฌ๋ž๋‹ค."))

# English POS tagging with NLTK (Penn Treebank tags).
from nltk import pos_tag
tokens = "The little yellow dog barked at the Persian cat.".split()
tags_en = pos_tag(tokens)
print(tags_en)
[('The', 'DT'), ('little', 'JJ'), ('yellow', 'JJ'), ('dog', 'NN'), ('barked', 'VBD'), ('at', 'IN'), ('the', 'DT'), ('Persian', 'NNP'), ('cat.', 'NN')]
MIT
[STUDY]ํ…์ŠคํŠธ๋งˆ์ด๋‹_Base.ipynb
kamzzang/ADPStudy
Location-based algorithm
# Average RM and YIELD per location, then scatter-plot the averages with each
# location number annotated, colour-coded by thousand-range of the code.
location_map = list(set(TRAINING_DATA_GRAD["LOCATION"]))
location_map.sort()
# print(location_map)

list_location = []
list_avg_rm = []
list_avg_yield = []
for val in location_map:
    # FIX: build the row mask from TRAINING_DATA_GRAD itself. The original
    # code masked with EXPERIMENT_DATA["LOCATION"], whose index may not align
    # with TRAINING_DATA_GRAD and can silently select the wrong rows (pandas
    # aligns boolean Series by index when indexing).
    mask = TRAINING_DATA_GRAD["LOCATION"] == str(val)
    avg_rm = np.average(TRAINING_DATA_GRAD[mask]["RM"])
    avg_yield = np.average(TRAINING_DATA_GRAD[mask]["YIELD"])
    list_location.append(str(val))
    list_avg_rm.append(avg_rm)
    list_avg_yield.append(avg_yield)
    # print("{} = {},{}".format(val,avg_rm,avg_yield))

plt.title("Average RM and YIELD for each location")
plt.plot(list_avg_rm, list_avg_yield, 'ro')
for i, txt in enumerate(list_location):
    # One annotation colour per thousand-range of the 4-digit location code.
    if int(txt) <= 1000:
        plt.annotate(txt, (list_avg_rm[i], list_avg_yield[i]), color="blue")
    elif int(txt) <= 2000:
        plt.annotate(txt, (list_avg_rm[i], list_avg_yield[i]), color="red")
    elif int(txt) <= 3000:
        plt.annotate(txt, (list_avg_rm[i], list_avg_yield[i]), color="green")
    elif int(txt) <= 4000:
        plt.annotate(txt, (list_avg_rm[i], list_avg_yield[i]), color="black")
    elif int(txt) <= 5000:
        plt.annotate(txt, (list_avg_rm[i], list_avg_yield[i]), color="orange")
    else:
        plt.annotate(txt, (list_avg_rm[i], list_avg_yield[i]), color="purple")
plt.show()
_____no_output_____
MIT
Average_RM_YIELD_location.ipynb
hmc-cs-nsuaysom/BigDataProject
AnalysisFrom the preliminary analysis, we find that the number of different locations in the dataset is 140. The location in the dataset is encoded as a 4-digit number. We first expected that we can group the quality of the species based on the location parameters. We then plot the average of __RM__ and __YIELD__ for each location, which is shown below: Linear Regression on each group of locationAccording to prior analysis, it appears that we can possibly categorize species based on location. The approach we decide to adopt is to use the first digit of the location number as a categorizer. The histogram in the previous section indicates that there exists roughly about 7 groups. Notice that the leftmost and rightmost columns seem to be outliers.
# Plot a histogram of species counts per location, then fit one linear
# regression (RM -> log(BAGSOLD)) per thousand-range location group and
# record each fitted model in `model_dict` for later prediction.

# Calculate the number of possible locations
location_set = set(TRAINING_DATA_GRAD["LOCATION"])
print("The number of possible location is {}.".format(len(location_set)))

# Repeat each location number once per species found there so plt.hist
# shows per-bucket species counts.
location_histogram_list = []
for location in sorted(location_set):
    amount = len(TRAINING_DATA_GRAD[TRAINING_DATA_GRAD["LOCATION"] == str(location)])
    for j in range(amount):
        location_histogram_list.append(int(location))
    # print("Location {} has {:>3} species".format(location, amount))

plt.title("Histogram of each location")
plt.xlabel("Location Number")
plt.ylabel("Amount")
plt.hist(location_histogram_list, bins=7, range=(0, 7000))
plt.savefig("location_histogram.png")
plt.show()

# Convert location column to numeric
TRAINING_DATA_GRAD["LOCATION"] = TRAINING_DATA_GRAD["LOCATION"].apply(pd.to_numeric)

# Separate training dataset into 7 groups keyed by the thousands digit.
dataByLocation = []
for i in range(7):
    dataByLocation.append(TRAINING_DATA_GRAD[(TRAINING_DATA_GRAD["LOCATION"] < ((i + 1) * 1000))
                                             & (TRAINING_DATA_GRAD["LOCATION"] >= (i * 1000))])

for i in range(len(dataByLocation)):
    data = dataByLocation[i]
    # FIX: the `np.float` alias was removed in NumPy 1.24; the builtin
    # `float` is the documented, behaviour-identical replacement.
    bagSold = np.log(np.asarray(data["BAGSOLD"]).reshape(-1, 1).astype(float))
    rm = np.asarray(data["RM"]).reshape(-1, 1).astype(float)

    # Linear Regression: one model per location group.
    regr = linear_model.LinearRegression()
    regr.fit(rm, bagSold)
    model_dict[i] = regr
    bagSold_prediction = regr.predict(rm)
    x_axis = np.arange(rm.min(), rm.max(), 0.01).reshape(-1, 1)

    # Find MSE (on the log scale)
    mse = sklearn.metrics.mean_squared_error(bagSold, bagSold_prediction)
    print(mse, np.sqrt(mse))
    detailed_output["number of data point on location {}xxx".format(i)] = len(data)
    detailed_output["MSE on location {}xxx log scale".format(i)] = mse

    plt.figure(figsize=(8, 8))
    # plt.subplot("{}".format(int(str(len(dataByLocation))+str(1)+str(i+1))))
    plt.title("Linear Regression RM vs. \nLog Bagsold on Location {}xxx".format(i))
    true_value = plt.plot(rm, bagSold, 'ro', label='True Value')
    regression_line = plt.plot(x_axis, regr.predict(x_axis), color="green")
    plt.legend(["true_value", "Regression Line\nMSE = {:e}".format(mse)])
    # plt.show()
    plt.xlim(rm.min(), rm.max())
    plt.savefig("location{}.png".format(i))
1.05328277023 1.0262956544 0.762338198505 0.873119807647 0.253807203837 0.503792818366 0.200542622706 0.447819855195 0.29628093697 0.544316945327 0.374676270693 0.612108054753 3.15544362088e-30 1.7763568394e-15
MIT
Average_RM_YIELD_location.ipynb
hmc-cs-nsuaysom/BigDataProject
Test with validation set
# Test with validation set: predict log(BAGSOLD) with the per-location models
# and report the mean squared error on the log scale.
TESTING_DATA_GRAD = TESTING_DATA_GRAD.reset_index(drop=True)
Xtest = np.column_stack((TESTING_DATA_GRAD["LOCATION"],
                         TESTING_DATA_GRAD["RM"],
                         TESTING_DATA_GRAD["YIELD"]))
# FIX: `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent.
ytest = TESTING_DATA_GRAD["BAGSOLD"].astype(float)
log_ytest = np.log(ytest)

ypredicted = []
for row in Xtest:
    location = row[0]
    rm_val = row[1]
    yield_val = row[2]  # NOTE(review): unused — the models were fit on RM only
    # Dispatch on the thousands digit (first character) of the location code.
    model = model_dict[int(location[0])]
    # NOTE(review): newer scikit-learn versions require a 2-D input here
    # (e.g. [[rm_val]]); kept as-is because the recorded run accepted a scalar.
    prediction = model.predict(rm_val)[0][0]
    ypredicted.append(prediction)
ypredicted = np.array(ypredicted)

# MSE error on the log scale
sklearn.metrics.mean_squared_error(log_ytest, ypredicted)
_____no_output_____
MIT
Average_RM_YIELD_location.ipynb
hmc-cs-nsuaysom/BigDataProject
Testing Ridge Reg vs. Linear RegBelow is not used. It's for testing the difference between Ridge Regression and Linear Regression. The result is that the MSE is almost the same
# Fit RM + YIELD -> log(BAGSOLD) with ordinary least squares, then with a
# heavily regularized Ridge model, to compare coefficients and MSE.
# FIX: the `np.float` alias was removed in NumPy 1.24; builtin `float` is
# the behaviour-identical replacement.
bagSold = np.log(np.asarray(TRAINING_DATA_GRAD["BAGSOLD"]).reshape(-1, 1).astype(float))
rm = np.asarray(TRAINING_DATA_GRAD["RM"]).reshape(-1, 1).astype(float)
yield_val = np.asarray(TRAINING_DATA_GRAD["YIELD"]).reshape(-1, 1).astype(float)
x = np.column_stack((rm, yield_val))

# Linear Regression
regr = linear_model.LinearRegression(fit_intercept=True)
regr.fit(x, bagSold)
bagSold_prediction = regr.predict(x)
print("Coefficients = {}".format(regr.coef_))
print("Intercept = {}".format(regr.intercept_))
# Find MSE
mse = sklearn.metrics.mean_squared_error(bagSold, bagSold_prediction)
print("MSE = {}".format(mse))

# Same features and target, but with L2 regularization (large alpha shrinks
# the coefficients toward zero).
bagSold = np.log(np.asarray(TRAINING_DATA_GRAD["BAGSOLD"]).reshape(-1, 1).astype(float))
rm = np.asarray(TRAINING_DATA_GRAD["RM"]).reshape(-1, 1).astype(float)
yield_val = np.asarray(TRAINING_DATA_GRAD["YIELD"]).reshape(-1, 1).astype(float)
x = np.column_stack((rm, yield_val))

# Ridge Regression
regr = linear_model.Ridge(alpha=20000)
regr.fit(x, bagSold)
bagSold_prediction = regr.predict(x)
print("Coefficients = {}".format(regr.coef_))
print("Intercept = {}".format(regr.intercept_))
# Find MSE
mse = sklearn.metrics.mean_squared_error(bagSold, bagSold_prediction)
print("MSE = {}".format(mse))
Coefficients = [[ 0.0062339 0.00430852]] Intercept = [ 13.11871673] MSE = 0.3483090550422536
MIT
Average_RM_YIELD_location.ipynb
hmc-cs-nsuaysom/BigDataProject
Analyze the range of values from the audio
from scipy.io import wavfile

wav_file = "../data/raw/dadosBruno/t02/" + \
"t02_S-ISCA_C1_Aedes female-20-07-2017_1_001_456.wav"
# Quick sanity read of one recording; the result is not used below.
wav = wavfile.read(wav_file)

# OPTIONAL: Load the "autoreload" extension so that code can change
%load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
%autoreload 2
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from joblib import dump, load
from src.data import make_dataset
from src.data import read_dataset
from src.data import util
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Set seed for reproducibility
np.random.seed(42)
from src.data.read_dataset import read_temperature

def make_temperatures(conversion, testing=False):
    """For each temperature t02..t07: split into train/val/test, fit a
    StandardScaler on the training samples, and save the scaler plus the
    three splits as compressed numpy arrays under data/interim/.

    `conversion` selects how the wavs were converted (e.g. "repeat"/"zero").
    NOTE(review): the `testing` parameter is currently unused.
    """
    # 11025 sample columns per wav, plus the class label column.
    num_cols = [x for x in range(11025)]
    save_cols = num_cols + ["label"]
    for i in range(2, 8):
        temperature = f"t0{i}"
        df = read_temperature(temperature, conversion)
        train_idx = df["training"] == 1
        # get train data
        train_data = df.loc[train_idx]
        test_data = df.loc[~train_idx]
        # Create validation
        train_data, val_data = train_test_split(train_data, test_size=0.2)
        # Train scaler (on training data only, to avoid leakage)
        scaler = StandardScaler()
        scaler.fit(train_data[num_cols])
        dump(scaler, f"../data/interim/scaler_{conversion}_{temperature}.pkl")
        # Save the data as compressed numpy arrays
        np.savez_compressed(f"../data/interim/all_wavs_{conversion}_{temperature}",
                            train=train_data[save_cols].astype(int),
                            val=val_data[save_cols].astype(int),
                            test=test_data[save_cols].astype(int))

make_temperatures("repeat")
make_temperatures("zero")
_____no_output_____
FTL
notebooks_tcc/0.3-BrunoGomesCoelho-Show-Normalization-Doesnt-Work.ipynb
BrunoGomesCoelho/mosquito-networking
Found existing installation: tables 3.4.4
Unistalling tables-3.4.4:
Successfully unistalled tables-3.4.4
Successfully installed tables-3.6.1

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

cd "/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car"

# Load the scraped car-offer dataset (HDF5) and take a first look.
df = pd.read_hdf('data/car.h5')
df.shape
df.columns.values

df['price_value'].hist(bins=100);
df['price_value'].describe()

# NOTE(review): `agg_funcs` uses a mutable default list; it is never mutated
# here, but consider a tuple default to follow best practice.
def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=[np.mean, np.median, np.size], feat_sort='mean', top=50, subplots=True):
    """Group `df` by `feat_groupby`, aggregate `feat_agg` with `agg_funcs`,
    keep the `top` rows sorted by `feat_sort` descending, and bar-plot them."""
    return (
        df
        .groupby(feat_groupby)[feat_agg]
        .agg(agg_funcs)
        .sort_values(by=feat_sort, ascending=False)
        .head(top)
    ).plot(kind='bar', figsize=(15, 5), subplots=subplots)

group_and_barplot('param_marka-pojazdu');
group_and_barplot('param_kraj-pochodzenia', feat_sort='size');
group_and_barplot('param_kolor', feat_sort='mean');
_____no_output_____
Apache-2.0
day2_visualisation.ipynb
Kamila86/dw_matrix
็งปๅ‹•ใƒญใƒœใƒƒใƒˆใฎ็Šถๆ…‹้ท็งป๏ผˆใƒŽใ‚คใ‚บใ‚ใ‚Š๏ผ‰ๅƒ่‘‰ๅทฅๆฅญๅคงๅญฆ ไธŠ็”ฐ ้š†ไธ€(c) 2017 Ryuichi UedaThis software is released under the MIT License, see LICENSE. ใฏใ˜ใ‚ใซใ“ใฎใ‚ณใƒผใƒ‰ใฏใ€็งปๅ‹•ใƒญใƒœใƒƒใƒˆใฎ็งปๅ‹•ใฎ็ฐกๅ˜ใชใƒขใƒ‡ใƒซใงใ™ใ€‚
%matplotlib inline import numpy as np import math, random import matplotlib.pyplot as plt # for plotting data
_____no_output_____
MIT
state_equations/with_noise.ipynb
ryuichiueda/probrobo_practice
ใฐใ‚‰ใคใใคใŽใฎใƒซใƒผใƒซใซใ—ใŸใŒใฃใฆใƒญใƒœใƒƒใƒˆใฎ็งปๅ‹•้‡ใซ้›‘้Ÿณใ‚’ๆททๅ…ฅใ—ใฆใฟใ‚‹ใ€‚* ๆŒ‡ไปคๅ€คใ‚’ไธญๅฟƒใซใ€ใ‚ฌใ‚ฆใ‚นๅˆ†ๅธƒใซใ—ใŸใŒใฃใฆๆฌกใฎใ‚ˆใ†ใซใฐใ‚‰ใคใ * ๅ‰้€ฒใ™ใ‚‹ใจใ * ่ท้›ขใŒ่ท้›ขใซๅฏพใ—ใฆ10%ใฎๆจ™ๆบ–ๅๅทฎใงใฐใ‚‰ใคใ * ๅ‘ใใŒๆจ™ๆบ–ๅๅทฎ3[deg]ใงใฐใ‚‰ใคใ * ๅ‘ใใ‚’ๅค‰ใˆใ‚‹ใจใ * ๅค‰ใˆใ‚‹ๅ‘ใใฎ่ง’ๅบฆใซๅฏพใ—ใฆ10%ใฎๆจ™ๆบ–ๅๅทฎใงใฐใ‚‰ใคใ ็Šถๆ…‹ๆ–น็จ‹ๅผใซๅฏพๅฟœใ™ใ‚‹้–ขๆ•ฐไธ‹ใฎไพ‹ใซใคใ„ใฆใ€้–ขๆ•ฐfใ‚’ๆ›ธใใพใ—ใ‚‡ใ†ใ€‚ ```pythonold_x = np.array([0,0,0]) ไปŠๅ›žใฏไธ่ฆใ ใŒnumpyใ‚’ไฝฟ็”จu = np.array([0.1,10/180*math.pi]) ๆฏŽๅ›ž0.1ใ ใ‘้€ฒใ‚ใฆ10[deg]ๅ‘ใใ‚’ๅค‰ใˆใ‚‹def f(x_old,u): ใชใซใ‹ใ‚ณใƒผใƒ‰ใ‚’ๆ›ธใ return x_new``` ่งฃ็ญ”ไพ‹
def f(x_old, u):
    """State-transition function of the mobile robot with motion noise.

    x_old -- current pose as np.array([x, y, theta])
    u     -- command as np.array([forward_distance, rotation])

    The executed motion deviates from the command: the forward distance and
    the rotation each get Gaussian noise with a standard deviation of 10% of
    the commanded value, and the heading of the translation gets Gaussian
    noise with a standard deviation of 3 degrees.
    Returns the new pose as np.array([x, y, theta]).
    """
    x, y, theta = x_old
    fw, rot = u
    # Draw the noisy actual motion. The order of the three random draws is
    # kept fixed so seeded runs stay reproducible.
    noisy_fw = random.gauss(fw, fw / 10)
    heading_error = random.gauss(0.0, math.pi / 180.0 * 3.0)
    noisy_rot = random.gauss(rot, rot / 10)
    new_x = x + noisy_fw * math.cos(theta + heading_error)
    new_y = y + noisy_fw * math.sin(theta + heading_error)
    new_theta = theta + noisy_rot
    return np.array([new_x, new_y, new_theta])
_____no_output_____
MIT
state_equations/with_noise.ipynb
ryuichiueda/probrobo_practice
ๅฎŸ่กŒ๏ผ10ใ‚นใƒ†ใƒƒใƒ—ๅ‹•่ฒธใ—ใฆใฟใพใ—ใ‚‡ใ†ใ€‚
# Run the noisy motion model for 10 steps and print the pose after each step.
x = np.array([0,0,0])
u = np.array([0.1,10/180*math.pi]) # advance 0.1 and turn 10[deg] every step
print(x)
for i in range(10):
    x = f(x,u)
    print(x)
[0 0 0] [ 0.11101391 0.01316534 0.20062588] [ 0.21667561 0.03772248 0.36210264] [ 0.30714191 0.06797028 0.53692128] [ 0.38194774 0.11808143 0.72018441] [ 0.45733798 0.17144363 0.89551629] [ 0.5062014 0.23616994 1.07167283] [ 0.55316268 0.32808052 1.24928927] [ 0.59204397 0.42298333 1.41256331] [ 0.61068586 0.52827197 1.60383144] [ 0.60807198 0.62990338 1.78279156]
MIT
state_equations/with_noise.ipynb
ryuichiueda/probrobo_practice
ใ‚ใ‹ใ‚‰ใ‚“ใฎใงๆ็”ป
# Simulate 10 noisy steps, then draw the trajectory as heading arrows.
x = np.array([0,0,0])
u = np.array([0.1,10/180*math.pi]) # advance 0.1 and turn 10[deg] every step
path = [x]
for i in range(10):
    x = f(x,u)
    path.append(x)

fig = plt.figure(i,figsize=(8, 8))
sp = fig.add_subplot(111, aspect='equal')
sp.set_xlim(-1.0,1.0)
sp.set_ylim(-0.5,1.5)

# Arrow positions are the poses; arrow directions are the unit heading vectors.
xs = [e[0] for e in path]
ys = [e[1] for e in path]
vxs = [math.cos(e[2]) for e in path]
vys = [math.sin(e[2]) for e in path]
plt.quiver(xs,ys,vxs,vys,color="red",label="actual robot motion")
_____no_output_____
MIT
state_equations/with_noise.ipynb
ryuichiueda/probrobo_practice