kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
3,804,937
def pre_process_pca(features,test): Data = np.concatenate(( features,test)) Data = PCA(n_components=.95 ).fit_transform(Data) return Data[0:900,:],Data[900:1340,:]<normalization>
all_df[all_df['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
3,804,937
def pre_process_manifold(features,test): n_neighbors = 15 Data = np.concatenate(( features, test)) Data = manifold.Isomap(n_neighbors, n_components=15 ).fit_transform(Data) return Data[0:900,:],Data[900:1340,:]<import_modules>
sort_fare_features = ['Sex', 'Pclass', 'Title', 'Deck', 'Embarked', 'Alone', 'Age'] sorted_df = all_df.sort_values(by=sort_embarked_features) aux = sorted_df[(sorted_df['Alone'] == 1)& (sorted_df['Sex'] == 'male')& (sorted_df['Pclass'] == 1)& (sorted_df['Embarked'] == 'S')& (sorted_df['Deck'] == 'N')& (sorted_df['Title'] == 'Mr')& (( sorted_df['Age'] >= 55)&(sorted_df['Age'] <= 65)) ] aux
Titanic - Machine Learning from Disaster
3,804,937
from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn import svm from sklearn import ensemble from sklearn import naive_bayes from sklearn import neural_network from sklearn import neighbors from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.metrics import accuracy_score<load_from_csv>
all_df['Fare'].fillna(value=aux['Fare'].mean() , inplace=True )
Titanic - Machine Learning from Disaster
3,804,937
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )<define_variables>
def impute_num(cols, avg, std): try: avg_value = avg.loc[tuple(cols)][0] except Exception as e: print(f'It is not possible to find an average value for this combination of features values: {cols}') return np.nan try: std_value = std.loc[tuple(cols)][0] except Exception as e: std_value = 0 finally: if pd.isnull(std_value): std_value = 0 while True: value = np.random.randint(avg_value-std_value, avg_value+std_value+1) if value >= 0: break return round(value, 0 )
Titanic - Machine Learning from Disaster
3,804,937
labels = train.TARGET_5Yrs features = train.iloc[:,0:21] <categorify>
group_age_features = ['Title','Relatives','Parch','SibSp','Deck','Pclass','Embarked','Sex','Alone']
Titanic - Machine Learning from Disaster
3,804,937
features = pre_process(features) test = pre_process(test )<choose_model_class>
stat_age = all_df.pivot_table(values='Age', index=group_age_features, aggfunc=['mean','std'] ).round(2 )
Titanic - Machine Learning from Disaster
3,804,937
classifier1 = neural_network.MLPClassifier(hidden_layer_sizes=(200,10),random_state=0) classifier2 = naive_bayes.GaussianNB() classifier3 =neighbors.KNeighborsClassifier(n_neighbors=1) classifier4= svm.NuSVC(kernel='rbf',random_state=0) classifier5 = neural_network.MLPClassifier(hidden_layer_sizes=(20,5),random_state=0) classifier6 = svm.SVC(kernel='rbf', C=.5,random_state=0) classifier7 = ensemble.AdaBoostClassifier(n_estimators=60,learning_rate=.3,random_state=0) classifier8 = ensemble.GradientBoostingClassifier(n_estimators=20,max_depth=30,random_state=0) classifier9 = ensemble.RandomForestClassifier(n_estimators=60,random_state=0) classifier10 = ensemble.ExtraTreesClassifier(n_estimators=10,random_state=0) classifier11 = ensemble.BaggingClassifier(n_estimators=10,random_state=0) classifier13 = neighbors.KNeighborsClassifier(n_neighbors=3) classifier14 = QuadraticDiscriminantAnalysis()<train_model>
comp = pd.DataFrame([all_df[~all_df['Age'].isnull() ]['Age'], ages2, ages1] ).T comp.columns = ['Real Age', 'Predicted Age $(\mu=\mu_{Age}$, $\sigma=\sigma_{Age})$', 'Predicted Age $(\mu=\mu_{Age}$, $\sigma=0)$'] comp.head(10 )
Titanic - Machine Learning from Disaster
3,804,937
classifier12 = ensemble.IsolationForest(n_estimators=100,bootstrap="true",contamination=0.1,random_state=0) classifier12.fit(features, labels) result12 = classifier12.predict(test) outlier = classifier12.predict(features) delfeatures = np.where(outlier == -1 )<feature_engineering>
pd.DataFrame(data=[mean_squared_error(all_df[~all_df['Age'].isnull() ]['Age'], ages1), mean_squared_error(all_df[~all_df['Age'].isnull() ]['Age'], ages2)], index = ['$\mu=\mu_{Age}, \sigma=0$', '$\mu=\mu_{Age}, \sigma=\sigma_{Age}$'], columns=['Mean Square Error'] ).round(1 )
Titanic - Machine Learning from Disaster
3,804,937
for i in np.int32(delfeatures): features[i,:] = np.sqrt(features[i,:] )<choose_model_class>
group_features = ['Title','Pclass','Embarked','Sex']
Titanic - Machine Learning from Disaster
3,804,937
classifier = ensemble.VotingClassifier\ (estimators=[('mlp',classifier1),('svm',classifier4),('knn3',classifier13), ('knn1',classifier3),('nb',classifier2),('mlpmini', classifier5), ('svm.5',classifier6),('ada',classifier7),('gb',classifier8), ('rf', classifier9),('et',classifier10),('bag',classifier11), ('quad',classifier14)], voting='hard',flatten_transform='true', weights=[2,2,1,1,1,1,1,1,1,1,1,1,1] ) classifier.fit(features, labels) result = classifier.predict(test) result[result==-1]=0<compute_train_metric>
stat_age = all_df.pivot_table(values='Age', index=group_features, aggfunc=['mean','std'] ).round(2 )
Titanic - Machine Learning from Disaster
3,804,937
scores = cross_val_score(classifier, features, labels, cv=10,scoring='f1') classification_reports(classifier,features,labels) print("Accuracy: %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print(scores) <save_to_csv>
def sort_remap(crosstab_df, key): alives = list(crosstab_df[crosstab_df['Survived']==1][key]) deads = list(crosstab_df[crosstab_df['Survived']==0][key]) alives = list(set(deads)- set(alives)) + alives sorted_map = {key:value for value, key in enumerate(alives)} return sorted_map
Titanic - Machine Learning from Disaster
3,804,937
cols = { 'PlayerID': [i+901 for i in range(440)] , 'TARGET_5Yrs': result } submission = pd.DataFrame(cols) submission.to_csv("subm.csv", index=False )<import_modules>
kdis = KBinsDiscretizer(n_bins=8, encode='ordinal', strategy='uniform' ).fit(all_df[['Age', 'Fare']] )
Titanic - Machine Learning from Disaster
3,804,937
from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import Imputer from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import StandardScaler from statsmodels.compat import pandas as pd import pandas as pd from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn import svm, preprocessing from sklearn.decomposition import PCA<prepare_x_and_y>
cat_age_fare = kdis.transform(all_df[['Age', 'Fare']] )
Titanic - Machine Learning from Disaster
3,804,937
test = pd.read_csv(".. /input/test.csv") test_x = test.iloc[:, 2:].values imp = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0) imp=imp.fit(test_x) test_x = imp.transform(test_x) dataset = pd.read_csv(".. /input/train.csv") print(dataset.info()) X = dataset.iloc[:, 2:-1].values imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0) input_dim = X.shape[1] imputer = imputer.fit(X) X = imputer.transform(X) y = dataset.iloc[:, 21].values sc = StandardScaler() X_train = sc.fit_transform(X) x_test = sc.transform(test_x) Xtrain = preprocessing.normalize(X_train, norm='l2') Xtest = preprocessing.normalize(x_test, norm='l2') print(X) print(y) print(test_x )<save_to_csv>
all_df['Cat_Age'] = cat_age_fare[:,0].astype('int') all_df['Cat_Fare'] = cat_age_fare[:,1].astype('int' )
Titanic - Machine Learning from Disaster
3,804,937
cols = { 'PlayerID': [i+901 for i in range(440)] , 'TARGET_5Yrs': [eclf1.predict([test_x[i]])[0] for i in range(440)] } submission = pd.DataFrame(cols) print(submission) submission.to_csv("submission1.csv", index=False )<import_modules>
features_on_off = {'PassengerId':False, 'Survived':False, 'Pclass':True, 'Name':False, 'Sex':True, 'Age':False, 'SibSp':True, 'Parch':True, 'Ticket':False, 'Fare':False, 'Cabin':False, 'Embarked':True, 'Title':True, 'Deck':True, 'Alone':True, 'Relatives':True, 'Cat_Age':True, 'Cat_Fare':True}
Titanic - Machine Learning from Disaster
3,804,937
import numpy as np import pandas as pd<load_from_csv>
poly = PolynomialFeatures(degree=2, include_bias=False ).fit(all_df[features_on]) poly_features = poly.transform(all_df[features_on]) poly_df = pd.DataFrame(data=poly_features, columns=poly.get_feature_names(features_on))
Titanic - Machine Learning from Disaster
3,804,937
df = pd.read_csv('/kaggle/input/data-driven-business-analytics/train.csv', index_col=0) df.head()<groupby>
std_scaler = StandardScaler().fit(poly_df) scaled_features = std_scaler.transform(poly_df) scaled_df = pd.DataFrame(data=scaled_features, columns=poly_df.columns )
Titanic - Machine Learning from Disaster
3,804,937
mean_price = df['Preis'].mean() mean_price<load_from_csv>
new_df = scaled_df.copy() new_df['Survived'] = all_df['Survived']
Titanic - Machine Learning from Disaster
3,804,937
df_test = pd.read_csv('/kaggle/input/data-driven-business-analytics/test.csv', index_col=0) df_test.head()<predict_on_test>
X_train = scaled_df.loc[range(train_df.shape[0])] y_train = all_df.loc[range(train_df.shape[0]), 'Survived'] X_test = scaled_df.loc[range(train_df.shape[0], train_df.shape[0]+test_df.shape[0])] y_test = pd.read_csv('.. /input/true-labels/submission_true.csv')['Survived']
Titanic - Machine Learning from Disaster
3,804,937
predictions = [mean_price] * 322 predictions[:5]<feature_engineering>
log_reg = LogisticRegressionCV(Cs=1000, cv=5, refit=True, random_state=101, max_iter=200 ).fit(X_train, y_train )
Titanic - Machine Learning from Disaster
3,804,937
df_test['Preis'] = predictions df_test.head()<save_to_csv>
print(f'Accuracy on the training set: {100*log_reg.score(X_train, y_train):.1f}%' )
Titanic - Machine Learning from Disaster
3,804,937
df_submission.to_csv('./submission.csv', index=False )<load_from_csv>
print(f'Accuracy on the test set: {100*log_reg.score(X_test, y_test):.1f}%' )
Titanic - Machine Learning from Disaster
3,804,937
train_data = pd.read_csv("/kaggle/input/figure-out-the-formula-dogfooding/train.csv") train_data.head()<load_from_csv>
p = np.linspace(0.01, 1, 50) error_rate = {'train':[], 'test':[]} for p_value in p: prob = log_reg.predict_proba(X_train) y_pred = np.apply_along_axis(lambda pair: 1 if pair[1] > p_value else 0, 1, prob) error_rate['train'].append(mean_squared_error(y_train, y_pred)) prob = log_reg.predict_proba(X_test) y_pred = np.apply_along_axis(lambda pair: 1 if pair[1] > p_value else 0, 1, prob) error_rate['test'].append(mean_squared_error(y_test, y_pred))
Titanic - Machine Learning from Disaster
3,804,937
test_data = pd.read_csv("/kaggle/input/figure-out-the-formula-dogfooding/test.csv") test_data.head()<prepare_x_and_y>
best_p = p[np.array(error_rate['test'] ).argmin() ]
Titanic - Machine Learning from Disaster
3,804,937
knn = KNeighborsRegressor(n_neighbors=3) train_data = train_data.drop("Id", axis=1) x = train_data.drop("Expected", axis=1) y = train_data["Expected"] knn.fit(x, y) print("Done training" )<predict_on_test>
print(f'In this particular case, the best probability limit is around {best_p:.2}.This means that if the model provides a survival value greater than {best_p:.2}, we accept that the passenger will survive or die otherwise.' )
Titanic - Machine Learning from Disaster
3,804,937
solutions = [] for index, row in test_data.iterrows() : row_id = row["Id"] variables = row["a":"h"] prediction = knn.predict([variables]) print("Prediction is " + str(prediction)) solutions.append([row_id, prediction[0]]) solutions<save_to_csv>
prob = log_reg.predict_proba(X_train) y_pred_train = np.apply_along_axis(lambda pair: 1 if pair[1] > best_p else 0, 1, prob) print(f'Accuracy on the training set with the best choice of p: {100*np.mean(y_train == y_pred_train):.1f}%' )
Titanic - Machine Learning from Disaster
3,804,937
submission = pd.DataFrame(solutions, columns=["Id", "Expected"]) submission.to_csv("submission.csv", index=False )<set_options>
prob = log_reg.predict_proba(X_test) y_pred_test = np.apply_along_axis(lambda pair: 1 if pair[1] > best_p else 0, 1, prob) print(f'Accuracy on the testing set with the best choice of p: {100*np.mean(y_pred_test == y_test):.1f}%' )
Titanic - Machine Learning from Disaster
3,804,937
%matplotlib inline<load_from_csv>
print(classification_report(y_pred_train, y_train))
Titanic - Machine Learning from Disaster
3,804,937
df = pd.read_csv('.. /input/train.csv') df.head()<load_from_csv>
print(classification_report(y_pred_test, y_test))
Titanic - Machine Learning from Disaster
3,804,937
class ImageDataset(Dataset): def __init__(self, csv_file, root_dir, transform=None): self.df = pd.read_csv(csv_file) self.dir = root_dir self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, index): img_path = os.path.join(self.dir, self.df['Id'][index]) image = Image.open(img_path ).convert('RGB') label = np.array(self.df['Category'][index]) if self.transform: image = self.transform(image) sample =(image, label) return sample <train_model>
perm = PermutationImportance(log_reg ).fit(X_train, y_train) show_weights(perm, feature_names = list(X_train.columns))
Titanic - Machine Learning from Disaster
3,804,937
gpu = torch.cuda.is_available() if gpu: print("CUDA is available.Training on GPU") else: print("CUDA unavailable.Training on CPU" )<choose_model_class>
weights = pd.DataFrame(data=log_reg.coef_[0], index=scaled_df.columns, columns=['Weights']) weights.loc['Intercept'] = log_reg.intercept_ weights.sort_values('Weights', ascending=False, inplace=True) weights.head(15 )
Titanic - Machine Learning from Disaster
3,804,937
model = models.vgg19(pretrained=True) model.classifier = nn.Sequential(nn.Linear(512 * 7 * 7, 4096), nn.LeakyReLU() , nn.Dropout() , nn.Linear(4096, 512), nn.LeakyReLU() , nn.Dropout() , nn.Linear(512, 67), ) model.cuda() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters() , lr=0.001, momentum=0.9) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1 )<train_model>
output = pd.DataFrame({'PassengerId': test_df['PassengerId'], 'Survived': y_pred_test.astype('int')} )
Titanic - Machine Learning from Disaster
3,804,937
<train_model><EOS>
output.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
11,514,849
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import kurtosis from scipy.stats import skew
Titanic - Machine Learning from Disaster
11,514,849
testset = ImageDataset(csv_file='.. /input/sample_sub.csv', root_dir='.. /input/data/data/', transform=transform) testloader = DataLoader(testset, batch_size=32, num_workers=4, shuffle=False )<load_from_csv>
plt.rcParams['figure.figsize'] =(8,5) plt.style.use("ggplot" )
Titanic - Machine Learning from Disaster
11,514,849
submission = pd.read_csv('.. /input/sample_sub.csv') submission.head()<categorify>
train=pd.read_csv(".. /input/titanic/train.csv") test=pd.read_csv(".. /input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
11,514,849
predictions = [] for inputs, labels in testloader: if gpu: inputs = Variable(inputs.cuda()) else: inputs = Variable(inputs) outputs = model(inputs) _, predicted = torch.max(outputs.data, 1) for i in range(len(predicted)) : predictions.append(int(predicted[i])) submission['Category'] = predictions<save_to_csv>
train.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
submission.to_csv('VGG19_submission1.csv', index=False, encoding='utf-8' )<set_options>
test.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
%matplotlib inline<load_from_csv>
train.drop("Cabin",axis=1,inplace=True) test.drop("Cabin",axis=1,inplace=True )
Titanic - Machine Learning from Disaster
11,514,849
df = pd.read_csv('.. /input/train.csv') df.head()<load_from_csv>
df=pd.DataFrame(train.groupby(["Pclass","Sex"] ).sum().Survived) df.reset_index(inplace=True) df
Titanic - Machine Learning from Disaster
11,514,849
class ImageDataset(Dataset): def __init__(self, csv_file, root_dir, transform=None): self.data_frame = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.data_frame) def __getitem__(self, idx): img_name = os.path.join(self.root_dir, self.data_frame['Id'][idx]) image = Image.open(img_name ).convert('RGB') label = np.array(self.data_frame['Category'][idx]) if self.transform: image = self.transform(image) sample =(image, label) return sample<load_from_csv>
train["age_category"]="" for i in range(891): if train.loc[i,"Age"]<1: train.loc[i,"age_category"]="Infants" elif 1<=train.loc[i,"Age"]<18: train.loc[i,"age_category"]="Children" elif 18<=train.loc[i,"Age"]<65: train.loc[i,"age_category"]="Adults" elif train.loc[i,"Age"]>=65: train.loc[i,"age_category"]="Elders" else: train.loc[i,"age_category"]="Unknown" train.head()
Titanic - Machine Learning from Disaster
11,514,849
transform = transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor() ]) trainset = ImageDataset(csv_file = '.. /input/train.csv', root_dir = '.. /input/data/data/', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=0 )<define_variables>
df_2=pd.DataFrame(train.groupby(["age_category",] ).sum().Survived) df_2["Total"]=train["age_category"].value_counts() df_2.reset_index(inplace=True) df_2
Titanic - Machine Learning from Disaster
11,514,849
for i in range(len(trainset)) : sample = trainset[i] print(i, sample[0].size() , " | Label: ", sample[1]) if i == 9: break<train_model>
df_3=train.groupby("SibSp" ).sum().Survived df_3
Titanic - Machine Learning from Disaster
11,514,849
train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('CUDA is not available.Training on CPU...') else: print('CUDA is available! Training on GPU...' )<choose_model_class>
df_4=train.groupby("Parch" ).sum().Survived df_4
Titanic - Machine Learning from Disaster
11,514,849
model = models.resnext101_32x8d(pretrained=True )<find_best_params>
data={"Class":["Class1","Class2","Class3"], "Passengers":[train[train.Pclass==1].shape[0],train[train.Pclass==2].shape[0],train[train.Pclass==3].shape[0]]}
Titanic - Machine Learning from Disaster
11,514,849
for param in model.parameters() : param.requires_grad = True for param in model.parameters() : param.requires_grad = True for name, child in model.named_children() : if name in ['layer3', 'layer4', 'fc']: print(name + ' is unfrozen') for param in child.parameters() : param.requires_grad = True else: print(name + ' is frozen') for param in child.parameters() : param.requires_grad = False<choose_model_class>
data={"Class":["Class1","Class2","Class3"], "Passengers":[train[train.Pclass==1].shape[0],train[train.Pclass==2].shape[0],train[train.Pclass==3].shape[0]]} Class=pd.DataFrame(data,columns=["Class","Passengers"]) Class
Titanic - Machine Learning from Disaster
11,514,849
model.fc = nn.Sequential( nn.Linear(2048, 4096), nn.ReLU() , nn.Linear(4096, 256), nn.ReLU() , nn.Linear(256, 67), nn.LogSoftmax(dim=1))<choose_model_class>
train["status"]="" for i in range(891): if "Mr." in(train.Name[i].split()): train.loc[i,"status"]="Mister" elif "Miss." in(train.Name[i].split()): train.loc[i,"status"]="Miss" elif "Mrs." in(train.Name[i].split()): train.loc[i,"status"]="Mistress" elif "Rev." in(train.Name[i].split()): train.loc[i,"status"]="Reverend" elif "Master." in(train.Name[i].split()): train.loc[i,"status"]="Master" elif "Mlle." in(train.Name[i].split()): train.loc[i,"status"]="Mademoiselle" elif "Dr." in(train.Name[i].split()): train.loc[i,"status"]="Doctor" else: train.loc[i,"status"]="Other"
Titanic - Machine Learning from Disaster
11,514,849
criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.0006, momentum=0.9) <train_on_grid>
train.status.value_counts()
Titanic - Machine Learning from Disaster
11,514,849
epochs=20 for e in range(epochs): running_loss = 0 corr = torch.tensor(( 0), dtype=torch.int64) for images, labels in trainloader: images = images.cuda() labels=labels.cuda() optimizer.zero_grad() output = model(images) _, out = torch.max(output, 1) loss = criterion(output, labels) loss.backward() corr+=sum(out==labels) optimizer.step() running_loss += loss.item() else: print("Epoch {} - Training loss: {}".format(e, running_loss/len(trainloader))) print(corr,"/10934") <train_model>
train[train.status=='Other']
Titanic - Machine Learning from Disaster
11,514,849
model.eval() <load_from_csv>
print(train.Sex.value_counts()) print("{:.2f} % of people were males".format(( train.Sex.value_counts() [0]/891)*100)) print("{:.2f} % of people were females".format(( train.Sex.value_counts() [1]/891)*100))
Titanic - Machine Learning from Disaster
11,514,849
submission = pd.read_csv('.. /input/sample_sub.csv') submission.head()<load_from_csv>
print(train.groupby("Sex" ).sum().Survived) print("{:.2f} % of females survived".format(( train.groupby("Sex" ).sum().Survived[0]/314)*100)) print("{:.2f} % of males survived".format(( train.groupby("Sex" ).sum().Survived[1]/577)*100))
Titanic - Machine Learning from Disaster
11,514,849
testset = ImageDataset(csv_file = '.. /input/sample_sub.csv', root_dir = '.. /input/data/data/', transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False, num_workers=0 )<prepare_output>
print("Skewness of Age distribution is {:.2f}".format(skew(train.dropna().Age))) print("Kurtosis of Age distribution is {:.2f}".format(kurtosis(train.dropna().Age)) )
Titanic - Machine Learning from Disaster
11,514,849
predictions=[] for data, target in testloader: if train_on_gpu: data, target = data.cuda() , target.cuda() output = model(data) _, pred = torch.max(output, 1) for i in range(len(pred)) : predictions.append(int(pred[i])) <prepare_output>
train.SibSp.value_counts()
Titanic - Machine Learning from Disaster
11,514,849
submission['Category'] = predictions<save_to_csv>
train.Parch.value_counts()
Titanic - Machine Learning from Disaster
11,514,849
submission.to_csv('submission1.csv', index=False, encoding='utf-8') <prepare_x_and_y>
train.drop("Ticket",axis=1,inplace=True) train.head()
Titanic - Machine Learning from Disaster
11,514,849
with open(".. /input/train.json")as f: data = json.load(f) X = [] Y = [] for element in data: img = cv2.resize(np.array(Image.open(".. /input/train/train/" + element["filename"])) ,(64,64)) X.append(np.array(img/255)) Y.append(element["class"]) y_train = np.array(to_categorical(Y)) x_train = np.array(X) base_model = ResNet50(weights='imagenet', include_top=False) x = base_model.output x = GlobalAveragePooling2D()(x) x = Dense(1024, activation='relu' )(x) predictions = Dense(29, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=["accuracy"]) hist = model.fit(x_train, y_train, batch_size=32, validation_split=0.2, epochs=10) <load_from_csv>
train.groupby("Pclass" ).mean().Fare
Titanic - Machine Learning from Disaster
11,514,849
with open(".. /input/sandbox.csv")as f: data = f.readlines() content = [x.strip() for x in data] just_names = [x.split(',')[0] for x in content[1:]]<predict_on_test>
train.Embarked.value_counts()
Titanic - Machine Learning from Disaster
11,514,849
results = [] for c in tqdm(just_names): img = Image.open(".. /input/test/test/" + c) img = np.array(img,dtype=np.float32)/ 255 res = model.predict(np.array([img])) results.append(np.argmax(res[0]))<save_to_csv>
embarked=pd.DataFrame(train[["Pclass","Embarked"]].value_counts(sort=False)) embarked.reset_index(inplace=True) embarked.columns=["Class","Embarked","No.of passengers"] embarked
Titanic - Machine Learning from Disaster
11,514,849
with open("submission.csv","w")as f: f.write("ID,Class"+' ') for ID,Class in zip(just_names,results): f.write(ID + "," + str(Class)+ " " )<import_modules>
train[(train.status=="Master")&(train.Age.isna())]
Titanic - Machine Learning from Disaster
11,514,849
import numpy as np import pandas as pd import skimage.io from skimage.transform import resize import matplotlib.pyplot as plt from tqdm import tqdm import os<load_from_csv>
mean_age_children=round(train[(train["age_category"]=="Children")|(train["age_category"]=="Infants")].Age.mean()) mean_age_children
Titanic - Machine Learning from Disaster
11,514,849
path_to_train = '.. /input/tl-signs-hse-itmo-2020-winter/train/train/' data = pd.read_csv('.. /input/tl-signs-hse-itmo-2020-winter/train.csv') train_dataset_info = [] for name, label in zip(data['filename'], data['class_number']): train_dataset_info.append({ 'path':os.path.join(path_to_train, name), 'label':int(label)- 1}) train_dataset_info = np.array(train_dataset_info )<train_model>
train[(train.status=="Master")&(train.Age.isna())]=train[(train.status=="Master")&(train.Age.isna())].fillna(mean_age_children) train[train.status=="Master"].Age.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
class data_generator: def create_train(dataset_info, batch_size, shape): while True: random_indexes = np.random.choice(len(dataset_info), batch_size) batch_images = np.empty(( batch_size, shape[0], shape[1], shape[2])) batch_labels = np.zeros(( batch_size, 66)) for i, idx in enumerate(random_indexes): image = data_generator.load_image( dataset_info[idx]['path'], shape) batch_images[i] = image batch_labels[i][dataset_info[idx]['label']] = 1 yield batch_images, batch_labels def load_image(path, shape): image = skimage.io.imread(path) image = resize(image,(shape[0], shape[1]), mode='reflect') return image<train_model>
train[(train.status=="Mistress")&(train.Age.isna())]
Titanic - Machine Learning from Disaster
11,514,849
train_datagen = data_generator.create_train( train_dataset_info, 5,(48, 48, 3))<choose_model_class>
mean_age_mistress=round(train[train.status=="Mistress"].Age.mean()) mean_age_mistress
Titanic - Machine Learning from Disaster
11,514,849
def create_model(input_shape, n_out): model = Sequential([ Conv2D(32,(3,3), padding='same', activation=tf.nn.relu, input_shape=(48, 48, 3)) , MaxPooling2D(( 2, 2), strides=2), Conv2D(64,(3,3), padding='same', activation=tf.nn.relu), MaxPooling2D(( 2, 2), strides=2), Flatten() , Dense(128, activation=tf.nn.relu), Dense(66, activation=tf.nn.softmax) ]) return model<choose_model_class>
train[(train.status=="Mistress")&(train.Age.isna())]=train[(train.status=="Mistress")&(train.Age.isna())].fillna(mean_age_mistress) train[train.status=="Mistress"].Age.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
keras.backend.clear_session() model = create_model( input_shape=(48, 48, 3), n_out=66) model.compile( loss='categorical_crossentropy', optimizer=Adam(1e-04), metrics=['acc']) model.summary()<train_model>
train[(train.status=="Mister")&(train.Age.isna())]
Titanic - Machine Learning from Disaster
11,514,849
epochs = 100; batch_size = 16 checkpointer = ModelCheckpoint( '.. /working/InceptionResNetV2.model', verbose=2, save_best_only=True) np.random.seed(2018) indexes = np.arange(train_dataset_info.shape[0]) np.random.shuffle(indexes) train_indexes = indexes[:20000] valid_indexes = indexes[20000:] train_generator = data_generator.create_train( train_dataset_info[train_indexes], batch_size,(48, 48, 3)) validation_generator = data_generator.create_train( train_dataset_info[valid_indexes], 100,(48, 48, 3)) history = model.fit_generator( train_generator, steps_per_epoch=100, validation_data=next(validation_generator), epochs=epochs, verbose=1, callbacks=[checkpointer] )<load_from_csv>
mean_age_mister=round(train[train.status=="Mister"].Age.mean()) mean_age_mister
Titanic - Machine Learning from Disaster
11,514,849
submit = pd.read_csv('.. /input/tl-signs-hse-itmo-2020-winter/sample_submission.csv') predicted = [] for name in tqdm(submit['filename']): path = os.path.join('.. /input/tl-signs-hse-itmo-2020-winter/test/test', name) image = data_generator.load_image(path,(48, 48, 3)) score_predict = model.predict(image[np.newaxis])[0] label_predict = np.argmax(score_predict) str_predict_label = str(label_predict + 1) predicted.append(str_predict_label )<save_to_csv>
train[(train.status=="Mister")&(train.Age.isna())]=train[(train.status=="Mister")&(train.Age.isna())].fillna(mean_age_mister) train[train.status=="Mister"].Age.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
submit['class_number'] = predicted submit.to_csv('submission_neural.csv', index=False )<load_from_csv>
for i in range(888): if train.loc[i,"Age"]<1: train.loc[i,"age_category"]="Infants" elif 1<=train.loc[i,"Age"]<18: train.loc[i,"age_category"]="Children" elif 18<=train.loc[i,"Age"]<65: train.loc[i,"age_category"]="Adults" elif train.loc[i,"Age"]>=65: train.loc[i,"age_category"]="Elders" else: train.loc[i,"age_category"]="Unknown"
Titanic - Machine Learning from Disaster
11,514,849
trainDF = pd.read_csv('.. /input/train.csv') trainDF.head()<split>
print(train.Age.isna().sum()) train[train.Age.isna() ]
Titanic - Machine Learning from Disaster
11,514,849
train_x, val_x, train_y, val_y = train_test_split(trainDF['review'], trainDF['sentiment'], test_size=0.1, shuffle=True )<feature_engineering>
mean_age_maleAdult=round(train[(train["age_category"]=="Adults")&(train.Sex=="male")].Age.mean()) mean_age_maleAdult
Titanic - Machine Learning from Disaster
11,514,849
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=25000) tfidf_vect.fit(trainDF['review']) xtrain_tfidf = tfidf_vect.transform(train_x) xval_tfidf = tfidf_vect.transform(val_x )<categorify>
mean_age_femaleAdult=round(train[(train["age_category"]=="Adults")&(train.Sex=="female")].Age.mean()) print(mean_age_femaleAdult) mean_age_femaleChildren=round(train[(train["age_category"]=="Children")&(train.Sex=="female")].Age.mean()) print(mean_age_femaleChildren )
Titanic - Machine Learning from Disaster
11,514,849
xtrain_tfidf = xtrain_tfidf.toarray() xval_tfidf = xval_tfidf.toarray() train_y = np.array(train_y) val_y = np.array(val_y )<categorify>
train[(train.Age.isna())&(train.Parch==0)]=train[(train.Age.isna())&(train.Parch==0)].fillna(mean_age_femaleAdult) train[train.Age.isna() ]
Titanic - Machine Learning from Disaster
11,514,849
train_dataset = TensorDataset(torch.from_numpy(xtrain_tfidf ).double() , torch.from_numpy(train_y ).double()) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=64) val_dataset = TensorDataset(torch.from_numpy(xval_tfidf ).double() , torch.from_numpy(val_y ).double()) val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=64) dataloaders = {'train': train_dataloader, 'val': val_dataloader} dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}<init_hyperparams>
train[train.Age.isna() ]=train[train.Age.isna() ].fillna(mean_age_femaleChildren )
Titanic - Machine Learning from Disaster
11,514,849
epochs = 30 input_dim = 25000 output_dim = 1 lr_rate = 0.001<choose_model_class>
for i in range(891): if train.loc[i,"Age"]<1: train.loc[i,"age_category"]="Infants" elif 1<=train.loc[i,"Age"]<18: train.loc[i,"age_category"]="Children" elif 18<=train.loc[i,"Age"]<65: train.loc[i,"age_category"]="Adults" elif train.loc[i,"Age"]>=65: train.loc[i,"age_category"]="Elders" else: train.loc[i,"age_category"]="Unknown"
Titanic - Machine Learning from Disaster
11,514,849
model = nn.Sequential(nn.Linear(input_dim, 2048), nn.Dropout(0.5), nn.ReLU() , nn.Linear(2048, 256), nn.Dropout(0.5), nn.ReLU() , nn.Linear(256, output_dim), nn.Sigmoid() ).double()<choose_model_class>
train.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
criterion = torch.nn.BCEWithLogitsLoss() optimizer = torch.optim.Adam(model.parameters() , lr=lr_rate )<train_on_grid>
train[train.Embarked.isna() ]
Titanic - Machine Learning from Disaster
11,514,849
def train_model(model):
    """Train `model` with per-epoch validation and return it loaded with
    the weights that achieved the best validation accuracy.

    Relies on module-level globals: epochs, criterion, optimizer,
    train_dataloader, val_dataloader, train_dataset, val_dataset.
    Requires a CUDA device (tensors are moved with .cuda()).
    """
    model = model.cuda()
    best_acc = 0.0
    best_model_wts = copy.deepcopy(model.state_dict())
    for epoch in range(int(epochs)):
        train_loss = 0
        val_loss = 0
        val_acc = 0
        model.train()
        for inputs, labels in train_dataloader:
            inputs, labels = inputs.cuda(), labels.cuda()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs.squeeze(), labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        # Validation pass — metrics only, so disable autograd bookkeeping.
        model.eval()
        num_correct = 0
        with torch.no_grad():
            for inputs, labels in val_dataloader:
                inputs, labels = inputs.cuda(), labels.cuda()
                outputs = model(inputs)
                # BUGFIX: compute the loss on the model outputs.  The
                # original fed torch.round(outputs) to the criterion, which
                # produces a meaningless validation loss.
                loss = criterion(outputs.squeeze(), labels)
                val_loss += loss.item()
                predictions = torch.round(outputs.squeeze())
                equals = (predictions == labels.data)
                num_correct += torch.sum(equals.data).item()
        val_acc = num_correct / len(val_dataset)
        # Keep a snapshot of the best-performing weights.
        if val_acc > best_acc:
            best_acc = val_acc
            best_model_wts = copy.deepcopy(model.state_dict())
        print('---------Epoch {} -----------'.format(epoch))
        print('Train Loss: {:.6f} Val Loss: {:.6f} Val Accuracy: {:.6f}'.format(
            train_loss/len(train_dataset), val_loss/len(val_dataset), val_acc))
    model.load_state_dict(best_model_wts)
    return model
# Fill the two missing Embarked values with the most common port, "S".
# BUGFIX: the original train.fillna("S", inplace=True) wrote "S" into
# *every* NaN cell of the frame (e.g. Cabin), not only Embarked.
train["Embarked"].fillna("S", inplace=True)
train.isna().sum()
Titanic - Machine Learning from Disaster
11,514,849
model = train_model(model )<load_from_csv>
# Apply the same preprocessing to the test set: drop Ticket, derive
# age_category and a title-based "status" column, then impute missing
# Age and Fare values.
test.drop("Ticket", axis=1, inplace=True)

def _age_category(age):
    """Map an age (possibly NaN) to its category label; NaN fails every
    comparison and falls through to "Unknown"."""
    if age < 1:
        return "Infants"
    elif 1 <= age < 18:
        return "Children"
    elif 18 <= age < 65:
        return "Adults"
    elif age >= 65:
        return "Elders"
    return "Unknown"

def _status(name):
    """Honorific extracted from the raw Name string, checked in the same
    priority order as the training-set pass."""
    words = name.split()
    if "Mr." in words:
        return "Mister"
    if "Miss." in words:
        return "Miss"
    if "Mrs." in words:
        return "Mistress"
    if "Rev." in words:
        return "Reverend"
    if "Master." in words:
        return "Master"
    if "Dr." in words:
        return "Doctor"
    return "Other"

test["age_category"] = test["Age"].map(_age_category)
test["status"] = test["Name"].map(_status)

# Impute Age per title group.  BUGFIX: only the Age column is filled now;
# the original assigned fillna() over whole row slices, which stamped the
# mean age into every other NaN column (e.g. Fare, Cabin) of those rows.
mean_age_children = round(test[test["age_category"].isin(["Children", "Infants"])].Age.mean())
test.loc[(test.status == "Master") & test.Age.isna(), "Age"] = mean_age_children

mean_age_mistress = round(test[test.status == "Mistress"].Age.mean())
test.loc[(test.status == "Mistress") & test.Age.isna(), "Age"] = mean_age_mistress

mean_age_mister = round(test[test.status == "Mister"].Age.mean())
test.loc[(test.status == "Mister") & test.Age.isna(), "Age"] = mean_age_mister

# Refresh the categories now that most ages are filled.  BUGFIX: the
# original loops read train.loc[i, "Age"] for the 18-65 band — a
# copy/paste error that classified test rows by the training set's ages.
test["age_category"] = test["Age"].map(_age_category)

mean_age_femaleAdult = round(test[(test["age_category"] == "Adults") & (test.Sex == "female")].Age.mean())
mean_age_femaleChildren = round(test[(test["age_category"] == "Children") & (test.Sex == "female")].Age.mean())
test.loc[test.Age.isna() & (test.Parch == 0), "Age"] = mean_age_femaleAdult
test.loc[test.Age.isna(), "Age"] = mean_age_femaleChildren
test["age_category"] = test["Age"].map(_age_category)

# Missing Fare: use the 3rd-class mean.  BUGFIX: fill only the Fare
# column; the original test.fillna(fare_mean) wrote the fare into every
# remaining NaN cell of the frame.
fare_mean = test[test.Pclass == 3].Fare.mean()
test["Fare"].fillna(fare_mean, inplace=True)
print(test.isna().sum())
Titanic - Machine Learning from Disaster
11,514,849
# Load the test reviews and vectorize them with the *fitted* TF-IDF
# vectorizer (transform only — no refit), then densify for torch.
test_df = pd.read_csv('.. /input/test.csv') xtest_tfidf = tfidf_vect.transform(test_df['review']) xtest_tfidf = xtest_tfidf.toarray()<prepare_x_and_y>
# Integer-encode the categorical columns shared by train and test.
# BUGFIX: fit the encoder on the union of both frames' values; the
# original called fit_transform separately on train and on test, which
# can assign different integers to the same category whenever the two
# frames' value sets differ.
label_encoder = LabelEncoder()
col = ["Sex", "Embarked", "age_category"]
for i in col:
    label_encoder.fit(pd.concat([train[i], test[i]], ignore_index=True))
    train[i] = label_encoder.transform(train[i])
    test[i] = label_encoder.transform(test[i])
Titanic - Machine Learning from Disaster
11,514,849
test_y = np.zeros(xtest_tfidf.shape[0] )<create_dataframe>
from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score
Titanic - Machine Learning from Disaster
11,514,849
# Inference dataset/loader; shuffle=False keeps predictions aligned with
# the original row order of test_df.
test_dataset = TensorDataset(torch.from_numpy(xtest_tfidf), torch.from_numpy(test_y)) test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=64 )<predict_on_test>
# Baseline decision tree using Pclass/Sex/Fare/age_category; held-out
# accuracy (in %) is the last expression's value.
features1=["Pclass","Sex","Fare","age_category"] X1=train[features1] y1=train.Survived X_train1, X_test1, y_train1,y_test1=train_test_split(X1,y1,test_size=0.22,random_state=0) DT1=DecisionTreeClassifier(random_state=1) DT1.fit(X_train1,y_train1) y_pred1=DT1.predict(X_test1) score=(round(accuracy_score(y_pred1,y_test1)*100,2)) score
Titanic - Machine Learning from Disaster
11,514,849
def predict(model, test_dataloader):
    """Run inference over a dataloader and return hard 0/1 predictions
    (floats from torch.round) as a flat Python list.

    Generalized: inputs are moved to the model's own device instead of
    the original hard-coded .cuda(), so this also works on CPU-only
    machines; behavior on CUDA models is unchanged.
    """
    device = next(model.parameters()).device
    model.eval()
    predictions = []
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for inputs, _ in test_dataloader:
            inputs = inputs.to(device)
            output = model(inputs)
            preds = torch.round(output)
            predictions.extend([p.item() for p in preds])
    return predictions

predictions = predict(model, test_dataloader)
# Second decision tree without age_category, for comparison with DT1.
features2=["Pclass","Sex","Fare"] X2=train[features2] y2=train.Survived X_train2, X_test2, y_train2,y_test2=train_test_split(X2,y2,test_size=0.21,random_state=0) DT2=DecisionTreeClassifier(random_state=1) DT2.fit(X_train2,y_train2) y_pred2=DT2.predict(X_test2) score=round(accuracy_score(y_pred2,y_test2)*100,2) score
Titanic - Machine Learning from Disaster
11,514,849
# Assemble the submission frame: review id plus the 0/1 sentiment label.
sub_df = pd.DataFrame({
    'Id': test_df['Id'],
    'sentiment': [int(p) for p in predictions],
})
# Gradient boosting on the same split as DT1.  Note: the variable is
# named `xgb` but this is sklearn's GradientBoostingClassifier, not XGBoost.
xgb = GradientBoostingClassifier(random_state=1) xgb.fit(X_train1, y_train1) y_pred = xgb.predict(X_test1) score_xgb = round(accuracy_score(y_pred, y_test1)* 100, 2) score_xgb
Titanic - Machine Learning from Disaster
11,514,849
sub_df.to_csv('my_submission.csv', index=False) <install_modules>
# Fun sanity check: predict survival for hand-built passengers.  Feature
# order must match features1 = [Pclass, Sex, Fare, age_category] with the
# label-encoded integer values.
Jack=[3,1,7.89,0] Rose=[1,0,60,0] predictions=xgb.predict([Jack,Rose]) print("Did Jack survived? {}".format(predictions[0])) print("Did Rose survived? {}".format(predictions[1]))
Titanic - Machine Learning from Disaster
11,514,849
!pip install -q --upgrade efficientnet tensorflow_addons<import_modules>
# Predict on the preprocessed test set with the boosted model and write
# the Kaggle submission file (PassengerId, Survived).
ids = test['PassengerId'] X_test=test[features1] predictions = xgb.predict(X_test) df = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions }) df.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
11,066,913
print(f'Numpy version : {np.__version__}') print(f'Tensorflow version : {tf.__version__}') print(f'Tensorflow Addons version : {tfa.__version__}') print(f'EfficientNet(library)version : {efficientnet.__version__}') print(f'Matplotlib version : {matplotlib.__version__}') print(f'Scipy version : {scipy.__version__}') print(f'Pandas version : {pd.__version__}') print(f'Scikit-Learn version : {sklearn.__version__}' )<define_variables>
%matplotlib inline
Titanic - Machine Learning from Disaster
11,066,913
# Record the setup start time and seed every RNG source (hash, random,
# numpy, tensorflow) for reproducible runs.
PRE_TRAINING_TIME_START = datetime.now() AUTO = tf.data.experimental.AUTOTUNE SEED = 42 os.environ['PYTHONHASHSEED']=str(SEED) random.seed(SEED) np.random.seed(SEED) os.environ['TF_DETERMINISTIC_OPS']=str(SEED) tf.random.set_seed(SEED) <set_options>
df_train=pd.read_csv('.. /input/titanic/train.csv') df_test=pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
11,066,913
# Detect a TPU if one is attached and build the matching distribution
# strategy; falls back to the default (single device) strategy otherwise.
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )<define_variables>
# Stack train and test into one frame so feature engineering is applied
# uniformly; keep the sizes, labels and test ids for splitting back later.
ntrain = df_train.shape[0] ntest = df_test.shape[0] y_train = df_train['Survived'].values passId = df_test['PassengerId'] data = pd.concat(( df_train, df_test)) print("data size is: {}".format(data.shape))
Titanic - Machine Learning from Disaster
11,066,913
# Resolve the GCS paths of the TFRecord datasets and glob the shard file
# lists for training and test.
IMAGE_SIZE =(512, 512) GCS_TRAIN_PATHS = [ KaggleDatasets().get_gcs_path('tfrecords'), KaggleDatasets().get_gcs_path('tfrecords-2') ] TRAINING_FILENAMES = [] for i in GCS_TRAIN_PATHS: TRAINING_FILENAMES.append(tf.io.gfile.glob(i + '/*.tfrecords')) TRAINING_FILENAMES = list(itertools.chain.from_iterable(TRAINING_FILENAMES)) GCS_TEST_PATH = KaggleDatasets().get_gcs_path('tfrecords-3') TEST_FILENAMES = tf.io.gfile.glob(GCS_TEST_PATH + '/*.tfrecords') print(len(TRAINING_FILENAMES)) print(len(TEST_FILENAMES))<define_variables>
print("TRAIN DATA:") df_train.isnull().sum()
Titanic - Machine Learning from Disaster
11,066,913
# Training constants.  current_epoch and chance are module-level state
# mutated during training and read by the augmentation functions.
EPOCHS = 12 DO_AUG = True BATCH_SIZE = 256 current_epoch = 0 chance = 0 NUM_TRAINING_IMAGES = 105390 NUM_TEST_IMAGES = 12186 STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE<define_variables>
print("TEST DATA:") df_test.isnull().sum()
Titanic - Machine Learning from Disaster
11,066,913
# 42 class labels as zero-padded strings: "00" .. "41".
CLASSES = [str(c ).zfill(2)for c in range(0, 42)]<categorify>
# Family-survival feature: group passengers by (Last_Name, Fare) as a
# family proxy; within each multi-member group, mark 1 if any *other*
# member survived, 0 if all known others died, else keep the 0.5 default.
data['Last_Name'] = data['Name'].apply(lambda x: str.split(x, ",")[0]) data['Fare'].fillna(data['Fare'].mean() , inplace=True) default_survival_chance = 0.5 data['Family_Survival'] = default_survival_chance for grp, grp_df in data[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId', 'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin == 0.0): data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 0 print("Number of passengers with family survival information:", data.loc[data['Family_Survival']!=0.5].shape[0] )
Titanic - Machine Learning from Disaster
11,066,913
def decode_image(image_data):
    """Decode a JPEG byte string into a float32 image in [0, 1] of shape
    IMAGE_SIZE + (3,)."""
    img = tf.image.decode_jpeg(image_data, channels=3)
    img = tf.cast(img, tf.float32) / 255.0
    return tf.reshape(img, [*IMAGE_SIZE, 3])

def read_labeled_tfrecord(example):
    """Parse a labelled example into ((image, words), label)."""
    spec = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "words": tf.io.FixedLenFeature([6633], tf.float32),
        "label": tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(example, spec)
    image = decode_image(parsed['image'])
    label = tf.cast(parsed['label'], tf.int32)
    return ((image, parsed['words']), label)

def read_unlabeled_tfrecord(example):
    """Parse a test example into ((image, words), filename)."""
    spec = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "words": tf.io.FixedLenFeature([6633], tf.float32),
        "filename": tf.io.FixedLenFeature([], tf.string),
    }
    parsed = tf.io.parse_single_example(example, spec)
    image = decode_image(parsed['image'])
    return ((image, parsed['words']), parsed['filename'])

def load_dataset(filenames, labeled=True, ordered=False):
    """Read TFRecord shards into a parsed tf.data.Dataset.

    When ordered is False, deterministic interleaving is disabled so
    shards can be read out of order for throughput.
    """
    opts = tf.data.Options()
    if not ordered:
        opts.experimental_deterministic = False
    ds = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    ds = ds.with_options(opts)
    parser = read_labeled_tfrecord if labeled else read_unlabeled_tfrecord
    return ds.map(parser, num_parallel_calls=AUTO)

def get_training_dataset(do_aug=True):
    """Training pipeline: optional augmentation, repeat, shuffle, batch,
    prefetch."""
    ds = load_dataset(TRAINING_FILENAMES, labeled=True)
    if do_aug:
        ds = ds.map(image_augmentation, num_parallel_calls=AUTO)
    ds = ds.repeat()
    ds = ds.shuffle(2048)
    ds = ds.batch(BATCH_SIZE)
    return ds.prefetch(AUTO)

def get_test_dataset(ordered=False, tta=None):
    """Test pipeline with an optional TTA augmentation variant (0, 1, 2)."""
    ds = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    if tta == 0:
        ds = ds.map(image_augmentation_tta_0, num_parallel_calls=AUTO)
    elif tta == 1:
        ds = ds.map(image_augmentation_tta_1, num_parallel_calls=AUTO)
    elif tta == 2:
        ds = ds.map(image_augmentation_tta_2, num_parallel_calls=AUTO)
    ds = ds.batch(BATCH_SIZE)
    return ds.prefetch(AUTO)
# Extend Family_Survival using shared tickets: passengers travelling on
# the same ticket are treated as a group; only rows still at 0 or the 0.5
# default are updated from the other members' outcomes.
for _, grp_df in data.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 0 print("Number of passenger with family/group survival information: " +str(data[data['Family_Survival']!=0.5].shape[0]))
Titanic - Machine Learning from Disaster
11,066,913
@tf.function def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) zoom_matrix = tf.reshape(tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3]) shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) @tf.function def transform(image): DIM = IMAGE_SIZE[0] XDIM = DIM%2 if tf.random.uniform(shape=[], minval=0, maxval=2, dtype=tf.int32, seed=SEED)== 0: rot = 17.* tf.random.normal([1],dtype='float32') else: rot = tf.constant([0],dtype='float32') if tf.random.uniform(shape=[], minval=0, maxval=2, dtype=tf.int32, seed=SEED)== 0: shr = 5.5 * tf.random.normal([1],dtype='float32') else: shr = tf.constant([0],dtype='float32') if tf.random.uniform(shape=[], minval=0, maxval=3, dtype=tf.int32, seed=SEED)== 0: h_zoom = tf.random.normal([1],dtype='float32')/8.5 if h_zoom > 0: h_zoom = 1.0 + h_zoom * -1 else: h_zoom = 1.0 + h_zoom else: h_zoom = tf.constant([1],dtype='float32') if tf.random.uniform(shape=[], minval=0, maxval=3, dtype=tf.int32, seed=SEED)== 0: w_zoom = tf.random.normal([1],dtype='float32')/8.5 if w_zoom > 0: w_zoom = 1.0 + w_zoom * -1 else: w_zoom = 1.0 + w_zoom else: w_zoom = tf.constant([1],dtype='float32') if tf.random.uniform(shape=[], minval=0, maxval=3, dtype=tf.int32, seed=SEED)== 0: h_shift = 18.* tf.random.normal([1],dtype='float32') else: h_shift = tf.constant([0],dtype='float32') if tf.random.uniform(shape=[], 
minval=0, maxval=3, dtype=tf.int32, seed=SEED)== 0: w_shift = 18.* tf.random.normal([1],dtype='float32') else: w_shift = tf.constant([0],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image,tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3] )<prepare_x_and_y>
data = data.reset_index(drop=True) data = data.drop('Survived', axis=1) data.tail()
Titanic - Machine Learning from Disaster
11,066,913
@tf.function def transform_grid_mark(image, inv_mat, image_shape): h, w, c = image_shape cx, cy = w//2, h//2 new_xs = tf.repeat(tf.range(-cx, cx, 1), h) new_ys = tf.tile(tf.range(-cy, cy, 1), [w]) new_zs = tf.ones([h*w], dtype=tf.int32) old_coords = tf.matmul(inv_mat, tf.cast(tf.stack([new_xs, new_ys, new_zs]), tf.float32)) old_coords_x, old_coords_y = tf.round(old_coords[0, :] + tf.cast(w, tf.float32)//2.) , tf.round(old_coords[1, :] + tf.cast(h, tf.float32)//2.) old_coords_x = tf.cast(old_coords_x, tf.int32) old_coords_y = tf.cast(old_coords_y, tf.int32) clip_mask_x = tf.logical_or(old_coords_x<0, old_coords_x>w-1) clip_mask_y = tf.logical_or(old_coords_y<0, old_coords_y>h-1) clip_mask = tf.logical_or(clip_mask_x, clip_mask_y) old_coords_x = tf.boolean_mask(old_coords_x, tf.logical_not(clip_mask)) old_coords_y = tf.boolean_mask(old_coords_y, tf.logical_not(clip_mask)) new_coords_x = tf.boolean_mask(new_xs+cx, tf.logical_not(clip_mask)) new_coords_y = tf.boolean_mask(new_ys+cy, tf.logical_not(clip_mask)) old_coords = tf.cast(tf.stack([old_coords_y, old_coords_x]), tf.int32) new_coords = tf.cast(tf.stack([new_coords_y, new_coords_x]), tf.int64) rotated_image_values = tf.gather_nd(image, tf.transpose(old_coords)) rotated_image_channel = list() for i in range(c): vals = rotated_image_values[:,i] sparse_channel = tf.SparseTensor(tf.transpose(new_coords), vals, [h, w]) rotated_image_channel.append(tf.sparse.to_dense(sparse_channel, default_value=0, validate_indices=False)) return tf.transpose(tf.stack(rotated_image_channel), [1,2,0]) @tf.function def random_rotate(image, angle, image_shape): def get_rotation_mat_inv(angle): angle = math.pi * angle / 180 cos_val = tf.math.cos(angle) sin_val = tf.math.sin(angle) one = tf.constant([1], tf.float32) zero = tf.constant([0], tf.float32) rot_mat_inv = tf.concat([cos_val, sin_val, zero, -sin_val, cos_val, zero, zero, zero, one], axis=0) rot_mat_inv = tf.reshape(rot_mat_inv, [3,3]) return rot_mat_inv angle = float(angle)* 
tf.random.normal([1],dtype='float32') rot_mat_inv = get_rotation_mat_inv(angle) return transform_grid_mark(image, rot_mat_inv, image_shape) @tf.function def grid_mask() : h = tf.constant(IMAGE_SIZE[0], dtype=tf.float32) w = tf.constant(IMAGE_SIZE[1], dtype=tf.float32) image_height, image_width =(h, w) d1 = 112 d2 = 352 rotate_angle = 45 ratio = 0.6 hh = tf.math.ceil(tf.math.sqrt(h*h+w*w)) hh = tf.cast(hh, tf.int32) hh = hh+1 if hh%2==1 else hh d = tf.random.uniform(shape=[], minval=d1, maxval=d2, dtype=tf.int32) l = tf.cast(tf.cast(d,tf.float32)*ratio+0.5, tf.int32) st_h = tf.random.uniform(shape=[], minval=0, maxval=d, dtype=tf.int32) st_w = tf.random.uniform(shape=[], minval=0, maxval=d, dtype=tf.int32) y_ranges = tf.range(-1 * d + st_h, -1 * d + st_h + l) x_ranges = tf.range(-1 * d + st_w, -1 * d + st_w + l) for i in range(0, hh//d+1): s1 = i * d + st_h s2 = i * d + st_w y_ranges = tf.concat([y_ranges, tf.range(s1,s1+l)], axis=0) x_ranges = tf.concat([x_ranges, tf.range(s2,s2+l)], axis=0) x_clip_mask = tf.logical_or(x_ranges <0 , x_ranges > hh-1) y_clip_mask = tf.logical_or(y_ranges <0 , y_ranges > hh-1) clip_mask = tf.logical_or(x_clip_mask, y_clip_mask) x_ranges = tf.boolean_mask(x_ranges, tf.logical_not(clip_mask)) y_ranges = tf.boolean_mask(y_ranges, tf.logical_not(clip_mask)) hh_ranges = tf.tile(tf.range(0,hh), [tf.cast(tf.reduce_sum(tf.ones_like(x_ranges)) , tf.int32)]) x_ranges = tf.repeat(x_ranges, hh) y_ranges = tf.repeat(y_ranges, hh) y_hh_indices = tf.transpose(tf.stack([y_ranges, hh_ranges])) x_hh_indices = tf.transpose(tf.stack([hh_ranges, x_ranges])) y_mask_sparse = tf.SparseTensor(tf.cast(y_hh_indices, tf.int64), tf.zeros_like(y_ranges), [hh, hh]) y_mask = tf.sparse.to_dense(y_mask_sparse, 1, False) x_mask_sparse = tf.SparseTensor(tf.cast(x_hh_indices, tf.int64), tf.zeros_like(x_ranges), [hh, hh]) x_mask = tf.sparse.to_dense(x_mask_sparse, 1, False) mask = tf.expand_dims(tf.clip_by_value(x_mask + y_mask, 0, 1), axis=-1) mask = random_rotate(mask, 
rotate_angle, [hh, hh, 1]) mask = tf.image.crop_to_bounding_box(mask,(hh-tf.cast(h, tf.int32)) //2,(hh-tf.cast(w, tf.int32)) //2, tf.cast(image_height, tf.int32), tf.cast(image_width, tf.int32)) return mask @tf.function def apply_grid_mask(image): mask = grid_mask() mask = tf.concat([mask, mask, mask], axis=-1) return image * tf.cast(mask, 'float32' )<normalization>
# Quartile-bin Fare, then label-encode the resulting interval categories
# into integers 0-3.
data['Fare'] = pd.qcut(data['Fare'], 4) lbl = LabelEncoder() data['Fare'] = lbl.fit_transform(data['Fare'] )
Titanic - Machine Learning from Disaster
11,066,913
@tf.function def image_augmentation_tta_0(iw, filename): max_chance = 8 image, words = iw if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.flip_left_right(image) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_brightness(image, 0.1) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_contrast(image, 0.9, 1.1) return(( image, words), filename) @tf.function def image_augmentation_tta_1(iw, filename): max_chance = 8 image, words = iw if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.flip_left_right(image) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_brightness(image, 0.1) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_contrast(image, 0.9, 1.1) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_saturation(image, 0.95, 1.05) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_hue(image, 0.05) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = transform(image) return(( image, words), filename) @tf.function def image_augmentation_tta_2(iw, filename): max_chance = 8 image, words = iw if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.flip_left_right(image) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_brightness(image, 0.1) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_contrast(image, 0.9, 1.1) if 
tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_saturation(image, 0.95, 1.05) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = tf.image.random_hue(image, 0.05) if tf.random.uniform(shape=[], minval=0, maxval=11, dtype=tf.int32, seed=SEED)< max_chance: image = transform(image) if tf.random.uniform(shape=[], minval=0, maxval=10, dtype=tf.int32, seed=SEED)< max_chance: image = apply_grid_mask(image) return(( image, words), filename )<import_modules>
titles_data = sorted(set([x for x in data['Name'].map(lambda x: get_title(x)) ])) print(len(titles_data), ':', titles_data )
Titanic - Machine Learning from Disaster
11,066,913
from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Input, Flatten, Dense, Dropout, AveragePooling2D, GlobalAveragePooling2D, SpatialDropout2D, BatchNormalization, Activation, Concatenate<init_hyperparams>
# Extract and normalize the honorific.  get_title/set_title are helper
# functions defined elsewhere in this notebook (not visible here).
data['Title'] = data['Name'].map(lambda x: get_title(x)) data['Title'] = data.apply(set_title, axis=1 )
Titanic - Machine Learning from Disaster
11,066,913
class EpochCallback(tf.keras.callbacks.Callback):
    """Keras callback publishing the current epoch and the augmentation
    `chance` level through module-level globals, so the tf.data
    augmentation functions can ramp augmentation strength over epochs."""

    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        global chance
        current_epoch = epoch
        # Ramp schedule: no augmentation chance for epochs 0-1, then grow
        # linearly, capped at 8 from epoch 9 onward.
        if current_epoch < 2:
            chance = 0
        elif current_epoch < 9:
            chance = current_epoch - 1
        else:
            chance = 8
        # BUGFIX(reconstruction): the original print statement is garbled
        # in this copy ("print(f'Epoch print(datetime.now()"); restored to
        # log the epoch number and a timestamp — confirm against the
        # original notebook.
        print(f'Epoch {epoch}')
        print(datetime.now())
print(data['Title'].value_counts() )
Titanic - Machine Learning from Disaster
11,066,913
# Four early-stopping variants (val accuracy / val loss / train accuracy /
# train loss), all with patience 5; only the val-monitored ones restore
# the best weights.  epoch_cb drives the augmentation schedule.
es_val_acc = tf.keras.callbacks.EarlyStopping( monitor='val_sparse_categorical_accuracy', min_delta=0.001, patience=5, verbose=1, mode='auto', baseline=None, restore_best_weights=True ) es_val_loss = tf.keras.callbacks.EarlyStopping( monitor='val_loss', min_delta=0.001, patience=5, verbose=1, mode='auto', baseline=None, restore_best_weights=True ) es_acc = tf.keras.callbacks.EarlyStopping( monitor='sparse_categorical_accuracy', min_delta=0.001, patience=5, verbose=1, mode='auto', baseline=None, restore_best_weights=False ) es_loss = tf.keras.callbacks.EarlyStopping( monitor='loss', min_delta=0.001, patience=5, verbose=1, mode='auto', baseline=None, restore_best_weights=False ) epoch_cb = EpochCallback()
print('Total missing age data: ', pd.isnull(data['Age'] ).sum())
Titanic - Machine Learning from Disaster
11,066,913
# Two-branch model built under the distribution strategy: an
# EfficientNetB7 image branch (noisy-student weights, global average
# pooled) concatenated with an MLP over the 6633-dim word-count vector,
# ending in a softmax over CLASSES; optimized with LAMB.
with strategy.scope() : efn7 = efn.EfficientNetB7(weights='noisy-student', include_top=False, input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3)) for layer in efn7.layers: layer.trainable = True model_image = Sequential([ efn7, GlobalAveragePooling2D(name='efficientnet-b7_gap'), ], name='b7-image') model_words = Sequential([ Input(( 6633,), name='mlp-words_input'), Dense(331, name='mlp-words_dense_1'), BatchNormalization(name='mlp-words_bn_1'), Activation('relu', name='mlp-words_act_1'), Dense(110, name='mlp-words_dense_2'), BatchNormalization(name='mlp-words_bn_2'), Activation('relu', name='mlp-words_act_2'), ], name='mlp-words') concatenate = Concatenate(name='concatenate' )([model_image.output, model_words.output]) output = Dense(len(CLASSES), activation='softmax', name='output' )(concatenate) model = Model(inputs=[model_image.input, model_words.input], outputs=output) model.compile(optimizer=tfa.optimizers.LAMB(0.01), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) model.summary() <train_model>
# Impute missing ages with the median age of the passenger's Title group.
data['Age'] = data.groupby('Title')['Age'].apply(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
11,066,913
print(f'Pre training time : {(datetime.now() - PRE_TRAINING_TIME_START ).total_seconds() } seconds' )<init_hyperparams>
# Quartile-bin Age, then label-encode the interval categories to 0-3
# (same treatment as Fare above).
data['Age'] = pd.qcut(data['Age'], 4) lbl = LabelEncoder() data['Age'] = lbl.fit_transform(data['Age'] )
Titanic - Machine Learning from Disaster