| kernel_id (int64) | prompt (string) | completion (string) | comp_name (string) |
|---|---|---|---|
3,808,829 | for i in range(len(data['combined_text'])) :
data['combined_text'][i] = remove_shortforms(data['combined_text'][i])
data['combined_text'][i] = remove_special_char(data['combined_text'][i])
data['combined_text'][i] = remove_wordswithnum(data['combined_text'][i])
data['combined_text'][i] = lowercase(data['combined_text'][i])
data['combined_text'][i] = remove_stop_words(data['combined_text'][i])
text = data['combined_text'][i]
text = text.split()
data['combined_text'][i] = stemming(text)
s = data['combined_text'][i]
data['combined_text'][i] = listToString(s)
data['combined_text'][i] = lemmatize_words(data['combined_text'][i] )<feature_engineering> | cosine_similarity(prediction_df.T ) | Titanic - Machine Learning from Disaster |
3,808,829 | cv = CountVectorizer(ngram_range=(1,3))
text_bow = cv.fit_transform(data['combined_text'])
print(text_bow.shape )<split> | from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
from sklearn import preprocessing
from keras import regularizers | Titanic - Machine Learning from Disaster |
3,808,829 | train_text = text_bow[:train.shape[0]]
test_text = text_bow[train.shape[0]:]<split> | Titanic - Machine Learning from Disaster | |
3,808,829 | X_train,X_test,Y_train,Y_test = train_test_split(train_text,Y,test_size=0.2)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape )<compute_train_metric> | Titanic - Machine Learning from Disaster | |
3,808,829 | lr = LogisticRegression(C=1,penalty='l2',max_iter=2000)
lr.fit(X_train,Y_train)
pred = lr.predict(X_test)
print("F1 score :",f1_score(Y_test,pred))
print("Classification Report :",classification_report(Y_test,pred))<categorify> | vote_est = [
('ada', ensemble.AdaBoostClassifier()),
('bc', ensemble.BaggingClassifier()),
('etc',ensemble.ExtraTreesClassifier()),
('gbc', ensemble.GradientBoostingClassifier()),
('rfc', ensemble.RandomForestClassifier()),
('gpc', gaussian_process.GaussianProcessClassifier()),
('lr', linear_model.LogisticRegressionCV()),
('bnb', naive_bayes.BernoulliNB()),
('gnb', naive_bayes.GaussianNB()),
('knn', neighbors.KNeighborsClassifier()),
('svc', svm.SVC(probability=True)) ,
('xgb', XGBClassifier())
]
| Titanic - Machine Learning from Disaster |
3,808,829 | tfidf = TfidfVectorizer(ngram_range=(1,3))
text_tfidf = tfidf.fit_transform(data['combined_text'])
print(text_tfidf.shape )<split> | vote_ests = [vote_est, vote_est] | Titanic - Machine Learning from Disaster |
3,808,829 | train_text = text_tfidf[:train.shape[0]]
test_text = text_tfidf[train.shape[0]:]<split> | Titanic - Machine Learning from Disaster | |
3,808,829 | X_train,X_test,Y_train,Y_test = train_test_split(train_text,Y,test_size=0.2)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape )<compute_train_metric> | Titanic - Machine Learning from Disaster | |
3,808,829 | lr = LogisticRegression(C=2,penalty='l2',max_iter=2000)
lr.fit(X_train,Y_train)
pred = lr.predict(X_test)
print("F1 score :",f1_score(Y_test,pred))
print("Classification Report :",classification_report(Y_test,pred))<feature_engineering> | best_param = [[
[
{'learning_rate': 0.25, 'n_estimators': 300, 'random_state': 0}
],
[
{'max_samples': 0.5, 'n_estimators': 300, 'random_state': 0}
],
[
{'criterion': 'entropy', 'max_depth': 8, 'n_estimators': 50, 'random_state': 0}
],
[
{'learning_rate': 0.05, 'max_depth': 2, 'n_estimators': 300, 'random_state': 0}
],
[
{'criterion': 'entropy', 'max_depth': 8, 'n_estimators': 300, 'oob_score': True, 'random_state': 0}
],
[
{'max_iter_predict': 10, 'random_state': 0}
],
[
{'fit_intercept': True, 'random_state': 0, 'solver': 'newton-cg'}],
[{
'alpha': 0.1,
}],
[{}],
[{
'n_neighbors': 7,
'weights': 'distance',
'algorithm':'brute'
}],
[{
'C': 1,
'gamma': 0.1,
'decision_function_shape': 'ovo',
'probability': True,
'random_state': 0
}],
[
{'learning_rate': 0.05, 'max_depth': 4, 'n_estimators': 100, 'seed': 0}
]
],
[
[
{'learning_rate': 0.25, 'n_estimators': 300, 'random_state': 0}
],
[
{'max_samples': 1.0, 'n_estimators': 300, 'random_state': 0}
],
[
{'criterion': 'entropy', 'max_depth': None, 'n_estimators': 50, 'random_state': 0}
],
[
{'learning_rate': 0.05, 'max_depth': 6, 'n_estimators': 300, 'random_state': 0}
],
[
{'criterion': 'entropy', 'max_depth': None, 'n_estimators': 50, 'oob_score': True, 'random_state': 0}
],
[
{'max_iter_predict': 10, 'random_state': 0}
],
[
{'fit_intercept': True, 'random_state': 0, 'solver': 'lbfgs'}],
[{
'alpha': 0.1,
}],
[{}],
[{
'n_neighbors': 7,
'weights': 'distance',
'algorithm':'brute'
}],
[{
'C': 1,
'gamma': 0.1,
'decision_function_shape': 'ovo',
'probability': True,
'random_state': 0
}],
[
{'learning_rate': 0.05, 'max_depth': 4, 'n_estimators': 100, 'seed': 0}
]
]
] | Titanic - Machine Learning from Disaster |
3,808,829 | print('Loading word vectors...')
word2vec = {}
with open(os.path.join('../input/glove-global-vectors-for-word-representation/glove.6B.200d.txt'), encoding="utf-8") as f:
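# each GloVe line is a token followed by its 200 floating-point embedding values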
for line in f:
values = line.split()
word = values[0]
vec = np.asarray(values[1:], dtype='float32')
word2vec[word] = vec
print('Found %s word vectors.' % len(word2vec))<load_from_csv> | for i in range(len(vote_ests)) :
for clf, param in zip(vote_ests[i], best_param[i]):
print('The best parameter for {} is {}'.format(clf[1].__class__.__name__, param[0]))
clf[1].set_params(**param[0] ) | Titanic - Machine Learning from Disaster |
3,808,829 | train = pd.read_csv(r'/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv(r'/kaggle/input/nlp-getting-started/test.csv' )<prepare_x_and_y> | grid_hards = []
for i in range(len(vote_ests)) :
grid_hard = ensemble.VotingClassifier(estimators = vote_ests[i], voting = 'hard')
grid_hard_cv = model_selection.cross_validate(grid_hard, X_trains[i], y_train, cv = cv_split)
grid_hard.fit(X_trains[i], y_train)
grid_hards.append(grid_hard)
print("Hard Voting w/Tuned Hyperparameters Training w/bin score mean: {:.2f}".format(grid_hard_cv['train_score'].mean() *100))
print("Hard Voting w/Tuned Hyperparameters Test w/bin score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() *100))
print("Hard Voting w/Tuned Hyperparameters Test w/bin score 3*std: +/- {:.2f}".format(grid_hard_cv['test_score'].std() *100*3))
print('-'*10)
| Titanic - Machine Learning from Disaster |
3,808,829 | <train_model><EOS> | alg_name = 'GridHardVoting'
feature_index = 0
prediction = grid_hards[feature_index].predict(X_tests[feature_index])
temp = {'PassengerID': passenger_id, 'Survived': prediction.astype(int)}
result = pd.DataFrame(temp)
result.to_csv('result_%s_feature%s.csv'%(alg_name, feature_index), index=False)
prediction_df[alg_name] = prediction
alg_name = 'GridHardVoting'
feature_index = 1
prediction = grid_hards[feature_index].predict(X_tests[feature_index])
temp = {'PassengerID': passenger_id, 'Survived': prediction.astype(int)}
result = pd.DataFrame(temp)
result.to_csv('result_%s_feature%s.csv'%(alg_name, feature_index), index=False)
prediction_df[alg_name] = prediction | Titanic - Machine Learning from Disaster |
12,521,033 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<count_unique_values> | import numpy as np
import pandas as pd | Titanic - Machine Learning from Disaster |
12,521,033 | word2index = tokenizer.word_index
print("Number of unique tokens : ",len(word2index))<prepare_x_and_y> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
12,521,033 | train_pad = data_padded[:train.shape[0]]
test_pad = data_padded[train.shape[0]:]<categorify> | women = train_data[train_data['Sex'] == 'female']['Survived']
rate_women = sum(women)/len(women)
print('% of women who survived:', rate_women ) | Titanic - Machine Learning from Disaster |
12,521,033 | embedding_matrix = np.zeros(( len(word2index)+1,200))
embedding_vec=[]
for word, i in tqdm(word2index.items()):
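# row i of the matrix holds the GloVe vector for the tokenizer's word index i; words missing from GloVe keep the zero vector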
embedding_vec = word2vec.get(word)
if embedding_vec is not None:
embedding_matrix[i] = embedding_vec<choose_model_class> | men = train_data[train_data.Sex == 'male']['Survived']
rate_men = sum(men)/len(men)
print('% of men who survived:', rate_men ) | Titanic - Machine Learning from Disaster |
12,521,033 | model1 = keras.models.Sequential([
keras.layers.Embedding(len(word2index)+1,200,weights=[embedding_matrix],input_length=100,trainable=False),
keras.layers.LSTM(100,return_sequences=True),
keras.layers.LSTM(200),
keras.layers.Dropout(0.5),
keras.layers.Dense(1,activation='sigmoid')
] )<choose_model_class> | train_data[['Sex', 'Survived']].groupby(['Sex'] ).mean() | Titanic - Machine Learning from Disaster |
12,521,033 | model1.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'],
)<train_model> | train_data[['Pclass', 'Survived']].groupby(['Pclass'] ).mean() | Titanic - Machine Learning from Disaster |
12,521,033 | history1 = model1.fit(train_pad,Y,
batch_size=64,
epochs=10,
validation_split=0.2
)<choose_model_class> | women_count = 0
women_survived_count = 0
for idx, row in train_data.iterrows() :
if row['Sex'] == 'female':
women_count += 1
if row['Survived'] == 1:
women_survived_count += 1
women_survived_count / women_count | Titanic - Machine Learning from Disaster |
12,521,033 | model2 = keras.models.Sequential([
keras.layers.Embedding(len(word2index)+1,200,weights=[embedding_matrix],input_length=100,trainable=False),
keras.layers.GRU(100,return_sequences=True),
keras.layers.GRU(200),
keras.layers.Dropout(0.5),
keras.layers.Dense(1,activation='sigmoid')
] )<choose_model_class> | predictions = []
for idx, row in test_data.iterrows() :
if(row['Pclass'] == 1 or row['Pclass'] == 2)and row['Sex'] == 'female':
predictions.append(1)
elif row['Age'] < 13 and row['Pclass'] != 3:
predictions.append(1)
else:
predictions.append(0 ) | Titanic - Machine Learning from Disaster |
12,521,033 | model2.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'],
)<train_model> | test_data['Survived'] = predictions | Titanic - Machine Learning from Disaster |
12,521,033 | <choose_model_class><EOS> | test_data[['PassengerId', 'Survived']].to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
12,362,302 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class> | import numpy as np
import pandas as pd | Titanic - Machine Learning from Disaster |
12,362,302 | model3.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'],
)<train_model> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
12,362,302 | history3 = model3.fit(train_pad,Y,
batch_size=64,
epochs=10,
validation_split=0.2
)<choose_model_class> | women = train_data[train_data['Sex'] == 'female']['Survived']
rate_women = sum(women)/len(women)
print('% of women who survived:', rate_women ) | Titanic - Machine Learning from Disaster |
12,362,302 | es = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',mode='max',verbose=1,patience=3 )<train_model> | men = train_data[train_data.Sex == 'male']['Survived']
rate_men = sum(men)/len(men)
print('% of men who survived:', rate_men ) | Titanic - Machine Learning from Disaster |
12,362,302 | history = model3.fit(train_pad,Y,
batch_size=64,
epochs=30,
validation_split=0.2,
callbacks=[es]
)<predict_on_test> | train_data[['Sex', 'Survived']].groupby(['Sex'] ).mean() | Titanic - Machine Learning from Disaster |
12,362,302 | submit = pd.DataFrame(test['id'],columns=['id'])
predictions = model3.predict(test_pad)
submit['target_prob'] = predictions
submit.head()<data_type_conversions> | train_data[['Pclass', 'Survived']].groupby(['Pclass'] ).mean() | Titanic - Machine Learning from Disaster |
12,362,302 | target = [None]*len(submit)
for i in range(len(submit)) :
target[i] = np.round(submit['target_prob'][i] ).astype(int)
submit['target'] = target
submit.head()<save_to_csv> | women_count = 0
women_survived_count = 0
for idx, row in train_data.iterrows() :
if row['Sex'] == 'female':
women_count += 1
if row['Survived'] == 1:
women_survived_count += 1
women_survived_count / women_count | Titanic - Machine Learning from Disaster |
12,362,302 | submit = submit.drop('target_prob',axis=1)
submit.to_csv('real-nlp_lstm.csv',index=False )<load_from_csv> | count1w = 0
count1m = 0
count2w = 0
count2m = 0
count3w = 0
count3m = 0
countm = 0
countw = 0
for idx, row in train_data.iterrows() :
if row['Pclass'] == 1:
if row['Sex'] == 'female':
count1w += 1
else:
count1m += 1
elif row['Pclass'] == 2:
if row['Sex'] == 'female':
count2w += 1
else:
count2m += 1
else:
if row['Sex'] == 'female':
count3w += 1
else:
count3m += 1
if row['Sex'] == 'male':
countm += 1
else:
countw += 1
print("Class 1 Men:",count1m/countm)
print("Class 1 Women:",count1w/countw)
print("Class 2 Men:",count2m/countm)
print("Class 2 Women:",count2w/countw)
print("Class 3 Men:",count3m/countm)
print("Class 3 Women:",count3w/countw)
| Titanic - Machine Learning from Disaster |
12,362,302 | train = pd.read_csv(r'/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv(r'/kaggle/input/nlp-getting-started/test.csv' )<prepare_x_and_y> | predictions = []
for idx, row in test_data.iterrows() :
if row['Sex'] == 'female':
if row['Pclass'] == 1 or row['Pclass'] == 2 or row['Age'] < 25.0:
predictions.append(1)
else:
predictions.append(0)
else:
if row['Age'] < 18.0 and row['Pclass'] == 1:
predictions.append(1)
else:
predictions.append(0)
print(predictions[:10] ) | Titanic - Machine Learning from Disaster |
12,362,302 | Y = train['target']
train = train.drop('target',axis=1)
text_data_train = train['text']
text_data_test = test['text']<count_values> | test_data['Survived'] = predictions | Titanic - Machine Learning from Disaster |
12,362,302 | <load_pretrained><EOS> | test_data[['PassengerId', 'Survived']].to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
12,424,800 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<categorify> | import numpy as np
import pandas as pd | Titanic - Machine Learning from Disaster |
12,424,800 | def bert_encode(data,maximum_length):
input_ids = []
attention_masks = []
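# encode_plus adds the [CLS]/[SEP] special tokens and pads or truncates every text to maximum_length, returning an attention mask over the real tokens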
for i in range(len(data)) :
encoded = tokenizer.encode_plus(
data[i],
add_special_tokens=True,
max_length=maximum_length,
pad_to_max_length=True,
return_attention_mask=True,
)
input_ids.append(encoded['input_ids'])
attention_masks.append(encoded['attention_mask'])
return np.array(input_ids),np.array(attention_masks )<categorify> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
12,424,800 | train_input_ids,train_attention_masks = bert_encode(text_data_train,100)
test_input_ids,test_attention_masks = bert_encode(text_data_test,100 )<choose_model_class> | women = train_data[train_data['Sex'] == 'female']['Survived']
rate_women = sum(women)/len(women)
print('% of women who survived:', rate_women ) | Titanic - Machine Learning from Disaster |
12,424,800 | def create_model(bert_model):
input_ids = tf.keras.Input(shape=(100,),dtype='int32')
attention_masks = tf.keras.Input(shape=(100,),dtype='int32')
output = bert_model([input_ids,attention_masks])
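# output[1] is BERT's pooled [CLS] representation; a single sigmoid unit turns it into a binary target probability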
output = output[1]
output = tf.keras.layers.Dense(1,activation='sigmoid' )(output)
model = tf.keras.models.Model(inputs = [input_ids,attention_masks],outputs = output)
model.compile(Adam(lr=6e-6), loss='binary_crossentropy', metrics=['accuracy'])
return model<train_model> | men = train_data[train_data.Sex == 'male']['Survived']
rate_men = sum(men)/len(men)
print('% of men who survived:', rate_men ) | Titanic - Machine Learning from Disaster |
12,424,800 | history = model.fit([train_input_ids,train_attention_masks],Y,
validation_split=0.2,
epochs=3,
batch_size=5 )<predict_on_test> | train_data[['Sex', 'Survived']].groupby(['Sex'] ).mean() | Titanic - Machine Learning from Disaster |
12,424,800 | result = model.predict([test_input_ids,test_attention_masks])
result = np.round(result ).astype(int)
submit = pd.DataFrame(test['id'],columns=['id'])
submit['target'] = result
submit.head()<save_to_csv> | train_data[['Pclass', 'Survived']].groupby(['Pclass'] ).mean() | Titanic - Machine Learning from Disaster |
12,424,800 | submit.to_csv('real_nlp_bert.csv',index=False )<load_from_csv> | women_count = 0
women_survived_count = 0
for idx, row in train_data.iterrows() :
if row['Sex'] == 'female':
women_count += 1
if row['Survived'] == 1:
women_survived_count += 1
women_survived_count / women_count | Titanic - Machine Learning from Disaster |
6,637,182 | DATA_DIR = '../input/aptos2019-blindness-detection'
train_dir = join(DATA_DIR, 'train_images')
label_df = pd.read_csv(join(DATA_DIR, 'train.csv'))
def train_validation_split(df, val_fraction=0.1):
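# note: np.random.choice samples with replacement by default, so the validation set can end up slightly smaller than val_fraction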
val_ids = np.random.choice(df.id_code, size=int(len(df)* val_fraction))
val_df = df.query('id_code in @val_ids')
train_df = df.query('id_code not in @val_ids')
return train_df, val_df
train_df, val_df = train_validation_split(label_df)
print(train_df.shape, val_df.shape)
train_df.head()<categorify> | predictions = []
for idx, row in test_data.iterrows() :
if(row['Pclass'] == 1 or row['Pclass'] == 2)and row['Sex'] == 'female':
predictions.append(1)
elif row['Age'] < 15 and row['Pclass'] == 1:
predictions.append(1)
else:
predictions.append(0 ) | Titanic - Machine Learning from Disaster |
12,424,800 | %%time
class Diabetic_Retionopathy_Data(Dataset):
def __init__(self,
image_dir: str,
label_df: pd.DataFrame,
train=True,
transform=transforms.ToTensor() ,
sample_n=None,
in_memory=False,
write_images=False):
self.image_dir = image_dir
self.transform = transform
self.train = train
self.in_memory = in_memory
if sample_n:
label_df = label_df.sample(n=min(sample_n, len(label_df)))
ids = set(label_df.id_code)
self.img_files = [f for f in os.listdir(image_dir)if f.split('.')[0] in ids]
label_df.index = label_df.id_code
self.label_df = label_df.drop('id_code', axis=1)
if in_memory:
self.id2image = {}
for i, file_name in enumerate(self.img_files):
if i and i % 500 == 0:
print(f'{i} / {len(self.img_files)}')
image = self._read_process_image(join(image_dir, file_name))
id_ = file_name.split('.')[0]
self.id2image[id_] = image
if write_images:
image.save(file_name)
print(f'Initialized dataset with {len(self.img_files)} images.\n')
@staticmethod
def _read_process_image(file_path: str, size=256):
image = cv2.imread(file_path)
return process_image_ratio_invariant(image, size=size)
def __getitem__(self, idx):
file_name = self.img_files[idx]
id_ = file_name.split('.')[0]
if self.in_memory:
img = self.id2image[id_]
else:
img = self._read_process_image(join(self.image_dir, file_name))
X = self.transform(img)
if self.train:
y = float(self.label_df.loc[id_].diagnosis)
return X, y, id_
else:
return X, id_
def __len__(self):
return len(self.img_files)
class RandomCenterCrop(transforms.CenterCrop):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img):
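# draw a crop size uniformly per image: a cheap random-scale augmentation before resizing back to 256x256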
size = np.random.randint(self.min_size, self.max_size + 1)
crop = transforms.CenterCrop(( size, size))
return crop(img)
def __repr__(self):
return f'{self.__class__.__name__}:(min-size={self.min_size}, max-size={self.max_size})'
batchsize = 16
train_transform = transforms.Compose([
RandomCenterCrop(min_size=200, max_size=256),
transforms.Resize(( 256, 256)) ,
transforms.RandomHorizontalFlip() ,
transforms.RandomRotation(( -20, 20)) ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
train = Diabetic_Retionopathy_Data(train_dir,
train_df,
transform=train_transform,
in_memory=True,
write_images=False)
val = Diabetic_Retionopathy_Data(train_dir,
val_df,
transform=train_transform,
in_memory=True,
write_images=False)
train_loader = DataLoader(train, batch_size=batchsize, num_workers=4, shuffle=True)
val_loader = DataLoader(val, batch_size=batchsize, num_workers=3, shuffle=False)
X, y, _ = next(iter(val_loader))
print(f'batch-dimension:\nX = {X.shape},\ny = {y.shape}')
print(f'number of batches:\ntrain: {len(train_loader)}\nvalidation: {len(val_loader)}')<choose_model_class> | test_data['Survived'] = predictions | Titanic - Machine Learning from Disaster |
12,424,800 | def count_parameters(model: nn.Module):
return sum([np.prod(x.shape)for x in model.parameters() ])
def print_lr_schedule(lr: float, decay: float, num_epochs=20):
print('\nlearning-rate schedule:')
for i in range(num_epochs):
if i % 2 == 0:
print(f'{i}\t{lr:.6f}')
lr = lr* decay
net = EfficientNet.from_name('efficientnet-b0')
net.load_state_dict(torch.load('../input/efficientnet-pytorch/efficientnet-b0-08094119.pth'))
num_in_features = net._fc.in_features
net._fc = nn.Linear(num_in_features, 1)
print(f'number of parameters: {count_parameters(net)}')
net.train()
net.cuda()
loss_function = nn.MSELoss()
lr = 0.0015
lr_decay = 0.97
optimizer = torch.optim.Adam(net.parameters() , lr=lr, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
print_lr_schedule(lr, lr_decay )<train_model> | test_data[['PassengerId', 'Survived']].to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
6,637,182 | %%time
best_epoch_score = np.inf
print('epoch\ttrain-MSE\tval-MSE\tq-kappa\tlr\t\ttime [min]')
print('------------------------------------------------------------------')
for epoch in range(25):
start = time.time()
train_loss = []
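# one optimization pass over the training batches; the validation pass below tracks MSE and quadratic-weighted kappa on rounded predictions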
for i,(X, y, id_)in enumerate(train_loader):
net.train()
optimizer.zero_grad()
out = net(X.cuda())
loss = loss_function(out, y.float().cuda().view(-1, 1))
train_loss.append(loss.item())
loss.backward()
optimizer.step()
validation_loss = []
predictions = np.array([])
truth = np.array([])
for X, y, id_ in val_loader:
net.eval()
out = net(X.cuda())
loss = loss_function(out, y.float().cuda().view(-1, 1))
validation_loss.append(loss.item())
predictions = np.append(predictions, out.detach().cpu().numpy())
truth = np.append(truth, y.detach().cpu().numpy().astype(int))
current_lr = optimizer.param_groups[0]['lr']
scheduler.step()
qk = cohen_kappa_score(predictions.round().astype(int), truth, weights='quadratic')
duration =(time.time() - start)/ 60
print(f'{epoch}:\t{np.mean(train_loss):.4f}\t\t{np.mean(validation_loss):.4f}\t{qk:.4f}\t{current_lr:.6f}\t{duration:.2f}')
if np.mean(validation_loss)< best_epoch_score:
torch.save(net.state_dict() , 'state_dict_best.pt')
best_epoch_score = np.mean(validation_loss)
best_epoch = epoch
print(f'epoch with best validation-score: {best_epoch}')
plt.hist(predictions, bins=5)
plt.xlim(-1, 5)
plt.title('distribution of predictions\n(before rounding)')
plt.show()
plt.hist(train_df.diagnosis.values, bins=5)
plt.xlim(-1, 5)
plt.title('distribution of labels')
plt.show()<load_from_csv> | sns.set(style="ticks", context="talk")
| Titanic - Machine Learning from Disaster |
6,637,182 | test_dir = join(DATA_DIR, 'test_images')
test_df = pd.read_csv(join(DATA_DIR, 'test.csv'))
test_df.head(3 )<categorify> | testing = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
target = 'Survived'
test = testing.copy()
test.info()
print('-'*70)
train.info()
print('-'*70)
train.tail(10 ) | Titanic - Machine Learning from Disaster |
6,637,182 | def sample_images(train_dir: str, test_dir: str, n=10):
train_files = choice(os.listdir(train_dir), size=n)
test_files = choice(os.listdir(test_dir), size=n)
images = []
for train_f, test_f in zip(train_files, test_files):
train_img = Image.open(join(train_dir, train_f))
test_img = Image.open(join(test_dir, test_f))
images.append(( train_img, test_img))
def show_image(i):
train_image, test_image = images[i]
fig,(ax1, ax2)= plt.subplots(1, 2)
fig.set_size_inches(20, 10)
ax1.imshow(train_image)
ax2.imshow(test_image)
ax1.set_title('train')
ax2.set_title('test')
plt.show()
return interactive(show_image, i=(0, n-1))
sample_images(train_dir=train_dir,
test_dir=test_dir,
n=10)
<categorify> | print(train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False ).round(2),
'\n',
train[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False ).round(2)) | Titanic - Machine Learning from Disaster |
6,637,182 | test_transform = transforms.Compose([
transforms.Resize(( 256, 256)) ,
transforms.RandomHorizontalFlip() ,
transforms.RandomRotation(( -20, 20)) ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_ds = Diabetic_Retionopathy_Data(test_dir,
test_df,
transform=test_transform,
train=False)
test_loader = DataLoader(test_ds, batch_size=batchsize)
X, _ = next(iter(test_loader))
print(f'batch-dimension:\nX = {X.shape}')<find_best_params> | train['agebucket'] = pd.cut(train['Age'], 5)
test['agebucket'] = pd.cut(test['Age'], 5)
train[['agebucket', 'Survived']].groupby(['agebucket'] ).mean().sort_values(by='agebucket', ascending=True ).round(2 ) | Titanic - Machine Learning from Disaster |
6,637,182 | net.load_state_dict(torch.load('state_dict_best.pt'))
net.eval()
net.cuda()
id2prediction = {}
for i,(X, id_)in enumerate(test_loader):
out = net(X.cuda())
preds = out.detach().cpu().numpy().ravel()
id2prediction = {**id2prediction, **dict(zip(id_, preds.round().astype(int ).tolist())) }<save_to_csv> | for dataset in [train, test]:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train.head() | Titanic - Machine Learning from Disaster |
6,637,182 | submission_df = pd.read_csv(join(DATA_DIR, 'sample_submission.csv'))
submission_df.diagnosis = submission_df.id_code.map(id2prediction)
submission_df.diagnosis = submission_df.diagnosis.map(lambda p: max(p, 0))
submission_df.diagnosis = submission_df.diagnosis.map(lambda p: min(p, 4))
submission_df.to_csv('submission.csv', index=False)
display(submission_df.head() )<install_modules> | print(train[['Family', 'Survived']].groupby(['Family'], as_index=False ).mean().sort_values(by='Survived', ascending=False ).round(2),
'\n',
train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean().sort_values(by='Survived', ascending=False ).round(2)) | Titanic - Machine Learning from Disaster |
6,637,182 | !pip install -U '../input/install/efficientnet-0.0.3-py2.py3-none-any.whl'<load_from_csv> | train['isalone'] = [1 if x == 1 else 0 for x in train['Family']]
test['isalone'] = [1 if x == 1 else 0 for x in test['Family']]
train[['isalone', 'Survived']].groupby(['isalone'] ).mean().sort_values(by='Survived', ascending=False ).round(2 ) | Titanic - Machine Learning from Disaster |
6,637,182 | TEST_IMG_PATH = '../input/aptos2019-blindness-detection/test_images/'
test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print(test_df.shape)
original_names = test_df['id_code'].values
test_df['id_code'] = test_df['id_code'] + ".png"
test_df['diagnosis'] = np.zeros(test_df.shape[0])
display(test_df.head() )<choose_model_class> | dummy_features = ['Sex','Title', 'isalone']
drop_features = ['Embarked', 'PassengerId', 'Ticket', 'Name', 'Cabin','Parch','SibSp', 'agebucket']
train = pd.concat([train, pd.get_dummies(train[dummy_features])], axis = 1, sort = False)
train.drop(columns = train[dummy_features], inplace = True)
train.drop(columns = train[drop_features], inplace = True)
test = pd.concat([test, pd.get_dummies(test[dummy_features])], axis = 1, sort = False)
test.drop(columns = test[dummy_features], inplace = True)
test.drop(columns = test[drop_features], inplace = True)
train.tail() | Titanic - Machine Learning from Disaster |
6,637,182 | HEIGHT = 300
WIDTH = 300
COEFF = [0.5,1.5,2.5,3.5]
efficientnetb3 = EfficientNetB3(
weights=None,
input_shape=(HEIGHT,WIDTH,3),
include_top=False
)
def build_model() :
model = Sequential()
model.add(efficientnetb3)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(5, activation=elu))
model.add(layers.Dense(1, activation="linear"))
return model
model = build_model()
model.load_weights('../input/aptos-3-best-models/best_weights/val_model_1.h5')
model.summary()<init_hyperparams> | y = train[target]
x = train.drop(columns = target)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state = 42 ) | Titanic - Machine Learning from Disaster |
6,637,182 | tta_steps = 4
predictions = []
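# test-time augmentation: run the test set through several random augmentations and average the raw predictions before thresholding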
for i in tqdm(range(tta_steps)) :
test_generator = ImageDataGenerator(rescale=1./255,
horizontal_flip=True,
rotation_range= 90,
vertical_flip=True,
brightness_range=(0.5,2),
zoom_range= 0.2,
fill_mode='constant',
cval = 0 ).flow_from_dataframe(test_df,
x_col='id_code',
y_col = 'diagnosis',
directory = TEST_IMG_PATH,
target_size=(WIDTH, HEIGHT),
batch_size=1,
class_mode='other',
shuffle = False,
preprocessing_function=preprocess_image)
preds = model.predict_generator(test_generator, steps = test_df.shape[0])
predictions.append(preds)
del test_generator
gc.collect()
y_test_1 = np.mean(predictions, axis=0)
for i, pred in enumerate(y_test_1):
if pred < COEFF[0]:
y_test_1[i] = 0
elif pred >= COEFF[0] and pred < COEFF[1]:
y_test_1[i] = 1
elif pred >= COEFF[1] and pred < COEFF[2]:
y_test_1[i] = 2
elif pred >= COEFF[2] and pred < COEFF[3]:
y_test_1[i] = 3
else:
y_test_1[i] = 4
y_test_1 = y_test_1.flatten()<set_options> | RF = ensemble.RandomForestClassifier()
RF_params = {
'n_estimators':[n for n in range(60,140,10)],
'max_depth':[n for n in range(3, 6)],
'max_features' : ['sqrt', 'log2', None],
'random_state' : [42]
}
RF_model = GridSearchCV(RF, param_grid = RF_params, cv = 5, n_jobs = -1 ).fit(x_train, y_train)
print("Best Hyper Parameters:",RF_model.best_params_)
RF_probs = RF_model.predict_proba(x_test)
RF_probs = RF_probs[:, 1]
RF_auc = roc_auc_score(y_test, RF_probs)
print('AUC: %.3f' % RF_auc)
RF_predictions = RF_model.predict(x_test)
RF_accuracy = accuracy_score(y_test, RF_predictions)
print("RF accuracy: %.3f" % RF_accuracy)
RF_fpr, RF_tpr, RF_thresholds = roc_curve(y_test, RF_probs)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.plot(RF_fpr, RF_tpr, color = 'tab:green')
plt.show() | Titanic - Machine Learning from Disaster |
6,637,182 | del model
gc.collect()<choose_model_class> | GBT = ensemble.GradientBoostingClassifier()
GBT_params = {
'n_estimators':[n for n in range(180, 240, 20)],
'max_depth':[n for n in range(3, 6)],
'learning_rate': [0.1, 0.25, 0.5],
'random_state' : [42]
}
GBT_model = GridSearchCV(GBT, param_grid = GBT_params, cv = 5, n_jobs = -1)
GBT_model.fit(x_train, y_train)
print("Best Hyper Parameters:",GBT_model.best_params_)
GBT_probs = GBT_model.predict_proba(x_test)
GBT_probs = GBT_probs[:, 1]
GBT_auc = roc_auc_score(y_test, GBT_probs)
print('AUC: %.3f' % GBT_auc)
GBT_predictions = GBT_model.predict(x_test)
GBT_accuracy = accuracy_score(y_test, GBT_predictions)
print("GBT accuracy: %.3f" % GBT_accuracy)
GBT_fpr, GBT_tpr, GBT_thresholds = roc_curve(y_test, GBT_probs)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.plot(GBT_fpr, GBT_tpr, color = 'tab:orange')
plt.show() | Titanic - Machine Learning from Disaster |
6,637,182 | HEIGHT = 320
WIDTH = 320
COEFF = [0.53164905, 1.37748383, 2.60330927, 3.40191179]
def build_model() :
efficientnetb3 = EfficientNetB3(
weights=None,
input_shape=(HEIGHT,WIDTH,3),
include_top=False
)
model = Sequential()
model.add(efficientnetb3)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dropout(0.5))
model.add(layers.BatchNormalization())
model.add(layers.Dense(5, activation=elu))
model.add(layers.Dense(1, activation="linear"))
return model
model = build_model()
model.load_weights('../input/aptos-3-best-models/best_weights/val_model_2.h5')
model.summary()<init_hyperparams> | print("GBT cohen_kappa_score: %.3f" % cohen_kappa_score(y_test, GBT_predictions))
print("RF cohen_kappa_score: %.3f" % cohen_kappa_score(y_test, RF_predictions))
| Titanic - Machine Learning from Disaster |
6,637,182 | tta_steps = 4
predictions = []
for i in tqdm(range(tta_steps)) :
test_generator = ImageDataGenerator(rescale=1./255,
horizontal_flip=True,
rotation_range= 90,
vertical_flip=True,
brightness_range=(0.5,2),
zoom_range= 0.2,
fill_mode='constant',
preprocessing_function=preprocess_image,
cval = 0 ).flow_from_dataframe(test_df,
x_col='id_code',
y_col = 'diagnosis',
directory = TEST_IMG_PATH,
target_size=(WIDTH, HEIGHT),
batch_size=1,
class_mode='other',
shuffle = False)
preds = model.predict_generator(test_generator, steps = test_df.shape[0])
predictions.append(preds)
del test_generator
gc.collect()
y_test_2 = np.mean(predictions, axis=0)
for i, pred in enumerate(y_test_2):
if pred < COEFF[0]:
y_test_2[i] = 0
elif pred >= COEFF[0] and pred < COEFF[1]:
y_test_2[i] = 1
elif pred >= COEFF[1] and pred < COEFF[2]:
y_test_2[i] = 2
elif pred >= COEFF[2] and pred < COEFF[3]:
y_test_2[i] = 3
else:
y_test_2[i] = 4
y_test_2 = y_test_2.flatten()<set_options> | print("GBT", classification_report(y_test, GBT_predictions))
print("-"*100)
print("RF", classification_report(y_test, RF_predictions)) | Titanic - Machine Learning from Disaster |
6,637,182 | del model
gc.collect()<define_variables> | predict_RF = RF_model.predict(test)
predict_GBT = GBT_model.predict(test)
submit_RF = pd.DataFrame({'PassengerId':testing['PassengerId'],'Survived':predict_RF})
submit_GBT = pd.DataFrame({'PassengerId':testing['PassengerId'],'Survived':predict_GBT})
filename_RF = 'Titanic Prediction RF.csv'
submit_RF.to_csv(filename_RF,index=False)
print('Saved file: ' + filename_RF)
filename_GBT = 'Titanic Prediction GBT.csv'
submit_GBT.to_csv(filename_GBT,index=False)
print('Saved file: ' + filename_GBT ) | Titanic - Machine Learning from Disaster |
7,481,879 | tta_steps = 3
predictions = []
for i in tqdm(range(tta_steps)) :
test_generator = ImageDataGenerator(horizontal_flip=True,
vertical_flip=True,
brightness_range=(0.5,2),
zoom_range= 0.2,
fill_mode='constant',
cval = 0 ).flow_from_dataframe(test_df,
x_col='id_code',
y_col = 'diagnosis',
directory = TEST_IMG_PATH,
target_size=(WIDTH, HEIGHT),
batch_size=1,
class_mode='other',
shuffle = False,
preprocessing_function=preprocess_image)
preds = model.predict_generator(test_generator, steps = test_df.shape[0])
predictions.append(preds)
del test_generator
gc.collect()
y_test_3 = np.mean(predictions, axis=0)
y_test_3 = y_test_3 > BEST_TRESHOLD
y_test_3 = y_test_3.astype(int ).sum(axis=1)- 1
y_test_3 = y_test_3.flatten()<set_options> | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder | Titanic - Machine Learning from Disaster |
7,481,879 | K.clear_session()
cuda.select_device(0)
cuda.close()<set_options> | def extract(m):
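# pull the title out of names formatted "Surname, Title. Given names"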
m = m.split(',')[1]
m = m.split('.')[0]
return m[1:] | Titanic - Machine Learning from Disaster |
7,481,879 | ! nvidia-smi<set_options> | path = '/kaggle/input/titanic/train.csv'
df = pd.read_csv(path)
df['Name'] = df['Name'].apply(extract)
df['Name'] = df['Name'].apply(lambda x: x if x in ['Mr','Mrs','Miss','Master'] else 'Others')
df['Parch'] = df['Parch'].apply(lambda x: x if x in [0,1,2] else 4.5)
| Titanic - Machine Learning from Disaster |
7,481,879 | %reload_ext autoreload
%autoreload 2
%matplotlib inline
warnings.filterwarnings("ignore")
%matplotlib inline
warnings.filterwarnings('ignore')
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
'num_classes', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
GlobalParams.__new__.__defaults__ =(None,)* len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ =(None,)* len(BlockArgs._fields)
def relu_fn(x):
return x * torch.sigmoid(x)
def round_filters(filters, global_params):
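# scale the channel count by the width multiplier and round to a multiple of depth_divisor, never dropping below 90% of the target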
multiplier = global_params.width_coefficient
if not multiplier:
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2)// divisor * divisor)
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, global_params):
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
def drop_connect(inputs, p, training):
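# stochastic depth: drop the whole residual branch per sample with probability p and rescale survivors by the keep probability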
if not training: return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
def get_same_padding_conv2d(image_size=None):
if image_size is None:
return Conv2dDynamicSamePadding
else:
return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
self.stride = self.stride if len(self.stride)== 2 else [self.stride[0]]*2
def forward(self, x):
ih, iw = x.size() [-2:]
kh, kw = self.weight.size() [-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max(( oh - 1)* self.stride[0] +(kh - 1)* self.dilation[0] + 1 - ih, 0)
pad_w = max(( ow - 1)* self.stride[1] +(kw - 1)* self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self.stride = self.stride if len(self.stride)== 2 else [self.stride[0]] * 2
assert image_size is not None
ih, iw = image_size if type(image_size)== list else [image_size, image_size]
kh, kw = self.weight.size() [-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max(( oh - 1)* self.stride[0] +(kh - 1)* self.dilation[0] + 1 - ih, 0)
pad_w = max(( ow - 1)* self.stride[1] +(kw - 1)* self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
self.static_padding = nn.ZeroPad2d(( pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class Identity(nn.Module):
def __init__(self,):
super(Identity, self ).__init__()
def forward(self, input):
return input
def efficientnet_params(model_name):
params_dict = {
'efficientnet-b0':(1.0, 1.0, 224, 0.2),
'efficientnet-b1':(1.0, 1.1, 240, 0.2),
'efficientnet-b2':(1.1, 1.2, 260, 0.3),
'efficientnet-b3':(1.2, 1.4, 300, 0.3),
'efficientnet-b4':(1.4, 1.8, 380, 0.4),
'efficientnet-b5':(1.6, 2.2, 456, 0.4),
'efficientnet-b6':(1.8, 2.6, 528, 0.5),
'efficientnet-b7':(2.0, 3.1, 600, 0.5),
}
return params_dict[model_name]
class BlockDecoder(object):
@staticmethod
def _decode_block_string(block_string):
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits)>= 2:
key, value = splits[:2]
options[key] = value
assert(( 's' in options and len(options['s'])== 1)or
(len(options['s'])== 2 and options['s'][0] == options['s'][1]))
return BlockArgs(
kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
id_skip=('noskip' not in block_string),
se_ratio=float(options['se'])if 'se' in options else None,
stride=[int(options['s'][0])])
@staticmethod
def _encode_block_string(block):
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
's%d%d' %(block.strides[0], block.strides[1]),
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters
]
if 0 < block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
if block.id_skip is False:
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
drop_connect_rate=0.2, image_size=None, num_classes=1000):
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
drop_connect_rate=drop_connect_rate,
num_classes=num_classes,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None,
image_size=image_size,
)
return blocks_args, global_params
def get_model_params(model_name, override_params):
if model_name.startswith('efficientnet'):
w, d, s, p = efficientnet_params(model_name)
blocks_args, global_params = efficientnet(
width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
else:
raise NotImplementedError('model name is not pre-defined: %s' % model_name)
if override_params:
global_params = global_params._replace(**override_params)
return blocks_args, global_params
url_map = {
'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet-b0-08094119.pth',
'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet-b1-dbc7070a.pth',
'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet-b2-27687264.pth',
'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet-b3-c8376fa2.pth',
'efficientnet-b4': 'http://storage.googleapis.com/public-models/efficientnet-b4-e116e8b3.pth',
'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet-b5-586e6cc6.pth',
}
def load_pretrained_weights(model, model_name, load_fc=True):
state_dict = model_zoo.load_url(url_map[model_name])
if load_fc:
model.load_state_dict(state_dict)
else:
state_dict.pop('_fc.weight')
state_dict.pop('_fc.bias')
res = model.load_state_dict(state_dict, strict=False)
assert str(res.missing_keys)== str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
print('Loaded pretrained weights for {}'.format(model_name))
class MBConvBlock(nn.Module):
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se =(self._block_args.se_ratio is not None)and(0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
inp = self._block_args.input_filters
oup = self._block_args.input_filters * self._block_args.expand_ratio
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup,
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self.has_se:
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
final_oup = self._block_args.output_filters
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
x = inputs
if self._block_args.expand_ratio != 1:
x = relu_fn(self._bn0(self._expand_conv(inputs)))
x = relu_fn(self._bn1(self._depthwise_conv(x)))
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed)* x
x = self._bn2(self._project_conv(x))
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs
return x
class EfficientNet(nn.Module):
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args)> 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
in_channels = 3
out_channels = round_filters(32, self._global_params)
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
self._blocks.append(MBConvBlock(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params))
in_channels = block_args.output_filters
out_channels = round_filters(1280, self._global_params)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
self._dropout = self._global_params.dropout_rate
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
def extract_features(self, inputs):
x = relu_fn(self._bn0(self._conv_stem(inputs)))
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx)/ len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
x = relu_fn(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
x = self.extract_features(inputs)
x = F.adaptive_avg_pool2d(x, 1 ).squeeze(-1 ).squeeze(-1)
if self._dropout:
x = F.dropout(x, p=self._dropout, training=self.training)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return EfficientNet(blocks_args, global_params)
@classmethod
def from_pretrained(cls, model_name, num_classes=1000):
model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes})
return model
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):
num_models = 4 if also_need_pretrained_weights else 8
valid_models = ['efficientnet_b'+str(i)for i in range(num_models)]
if model_name.replace('-','_')not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
def get_df() :
base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir,'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir,'{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1 ).reset_index(drop=True)
test_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
return df, test_df
df, test_df = get_df()
def qk(y_pred, y):
return torch.tensor(cohen_kappa_score(torch.round(y_pred), y, weights='quadratic'), device='cuda:0')
class OptimizedRounder(object):
def __init__(self):
self.coef_ = 0
def _kappa_loss(self, coef, X, y):
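# bucket the continuous predictions with the candidate cut points, then return negated quadratic-weighted kappa so the optimizer can minimize it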
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic')
return -ll
def fit(self, X, y):
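# Nelder-Mead search over the four cut points, starting from the naive rounding thresholds 0.5/1.5/2.5/3.5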
loss_partial = partial(self._kappa_loss, X=X, y=y)
initial_coef = [0.5, 1.5, 2.5, 3.5]
self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
print(-loss_partial(self.coef_['x']))
def predict(self, X, coef):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
return X_p
def coefficients(self):
return self.coef_['x']<load_pretrained> | df = df.fillna(df.mean())
df['Embarked'] = df['Embarked'].apply(lambda x : x if(x=='C' or x=='Q')else 'S')
df['Embarked'].unique()
print(df.count())
le = LabelEncoder()
le.fit(df['Sex'])
df['Sex'] = le.transform(df['Sex'])
le.fit(df['Name'])
df['Name'] = le.transform(df['Name'])
le.fit(df['Embarked'])
df['Embarked'] = le.transform(df['Embarked'] ) | Titanic - Machine Learning from Disaster |
7,481,879 | md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1)
!mkdir models
!cp '../input/kaggle-public/abcdef.pth' 'models'<categorify> | features = ['Pclass','Sex','SibSp','Parch','Fare','Embarked','Name']
y = df['Survived']
X = df[features] | Titanic - Machine Learning from Disaster |
7,481,879 | tta = 3
bs = 64
tfms = get_transforms(do_flip=True,flip_vert=True)
sz = 256
data =(ImageList.from_df(df=df,path='./',cols='path')
.split_by_rand_pct(0.2)
.label_from_df(cols='diagnosis',label_cls=FloatList)
.transform(tfms,size=sz,resize_method=ResizeMethod.SQUISH,padding_mode='zeros')
.databunch(bs=bs,num_workers=4)
.normalize(imagenet_stats)
)
learn = Learner(data,
md_ef,
metrics = [qk],
model_dir="models" ).to_fp16()
learn.data.add_test(ImageList.from_df(test_df,
'../input/aptos2019-blindness-detection',
folder='test_images',
suffix='.png'))
learn.load('abcdef');
opt = OptimizedRounder()
preds0,y = learn.get_preds(DatasetType.Test)
sz = 256
data =(ImageList.from_df(df=df,path='./',cols='path')
.split_by_rand_pct(0.2)
.label_from_df(cols='diagnosis',label_cls=FloatList)
.transform(tfms,size=sz,resize_method=ResizeMethod.SQUISH,padding_mode='zeros')
.databunch(bs=bs,num_workers=4)
.normalize(imagenet_stats)
)
learn = Learner(data,
md_ef,
metrics = [qk],
model_dir="models" ).to_fp16()
learn.data.add_test(ImageList.from_df(test_df,
'../input/aptos2019-blindness-detection',
folder='test_images',
suffix='.png'))
preds1,y = learn.get_preds(DatasetType.Test)
sz = 256
data =(ImageList.from_df(df=df,path='./',cols='path')
.split_by_rand_pct(0.2)
.label_from_df(cols='diagnosis',label_cls=FloatList)
.transform(tfms,size=sz,resize_method=ResizeMethod.SQUISH,padding_mode='zeros')
.databunch(bs=bs,num_workers=4)
.normalize(imagenet_stats)
)
learn = Learner(data,
md_ef,
metrics = [qk],
model_dir="models" ).to_fp16()
learn.data.add_test(ImageList.from_df(test_df,
'../input/aptos2019-blindness-detection',
folder='test_images',
suffix='.png'))
preds2,y = learn.get_preds(DatasetType.Test)
preds =(preds0 + preds1 + preds2)/tta<predict_on_test> | for i in range(1):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = GradientBoostingClassifier(n_estimators = 200, max_depth = 3)
model.fit(X_train,y_train)
print(i,(model.predict(X_train)-y_train==0 ).sum() *100/len(y_train))
print((model.predict(X_test)-y_test==0).sum()*100/len(y_test), '\n') | Titanic - Machine Learning from Disaster |
7,481,879 | y_test_4 = opt.predict(preds, coef=[0.5, 1.5, 2.5, 3.5])
y_test_4 = y_test_4.flatten()<train_model> | df3 = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
df3 | Titanic - Machine Learning from Disaster |
7,481,879 | def train_model(tfms,bs,sz):
data =(ImageList.from_df(df=df,path='./',cols='path')
.split_by_rand_pct(0.2)
.label_from_df(cols='diagnosis',label_cls=FloatList)
.transform(tfms,size=sz,resize_method=ResizeMethod.SQUISH,padding_mode='reflection')
.databunch(bs=bs,num_workers=4)
.normalize(imagenet_stats)
)
learn = Learner(data,
md_ef,
metrics = [qk],
model_dir="models" ).to_fp16()
learn.data.add_test(ImageList.from_df(test_df,
'../input/aptos2019-blindness-detection',
folder='test_images',
suffix='.png'))
learn.load('abcdef')
preds,y = learn.get_preds(DatasetType.Test)
return preds,y<predict_on_test> | model.fit(X,y)
df2 = pd.read_csv('/kaggle/input/titanic/test.csv')
df2['Name'] = df2['Name'].apply(extract)
df2['Name'] = df2['Name'].apply(lambda x: x if x in ['Mr','Mrs','Miss','Master'] else 'Others')
df2['Parch'] = df2['Parch'].apply(lambda x: x if x in [0,1,2] else 4.5)
df2 = df2.fillna(df.mean())
df2['Embarked'] = df2['Embarked'].apply(lambda x : x if(x=='C' or x=='Q')else 'S')
df2['Embarked'].unique()
le = LabelEncoder()
le.fit(df2['Sex'])
df2['Sex'] = le.transform(df2['Sex'])
le.fit(df2['Name'])
df2['Name'] = le.transform(df2['Name'])
le.fit(df2['Embarked'])
df2['Embarked'] = le.transform(df2['Embarked'])
X_sub = df2[features]
df2['Survived'] = model.predict(X_sub)
sub = pd.concat([df2['PassengerId'],df2['Survived']],axis = 1)
sub | Titanic - Machine Learning from Disaster |
7,481,879 | <compute_test_metric><EOS> | sub.to_csv('Submission.csv',index=False ) | Titanic - Machine Learning from Disaster |
1,759,840 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<categorify> | sns.set()
%matplotlib inline
warnings.filterwarnings('ignore')
| Titanic - Machine Learning from Disaster |
1,759,840 | COEFF = [0.5, 1.5, 2.5, 3.5]
for i, pred in enumerate(y_test):
if pred < COEFF[0]:
y_test[i] = 0
elif pred >= COEFF[0] and pred < COEFF[1]:
y_test[i] = 1
elif pred >= COEFF[1] and pred < COEFF[2]:
y_test[i] = 2
elif pred >= COEFF[2] and pred < COEFF[3]:
y_test[i] = 3
else:
y_test[i] = 4<save_to_csv> | path_train = '../input/train.csv'
path_test = '../input/test.csv' | Titanic - Machine Learning from Disaster |
1,759,840 | test_df['diagnosis'] = y_test.astype(int)
test_df['id_code'] = test_df['id_code'].str.replace(r'\.png$', '')
test_df.to_csv('submission.csv',index=False)
print("Submission Distribution:")
print(round(test_df.diagnosis.value_counts() /len(test_df)*100,4))<define_variables> | train_df_raw = pd.read_csv(path_train)
train_df_raw.head() | Titanic - Machine Learning from Disaster |
1,759,840 | DEVICE = torch.device("cuda:0")
DATA_SOURCE = os.path.join("..", "input", "aptos2019-blindness-detection")
MODEL_SOURCE = os.path.join("..", "input", "densenet161-1-18-v2-pth")
MODEL_SIZE = 224<prepare_x_and_y> | draw_missing_data_table(train_df_raw ) | Titanic - Machine Learning from Disaster |
1,759,840 | def crop_image(img,tol=7):
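# trim uninformative dark borders: quantile-normalize a blurred grayscale copy, then keep only rows/columns whose mean intensity exceeds tol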
w, h = img.shape[1],img.shape[0]
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
gray_img = cv2.blur(gray_img,(5,5))
shape = gray_img.shape
gray_img = gray_img.reshape(-1,1)
quant = quantile_transform(gray_img, n_quantiles=256, random_state=0, copy=True)
quant =(quant*256 ).astype(int)
gray_img = quant.reshape(shape)
xp =(gray_img.mean(axis=0)>tol)
yp =(gray_img.mean(axis=1)>tol)
x1, x2 = np.argmax(xp), w-np.argmax(np.flip(xp))
y1, y2 = np.argmax(yp), h-np.argmax(np.flip(yp))
if x1 >= x2 or y1 >= y2 :
return img
else:
img1=img[y1:y2,x1:x2,0]
img2=img[y1:y2,x1:x2,1]
img3=img[y1:y2,x1:x2,2]
img = np.stack([img1,img2,img3],axis=-1)
return img
def process_image(image, size=512):
image = cv2.resize(image,(size,int(size*image.shape[0]/image.shape[1])))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
try:
image = crop_image(image, tol=15)
except Exception as e:
image = image
print(str(e))
return image<load_from_csv> | def preprocess_data(df):
processed_df = df
processed_df['Embarked'].fillna('C', inplace=True)
processed_df['Age'] = processed_df.groupby(['Pclass','Sex','Parch','SibSp'])['Age'].transform(lambda x: x.fillna(x.mean()))
processed_df['Age'] = processed_df.groupby(['Pclass','Sex','Parch'])['Age'].transform(lambda x: x.fillna(x.mean()))
processed_df['Age'] = processed_df.groupby(['Pclass','Sex'])['Age'].transform(lambda x: x.fillna(x.mean()))
processed_df['Fare'] = processed_df['Fare'].interpolate()
processed_df['Cabin'].fillna('U', inplace=True)
processed_df['Title'] = pd.Series((name.split('.')[0].split(',')[1].strip() for name in processed_df['Name']), index=processed_df.index)
processed_df['Title'] = processed_df['Title'].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
processed_df['Title'] = processed_df['Title'].replace(['Mlle', 'Ms'], 'Miss')
processed_df['Title'] = processed_df['Title'].replace('Mme', 'Mrs')
processed_df['Title'] = processed_df['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5})
processed_df['Age'] = processed_df.groupby(['Title'])['Age'].transform(lambda x: x.fillna(x.mean()))
processed_df['Sex'] = processed_df['Sex'].map({'male': 0, 'female': 1})
processed_df['Embarked'] = processed_df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})
processed_df['FamillySize'] = processed_df['SibSp'] + processed_df['Parch'] + 1
processed_df.loc[(processed_df['FamillySize'] > 1) & (processed_df['FamillySize'] < 5), 'FamillySize'] = 2
processed_df.loc[processed_df['FamillySize'] > 5, 'FamillySize'] = 3
processed_df['IsAlone'] = np.where(processed_df['FamillySize']!=1, 0, 1)
processed_df['IsChild'] = processed_df['Age'] < 18
processed_df['IsChild'] = processed_df['IsChild'].astype(int)
processed_df['Cabin'] = processed_df['Cabin'].str[:1]
processed_df['Cabin'] = processed_df['Cabin'].map({cabin: p for p, cabin in enumerate(set(cab for cab in processed_df['Cabin'])) })
processed_df['TicketSurvivor'] = pd.Series(0, index=processed_df.index)
tickets = processed_df['Ticket'].value_counts().to_dict()
for t, occ in tickets.items() :
if occ != 1:
table = train_df_raw['Survived'][train_df_raw['Ticket'] == t]
if sum(table)!= 0:
processed_df.loc[processed_df['Ticket'] == t, 'TicketSurvivor'] = 1
processed_df = processed_df.drop(['Name', 'Ticket', 'PassengerId'], axis=1)
return processed_df | Titanic - Machine Learning from Disaster |
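The title extraction inside preprocess_data walks each name with split('.') and split(','); a vectorized sketch of the same step, relying only on the "Last, Title. First" name format:

# capture the token between the comma and the first period, e.g. 'Braund, Mr. Owen' -> 'Mr'
processed_df['Title'] = processed_df['Name'].str.extract(r',\s*([^.]+)\.', expand=False)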
1,759,840 | class RetinopathyDataset(Dataset):
def __init__(self, transform, is_test=False):
self.transform = transform
self.base_transform = transforms.Resize(( MODEL_SIZE, MODEL_SIZE))
self.is_test = is_test
if not os.path.exists("cache"): os.mkdir("cache")
if is_test : file = "test.csv"
else : file = "train.csv"
csv_file = os.path.join(DATA_SOURCE, file)
df = pd.read_csv(csv_file)
self.data = df.reset_index(drop=True)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if self.is_test : archive = "test_images"
else : archive = "train_images"
folder = os.path.join(DATA_SOURCE, archive)
code = str(self.data.loc[idx, 'id_code'])
file = code + ".png"
cache_path = os.path.join("cache",code+".png")
cached = os.path.exists(cache_path)
if not cached :
path = os.path.join(folder, file)
image = cv2.imread(path)
image = process_image(image)
imgpil = Image.fromarray(image)
imgpil = self.base_transform(imgpil)
imgpil.save(cache_path,"PNG")
imgpil = Image.open(cache_path)
img_tensor = self.transform(imgpil)
if self.is_test : return {'image': img_tensor}
else :
label = self.data.loc[idx, "diagnosis"]
return {'image': img_tensor, 'label': label}
def get_df(self):
return self.data<split> | train_df = train_df_raw.copy()
X = train_df.drop(['Survived'], axis=1)
Y = train_df['Survived']
X = preprocess_data(X)
sc = StandardScaler()
X = pd.DataFrame(sc.fit_transform(X.values), index=X.index, columns=X.columns)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
X_train.head() | Titanic - Machine Learning from Disaster |
1,759,840 | NUM_FOLDS = 5
data_augmentation = transforms.Compose([
transforms.RandomRotation(( -15, 15)) ,
transforms.Resize(224),
transforms.RandomHorizontalFlip() ,
transforms.RandomVerticalFlip() ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
DATA = RetinopathyDataset(data_augmentation)
df = DATA.get_df()
skf = StratifiedKFold(n_splits=NUM_FOLDS)
folds_generator = skf.split(df.index.values, df.diagnosis.values)
data_train, data_eval = [], []
for t, e in folds_generator:
data_train.append(t)
data_eval.append(e )<load_pretrained> | lg = LogisticRegression(solver='lbfgs', random_state=42)
lg.fit(X_train, Y_train)
logistic_prediction = lg.predict(X_test)
score = metrics.accuracy_score(Y_test, logistic_prediction)
display_confusion_matrix(Y_test, logistic_prediction, score=score ) | Titanic - Machine Learning from Disaster |
1,759,840 | def get_dataloader_for_fold(n, data, train_data, eval_data, batch_size):
train_sampler = SubsetRandomSampler(train_data[n])
valid_sampler = SubsetRandomSampler(eval_data[n])
data_loader_train = torch.utils.data.DataLoader(data,
batch_size=batch_size, drop_last=False,
sampler=train_sampler)
data_loader_eval = torch.utils.data.DataLoader(data,
batch_size=batch_size, drop_last=False,
sampler=valid_sampler)
return data_loader_train, data_loader_eval<choose_model_class> | dt = DecisionTreeClassifier(min_samples_split=15, min_samples_leaf=20, random_state=42)
dt.fit(X_train, Y_train)
dt_prediction = dt.predict(X_test)
score = metrics.accuracy_score(Y_test, dt_prediction)
display_confusion_matrix(Y_test, dt_prediction, score=score ) | Titanic - Machine Learning from Disaster |
1,759,840 | class Classificator0(nn.Module):
def __init__(self, size=128):
super(Classificator0, self ).__init__()
self.size = size
self.network = nn.Sequential(
nn.BatchNorm1d(size),
nn.Dropout(p=0.3),
nn.Linear(in_features=size, out_features=5, bias=True),
)
def forward(self, x):
return self.network(x)
class Classificator(nn.Module):
def __init__(self, size=128):
super(Classificator, self ).__init__()
self.size = size
self.network = nn.Sequential(
nn.BatchNorm1d(size),
nn.Dropout(p=0.25),
nn.Linear(in_features=size, out_features=size, bias=True),
nn.ReLU() ,
nn.BatchNorm1d(size),
nn.Dropout(p=0.5),
nn.Linear(in_features=size, out_features=5, bias=True),
)
def forward(self, x):
return self.network(x )<choose_model_class> | svm = SVC(gamma='auto', random_state=42)
svm.fit(X_train, Y_train)
svm_prediction = svm.predict(X_test)
score = metrics.accuracy_score(Y_test, svm_prediction)
display_confusion_matrix(Y_test, svm_prediction, score=score ) | Titanic - Machine Learning from Disaster |
1,759,840 | def get_base_model() :
model = torchvision.models.densenet161(pretrained=False)
in_features = model.classifier.in_features
model.classifier = Classificator0(in_features)
model_path = os.path.join(MODEL_SOURCE, "densenet161.1.18.v2.pth")
model.load_state_dict(torch.load(model_path))
model.classifier = Classificator(in_features)
model = model.to(DEVICE)
model.eval()
return model<choose_model_class> | rf = RandomForestClassifier(n_estimators=200, random_state=42)
rf.fit(X_train, Y_train)
rf_prediction = rf.predict(X_test)
score = metrics.accuracy_score(Y_test, rf_prediction)
display_confusion_matrix(Y_test, rf_prediction, score=score ) | Titanic - Machine Learning from Disaster |
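get_base_model above loads the checkpoint into a network whose classifier still matches the saved single-layer head (Classificator0) and only then swaps in the larger Classificator; load_state_dict would otherwise fail on mismatched classifier shapes. A condensed sketch of that head-swap pattern, with a hypothetical checkpoint path:

import torch
import torchvision

model = torchvision.models.densenet161(pretrained=False)
model.classifier = Classificator0(model.classifier.in_features)  # head matching the checkpoint
model.load_state_dict(torch.load("checkpoint.pth"))              # hypothetical path
model.classifier = Classificator(model.classifier.size)          # fresh head to fine-tune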
1,759,840 | def train_model(model, optimizer, scheduler, train_data_loader, eval_data_loader,
file_name, num_epochs = 50, patience = 7, prev_loss = 1000.00):
criterion = nn.CrossEntropyLoss()
countdown = patience
best_loss = 1000.00
since = time.time()
for epoch in range(num_epochs):
running_loss = 0.0
counter = 0
for bi, d in enumerate(train_data_loader):
inputs = d["image"].to(DEVICE, dtype=torch.float)
labels = d["label"].to(DEVICE, dtype=torch.long)
if inputs.shape[0] > 1 :
counter += inputs.size(0)
model.to(DEVICE)
model.train()
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item() * inputs.size(0)
loss_val = running_loss / counter
print("{:7} {:.4f} {:.4f}".format(counter, loss.item() *1, loss_val), end="\r")
epoch_loss = running_loss /(len(train_data_loader)* train_data_loader.batch_size)
time_elapsed = time.time() - since
print(" T{:3}/{:3} loss: {:.4f}({:3.0f}m {:2.0f}s)".format(
epoch, num_epochs - 1, epoch_loss,time_elapsed // 60, time_elapsed % 60))
running_loss = 0.0
counter = 0
for bi, d in enumerate(eval_data_loader):
inputs = d["image"].to(DEVICE, dtype=torch.float)
counter += inputs.size(0)
labels = d["label"].to(DEVICE, dtype=torch.long)
model.to(DEVICE)
model.eval()
with torch.no_grad() :
outputs = model(inputs)
loss = criterion(outputs, labels)
running_loss += loss.item() * inputs.size(0)
loss_val = running_loss / counter
print("{:7} {:.4f} {:.4f}".format(counter, loss.item() *1, loss_val), end="\r")
epoch_loss = running_loss /(len(eval_data_loader)* eval_data_loader.batch_size)
if epoch_loss < best_loss :
best_loss = epoch_loss
if epoch_loss < prev_loss:
torch.save(model.state_dict() , file_name)
prev_loss = epoch_loss
print("*", end="")
else:
print(".", end="")
countdown = patience
else:
print("{:1}".format(countdown), end="")
countdown -= 1
time_elapsed = time.time() - since
print("E{:3}/{:3} loss: {:.4f}({:3.0f}m {:2.0f}s)".format(
epoch, num_epochs - 1, epoch_loss,time_elapsed // 60, time_elapsed % 60))
scheduler.step()
if countdown <= 0 : break
return prev_loss
print("done.")
<load_pretrained> | def build_ann(optimizer='adam'):
ann = Sequential()
ann.add(Dense(units=32, kernel_initializer='glorot_uniform', activation='relu', input_shape=(13,)))
ann.add(Dense(units=64, kernel_initializer='glorot_uniform', activation='relu'))
ann.add(Dropout(rate=0.5))
ann.add(Dense(units=64, kernel_initializer='glorot_uniform', activation='relu'))
ann.add(Dropout(rate=0.5))
ann.add(Dense(units=1, kernel_initializer='glorot_uniform', activation='sigmoid'))
ann.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
return ann | Titanic - Machine Learning from Disaster |
1,759,840 | batch_size = 56
num_round_per_fold = 2
for no in range(NUM_FOLDS):
print("-"*22, "fold",no)
bst_loss = 10000.00
for r in range(num_round_per_fold):
print("-"*11,"round",r)
data_loader_train, data_loader_eval = get_dataloader_for_fold(no,
DATA, data_train, data_eval, batch_size)
model = get_base_model()
plist = [{"params": model.features.denseblock4.parameters() , "lr":0.0001},
{"params": model.classifier.parameters() }]
optimizer = optim.Adam(plist, lr=0.001, amsgrad=True)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [1,5], gamma=0.1, last_epoch=-1)
bst_loss = train_model(model, optimizer, scheduler,
data_loader_train, data_loader_eval,
"tmp"+str(no)+".pth", prev_loss=bst_loss,
num_epochs=13, patience=3)
print("-"*22, "best loss", bst_loss)
print("")
<load_pretrained> | opt = optimizers.Adam(lr=0.001)
ann = build_ann(opt)
history = ann.fit(X_train, Y_train, batch_size=16, epochs=30, validation_data=(X_test, Y_test)) | Titanic - Machine Learning from Disaster |
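This Keras fit runs all 30 epochs, unlike the patience/countdown loop in the PyTorch train_model above; a sketch of the equivalent behaviour via a callback (restore_best_weights assumes Keras >= 2.2.3):

from keras.callbacks import EarlyStopping

es = EarlyStopping(monitor='val_loss', patience=7, restore_best_weights=True)
history = ann.fit(X_train, Y_train, batch_size=16, epochs=30,
                  validation_data=(X_test, Y_test), callbacks=[es])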
1,759,840 | def get_trained_model(no):
extractor = torchvision.models.densenet161(pretrained=False)
in_features = extractor.classifier.in_features
extractor.classifier = Classificator(in_features)
model_path = os.path.join("tmp"+str(no)+".pth")
extractor.load_state_dict(torch.load(model_path))
extractor = extractor.to(DEVICE)
extractor.eval()
return extractor<choose_model_class> | ann_prediction = ann.predict(X_test)
ann_prediction =(ann_prediction > 0.5)
score = metrics.accuracy_score(Y_test, ann_prediction)
display_confusion_matrix(Y_test, ann_prediction, score=score ) | Titanic - Machine Learning from Disaster |
1,759,840 | def get_extractor_model(no):
extractor = get_trained_model(no)
extractor.classifier = nn.Identity()
extractor = extractor.to(DEVICE)
extractor.eval()
return extractor<categorify> | n_folds = 10
cv_score_lg = cross_val_score(estimator=lg, X=X_train, y=Y_train, cv=n_folds, n_jobs=-1)
cv_score_dt = cross_val_score(estimator=dt, X=X_train, y=Y_train, cv=n_folds, n_jobs=-1)
cv_score_svm = cross_val_score(estimator=svm, X=X_train, y=Y_train, cv=n_folds, n_jobs=-1)
cv_score_rf = cross_val_score(estimator=rf, X=X_train, y=Y_train, cv=n_folds, n_jobs=-1)
cv_score_ann = cross_val_score(estimator=KerasClassifier(build_fn=build_ann, batch_size=16, epochs=20, verbose=0),
X=X_train, y=Y_train, cv=n_folds, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
1,759,840 | def get_train_features(data_loader, extractor):
for bi, d in enumerate(data_loader):
print(".", end="")
img_tensor = d["image"].to(DEVICE)
target = d["label"].numpy()
with torch.no_grad() : feature = extractor(img_tensor)
feature = feature.cpu().detach().squeeze(0 ).numpy()
if bi == 0 :
features = feature
targets = target
else :
features = np.concatenate([features, feature], axis=0)
targets = np.concatenate([targets, target], axis=0)
return features, targets<init_hyperparams> | cv_result = {'lg': cv_score_lg, 'dt': cv_score_dt, 'svm': cv_score_svm, 'rf': cv_score_rf, 'ann': cv_score_ann}
cv_data = {model: [score.mean(), score.std()] for model, score in cv_result.items()}
cv_df = pd.DataFrame(cv_data, index=['Mean_accuracy', 'Std_deviation'])
cv_df | Titanic - Machine Learning from Disaster |
1,759,840 | XGBOOST_PARAM = {
"random_state" : 42,
"n_estimators" : 200,
"objective" : "multi:softmax",
"num_class" : 5,
"eval_metric" : "mlogloss",
}<feature_engineering> | class EsemblingClassifier:
def __init__(self, verbose=True):
self.ann = build_ann(optimizer=optimizers.Adam(lr=0.001))
self.rf = RandomForestClassifier(n_estimators=300, max_depth=11, random_state=42)
self.svm = SVC(random_state=42)
self.trained = False
self.verbose = verbose
def fit(self, X, y):
if self.verbose:
print('-------- Fitting models --------')
self.ann.fit(X, y, epochs=30, batch_size=16, verbose=0)
self.rf.fit(X, y)
self.svm.fit(X, y)
self.trained = True
def predict(self, X):
if self.trained == False:
raise NotFittedError('Please train the classifier before making a prediction')
if self.verbose:
print('-------- Making and combining predictions --------')
predictions = list()
pred_ann = self.ann.predict(X)
pred_ann =(pred_ann > 0.5)*1
pred_rf = self.rf.predict(X)
pred_svm = self.svm.predict(X)
for n in range(len(pred_ann)) :
combined = pred_ann[n] + pred_rf[n] + pred_svm[n]
p = 0 if combined == 1 or combined == 0 else 1
predictions.append(p)
return predictions | Titanic - Machine Learning from Disaster |
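The combined-vote rule above (sum three 0/1 predictions, majority wins) is what scikit-learn calls hard voting; a sketch for the two sklearn members, since the Keras net would need the KerasClassifier wrapper used in the cross-validation cell:

from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC

voter = VotingClassifier(
    estimators=[('rf', RandomForestClassifier(n_estimators=300, max_depth=11, random_state=42)),
                ('svm', SVC(random_state=42))],
    voting='hard',
)
voter.fit(X_train, Y_train)  # same interface afterwards: voter.predict(X_test)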
1,759,840 | batch_size = 64
eval_set = []
for no in range(NUM_FOLDS):
print("-"*22, "fold",no)
data_loader_train, data_loader_eval = get_dataloader_for_fold(no,
DATA, data_train, data_eval, batch_size)
extractor = get_extractor_model(no)
print("...........|.............................................|")
features_eval, targets_eval = get_train_features(data_loader_eval,
extractor)
features_train, targets_train = get_train_features(data_loader_train,
extractor)
print("")
xgb_model = xgb.XGBClassifier(**XGBOOST_PARAM)
xgb_model = xgb_model.fit(features_train,targets_train.reshape(-1),
eval_set=[(features_eval, targets_eval.reshape(-1)) ],
early_stopping_rounds=20,
verbose=False)
print("score",xgb_model.evals_result() ["validation_0"]["mlogloss"][-1])
pickle.dump(xgb_model, open("xgb_model_"+str(no), "wb"))<categorify> | ens = EsemblingClassifier()
ens.fit(X_train, Y_train)
ens_prediction = ens.predict(X_test)
score = metrics.accuracy_score(Y_test, ens_prediction)
display_confusion_matrix(Y_test, ens_prediction, score=score ) | Titanic - Machine Learning from Disaster |
1,759,840 | base_transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
DATA.transform = base_transform<categorify> | test_df_raw = pd.read_csv(path_test)
test = test_df_raw.copy()
test = preprocess_data(test)
test = pd.DataFrame(sc.transform(test.values), index=test.index, columns=test.columns)  # reuse the scaler fitted on train; refitting on test leaks
test.head() | Titanic - Machine Learning from Disaster |
1,759,840 | <choose_model_class><EOS> | model_test = EsemblingClassifier()
model_test.fit(X, Y)
prediction = model_test.predict(test)
result_df = test_df_raw.copy()
result_df['Survived'] = prediction
result_df.to_csv('submission.csv', columns=['PassengerId', 'Survived'], index=False ) | Titanic - Machine Learning from Disaster |
7,677,885 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
7,677,885 | base_transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
data_test = RetinopathyDataset(base_transform, is_test=True)
data_loader = torch.utils.data.DataLoader(data_test,
batch_size=16, shuffle=False,
num_workers=0, drop_last=False)
def get_test_features(data_loader, extractor):
for bi, d in enumerate(data_loader):
if bi % 4 == 0 : print(".", end="")
img_tensor = d["image"].to(DEVICE)
with torch.no_grad() : feature = extractor(img_tensor)
feature = feature.cpu().detach().numpy()
if bi == 0 :
features = feature
else :
features = np.concatenate([features, feature], axis=0)
return features<choose_model_class> | train_file_path = ".. /input/titanic/train.csv"
test_file_path = ".. /input/titanic/test.csv"
train_data = pd.read_csv(train_file_path)
test_data = pd.read_csv(test_file_path ) | Titanic - Machine Learning from Disaster |
7,677,885 | print("................................ v")
predictions = np.zeros(( len(data_test),5))
for tta in range(1):
print("............ tta"+str(tta)+"................ ")
for no in range(NUM_FOLDS):
extractor = get_extractor_model(no)
features = get_test_features(data_loader, extractor)
print("",no)
xgb_model = xgb.XGBClassifier()
model_path = os.path.join("xgb_model_"+str(no))
xgb_model = pickle.load(open(model_path, "rb"))
prediction = xgb_model.predict_proba(features)
predictions = predictions + prediction<create_dataframe> | train_data.groupby(by=['Pclass'] ).count() | Titanic - Machine Learning from Disaster |
7,677,885 | batch_size = 8
data_augmentation = transforms.Compose([
transforms.Resize(( MODEL_SIZE, MODEL_SIZE)) ,
transforms.RandomHorizontalFlip() ,
transforms.RandomVerticalFlip() ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
data_test = RetinopathyDataset(data_augmentation, is_test=True)
data_loader = torch.utils.data.DataLoader(data_test,
batch_size=batch_size, shuffle=False,
num_workers=0, drop_last=False )<train_model> | perc = train_data[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False)
perc*100
| Titanic - Machine Learning from Disaster |
7,677,885 | softmax = nn.Softmax(dim=1)
for tta in range(4):
print("............ tta"+str(tta)+"...............")
for no in range(NUM_FOLDS):
model = get_trained_model(no)
batch_slice =(0, 0)
for bi, d in enumerate(data_loader):
if bi %(64//batch_size)== 0 : print(".", end="")
img_tensor = d["image"].to(DEVICE, dtype=torch.float)
batch_slice =(batch_slice[1], batch_slice[1]+img_tensor.size(0))
with torch.no_grad() :
feature = model(img_tensor)
feature = softmax(feature)
predictions[batch_slice[0]:batch_slice[1],:] += \
feature.cpu().detach().squeeze(0 ).numpy()
print("" )<save_to_csv> | def extract_title(name):
for string in name.split() :
if '.' in string:
return string[:-1]
train_data['Title'] = train_data['Name'].apply(lambda n: extract_title(n))
test_data['Title'] = test_data['Name'].apply(lambda n: extract_title(n))
print(test_data['Title'].value_counts(), '\n', train_data['Title'].value_counts())
| Titanic - Machine Learning from Disaster |
7,677,885 | prediction_final = predictions.argmax(axis=1)
csv_file = os.path.join(DATA_SOURCE, "sample_submission.csv")
df = pd.read_csv(csv_file)
df["diagnosis"] = prediction_final
df.to_csv('submission.csv',index=False )<feature_engineering> | for dataframe in [train_data, test_data]:
dataframe['Title'] = dataframe['Title'].replace('Mlle', 'Miss')
dataframe['Title'] = dataframe['Title'].replace('Ms', 'Miss')
dataframe['Title'] = dataframe['Title'].replace('Mme', 'Mrs')
dataframe['Title'] = dataframe['Title'].replace(['Lady', 'Capt', 'Col','Don', 'Dr',
'Major', 'Rev', 'Sir', 'Dona', 'Countess', 'Jonkheer'], 'Other')
dataframe.drop('Name', axis=1, inplace=True)
| Titanic - Machine Learning from Disaster |
7,677,885 | t_start = time.time()<define_variables> | print('% of survived females:', train_data['Survived'][train_data['Sex'] == 'female'].value_counts(normalize = True)[1]*100)
print('% of survived males:', train_data['Survived'][train_data['Sex'] == 'male'].value_counts(normalize = True)[1]*100)
| Titanic - Machine Learning from Disaster |
7,677,885 | IMG_WIDTH = 456
IMG_HEIGHT = 456
CHANNEL = 3
BATCH_SIZE = 4
EPOCHS_OLD_DATA = 10
WARMUP_EPOCHS = 3
NUM_CLASSES = 5
SEED = 2
LEARNING_RATE = 1e-4
WARMUP_LEARNING_RATE = 1e-3
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5<define_variables> | train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
7,677,885 | BASE_DIR = '/kaggle/input/aptos2019-blindness-detection/'
TRAIN_DIR = '/kaggle/input/aptos2019-blindness-detection/train_images'
TEST_DIR = '/kaggle/input/aptos2019-blindness-detection/test_images'
TRAIN_DIR = '/kaggle/input/diabetic-retinopathy-resized/resized_train/resized_train'<load_from_csv> | train_data['FamilySize'] = train_data['SibSp'] + train_data['Parch']
test_data['FamilySize'] = test_data['SibSp'] + test_data['Parch']
train_data['IsAlone'] = train_data['FamilySize'].apply(lambda fs: 1 if fs == 0 else 0)
test_data['IsAlone'] = test_data['FamilySize'].apply(lambda fs: 1 if fs == 0 else 0 ) | Titanic - Machine Learning from Disaster |
7,677,885 | TRAIN_DF = pd.read_csv(BASE_DIR + "train.csv",dtype='object')
TEST_DF = pd.read_csv(BASE_DIR + "test.csv",dtype='object')
TRAIN_DF = pd.read_csv("/kaggle/input/diabetic-retinopathy-resized/trainLabels.csv",dtype='object')
X_COL='id_code'
Y_COL='diagnosis'<rename_columns> | train_data[['FamilySize', 'Survived']].groupby('FamilySize', as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
7,677,885 | TRAIN_DF.columns = ['id_code', 'diagnosis']
<categorify> | train_data.drop(['Parch', 'SibSp'], axis=1, inplace=True)
test_data.drop(['Parch', 'SibSp'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
7,677,885 | def append_file_ext(file_name):
return file_name + ".png"
def append_file_ext_jpeg(file_name):
return file_name.replace(".png",".jpeg" )<feature_engineering> | train_data[['Ticket', 'PassengerId']].groupby('Ticket', as_index=False ).count().sort_values('PassengerId', ascending=False ) | Titanic - Machine Learning from Disaster |
7,677,885 | TRAIN_DF[X_COL] = TRAIN_DF[X_COL].apply(append_file_ext)
TEST_DF[X_COL] = TEST_DF[X_COL].apply(append_file_ext)
TRAIN_DF[X_COL] = TRAIN_DF[X_COL].apply(append_file_ext_jpeg )<concatenate> | train_data['TicketGroupSize'] = train_data.groupby(['Ticket'])['PassengerId'].transform('count')
test_data['TicketGroupSize'] = test_data.groupby(['Ticket'])['PassengerId'].transform('count' ) | Titanic - Machine Learning from Disaster |
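groupby(...).transform('count') broadcasts each ticket group's size back onto every member row, where .count() would collapse the groups; a tiny worked example:

import pandas as pd

demo = pd.DataFrame({'Ticket': ['A', 'A', 'B'], 'PassengerId': [1, 2, 3]})
demo['TicketGroupSize'] = demo.groupby('Ticket')['PassengerId'].transform('count')
# demo['TicketGroupSize'] -> 2, 2, 1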
7,677,885 | df0 = TRAIN_DF.loc[TRAIN_DF['diagnosis'] == '0']
df1 = TRAIN_DF.loc[TRAIN_DF['diagnosis'] == '1']
df2 = TRAIN_DF.loc[TRAIN_DF['diagnosis'] == '2']
df3 = TRAIN_DF.loc[TRAIN_DF['diagnosis'] == '3']
df4 = TRAIN_DF.loc[TRAIN_DF['diagnosis'] == '4']
df0 = df0.head(2000)
df1 = df1.head(2000)
df2 = df2.head(2000)
TRAIN_DF = df0.append([df1, df2, df3, df4],ignore_index = True)
print(TRAIN_DF.head())
print('**********************************')
TRAIN_DF = shuffle(TRAIN_DF)
print('**********************************')
print(len(TRAIN_DF))
print(TRAIN_DF.head() )<choose_model_class> | train_data.drop('Ticket', axis=1, inplace=True)
test_data.drop('Ticket', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
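The df0..df4 cell above rebalances the old-competition labels by capping classes 0-2 at 2,000 rows while classes 3 and 4 pass through whole; as long as those two classes stay under the cap, a one-liner sketch yields the same frame:

TRAIN_DF = (TRAIN_DF.groupby('diagnosis', group_keys=False)
                    .apply(lambda g: g.head(2000)))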
7,677,885 | def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = EfficientNetB5(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('/kaggle/input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5 )(x)
x = Dense(2048, activation='relu' )(x)
final_output = Dense(n_out, activation='softmax', name='final_output' )(x)
model = Model(input_tensor, final_output)
return model<choose_model_class> | fare_median = test_data['Fare'].median()
test_data['Fare'] = test_data['Fare'].fillna(fare_median ) | Titanic - Machine Learning from Disaster |