kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
1,259,505 | display(Image('../input/train/train/0004be2cfeaba1c0361d39e2b000257b.jpg'))
display(Image('../input/train/train/000c8a36845c0208e833c79c1bffedd1.jpg'))
display(Image('../input/train/train/002134abf28af54575c18741b89dd2a4.jpg'))
display(Image('../input/train/train/0024320f43bdd490562246435af4f90b.jpg'))<define_variables> | from sklearn.ensemble import ExtraTreesClassifier | Titanic - Machine Learning from Disaster |
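These display cells assume IPython's rich-display helpers are already in scope (they usually are in a Kaggle notebook); if not, a minimal import would be:

from IPython.display import display, Image  # assumed import; PIL's Image, used later in this kernel, is a different class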
1,259,505 | images = ['../input/train/train/' + fname for fname in train_csv['id']]
labels = train_csv['has_cactus'].tolist()<split> | xt_clf = ExtraTreesClassifier(random_state=42)
xt_clf.fit(X_train, y_train)
print_score(xt_clf, X_train, y_train, X_test, y_test, train=True)
print_score(xt_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
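The Titanic completions above and below repeatedly call a print_score helper that this excerpt never defines; a plausible reconstruction (the exact metrics it reports are an assumption):

# Hypothetical sketch of the print_score helper used throughout these cells.
from sklearn.metrics import accuracy_score, classification_report

def print_score(clf, X_train, y_train, X_test, y_test, train=True):
    # train=True reports fit quality on the training split; train=False on the held-out split.
    X, y = (X_train, y_train) if train else (X_test, y_test)
    pred = clf.predict(X)
    print('accuracy: {:.4f}'.format(accuracy_score(y, pred)))
    print(classification_report(y, pred))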
1,259,505 | X_train, X_dev, y_train, y_dev = train_test_split(images, labels, test_size=0.1, random_state=42)
n_train = len(X_train)
n_dev = len(X_dev )<normalization> | Y_pred = xt_clf.predict(test_df.drop('PassengerId',axis=1))
Y_pred
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submissions_xt.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | IMAGE_SIZE = 96
def preprocess_image(fname, label=None):
img = tf.io.read_file(fname)
img = tf.image.decode_jpeg(img)
img = tf.cast(img, tf.float32)
img = (img / 127.5) - 1
img = tf.image.resize(img, size=(IMAGE_SIZE, IMAGE_SIZE))
if label is not None:
return img, label
else:
return img<split> | ada_clf = AdaBoostClassifier()
ada_clf.fit(X_train, y_train)
print_score(ada_clf, X_train, y_train, X_test, y_test, train=True)
print_score(ada_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | BATCH_SIZE = 32
ds_train = (tf.data.Dataset.from_tensor_slices((X_train, y_train))
.map(preprocess_image, num_parallel_calls=AUTOTUNE)
.shuffle(n_train)
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE)
)
ds_dev = (tf.data.Dataset.from_tensor_slices((X_dev, y_dev))
.map(preprocess_image, num_parallel_calls=AUTOTUNE)
.shuffle(n_dev)
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE)
)<choose_model_class> | Y_pred = ada_clf.predict(test_df.drop('PassengerId',axis=1))
Y_pred
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submissions_ada.csv', index=False ) | Titanic - Machine Learning from Disaster |
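The tf.data pipelines above also rely on an AUTOTUNE constant that is never defined in this excerpt; the usual setup, assuming TF 1.13+ (newer TF 2.x code spells it tf.data.AUTOTUNE):

AUTOTUNE = tf.data.experimental.AUTOTUNE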
1,259,505 | base_model = tf.keras.applications.MobileNetV2(
input_shape =(IMAGE_SIZE, IMAGE_SIZE, 3),
include_top = False,
weights = 'imagenet'
)
base_model.trainable = False
base_model.summary()<choose_model_class> | from sklearn.ensemble import RandomForestClassifier | Titanic - Machine Learning from Disaster |
1,259,505 | pooling_layer = tf.keras.layers.GlobalMaxPooling2D()
final_layer = tf.keras.layers.Dense(units=1, activation='sigmoid' )<choose_model_class> | ada_clf = AdaBoostClassifier(RandomForestClassifier())
ada_clf.fit(X_train, y_train)
print_score(ada_clf, X_train, y_train, X_test, y_test, train=True)
print_score(ada_clf, X_train, y_train, X_test, y_test, train=False)
ada_clf = AdaBoostClassifier(base_estimator=RandomForestClassifier())
ada_clf.fit(X_train, y_train)
print_score(ada_clf, X_train, y_train, X_test, y_test, train=True)
print_score(ada_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | model = tf.keras.Sequential([
base_model,
pooling_layer,
final_layer
] )<choose_model_class> | Y_pred = ada_clf.predict(test_df.drop('PassengerId',axis=1))
Y_pred
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submissions_ada_random.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | learning_rate = 0.0001
model.compile(
optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
loss='binary_crossentropy',
metrics=['accuracy']
)<train_model> | from sklearn.ensemble import GradientBoostingClassifier | Titanic - Machine Learning from Disaster |
1,259,505 | initial_epochs = 32
steps_per_epoch =(tf.math.ceil(n_train/BATCH_SIZE))
model.fit(
ds_train.repeat() ,
epochs=initial_epochs,
steps_per_epoch=steps_per_epoch,
validation_data=ds_dev
)<set_options> | gbc_clf = GradientBoostingClassifier()
gbc_clf.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
1,259,505 | base_model.trainable = True
len(base_model.layers )<train_model> | print_score(gbc_clf, X_train, y_train, X_test, y_test, train=True ) | Titanic - Machine Learning from Disaster |
1,259,505 | fine_tune_at = 100
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False<choose_model_class> | print_score(gbc_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | model.compile(
optimizer=tf.optimizers.Adam(learning_rate=learning_rate/10),
loss='binary_crossentropy',
metrics=['accuracy']
)<train_model> | Y_pred = gbc_clf.predict(test_df.drop('PassengerId',axis=1))
Y_pred
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submissions_gbc.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | fine_epochs = 32
total_epochs = initial_epochs + fine_epochs
model.fit(
ds_train.repeat() ,
epochs=total_epochs,
initial_epoch=initial_epochs,
steps_per_epoch=steps_per_epoch,
validation_data=ds_dev
)<define_variables> | import xgboost as xgb | Titanic - Machine Learning from Disaster |
1,259,505 | test_image_names = tf.io.gfile.listdir('../input/test/test/')
n_test = len(test_image_names)
test_image_paths = list(map(lambda s: '../input/test/test/' + s, test_image_names))<create_dataframe> | xgb_clf = xgb.XGBClassifier(max_depth=5, n_estimators=10000, learning_rate=0.3,
n_jobs=-1) | Titanic - Machine Learning from Disaster |
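With 10,000 trees and a 0.3 learning rate, this XGBoost setup will badly overfit unless boosting is cut short; a hedged variant using the held-out split for early stopping (argument placement varies by xgboost version — before 2.0 these go to fit, newer releases take early_stopping_rounds in the constructor):

xgb_clf = xgb.XGBClassifier(max_depth=5, n_estimators=10000, learning_rate=0.3, n_jobs=-1)
# Stop adding trees once validation loss has not improved for 50 rounds.
xgb_clf.fit(X_train, y_train,
            eval_set=[(X_test, y_test)],
            early_stopping_rounds=50,
            verbose=False)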
1,259,505 | ds_test =(tf.data.Dataset.from_tensor_slices(test_image_paths)
.map(preprocess_image, num_parallel_calls=AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE)
)<predict_on_test> | xgb_clf.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
1,259,505 | predictions = model.predict(ds_test )<create_dataframe> | print_score(xgb_clf, X_train, y_train, X_test, y_test, train=True ) | Titanic - Machine Learning from Disaster |
1,259,505 | final_df = pd.DataFrame()
final_df['id'] = test_image_names
final_df['has_cactus'] = predictions<save_to_csv> | print_score(xgb_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | final_df.to_csv('submission.csv', index=False )<set_options> | Y_pred = xgb_clf.predict(test_df.drop('PassengerId',axis=1))
Y_pred
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submissions_xgb.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | %matplotlib inline<load_from_csv> | tree_clf = DecisionTreeClassifier()
tree_clf.fit(X_train, y_train)
print_score(tree_clf, X_train, y_train, X_test, y_test, train=True)
print_score(tree_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | train_df = pd.read_csv(".. /input/train.csv")
train_df.head()<define_variables> | rf_clf = RandomForestClassifier()
rf_clf.fit(X_train, y_train.ravel())
print_score(rf_clf, X_train, y_train, X_test, y_test, train=True)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | data_folder = Path("../input")
train_img = ImageList.from_df(train_df, path=data_folder, folder='train/train' )<feature_engineering> | en_en = pd.DataFrame() | Titanic - Machine Learning from Disaster |
1,259,505 | transformations = get_transforms(do_flip=True, flip_vert=True, max_rotate=10.0, max_zoom=1.1, max_lighting=0.2,
max_warp=0.2, p_affine=0.75, p_lighting=0.75 )<load_from_csv> | tree_clf.predict_proba(X_train ) | Titanic - Machine Learning from Disaster |
1,259,505 | test_df = pd.read_csv(".. /input/sample_submission.csv")
test_img = ImageList.from_df(test_df, path=data_folder, folder='test/test' )<normalization> | en_en['tree_clf'] = pd.DataFrame(tree_clf.predict_proba(X_train)) [1]
en_en['rf_clf'] = pd.DataFrame(rf_clf.predict_proba(X_train)) [1]
col_name = en_en.columns
en_en = pd.concat([en_en, pd.DataFrame(y_train ).reset_index(drop=True)], axis=1 ) | Titanic - Machine Learning from Disaster |
1,259,505 | train_img = train_img.split_by_rand_pct(0.01).label_from_df().add_test(test_img).transform(transformations, size=128).databunch(path='.', bs=64, device=torch.device('cuda:0')).normalize(imagenet_stats)<choose_model_class> | tmp = list(col_name)
tmp.append('ind')
en_en.columns = tmp | Titanic - Machine Learning from Disaster |
1,259,505 | learn = cnn_learner(train_img, models.densenet161, metrics=[error_rate, accuracy] )<train_model> | m_clf = LogisticRegression(fit_intercept=False)
m_clf.fit(en_en[['tree_clf', 'rf_clf']], en_en['ind'] ) | Titanic - Machine Learning from Disaster |
1,259,505 | lr = 3e-02
learn.fit_one_cycle(5, slice(lr))<save_to_csv> | en_test = pd.DataFrame() | Titanic - Machine Learning from Disaster |
1,259,505 | preds, _ = learn.get_preds(ds_type=DatasetType.Test)
test_df.has_cactus = preds.numpy()[:, 0]
test_df.to_csv('submission.csv', index=False)<set_options> | en_test['tree_clf'] = pd.DataFrame(tree_clf.predict_proba(X_test))[1]
en_test['rf_clf'] = pd.DataFrame(rf_clf.predict_proba(X_test))[1]
col_name = en_en.columns
en_test['combined'] = m_clf.predict(en_test[['tree_clf', 'rf_clf']]) | Titanic - Machine Learning from Disaster |
1,259,505 | import random
import os
import sklearn.utils
from tqdm import tqdm, tqdm_notebook
import pandas as pd
import cv2 as cv
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, BatchNormalization, Dropout, LeakyReLU, DepthwiseConv2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping
import json
import logging
import numpy as np
import matplotlib.pyplot as plt<set_options> | en_test = pd.concat([en_test, pd.DataFrame(y_test).reset_index(drop=True)], axis=1) | Titanic - Machine Learning from Disaster |
1,259,505 | from numpy.random import seed
from tensorflow import set_random_seed  # assumed imports for these seeding helpers (TF 1.x API)
seed(1372)
set_random_seed(1372)
<train_model> | en_test.columns = tmp | Titanic - Machine Learning from Disaster |
1,259,505 | def resize_and_save(filename, input_dir, output_dir, size=32):
image = Image.open(os.path.join(input_dir, filename))
image = image.resize((size, size))
image.save(os.path.join(output_dir, filename))<load_pretrained> | print(round(accuracy_score(en_test['ind'], en_test['combined']), 4)) | Titanic - Machine Learning from Disaster |
1,259,505 | class Params():
def __init__(self, json_path):
self.update(json_path)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
@property
def dict(self):
return self.__dict__
def set_logger(log_path):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not logger.handlers:
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
def save_dict_to_json(d, json_path):
with open(json_path, 'w') as f:
d = {k: float(v) for k, v in d.items()}
json.dump(d, f, indent=4)
<data_type_conversions> | print(classification_report(en_test['ind'], en_test['combined'])) | Titanic - Machine Learning from Disaster |
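A brief usage sketch for the Params helper above (the params.json filename is an assumption; learning_rate mirrors the params.learning_rate access later in this kernel):

# Load hyperparameters from a JSON file, read one, and persist changes.
params = Params('params.json')
print(params.learning_rate)
params.save('params.json')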
1,259,505 | df = pd.read_csv(csv_dir)
df['id'] = dataset_dir + '/' + df['id'].astype(str)
df = sklearn.utils.shuffle(df, random_state=1372)
df = df.reset_index(drop=True)
filenames = df['id']
labels = df['has_cactus'].astype(np.float32)<categorify> | Titanic - Machine Learning from Disaster | |
1,259,505 | def _parse_function(filename, label, size):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string, channels=3)
image = tf.image.convert_image_dtype(image_decoded, tf.float32)
resized_image = tf.image.resize_images(image, [size, size])
return resized_image, label<train_model> | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier | Titanic - Machine Learning from Disaster |
1,259,505 | def input_fn(is_training, filenames, labels, params):
num_samples = len(filenames)
assert len(filenames)== len(labels), "Filenames and labels should have same length"
parse_fn = lambda f, l: _parse_function(f, l, params.image_size)
train_fn = lambda f, l: train_preprocess(f, l, params.use_random_flip)
if is_training:
dataset =(tf.data.Dataset.from_tensor_slices(( tf.constant(filenames), tf.constant(labels)))
.shuffle(num_samples)
.map(parse_fn, num_parallel_calls=params.num_parallel_calls)
.map(train_fn, num_parallel_calls=params.num_parallel_calls)
.batch(params.batch_size)
.repeat()
.prefetch(32)
)
else:
dataset =(tf.data.Dataset.from_tensor_slices(( tf.constant(filenames), tf.constant(labels)))
.map(parse_fn)
.batch(params.batch_size)
.repeat()
.prefetch(32)
)
return dataset<split> | pd.Series(list(y_train)).value_counts() / pd.Series(list(y_train)).count() | Titanic - Machine Learning from Disaster |
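input_fn references a train_preprocess function that the excerpt never defines; a minimal sketch of what such a step typically does in this pipeline (the flip plus brightness jitter is an assumption):

def train_preprocess(image, label, use_random_flip):
    # Light augmentation applied only on the training branch of input_fn.
    if use_random_flip:
        image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, max_delta=32.0 / 255.0)
    # convert_image_dtype produced values in [0, 1]; keep them there.
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image, label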
1,259,505 | params = Params(json_path)
split = int(len(filenames)*0.15)
train_dataset = input_fn(True, filenames[split:], labels[split:], params)
valid_dataset = input_fn(False, filenames[:split], labels[:split], params)<choose_model_class> | class_weight = {0:0.61, 1:0.38} | Titanic - Machine Learning from Disaster |
1,259,505 | model = Sequential()
model.add(Conv2D(3, kernel_size = 3, activation = 'relu', input_shape =(32, 32, 3)))
model.add(Conv2D(filters = 16, kernel_size = 3, activation = 'relu'))
model.add(Conv2D(filters = 16, kernel_size = 3, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(DepthwiseConv2D(kernel_size = 3, strides = 1, padding = 'Same', use_bias = True))
model.add(Conv2D(filters = 32, kernel_size = 1, activation = 'relu'))
model.add(Conv2D(filters = 64, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(DepthwiseConv2D(kernel_size = 3, strides = 2, padding = 'Same', use_bias = True))
model.add(Conv2D(filters = 128, kernel_size = 1, activation = 'relu'))
model.add(Conv2D(filters = 256, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(DepthwiseConv2D(kernel_size = 3, strides = 1, padding = 'Same', use_bias = True))
model.add(Conv2D(filters = 256, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 512, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(DepthwiseConv2D(kernel_size = 3, strides = 2, padding = 'Same', use_bias = True))
model.add(Conv2D(filters = 512, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 512, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(DepthwiseConv2D(kernel_size = 3, strides = 1, padding = 'Same', use_bias = True))
model.add(Conv2D(filters = 1024, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 1024, kernel_size = 1, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(128, activation = 'elu'))
model.add(Dense(1, activation = 'sigmoid'))<choose_model_class> | forest = RandomForestClassifier(class_weight=class_weight ) | Titanic - Machine Learning from Disaster |
1,259,505 | adam = tf.keras.optimizers.Adam(lr=params.learning_rate, amsgrad=True)
model.compile(optimizer=adam, loss=tf.losses.log_loss, metrics=['accuracy'])
model.summary()<choose_model_class> | ada = AdaBoostClassifier(base_estimator=forest, n_estimators=100,
learning_rate=0.5, random_state=42) | Titanic - Machine Learning from Disaster |
1,259,505 | file_path = 'weights-aerial-cactus.h5'
callbacks = [
ModelCheckpoint(file_path, monitor = 'val_acc', verbose = 1, save_best_only = True, mode = 'max'),
ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 3, verbose = 1, mode = 'min', min_lr = 0.00001),
EarlyStopping(monitor = 'val_loss', min_delta = 1e-10, patience = 15, verbose = 1, restore_best_weights = True)
]<train_model> | ada.fit(X_train, y_train.ravel() ) | Titanic - Machine Learning from Disaster |
1,259,505 | history = model.fit(train_dataset, validation_data=valid_dataset,
epochs=50,verbose=True,
steps_per_epoch=int((len(filenames) - split)/params.batch_size),
validation_steps=int(split/params.batch_size),
callbacks = callbacks )<load_pretrained> | print_score(ada, X_train, y_train, X_test, y_test, train=True)
print_score(ada, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | model.load_weights(file_path )<save_to_csv> | bag_clf = BaggingClassifier(base_estimator=ada, n_estimators=50,
max_samples=1.0, max_features=1.0, bootstrap=True,
bootstrap_features=False, n_jobs=-1,
random_state=42 ) | Titanic - Machine Learning from Disaster |
1,259,505 | test_df = pd.read_csv('../input/sample_submission.csv')
X_test = []
images_test = test_df['id'].values
for img_id in tqdm_notebook(images_test):
X_test.append(cv.imread('../input/test/test/' + img_id))
X_test = np.asarray(X_test)
X_test = X_test.astype('float32')
X_test /= 255
y_test_pred = model.predict_proba(X_test)
test_df['has_cactus'] = y_test_pred
test_df.to_csv('aerial-cactus-submission_1.csv', index = False)
for i in range(len(y_test_pred)):
if y_test_pred[i][0] >= 0.5:
y_test_pred[i][0] = 1.0
else:
y_test_pred[i][0] = 0.0
test_df['has_cactus'] = y_test_pred
test_df.to_csv('aerial-cactus-submission_2.csv', index = False )<import_modules> | bag_clf.fit(X_train, y_train.ravel() ) | Titanic - Machine Learning from Disaster |
1,259,505 | from pathlib import Path
from fastai import *
from fastai.vision import *
import torch<define_variables> | print_score(bag_clf, X_train, y_train, X_test, y_test, train=True)
print_score(bag_clf, X_train, y_train, X_test, y_test, train=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | <load_from_csv><EOS> | Y_pred = bag_clf.predict(test_df.drop('PassengerId',axis=1))
Y_pred
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submissions_bag_last.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,248,437 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class> | warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore', category=DeprecationWarning)
%matplotlib inline
mpl.style.use('ggplot')
sns.set_style('white')
params = {
'axes.labelsize': "large",
'xtick.labelsize': 'x-large',
'legend.fontsize': 20,
'figure.dpi': 150,
'figure.figsize': [25, 7]
}
plt.rcParams.update(params ) | Titanic - Machine Learning from Disaster |
1,248,437 | learn = cnn_learner(train_img, models.densenet161, metrics=[error_rate, accuracy] )<train_model> | HTML() ; | Titanic - Machine Learning from Disaster |
1,248,437 | lr = 3e-02
learn.fit_one_cycle(5, slice(lr))<predict_on_test> | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_df = test.copy() | Titanic - Machine Learning from Disaster |
1,248,437 | abc,_ = learn.get_preds(ds_type=DatasetType.Test)
<set_options> | Titanic - Machine Learning from Disaster | |
1,248,437 | test_df.has_cactus = abc.numpy() [:, 0]<save_to_csv> | train.Survived.value_counts() | Titanic - Machine Learning from Disaster |
1,248,437 | test_df.to_csv('submission.csv', index=False )<import_modules> | train.Survived.value_counts(normalize=True ) | Titanic - Machine Learning from Disaster |
1,248,437 | from pathlib import Path
from fastai import *
from fastai.vision import *
import torch<define_variables> | train[['Pclass' , 'Survived']].groupby('Pclass' ).mean() | Titanic - Machine Learning from Disaster |
1,248,437 | data_folder = Path(".. /input" )<load_from_csv> | Titanic - Machine Learning from Disaster | |
1,248,437 | train_df = pd.read_csv(".. /input/train.csv")
test_df = pd.read_csv(".. /input/sample_submission.csv" )<define_variables> | print(train.Age.count())
train['Age'].fillna(train.Age.median() , inplace=True ) | Titanic - Machine Learning from Disaster |
1,248,437 | train_img.show_batch(rows=3, figsize=(7,6))<choose_model_class> | print(train.Age.count() ) | Titanic - Machine Learning from Disaster |
1,248,437 | learn = cnn_learner(train_img, models.densenet161, metrics=[error_rate, accuracy] )<train_model> | Titanic - Machine Learning from Disaster | |
1,248,437 | lr = 3e-02
learn.fit_one_cycle(5, slice(lr))<save_to_csv> | print(train.Embarked.count())
train['Embarked'].fillna(train['Embarked'].mode()[0], inplace=True) | Titanic - Machine Learning from Disaster |
1,248,437 | preds,_ = learn.get_preds(ds_type=DatasetType.Test)
test_df.has_cactus = preds.numpy() [:, 0]
test_df.to_csv('submission.csv', index=False )<install_modules> | train.Embarked.count() | Titanic - Machine Learning from Disaster |
1,248,437 | !pip install albumentations > /dev/null 2>&1
!pip install pretrainedmodels > /dev/null 2>&1
!pip install catalyst > /dev/null 2>&1<set_options> | train[['SibSp' , 'Survived']].groupby('SibSp' ).mean() | Titanic - Machine Learning from Disaster |
1,248,437 | %matplotlib inline
train_on_gpu = True
<import_modules> | train[['Parch','Survived']].groupby('Parch' ).mean() | Titanic - Machine Learning from Disaster |
1,248,437 | from catalyst.dl.utils import UtilsFactory
from catalyst.dl.experiments import SupervisedRunner
from catalyst.dl.callbacks import EarlyStoppingCallback, OneCycleLR, InferCallback<categorify> | train['Alone']=0
train.loc[(train['SibSp']==0)&(train['Parch']==0), 'Alone'] = 1
test['Alone']=0
test.loc[(test['SibSp']==0)&(test['Parch']==0), 'Alone'] = 1 | Titanic - Machine Learning from Disaster |
1,248,437 | data_transforms = albumentations.Compose([
albumentations.HorizontalFlip(),
albumentations.VerticalFlip(),
albumentations.RandomBrightness(),
albumentations.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensor()
])
data_transforms_test = albumentations.Compose([
albumentations.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensor()
])<load_from_csv> | drop_features = ['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket', 'Cabin']
train.drop(drop_features, axis=1, inplace=True)
test.drop(drop_features, axis=1, inplace=True) | Titanic - Machine Learning from Disaster |
1,248,437 | train_df = pd.read_csv('../input/train.csv')
train, valid = train_test_split(train_df.has_cactus, stratify=train_df.has_cactus, test_size=0.1)
img_class_dict = {k:v for k, v in zip(train_df.id, train_df.has_cactus)}<categorify> | def map_all(frame):
frame['Sex'] = frame.Sex.map({'female': 0, 'male': 1}).astype(int)
frame['Embarked'] = frame.Embarked.map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
frame.loc[frame.Age <= 16, 'Age'] = 0
frame.loc[(frame.Age > 16) & (frame.Age <= 32), 'Age'] = 1
frame.loc[(frame.Age > 32) & (frame.Age <= 48), 'Age'] = 2
frame.loc[(frame.Age > 48) & (frame.Age <= 64), 'Age'] = 3
frame.loc[(frame.Age > 64) & (frame.Age <= 80), 'Age'] = 4
frame.loc[(frame.Fare <= 7.91), 'Fare'] = 0
frame.loc[(frame.Fare > 7.91) & (frame.Fare <= 14.454), 'Fare'] = 1
frame.loc[(frame.Fare > 14.454) & (frame.Fare <= 31), 'Fare'] = 2
frame.loc[(frame.Fare > 31), 'Fare'] = 3 | Titanic - Machine Learning from Disaster |
1,248,437 | class CactusDataset(Dataset):
def __init__(self, datafolder, datatype='train', transform=transforms.Compose([transforms.CenterCrop(32), transforms.ToTensor()]), labels_dict={}):
self.datafolder = datafolder
self.datatype = datatype
self.image_files_list = [s for s in os.listdir(datafolder)]
self.transform = transform
self.labels_dict = labels_dict
if self.datatype == 'train':
self.labels = [np.float32(labels_dict[i]) for i in self.image_files_list]
else:
self.labels = [np.float32(0.0) for _ in range(len(self.image_files_list))]
def __len__(self):
return len(self.image_files_list)
def __getitem__(self, idx):
img_name = os.path.join(self.datafolder, self.image_files_list[idx])
img = cv2.imread(img_name)[:,:,::-1]
image = self.transform(image=img)
image = image['image']
label = self.labels[idx]
return image, label<create_dataframe> | map_all(train)
train.head() | Titanic - Machine Learning from Disaster |
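One caveat on CactusDataset above: its default transform is a torchvision Compose, but __getitem__ calls self.transform(image=img)['image'], which is the albumentations convention, so the default would fail if it were ever exercised; in practice the notebook always passes an albumentations pipeline, as in the next cell:

# The albumentations-style call in __getitem__ requires a transform like data_transforms above.
dataset = CactusDataset(datafolder='../input/train/train', datatype='train',
                        transform=data_transforms, labels_dict=img_class_dict)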
1,248,437 | dataset = CactusDataset(datafolder='../input/train/train', datatype='train', transform=data_transforms, labels_dict=img_class_dict)
test_set = CactusDataset(datafolder='../input/test/test', datatype='test', transform=data_transforms_test)<load_pretrained> | map_all(test)
test.head() | Titanic - Machine Learning from Disaster |
1,248,437 | loaders = collections.OrderedDict()
train_sampler = SubsetRandomSampler(list(train.index))
valid_sampler = SubsetRandomSampler(list(valid.index))
batch_size = 512
num_workers = 0
train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, num_workers=num_workers)
loaders["train"] = train_loader
loaders["valid"] = valid_loader
loaders["test"] = test_loader<init_hyperparams> | x_train,x_test,y_train,y_test=train_test_split(train.drop('Survived',axis=1),train.Survived,test_size=0.20,random_state=66 ) | Titanic - Machine Learning from Disaster |
1,248,437 | class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Net(nn.Module):
def __init__(
self,
num_classes: int,
p: float = 0.2,
pooling_size: int = 2,
last_conv_size: int = 1664,
arch: str = "densenet169",
pretrained: str = "imagenet")-> None:
super().__init__()
net = pretrainedmodels.__dict__[arch](pretrained=pretrained)
modules = list(net.children())[:-1]
modules += [nn.Sequential(
Flatten() ,
nn.BatchNorm1d(1664),
nn.Dropout(p),
nn.Linear(1664, num_classes)
)]
self.net = nn.Sequential(*modules)
def forward(self, x):
logits = self.net(x)
return torch.squeeze(logits)<choose_model_class> | models = [LogisticRegression(), LinearSVC(), SVC(kernel='rbf'), KNeighborsClassifier(), RandomForestClassifier(),
DecisionTreeClassifier(), GradientBoostingClassifier(), GaussianNB(), LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis()]
model_names = ['LogisticRegression', 'LinearSVM', 'rbfSVM', 'KNearestNeighbors', 'RandomForestClassifier', 'DecisionTree',
'GradientBoostingClassifier', 'GaussianNB', 'LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
accuracy = []
for model in range(len(models)):
clf = models[model]
clf.fit(x_train, y_train)
pred = clf.predict(x_test)
accuracy.append(accuracy_score(pred, y_test))
compare = pd.DataFrame({'Algorithm': model_names, 'Accuracy': accuracy})
compare | Titanic - Machine Learning from Disaster |
1,248,437 | num_epochs = 10
logdir = "./logs/simple"
model = Net(num_classes=1)
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters() , momentum=0.99, lr=1e-2)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=2 )<train_model> | params_dict={'criterion':['gini','entropy'],'max_depth':[5.21,5.22,5.23,5.24,5.25,5.26,5.27,5.28,5.29,5.3]}
clf_dt=GridSearchCV(estimator=DecisionTreeClassifier() ,param_grid=params_dict,scoring='accuracy', cv=5)
clf_dt.fit(x_train,y_train)
pred=clf_dt.predict(x_test)
print(accuracy_score(pred,y_test))
print(clf_dt.best_params_ ) | Titanic - Machine Learning from Disaster |
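DecisionTreeClassifier treats max_depth as an integer depth cap, so every value in the 5.21–5.3 grid behaves like a depth of 5 and the search explores a single effective setting; a grid over distinct integer depths (range chosen arbitrarily here) would be more informative:

params_dict = {'criterion': ['gini', 'entropy'], 'max_depth': [3, 4, 5, 6, 7, 8]}
clf_dt = GridSearchCV(estimator=DecisionTreeClassifier(), param_grid=params_dict,
                      scoring='accuracy', cv=5)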
1,248,437 | runner = SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir=logdir,
callbacks=[
OneCycleLR(
cycle_len=num_epochs,
div_factor=3,
increase_fraction=0.3,
momentum_range=(0.95, 0.85))
],
num_epochs=num_epochs,
verbose=False
)<choose_model_class> | no_of_test=[i+1 for i in range(50)]
params_dict={'n_neighbors':no_of_test}
clf_knn=GridSearchCV(estimator=KNeighborsClassifier() ,param_grid=params_dict,scoring='accuracy')
clf_knn.fit(x_train,y_train)
pred=clf_knn.predict(x_test)
print(accuracy_score(pred,y_test))
print(clf_knn.best_params_ ) | Titanic - Machine Learning from Disaster |
1,248,437 | <predict_on_test><EOS> | pred = clf_dt.predict(test)
d = {'PassengerId' : test_df.PassengerId , 'Survived' : pred}
answer = pd.DataFrame(d)
answer.to_csv('Prediction.csv' , index=False ) | Titanic - Machine Learning from Disaster |
1,129,041 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv> | warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv') | Titanic - Machine Learning from Disaster |
1,129,041 | test_img = os.listdir('../input/test/test')
test_df = pd.DataFrame(test_img, columns=['id'])
test_preds = pd.DataFrame({'imgs': test_df.id.values, 'preds': runner.callbacks[0].predictions["logits"]})
test_preds.columns = ['id', 'has_cactus']
test_preds.to_csv('sub.csv', index=False)
test_preds.head()<save_to_csv> | y = df_train['Survived']
test_index = df_test['PassengerId']
combine = [df_train, df_test] | Titanic - Machine Learning from Disaster |
1,129,041 | runner1.infer(
model=model,
loaders=test_loader,
callbacks=[InferCallback() ],
)
test_preds['has_cactus'] = runner1.callbacks[0].predictions["logits"]
test_preds.to_csv('sub1.csv', index=False)<set_options> | for dataset in combine:
dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
freq_port = df_train.Embarked.dropna().mode()[0]
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
print("Values in Sex:", df_train['Sex'].unique())
print("Values in Embarked:", df_train['Embarked'].unique())
print("Values in Title:", sorted(df_train['Title'].unique())) | Titanic - Machine Learning from Disaster |
1,129,041 | %matplotlib inline
<split> | df_train = df_train.drop(['FareBand'], axis=1)
combine = [df_train, df_test] | Titanic - Machine Learning from Disaster |
1,129,041 | train_df = pd.read_csv('../input/train.csv')
train_df.head()
x_train,x_test,y_train,y_test = train_test_split(
train_df['id'],
train_df['has_cactus'],
test_size = 0.2,
random_state = 3 )<data_type_conversions> | for dataset in combine:
dataset.loc[(dataset.Age.isnull()), 'Age'] = dataset.Age.median() | Titanic - Machine Learning from Disaster |
1,129,041 | X_train = []
for images in tqdm(x_train):
img = plt.imread('../input/train/train/' + images)
X_train.append(img)
X_test = []
for images in tqdm(x_test):
img = plt.imread('../input/train/train/' + images)
X_test.append(img)
X_train = np.array(X_train)
X_test = np.array(X_test)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train/255
X_test = X_test/255<train_model> | df_train['AgeBand'] = pd.cut(df_train['Age'], 5)
df_train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean().sort_values(by='AgeBand', ascending=True ) | Titanic - Machine Learning from Disaster |
1,129,041 | augmentations = ImageDataGenerator(
vertical_flip=True,
horizontal_flip=True,
zoom_range=0.1)
augmentations.fit(X_train )<choose_model_class> | df_train = df_train.drop(['AgeBand'], axis=1)
combine = [df_train, df_test]
df_train.head() | Titanic - Machine Learning from Disaster |
1,129,041 | best_model_weights = './base.model'
checkpoint = ModelCheckpoint(
best_model_weights,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
save_weights_only=False,
period=1
)
earlystop = EarlyStopping(
monitor='val_loss',
min_delta=0.001,
patience=10,
verbose=1,
mode='auto'
)
tensorboard = TensorBoard(
log_dir = './logs',
histogram_freq=0,
batch_size=16,
write_graph=True,
write_grads=True,
write_images=False,
)
csvlogger = CSVLogger(
filename= "training_csv.log",
separator = ",",
append = False
)
reduce = ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=10,
verbose=1,
mode='auto',
cooldown=1
)
callbacks = [checkpoint,tensorboard,csvlogger,reduce]<train_model> | for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
df_train.loc[:, ['Age*Class', 'Age', 'Pclass']].head(5 ) | Titanic - Machine Learning from Disaster |
1,129,041 | opt = SGD(lr=1e-4,momentum=0.99)
opt1 = Adam(lr=1e-3)
model.compile(
loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy']
)
history = model.fit_generator(
augmentations.flow(X_train,y_train,batch_size = 16),
steps_per_epoch=150,
validation_steps=150,
validation_data=(X_test,y_test),
epochs = 50,
verbose = 1,
callbacks=callbacks,
)<load_pretrained> | df_train = df_train.drop(['Name', 'Ticket', 'Cabin'], axis=1)
df_test = df_test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [df_train, df_test]
display(combine[0].head())
display(combine[1].head() ) | Titanic - Machine Learning from Disaster |
1,129,041 | show_final_history(history)
model.load_weights(best_model_weights)
model_json = model.to_json()
with open("model.json","w")as json_file:
json_file.write(model_json)
model.save("model.h5")
print("Weights Saved")
print("JSON Saved" )<compute_train_metric> | df_train_X = combine[0].drop(["Survived", "PassengerId"], axis=1)
train_y = combine[0]["Survived"]
test_test_X = combine[1].drop("PassengerId", axis=1 ).copy() | Titanic - Machine Learning from Disaster |
1,129,041 | train_pred = model.predict(X_train, verbose=1)
valid_pred = model.predict(X_test, verbose=1)
train_acc = roc_auc_score(y_train, train_pred)
valid_acc = roc_auc_score(y_test, valid_pred)
confusion_matrix(y_test, np.round(valid_pred))<load_from_csv> | MLA = [
ensemble.AdaBoostClassifier() ,
ensemble.BaggingClassifier() ,
ensemble.ExtraTreesClassifier() ,
ensemble.GradientBoostingClassifier() ,
ensemble.RandomForestClassifier() ,
gaussian_process.GaussianProcessClassifier() ,
linear_model.LogisticRegressionCV() ,
linear_model.PassiveAggressiveClassifier() ,
linear_model.RidgeClassifierCV() ,
linear_model.SGDClassifier() ,
linear_model.Perceptron() ,
naive_bayes.BernoulliNB() ,
naive_bayes.GaussianNB() ,
neighbors.KNeighborsClassifier() ,
svm.SVC(probability=True),
svm.NuSVC(probability=True),
svm.LinearSVC() ,
tree.DecisionTreeClassifier() ,
tree.ExtraTreeClassifier() ,
discriminant_analysis.LinearDiscriminantAnalysis() ,
discriminant_analysis.QuadraticDiscriminantAnalysis() ,
XGBClassifier()
]
cv_split = model_selection.ShuffleSplit(n_splits = 10, test_size =.3, train_size =.6, random_state = 0)
MLA_columns = ['MLA Name', 'MLA Parameters','MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD' ,'MLA Time']
MLA_compare = pd.DataFrame(columns = MLA_columns)
MLA_predict = {}
row_index = 0
for alg in MLA:
MLA_name = alg.__class__.__name__
MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
cv_results = model_selection.cross_validate(alg, df_train_X, train_y, cv = cv_split, return_train_score=True)
MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std() *3
alg.fit(df_train_X, train_y)
MLA_compare.loc[row_index, 'F1 Score'] = metrics.f1_score(train_y, alg.predict(df_train_X))
MLA_predict[MLA_name] = alg.predict(test_test_X)
row_index+=1
MLA_compare.sort_values(by = ['F1 Score'], ascending = False, inplace = True ) | Titanic - Machine Learning from Disaster |
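Note that the F1 score driving this ranking is computed on the training data (alg.fit then alg.predict on df_train_X), which favors models that overfit; a fairer ranking would score F1 out-of-fold with the same splitter, e.g.:

# Hedged alternative: cross-validated F1 instead of train-set F1.
f1_cv = model_selection.cross_val_score(alg, df_train_X, train_y,
                                        scoring='f1', cv=cv_split).mean()
MLA_compare.loc[row_index, 'F1 Score'] = f1_cv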
1,129,041 | sample = pd.read_csv('../input/sample_submission.csv')
test = []
for images in tqdm(sample['id']):
img = plt.imread('../input/test/test/' + images)
test.append(img)
test = np.array(test )<save_to_csv> | best_model = MLA_compare.loc[MLA_compare['F1 Score'].idxmax() ]['MLA Name']
best_model_score = round(MLA_compare.loc[MLA_compare['F1 Score'].idxmax() ]['F1 Score'],3)
print("Best model:",best_model)
print("F1 Score:",best_model_score ) | Titanic - Machine Learning from Disaster |
1,129,041 | <set_options><EOS> | predictions = MLA_predict[best_model]
predictions = predictions.ravel()
data_to_submit = pd.DataFrame({
'PassengerId': test_index,
'Survived': predictions
})
data_to_submit.to_csv("results.csv", index=False ) | Titanic - Machine Learning from Disaster |
11,317,713 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<set_options> | warnings.filterwarnings("ignore")
%matplotlib inline | Titanic - Machine Learning from Disaster |
11,317,713 | warnings.filterwarnings('ignore')<load_from_csv> | train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv') | Titanic - Machine Learning from Disaster |
11,317,713 | train_directory = "../input/train/train/"
train_df = pd.read_csv('../input/train.csv')
train_df.tail()<prepare_x_and_y> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
11,317,713 | X = []
Y = []
imges = train_df['id'].values
for img_id in tqdm_notebook(imges):
X.append(cv2.imread(train_directory + img_id))
Y.append(train_df[train_df['id'] == img_id]['has_cactus'].values[0])
X = np.asarray(X)
X = X.astype('float32')
X /= 255
Y = np.asarray(Y )<import_modules> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
11,317,713 | from keras.layers import Conv2D, MaxPool2D, Dense, BatchNormalization, Activation, GlobalAveragePooling2D, Flatten, Dropout
from keras.models import Sequential, Model
from keras.regularizers import l2
from keras.optimizers import Adam<choose_model_class> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
11,317,713 | def simple_model():
model = Sequential()
model.add(Conv2D(32,(3, 3), input_shape=(32, 32, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D())
model.add(Conv2D(64,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D())
model.add(Conv2D(128,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128,(3, 3), padding='same', use_bias=False, kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D())
model.add(GlobalAveragePooling2D())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=1e-5),
metrics=['accuracy'])
return model<choose_model_class> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
11,317,713 | datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-06,
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
channel_shift_range=0.,
fill_mode='nearest',
validation_split=0.1,
horizontal_flip=True,
vertical_flip=True)
datagen.fit(X)<train_model> | train['Survived'].value_counts(normalize=True) | Titanic - Machine Learning from Disaster |
11,317,713 | model = simple_model()
batch_size = 16
epoch = 1000
train = datagen.flow(X, Y, batch_size=batch_size, subset='training')
validate = datagen.flow(X, Y, batch_size=batch_size, subset='validation')
history = model.fit_generator(train,
validation_data=validate,
steps_per_epoch=len(X)/ batch_size,
epochs=epoch,
shuffle=True,
validation_steps=len(X)/batch_size )<data_type_conversions> | train['Survived'].groupby(train['Pclass'] ).mean() | Titanic - Machine Learning from Disaster |
11,317,713 | test_directory = "../input/test/test/"
X_test = []
Test_imgs = []
for img_id in tqdm_notebook(os.listdir(test_directory)):
X_test.append(cv2.imread(test_directory + img_id))
Test_imgs.append(img_id)
X_test = np.asarray(X_test)
X_test = X_test.astype('float32')
X_test /= 255<predict_on_test> | train['Title'] = train['Title'] = train.Name.str.extract('([A-Za-z]+)\.', expand=False)
train['Title'].value_counts() | Titanic - Machine Learning from Disaster |
11,317,713 | test_predictions = model.predict(X_test )<save_to_csv> | train['Survived'].groupby(train['Title'] ).mean() | Titanic - Machine Learning from Disaster |
11,317,713 | pred_df = pd.DataFrame(test_predictions, columns=['has_cactus'])
pred_df['id'] = ''
cols = pred_df.columns.tolist()
cols = cols[-1:] + cols[:-1]
pred_df = pred_df[cols]
for i, img in enumerate(Test_imgs):
pred_df.at[i, 'id'] = img
pred_df.to_csv('submission.csv', index=False)<import_modules> | train['Name_Len'] = train['Name'].apply(lambda x: len(x))
train['Survived'].groupby(pd.qcut(train['Name_Len'],5)).mean() | Titanic - Machine Learning from Disaster |
11,317,713 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import scipy
import cv2
import random
from sklearn.model_selection import KFold
import keras<load_from_csv> | pd.qcut(train['Name_Len'],5 ).value_counts() | Titanic - Machine Learning from Disaster |
11,317,713 | train_data = pd.read_csv('../input/train.csv')
11,317,713 | positive_examples_indexes = train_data[train_data.has_cactus==1].index
negative_examples_indexes = train_data[train_data.has_cactus==0].index<randomize_order> | train['Survived'].groupby(train['Sex'] ).mean() | Titanic - Machine Learning from Disaster |
11,317,713 | pos_indexes = positive_examples_indexes.tolist()
random.shuffle(pos_indexes)<define_variables> | train['Survived'].groupby(train['Sex']).mean() | Titanic - Machine Learning from Disaster |
11,317,713 | k = 5
fold_size = int(len(pos_indexes) / k)
folds_indexes = []
for i in range(k):
start = i * fold_size
end = (i + 1) * fold_size
if i == k - 1:
end = len(pos_indexes)
folds_indexes.append(pos_indexes[start:end])<randomize_order> | pd.qcut(train['Age'], 5).value_counts() | Titanic - Machine Learning from Disaster |
11,317,713 | def image_generator(indexes=None, batch_size = 16, shuffle=True, train=True):
while True:
if train:
temp_indexes = indexes[:]
temp_indexes.extend(negative_examples_indexes.tolist())
random.shuffle(temp_indexes)
random.shuffle(temp_indexes)
else:
temp_indexes = indexes[:]
N = int(len(temp_indexes)/ batch_size)
for i in range(N):
current_indexes = temp_indexes[i*batch_size:(i+1)*batch_size]
batch_input = []
batch_output = []
for index in current_indexes:
img = mpimg.imread('../input/train/train/' + train_data.id[index])
batch_input += [img]
batch_input += [img[::-1, :, :]]
batch_input += [img[:, ::-1, :]]
batch_input += [np.rot90(img)]
temp_img = np.zeros_like(img)
temp_img[:28, :, :] = img[4:, :, :]
batch_input += [temp_img]
temp_img = np.zeros_like(img)
temp_img[:, :28, :] = img[:, 4:, :]
batch_input += [temp_img]
temp_img = np.zeros_like(img)
temp_img[4:, :, :] = img[:28, :, :]
batch_input += [temp_img]
temp_img = np.zeros_like(img)
temp_img[:, 4:, :] = img[:, :28, :]
batch_input += [temp_img]
batch_input += [cv2.resize(img[2:30, 2:30, :],(32, 32)) ]
batch_input += [scipy.ndimage.interpolation.rotate(img, 10, reshape=False)]
batch_input += [scipy.ndimage.interpolation.rotate(img, 5, reshape=False)]
for _ in range(11):
batch_output += [train_data.has_cactus[index]]
batch_input = np.array(batch_input)
batch_output = np.array(batch_output)
yield(batch_input, batch_output.reshape(-1, 1))<choose_model_class> | train['SibSp'].value_counts() | Titanic - Machine Learning from Disaster |
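A usage sketch for the generator above (fold choice and batch size are arbitrary): each source image contributes 11 augmented variants, so every yielded batch holds batch_size * 11 examples.

gen = image_generator(indexes=folds_indexes[0], batch_size=16, train=True)
batch_x, batch_y = next(gen)
print(batch_x.shape, batch_y.shape)  # expect (176, 32, 32, 3) and (176, 1)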
11,317,713 | def build_model():
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(64,(3, 3), input_shape=(32, 32, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(64,(3, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(128,(3, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(128,(3, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(256,(3, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(256,(3, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(512,(3, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(100))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Dense(1, activation='sigmoid'))
opt = keras.optimizers.Adam(0.0001)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
return model<train_model> | train['Survived'].groupby(train['SibSp'] ).mean() | Titanic - Machine Learning from Disaster |
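A minimal sketch wiring build_model to the generator above (epoch and step counts are assumptions, not values from the notebook):

model = build_model()
steps = int(len(folds_indexes[0]) / 16)
# Older Keras API, matching the `import keras` style used in this kernel.
model.fit_generator(image_generator(indexes=folds_indexes[0], batch_size=16),
                    steps_per_epoch=steps, epochs=10)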