| kernel_id (int64, 24.2k-23.3M) | prompt (string, length 8-1.85M) | completion (string, length 1-182k) | comp_name (string, length 5-57) |
|---|---|---|---|
2,067,866 | learn.unfreeze()
learn.lr_find()
learn.recorder.plot()<train_model> | test.isnull().any() | Titanic - Machine Learning from Disaster |
2,067,866 | learn.fit_one_cycle(8, slice(1e-6,1e-3))<predict_on_test> | train.loc[train["Age"].isnull() ==True,"Age"]=np.mean(train["Age"] ) | Titanic - Machine Learning from Disaster |
2,067,866 | valid_preds = learn.get_preds(ds_type=DatasetType.Valid )<compute_test_metric> | test.loc[test["Age"].isnull() ==True,"Age"]=np.mean(test["Age"] ) | Titanic - Machine Learning from Disaster |
2,067,866 | class OptimizedRounder(object):
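# Learns the four cut-points that map continuous predictions to the ordinal classes 0-4 by maximising quadratic weighted Cohen's kappa (Nelder-Mead search).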
def __init__(self):
self.coef_ = 0
def _kappa_loss(self, coef, X, y):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic')
return -ll
def fit(self, X, y):
loss_partial = partial(self._kappa_loss, X=X, y=y)
initial_coef = [0.5, 1.5, 2.5, 3.5]
self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
print(-loss_partial(self.coef_['x']))
def predict(self, X, coef):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
return X_p
def coefficients(self):
return self.coef_['x']<train_model> | train.loc[train["Fare"].isnull() ==True,"Fare"]=np.mean(train["Fare"] ) | Titanic - Machine Learning from Disaster |
2,067,866 | optR = OptimizedRounder()
optR.fit(valid_preds[0],valid_preds[1] )<load_from_csv> | test.loc[test["Fare"].isnull() ==True,"Fare"]=np.mean(test["Fare"] ) | Titanic - Machine Learning from Disaster |
2,067,866 | sample_df = pd.read_csv(PATH/'sample_submission.csv')
sample_df.head()<define_variables> | gmm=GaussianMixture(n_components=10,max_iter=500,random_state=0)
age=train["Age"]
age=age.values
age=age.reshape(-1,1)
gmm.fit(age ) | Titanic - Machine Learning from Disaster |
2,067,866 | learn.data.add_test(ImageList.from_df(sample_df,PATH,folder='test_images',suffix='.png'))<predict_on_test> | log_likelihood=gmm.score_samples(age ) | Titanic - Machine Learning from Disaster |
2,067,866 | preds,y = learn.get_preds(DatasetType.Test)
<predict_on_test> | detected_outliers_age=np.where(log_likelihood<-8)[0] | Titanic - Machine Learning from Disaster |
2,067,866 | test_predictions = optR.predict(preds, optR.coefficients())<data_type_conversions> | train.iloc[detected_outliers_age] | Titanic - Machine Learning from Disaster |
2,067,866 | sample_df.diagnosis = test_predictions.astype(int)
sample_df.head()<save_to_csv> | gmm=GaussianMixture(n_components=80,max_iter=500,random_state=0)
SibSp=train["SibSp"]
SibSp=SibSp.values
SibSp=SibSp.reshape(-1,1)
gmm.fit(SibSp ) | Titanic - Machine Learning from Disaster |
2,067,866 | sample_df.to_csv('submission.csv',index=False )<import_modules> | detected_outliers_SibSp=np.where(log_likelihood<1.5)[0] | Titanic - Machine Learning from Disaster |
2,067,866 | base_dir = ".. /input/aptos2019-blindness-detection/"
train_csv = base_dir+"train.csv"
test_csv = base_dir+"test.csv"
test_dir = base_dir+"test_images/"
test_dir_processed = 'test_dir_processed'
train_dir = "train_data_cropped"
IMG_SIZE = 224
SEED = 72
<install_modules> | train.iloc[detected_outliers_SibSp] | Titanic - Machine Learning from Disaster |
2,067,866 |
<import_modules> | gmm=GaussianMixture(n_components=80,max_iter=500,random_state=0)
parch=train["Parch"]
parch=parch.values
parch=parch.reshape(-1,1)
gmm.fit(parch ) | Titanic - Machine Learning from Disaster |
2,067,866 | sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
<import_modules> | detected_outliers_parch=np.where(log_likelihood<0)[0] | Titanic - Machine Learning from Disaster |
2,067,866 | import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight, shuffle<train_model> | train.iloc[detected_outliers_parch] | Titanic - Machine Learning from Disaster |
2,067,866 | image = cv2.imread('../input/aptos2019-blindness-detection/train_images/295fdc964f6e.png')
plt.imshow(image )<define_variables> | fare=train["Fare"]
fare=fare.values
fare=fare.reshape(-1,1)
gmm=GaussianMixture(n_components=3,max_iter=100,random_state=0)
gmm.fit(fare ) | Titanic - Machine Learning from Disaster |
2,067,866 | i = 0
for fileName in os.listdir("train_data_cropped/"):
i = i + 1
print(i )<define_variables> | detected_outliers_fare=np.where(log_likelihood<-10)[0] | Titanic - Machine Learning from Disaster |
2,067,866 | i = 0
for fileName in os.listdir("test_dir_processed/"):
i = i + 1
print(i )<train_on_grid> | outliers=(detected_outliers_parch,detected_outliers_SibSp,detected_outliers_fare,detected_outliers_age)
outliers=np.concatenate(outliers)
outliers | Titanic - Machine Learning from Disaster |
2,067,866 | image1 = cv2.imread('../input/aptos2019-blindness-detection/train_images/295fdc964f6e.png')
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
plt.imshow(image1 )<define_variables> | train=train.drop(index=outliers)
train.shape | Titanic - Machine Learning from Disaster |
2,067,866 | WORKERS = 2
CHANNEL = 3
warnings.filterwarnings("ignore")
NUM_CLASSES = 5
SEED = 72
TRAIN_NUM = 1000<split> | data=pd.concat(objs=[train.drop(labels="Survived",axis=1), test], axis=0 ).reset_index(drop=True)
| Titanic - Machine Learning from Disaster |
2,067,866 | def df_train_test_split_preprocess(df):
image_ids = df["id_code"].values.tolist()
labels = df["diagnosis"].values.tolist()
for i in range(len(image_ids)) :
imgname = image_ids[i]
newname = str(imgname)+ ".png"
image_ids[i] = newname
xtrain, xval, ytrain, yval = train_test_split(image_ids, labels, test_size = 0.15)
df_train = pd.DataFrame({"id_code":xtrain, "diagnosis":ytrain})
df_val = pd.DataFrame({"id_code":xval, "diagnosis":yval})
df_train["diagnosis"] = df_train["diagnosis"].astype('str')
df_val["diagnosis"] = df_val["diagnosis"].astype('str')
print("Length of Training Data :",len(df_train))
print("Length of Validation Data :",len(df_val))
return df_train, df_val<split> | dist_Pclass=train["Pclass"].value_counts(sort=False)
dist_Pclass.rename(index={1:"First class",2:"Second class",3:"Third class"},inplace=True)
dist_Pclass_Norm=dist_Pclass/dist_Pclass.sum()
print("Abs.values
{0}
".format(dist_Pclass))
print("Rel.values
{0}".format(dist_Pclass_Norm)) | Titanic - Machine Learning from Disaster |
2,067,866 | df = pd.read_csv(train_csv)
df_train, df_val = df_train_test_split_preprocess(df )<import_modules> | dist_sex=train["Sex"].value_counts(sort=False)
dist_sex_Norm=dist_sex/dist_sex.sum()
print("Abs.values
{0}
".format(dist_sex))
print("Rel.values
{0}".format(dist_sex_Norm)) | Titanic - Machine Learning from Disaster |
2,067,866 | from sklearn.metrics import f1_score, fbeta_score, cohen_kappa_score<set_options> | data["title"]=data["Name"].apply(extract)
train["title"]=train["Name"].apply(extract)
data["title"].value_counts() | Titanic - Machine Learning from Disaster |
2,067,866 | train_aug = ImageDataGenerator(rescale=1./255,
horizontal_flip = True,
zoom_range = 0.15,
vertical_flip = True,
shear_range=0.1,
rotation_range = 90
)<create_dataframe> | data.loc[(data["title"]=="Ms.")|(data["title"]=="Mlle.")|(data["title"]=="Mme."),"title"]="Miss."
data.loc[(data["title"]=="Master.")|(data["title"]=="Dr.")|(data["title"]=="Rev.")|(data["title"]=="Col.")|(data["title"]=="Major.")|(data["title"]=="Dona.")|(data["title"]=="Don.")|(data["title"]=="Capt.")|(data["title"]=="Sir.")|(data["title"]=="Jonkheer.")|(data["title"]=="Countess.")|(data["title"]=="Lady."),"title"]="wellSituated"
train.loc[(train["title"]=="Ms.")|(train["title"]=="Mlle.")|(train["title"]=="Mme."),"title"]="Miss."
train.loc[(train["title"]=="Master.")|(train["title"]=="Dr.")|(train["title"]=="Rev.")|(train["title"]=="Col.")|(train["title"]=="Major.")|(train["title"]=="Dona.")|(train["title"]=="Don.")|(train["title"]=="Capt.")|(train["title"]=="Sir.")|(train["title"]=="Jonkheer.")|(train["title"]=="Countess.")|(train["title"]=="Lady."),"title"]="wellSituated"
data["title"].value_counts() | Titanic - Machine Learning from Disaster |
2,067,866 | train_generator = train_aug.flow_from_dataframe(dataframe = df_train,
directory = train_dir,
x_col = "id_code",
y_col = "diagnosis",
batch_size = 16,
target_size =(IMG_SIZE, IMG_SIZE),
class_mode = "categorical" )<create_dataframe> | data["title"]=data["title"].map({"Mr.":0,"Miss.":1,"Mrs.":2,"wellSituated":3})
train["title"]=train["title"].map({"Mr.":0,"Miss.":1,"Mrs.":2,"wellSituated":3})
data.head(5 ) | Titanic - Machine Learning from Disaster |
2,067,866 | validation_generator = train_aug.flow_from_dataframe(dataframe = df_val,
directory = train_dir,
x_col = "id_code",
y_col = "diagnosis",
batch_size = 16,
target_size =(IMG_SIZE, IMG_SIZE),
class_mode = "categorical" )<compute_test_metric> | train=pd.get_dummies(data=train,columns=["title"],prefix="title")
data=pd.get_dummies(data=data,columns=["title"],prefix="title")
train.head(5)
train=train.drop(labels="Name",axis=1)
data=data.drop(labels="Name",axis=1 ) | Titanic - Machine Learning from Disaster |
2,067,866 | def cohens_kappa(y_true, y_pred):
y_true_classes = tf.argmax(y_true, 1)
y_pred_classes = tf.argmax(y_pred, 1)
ck_val = tf.contrib.metrics.cohen_kappa(y_true_classes, y_pred_classes, 5)[1]
print(ck_val)
return ck_val
<feature_engineering> | labelencoder_X_1=LabelEncoder()
train["Sex"]=labelencoder_X_1.fit_transform(train.iloc[:,3])
data["Sex"]=labelencoder_X_1.fit_transform(data.iloc[:,7])
data.head(5)
| Titanic - Machine Learning from Disaster |
2,067,866 | test_df_orig = pd.read_csv(test_csv)
def process_test_df(test_df):
test_ids = test_df["id_code"].values.tolist()
for i in range(len(test_ids)) :
imgname = test_ids[i]
newname = str(imgname)+ ".png"
test_ids[i] = newname
test_df["id_code"] = test_ids
return test_df
test_df = process_test_df(test_df_orig )<create_dataframe> | print(train["Cabin"].isnull().any())
print(data["Cabin"].isnull().any() ) | Titanic - Machine Learning from Disaster |
2,067,866 | test_aug = ImageDataGenerator(rescale = 1./255)
test_generator = test_aug.flow_from_dataframe(dataframe = test_df,
directory = test_dir_processed,
x_col = "id_code",
batch_size = 1,
target_size =(IMG_SIZE, IMG_SIZE),
shuffle = False,
class_mode = None )<choose_model_class> | train=pd.get_dummies(data=train,columns=["Cabin"],prefix="Cabin")
data=pd.get_dummies(data=data,columns=["Cabin"],prefix="Cabin")
train.head(5 ) | Titanic - Machine Learning from Disaster |
2,067,866 | class RAdam(keras.optimizers.Optimizer):
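# Rectified Adam (RAdam) optimizer for Keras: rectifies the variance of the adaptive learning rate early in training (the r_t term) and supports warmup, weight decay, and AMSGrad.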
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
epsilon=None, decay=0., weight_decay=0., amsgrad=False,
total_steps=0, warmup_proportion=0.1, min_lr=0., **kwargs):
super(RAdam, self ).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
self.weight_decay = K.variable(weight_decay, name='weight_decay')
self.total_steps = K.variable(total_steps, name='total_steps')
self.warmup_proportion = K.variable(warmup_proportion, name='warmup_proportion')
self.min_lr = K.variable(min_lr, name='min_lr')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.initial_weight_decay = weight_decay
self.initial_total_steps = total_steps
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr *(1./(1.+ self.decay * K.cast(self.iterations, K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx())+ 1
if self.initial_total_steps > 0:
warmup_steps = self.total_steps * self.warmup_proportion
decay_steps = self.total_steps - warmup_steps
lr = K.switch(
t <= warmup_steps,
lr *(t / warmup_steps),
lr *(1.0 - K.minimum(t, decay_steps)/ decay_steps),
)
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='m_' + str(i)) for(i, p)in enumerate(params)]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='v_' + str(i)) for(i, p)in enumerate(params)]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='vhat_' + str(i)) for(i, p)in enumerate(params)]
else:
vhats = [K.zeros(1, name='vhat_' + str(i)) for i in range(len(params)) ]
self.weights = [self.iterations] + ms + vs + vhats
beta_1_t = K.pow(self.beta_1, t)
beta_2_t = K.pow(self.beta_2, t)
sma_inf = 2.0 /(1.0 - self.beta_2)- 1.0
sma_t = sma_inf - 2.0 * t * beta_2_t /(1.0 - beta_2_t)
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t =(self.beta_1 * m)+(1.- self.beta_1)* g
v_t =(self.beta_2 * v)+(1.- self.beta_2)* K.square(g)
m_corr_t = m_t /(1.0 - beta_1_t)
if self.amsgrad:
vhat_t = K.maximum(vhat, v_t)
v_corr_t = K.sqrt(vhat_t /(1.0 - beta_2_t)+ self.epsilon)
self.updates.append(K.update(vhat, vhat_t))
else:
v_corr_t = K.sqrt(v_t /(1.0 - beta_2_t)+ self.epsilon)
r_t = K.sqrt(( sma_t - 4.0)/(sma_inf - 4.0)*
(sma_t - 2.0)/(sma_inf - 2.0)*
sma_inf / sma_t)
p_t = K.switch(sma_t > 5, r_t * m_corr_t / v_corr_t, m_corr_t)
if self.initial_weight_decay > 0:
p_t += self.weight_decay * p
p_t = p - lr * p_t
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
if getattr(p, 'constraint', None)is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)) ,
'beta_1': float(K.get_value(self.beta_1)) ,
'beta_2': float(K.get_value(self.beta_2)) ,
'decay': float(K.get_value(self.decay)) ,
'weight_decay': float(K.get_value(self.weight_decay)) ,
'epsilon': self.epsilon,
'amsgrad': self.amsgrad,
'total_steps': float(K.get_value(self.total_steps)) ,
'warmup_proportion': float(K.get_value(self.warmup_proportion)) ,
'min_lr': float(K.get_value(self.min_lr)) ,
}
base_config = super(RAdam, self ).get_config()
return dict(list(base_config.items())+ list(config.items()))<import_modules> | train.loc[train["Embarked"].isnull() ==True,"Embarked"]="S"
data.loc[data["Embarked"].isnull() ==True,"Embarked"]="S"
train=pd.get_dummies(data=train,columns=["Embarked"],prefix="Embarked")
data=pd.get_dummies(data=data,columns=["Embarked"],prefix="Embarked" ) | Titanic - Machine Learning from Disaster |
2,067,866 | from keras.applications import ResNet50
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, Input
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import keras
from keras.engine import Layer,InputSpec
<choose_model_class> | train=train.drop(labels="Ticket",axis=1)
data=data.drop(labels="Ticket",axis=1 ) | Titanic - Machine Learning from Disaster |
2,067,866 | class GroupNormalization(Layer):
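# Group Normalization layer: splits the channel axis into groups and normalizes each group's activations with learnable gamma/beta, so the statistics are independent of batch size.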
def __init__(self,
groups=32,
axis=-1,
epsilon=1e-5,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(GroupNormalization, self ).__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError('Axis ' + str(self.axis)+ ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' +
str(input_shape)+ '.')
if dim < self.groups:
raise ValueError('Number of groups(' + str(self.groups)+ ')cannot be '
'more than the number of channels(' +
str(dim)+ ' ).')
if dim % self.groups != 0:
raise ValueError('Number of groups(' + str(self.groups)+ ')must be a '
'multiple of the number of channels(' +
str(dim)+ ' ).')
self.input_spec = InputSpec(ndim=len(input_shape),
axes={self.axis: dim})
shape =(dim,)
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
self.built = True
def call(self, inputs, **kwargs):
input_shape = K.int_shape(inputs)
tensor_input_shape = K.shape(inputs)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(1, self.groups)
reshape_group_shape = K.shape(inputs)
group_axes = [reshape_group_shape[i] for i in range(len(input_shape)) ]
group_axes[self.axis] = input_shape[self.axis] // self.groups
group_axes.insert(1, self.groups)
group_shape = [group_axes[0], self.groups] + group_axes[2:]
group_shape = K.stack(group_shape)
inputs = K.reshape(inputs, group_shape)
group_reduction_axes = list(range(len(group_axes)))
group_reduction_axes = group_reduction_axes[2:]
mean = K.mean(inputs, axis=group_reduction_axes, keepdims=True)
variance = K.var(inputs, axis=group_reduction_axes, keepdims=True)
inputs =(inputs - mean)/(K.sqrt(variance + self.epsilon))
inputs = K.reshape(inputs, group_shape)
outputs = inputs
if self.scale:
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
outputs = outputs * broadcast_gamma
if self.center:
broadcast_beta = K.reshape(self.beta, broadcast_shape)
outputs = outputs + broadcast_beta
outputs = K.reshape(outputs, tensor_input_shape)
return outputs
def get_config(self):
config = {
'groups': self.groups,
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(GroupNormalization, self ).get_config()
return dict(list(base_config.items())+ list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape<load_pretrained> | train=pd.get_dummies(data=train,columns=["Pclass"],prefix="Pclass")
data=pd.get_dummies(data=data,columns=["Pclass"],prefix="Pclass" ) | Titanic - Machine Learning from Disaster |
2,067,866 | input_layer = Input(shape =(IMG_SIZE,IMG_SIZE,3))
base_model = EfficientNetB5(weights = None,
include_top = False,
input_tensor = input_layer)
base_model.load_weights('../input/effnet/efficientnetb5notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dense(1024, activation='relu' )(x)
x = Dropout(0.40 )(x)
x = Dense(512, activation='relu' )(x)
x = Dropout(0.3 )(x)
out = Dense(5, activation = 'softmax' )(x)
model = Model(inputs = input_layer, outputs = out )<choose_model_class> | print("NaN-Vals in SibSp in train set present? {0}".format(train["SibSp"].isnull().any()))
print("NaN-Vals in SibSp in data set present? {0}".format(data["SibSp"].isnull().any()))
print("NaN-Vals in Parch in train present? {0}".format(train["Parch"].isnull().any()))
print("NaN-Vals in Parch in data set present? {0}".format(data["Parch"].isnull().any())) | Titanic - Machine Learning from Disaster |
2,067,866 | optimizer = RAdam(lr=0.0005)
es = EarlyStopping(monitor='cohens_kappa', mode='auto', verbose=1, patience=3,restore_best_weights=True)
rlrop = ReduceLROnPlateau(monitor='cohens_kappa',
factor=0.2,
patience=5,
verbose=1,
mode='auto',
min_lr=1e-6)
callback_list = [ rlrop ]
model.compile(optimizer = optimizer, loss = "categorical_crossentropy", metrics = ["accuracy",cohens_kappa] )<feature_engineering> | train["familySize"]=train["SibSp"]+train["Parch"]+1
data["familySize"]=data["SibSp"]+data["Parch"]+1
train.head(5 ) | Titanic - Machine Learning from Disaster |
2,067,866 | K.get_session().run(tf.local_variables_initializer() )<train_model> | train["lonely"]=train["familySize"].map(lambda x: 1 if x==1 else 0)
data["lonely"]=data["familySize"].map(lambda x: 1 if x==1 else 0)
train["smallFamily"]=train["familySize"].map(lambda x:1 if x==2 else 0)
data["smallFamily"]=data["familySize"].map(lambda x:1 if x==2 else 0)
train["biggerFamily"]=train["familySize"].map(lambda x:1 if x>2 & x<=4 else 0)
data["biggerFamily"]=data["familySize"].map(lambda x:1 if x>2 & x<=4 else 0)
train["biggerFamily"]=train["familySize"].map(lambda x:1 if x>4 else 0)
data["biggerFamily"]=data["familySize"].map(lambda x:1 if x>4 else 0)
train=train.drop(labels=["SibSp","Parch","familySize"],axis=1)
data=data.drop(labels=["SibSp","Parch","familySize"],axis=1)
train.head(5 ) | Titanic - Machine Learning from Disaster |
2,067,866 | history = model.fit_generator(generator = train_generator,
steps_per_epoch = len(train_generator),
epochs = 18,
validation_data = validation_generator,
validation_steps = len(validation_generator),
callbacks = callback_list )<predict_on_test> | data.drop(labels = ["PassengerId"], axis = 1, inplace = True)
train_set=data[:len(train)]
test=data[len(train):]
train.head(5 ) | Titanic - Machine Learning from Disaster |
2,067,866 | predprobs = model.predict_generator(test_generator, steps=len(test_generator))<drop_column> | y=train["Survived"] | Titanic - Machine Learning from Disaster |
2,067,866 | shutil.rmtree('train_data_cropped')
shutil.rmtree('test_dir_processed' )<prepare_output> | kfold = StratifiedKFold(n_splits=10 ) | Titanic - Machine Learning from Disaster |
2,067,866 | predictions = []
for i in predprobs:
predictions.append(np.argmax(i))<prepare_output> | classifier=XGBClassifier()
parameters = {'nthread':[4],
'objective':['binary:logistic'],
'learning_rate': [0.05],
'max_depth': [6],
'min_child_weight': [11],
'silent': [1],
'subsample': [0.8],
'colsample_bytree': [0.7],
'n_estimators': [5],
'missing':[-999],
'seed': [1337]}
clf=GridSearchCV(classifier,parameters, n_jobs=5,cv=kfold,scoring='roc_auc',verbose=2, refit=True)
clf.fit(train_set,y)
best_parameters = clf.best_score_
test=clf.predict(test)
| Titanic - Machine Learning from Disaster |
2,067,866 | test_df_orig["diagnosis"] = predictions<feature_engineering> | y_pred= test | Titanic - Machine Learning from Disaster |
2,067,866 | test_ids = test_df_orig["id_code"].values.tolist()
for i in range(len(test_ids)) :
imgname = test_ids[i]
newname = imgname.split('.')[0]
test_ids[i] = newname
test_df_orig["id_code"] = test_ids<save_to_csv> | y_pred=pd.DataFrame(data=y_pred ) | Titanic - Machine Learning from Disaster |
2,067,866 | <load_from_csv><EOS> | submission = pd.read_csv("../input/gender_submission.csv")
submission['Survived'] = y_pred
submission.to_csv('../working/submit.csv',index=False ) | Titanic - Machine Learning from Disaster |
672,295 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<set_options> | train_df = pd.read_csv("../input/train.csv", header=0)
train_df["Gender"] = train_df["Sex"].map({"female": 0, "male": 1} ).astype(int)
train_df.head(3)
median_age = train_df["Age"].dropna().median()
if len(train_df.Age[train_df.Age.isnull() ])> 0:
train_df.loc[(train_df.Age.isnull()), "Age"] = median_age
train_df['family_size'] = train_df['SibSp'] + train_df['Parch'] + 1
train_df['isAlone'] = train_df['family_size'][train_df['family_size'] == 1]
train_df.loc[(train_df.isAlone.isnull()), 'isAlone'] = 0
def name_classifier(name_df):
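# One-hot encode the honorific (Miss/Mrs/Master/Mr) parsed from each passenger name; 'Mr' is checked last because 'Mrs' contains it.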
name_class_df = pd.DataFrame(columns=['miss', 'mrs', 'master', 'mr'])
for name in name_df:
if 'Miss' in name:
df = pd.DataFrame([[1,0,0,0]], columns=['miss', 'mrs', 'master', 'mr'])
elif 'Mrs' in name:
df = pd.DataFrame([[0,1,0,0]], columns=['miss', 'mrs', 'master', 'mr'])
elif 'Master' in name:
df = pd.DataFrame([[0,0,1,0]], columns=['miss', 'mrs', 'master', 'mr'])
elif 'Mr' in name:
df = pd.DataFrame([[0,0,0,1]], columns=['miss', 'mrs', 'master', 'mr'])
else:
df = pd.DataFrame([[0,0,0,0]], columns=['miss', 'mrs', 'master', 'mr'])
name_class_df = name_class_df.append(df, ignore_index=True)
return name_class_df
train_df = pd.concat(( train_df,name_classifier(train_df['Name'])) , axis=1)
train_df = train_df.drop(["Name", "Ticket", "Sex", "SibSp", "Parch", "Cabin","Embarked", "PassengerId"], axis=1)
test_df = pd.read_csv(".. /input/test.csv", header=0)
test_df["Gender"] = test_df["Sex"].map({"female": 0, "male": 1} ).astype(int)
median_age = test_df["Age"].dropna().median()
if len(test_df.Age[test_df.Age.isnull() ])> 0:
test_df.loc[(test_df.Age.isnull()), "Age"] = median_age
test_df['family_size'] = test_df['SibSp'] + test_df['Parch'] + 1
test_df['isAlone'] = test_df['family_size'][test_df['family_size'] == 1]
test_df.loc[(test_df.isAlone.isnull()), 'isAlone'] = 0
test_df = pd.concat(( test_df,name_classifier(test_df['Name'])) , axis=1)
ids = test_df["PassengerId"].values
test_df = test_df.drop(["Name", "Ticket", "Sex", "SibSp", "Parch", "Cabin", "Embarked", "PassengerId"], axis=1)
train_data = train_df.values
test_data = test_df.values
model = Sequential()
model.add(Dense(9, activation='relu', input_dim=10))
model.add(Dense(9, activation='relu'))
model.add(Dense(5, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
model.fit(train_data[:,1:], train_data[:,0], batch_size=32, epochs=200)
y_pred = model.predict(test_data)
y_final =(y_pred > 0.5 ).astype(int ).reshape(test_data.shape[0])
output = y_final
submit_file = open("./submit.csv", "w")
file_object = csv.writer(submit_file)
file_object.writerow(["PassengerId", "Survived"])
file_object.writerows(zip(ids, output))
submit_file.close()
train_df.head(5 ) | Titanic - Machine Learning from Disaster |
1,744,678 | t_start = time.time()
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
%matplotlib inline
style.use('fivethirtyeight')
sns.set(style='whitegrid', color_codes=True)
!ls ../input/*<import_modules> | warnings.filterwarnings('ignore')
%matplotlib inline | Titanic - Machine Learning from Disaster |
1,744,678 | fastai.__version__<set_options> | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv' ) | Titanic - Machine Learning from Disaster |
1,744,678 | def is_interactive() :
return 'runtime' in get_ipython().config.IPKernelApp.connection_file
print('Interactive?', is_interactive() )<set_options> | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv' ) | Titanic - Machine Learning from Disaster |
1,744,678 | def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(42 )<define_variables> | pd.isnull(train ).sum() | Titanic - Machine Learning from Disaster |
1,744,678 | Path('/tmp/.cache/torch/checkpoints/' ).mkdir(exist_ok=True, parents=True)
model_path = '/tmp/.cache/torch/checkpoints/efficientNet.pth'
!cp ../input/efficientnet*/efficientNet_*.pth {model_path}<load_from_csv> | train["Cabin"] =(train["Cabin"].notnull().astype('int'))
test["Cabin"] =(test["Cabin"].notnull().astype('int'))
sns.barplot(x="Cabin", y="Survived", data=train ) | Titanic - Machine Learning from Disaster |
1,744,678 | PATH = Path('../input/aptos2019-blindness-detection')
PATH_train = Path('../input/drd-newold/drd')
df_train = pd.read_csv('../input/oldandnew/new_train_data.csv')
df_test = pd.read_csv(PATH/'test.csv')
_ = df_train.hist()<load_pretrained> | sex_map = {'male':0, 'female':1}
train['Sex'] = train['Sex'].map(sex_map)
test['Sex'] = test['Sex'].map(sex_map ) | Titanic - Machine Learning from Disaster |
1,744,678 | aptos19_stats =([0.42, 0.22, 0.075], [0.27, 0.15, 0.081])
data = ImageDataBunch.from_df(df=df_train,
path=PATH_train, folder='aptos_drd_jpeg', suffix='.jpeg',
valid_pct=0.1,
ds_tfms=get_transforms(flip_vert=True, max_warp=0.05, max_rotate=20.) ,
bs=4,
num_workers=os.cpu_count()
).normalize(aptos19_stats )<import_modules> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
1,744,678 | print(f'Classes:\n{data.classes}')<define_variables> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
1,744,678 | data.show_batch(rows=3, figsize=(7,6))<define_variables> | test["Fare"] = test["Fare"].fillna(test["Fare"].median())
test.isnull().sum() | Titanic - Machine Learning from Disaster |
1,744,678 | package_path = '../input/efficientnet-pytorch/efficientnet-pytorch/EfficientNet-PyTorch-master'
sys.path.append(package_path)
<load_pretrained> | label = train["Survived"]
PassengerId = test["PassengerId"]
train = train.drop(["Survived","PassengerId","Name","Ticket","Embarked","AgeGroup"], axis=1)
test = test.drop(["PassengerId","Name","Ticket","Embarked","AgeGroup"], axis=1 ) | Titanic - Machine Learning from Disaster |
1,744,678 | def EfficientNetB4(pretrained=True):
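# Build EfficientNet-B4 with a 5-class head and load the locally stored pretrained weights, dropping the old classifier (_fc) or remapping mismatched state-dict keys.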
model = EfficientNet.from_name('efficientnet-b4', override_params={'num_classes': 5 })
if pretrained:
model_state = torch.load(model_path)
if '_fc.weight' in model_state.keys() :
model_state.pop('_fc.weight')
model_state.pop('_fc.bias')
res = model.load_state_dict(model_state, strict=False)
assert str(res.missing_keys)== str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
else:
mapping = { i:o for i,o in zip(model_state.keys() , model.state_dict().keys())}
mapped_model_state = OrderedDict([
(mapping[k], v)for k,v in model_state.items() if not mapping[k].startswith('_fc')
])
res = model.load_state_dict(mapped_model_state, strict=False)
print(res)
return model<choose_model_class> | gbc = GradientBoostingClassifier()
gbc.fit(train,label)
gbc.score(train,label)
| Titanic - Machine Learning from Disaster |
1,744,678 | model = EfficientNetB4(pretrained=True)
<compute_train_metric> | pred = gbc.predict(test ) | Titanic - Machine Learning from Disaster |
1,744,678 | class FocalLoss(nn.Module):
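# Focal loss: weights the per-example cross-entropy by (1 - p_t)**gamma so easy, well-classified examples contribute less to the gradient.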
def __init__(self, gamma=3., reduction='mean'):
super().__init__()
self.gamma = gamma
self.reduction = reduction
def forward(self, inputs, targets):
CE_loss = nn.CrossEntropyLoss(reduction='none' )(inputs, targets)
pt = torch.exp(-CE_loss)
F_loss =(( 1 - pt)**self.gamma)* CE_loss
if self.reduction == 'sum':
return F_loss.sum()
elif self.reduction == 'mean':
return F_loss.mean()<choose_model_class> | pred_df = pd.DataFrame(pred, columns=['Survived'])
result = pd.concat([PassengerId,pred_df],axis =1)
| Titanic - Machine Learning from Disaster |
1,744,678 | <choose_model_class><EOS> | result.to_csv('result.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,637,620 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_pretrained> | pd.set_option('display.max_columns', 30)
sns.set()
| Titanic - Machine Learning from Disaster |
1,637,620 | learn.freeze()
learn.lr_find(start_lr=1e-6, end_lr=1e1, wd=5e-3)
learn.recorder.plot(suggestion=True )<train_model> | df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv' ) | Titanic - Machine Learning from Disaster |
1,637,620 | learn.fit_one_cycle(2, max_lr=3e-2, div_factor=10, final_div=100, wd=5e-3)
learn.save('stage-1')
learn.recorder.plot_losses()<train_model> | print('Number of records:')
print(len(df_train.index))
print('{:=<70}'.format(''))
print('Missing values in the training dataset:')
print(df_train.isnull().sum())
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | learn.unfreeze()
<train_model> | print('Number of records:')
print(len(df_test.index))
print('{:=<70}'.format(''))
print('Missing values in the test dataset:')
print(df_test.isnull().sum())
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | learn.fit_one_cycle(cyc_len=5, max_lr=slice(5e-5, 5e-4), pct_start=0, wd=1e-3)
learn.save('stage-3')
<train_model> | df_train['Dataset'] = 'train'
df_test['Dataset'] = 'test'
df_test.insert(loc=1, column='Survived', value=np.nan)
df_all = df_train.append(df_test ) | Titanic - Machine Learning from Disaster |
1,637,620 | tta_params = {'beta':0.12, 'scale':1.0}<load_from_csv> | print('Number of records:')
print(len(df_all.index))
print('{:=<70}'.format(''))
print('Missing values in the test dataset:')
print(df_all.isnull().sum())
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | sample_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
sample_df.head()<define_variables> | print('Percentage of passengers who survived(1)and died(0):')
print(df_train['Survived'].value_counts(normalize=True)) | Titanic - Machine Learning from Disaster |
1,637,620 | learn.data.add_test(ImageList.from_df(
sample_df, PATH,
folder='test_images',
suffix='.png'
))<choose_model_class> | sex = df_train['Sex'].value_counts(normalize=True)
print('{:=<70}'.format(''))
print('Proportion of passengers by gender:')
print(sex)
print('{:=<70}'.format(''))
f_passengers = sex['female']
m_passengers = sex['male'] | Titanic - Machine Learning from Disaster |
1,637,620 | preds,y = learn.TTA(ds_type=DatasetType.Test, **tta_params )<prepare_output> | proportions_col = ['category', '%passengers', '%survived']
proportions_cat = ['female', 'male', 'class 1', 'class 2', 'class 3']
proportions_survived = [f_survived, m_survived, c1_survived, c2_survived, c3_survived]
proportions_passengers =[f_passengers, m_passengers, c1_passengers, c2_passengers, c3_passengers]
df_proportions = pd.DataFrame(
list(zip(proportions_cat,
proportions_passengers,
proportions_survived)
),
columns=proportions_col)
df_proportions['variance'] = df_proportions['%passengers'] - df_proportions['%survived']
df_proportions_melted = df_proportions.melt(id_vars=['category', 'variance'],
var_name='ratio type',
value_name='ratio value')
my_red = '#d62728'  # hex value was truncated in the source; assumed red placeholder
my_green = '#2ca02c'  # hex value was truncated in the source; assumed green placeholder
l_colours = [my_red if i >=0 else my_green for i in df_proportions['variance'].tolist() ]
d_colours = dict(zip(df_proportions['category'],l_colours))
fig, ax = plt.subplots(figsize=(6,8))
ax = sns.pointplot(x='ratio type',
y='ratio value',
hue='category',
data=df_proportions_melted,
palette=d_colours)
for i in range(0, len(df_proportions)) :
lbl_offset = 1.1
if(df_proportions['%passengers'] - df_proportions['%survived'])[i] < 0:
ax.text(x=lbl_offset,
y=df_proportions['%survived'][i],
s=df_proportions['category'][i],
ha='left',
color=my_green)
else:
ax.text(x=lbl_offset,
y=df_proportions['%survived'][i],
s=df_proportions['category'][i],
ha='left',
color=my_red)
ax.set(title='Proportions of passengers in the total and survivors population',
ylabel='Percentage')
ax.xaxis.label.set_visible(False)
ax.legend_.set_visible(False)
plt.show() | Titanic - Machine Learning from Disaster |
1,637,620 | sample_df.diagnosis = preds.argmax(1)
sample_df.head()<save_to_csv> | summary = pd.pivot_table(data=df_train,
index=['Survived'],
columns=['Sex', 'Pclass'],
values=['Name'],
aggfunc=('count'),
margins=True,
margins_name='Total')
print('{:=<70}'.format(''))
print(summary.div(summary.iloc[-1]))
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | sample_df.to_csv('submission.csv',index=False)
_ = sample_df.hist()<count_values> | def get_chi2(crosstab, HasMargin=True, proba=0.95):
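# Chi-squared test of independence between the two crosstab variables; trims the margin row/column when HasMargin is True and reports both independence and statistical significance.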
var1 = crosstab.index.name
var2 = crosstab.columns.name
if HasMargin==True:
i = -1
else:
i = ""
stat, p, dof, expected = chi2_contingency(
observed=crosstab.iloc[:i,:i],
correction=True)
critical = chi2.ppf(proba, dof)
if stat >= critical:
chi2_independence = 'The variables {} and {} are dependent(X2>eX2)'.format(var1, var2)
else:
chi2_independence = 'The variables {} and {} are independent(X2<eX2)'.format(var1, var2)
alpha = 1 - proba
if p <= alpha:
chi2_significance = 'The test is statistically significant(p<alpha)'
else:
chi2_significance = 'The test is not statistically significant(p>alpha)'
print('{:^70}'.format('Chi2 test of independence'))
print('{:=<70}'.format(''))
print('Tested variables')
print(var1)
print(var2)
print('{:=<70}'.format(''))
print('{}'.format('Test of independence'))
print('Chi-square statistic(X2): {:.3f}'.format(stat))
print('Minimum expected chi-square statistic(eX2): {:.3f}'.format(critical))
print(chi2_independence)
print('{:=<70}'.format(''))
print('Significance of the test')
print('p-value(p): {:.3f}'.format(p))
print('Accepted significance level(alpha): {:.3f}'.format(alpha))
print(chi2_significance)
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | sample_df['diagnosis'].value_counts()<set_options> | get_chi2(summary_sex, proba=0.99 ) | Titanic - Machine Learning from Disaster |
1,637,620 | %reload_ext autoreload
%autoreload 2
%matplotlib inline<import_modules> | get_chi2(summary_class, proba=0.99 ) | Titanic - Machine Learning from Disaster |
1,637,620 | import fastai
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
import cv2
import pandas as pd
import matplotlib.pyplot as plt<set_options> | df_train['FamilySize'] = df_train['SibSp'] + df_train['Parch'] + 1
df_all['FamilySize'] = df_all['SibSp'] + df_all['Parch'] + 1 | Titanic - Machine Learning from Disaster |
1,637,620 | print('Make sure cudnn is enabled:', torch.backends.cudnn.enabled )<set_options> | family_passengers = df_train['FamilySize'].value_counts(normalize=True, sort=False ).to_frame().reset_index()
family_passengers.columns = ['FamilySize', '%total']
print('{:=<70}'.format(''))
print(family_passengers)
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
SEED = 1667
seed_everything(SEED )<feature_engineering> | df_family = pd.merge(left=family_passengers,
right=family_survived,
how='inner',
on='FamilySize')
df_family_melted = df_family.melt(id_vars=['FamilySize'],
var_name='ratio type',
value_name='ratio value')
ax = sns.catplot(data=df_family_melted,
x='ratio type',
y='ratio value',
col='FamilySize',
col_wrap=9,
kind='point',
height=3,
aspect=0.8)
ax.set_axis_labels('', 'Percentage')
plt.subplots_adjust(top=0.7)
plt.suptitle('Proportions of passengers: total and survivors')
plt.show() | Titanic - Machine Learning from Disaster |
1,637,620 | base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir,'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir,'{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1 ).reset_index(drop=True)
df.head(10 )<define_variables> | get_chi2(summary_family ) | Titanic - Machine Learning from Disaster |
1,637,620 | src =(ImageList.from_df(df=df,path='./',cols='path')
.split_by_rand_pct(0.2, seed=42)
.label_from_df(cols='diagnosis',label_cls=FloatList)
)
src<normalization> | df_median_age = pd.pivot_table(data=df_age,
index=['Sex','Pclass'],
values='Age',
aggfunc=np.median ).reset_index()
def fill_age(data):
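# Impute a missing Age with the median age of the passenger's (Sex, Pclass) group.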
median = df_median_age
age = data['Age']
sex = data['Sex']
pclass = data['Pclass']
if pd.isnull(age):
if sex == 'female' and pclass == 1:
return median[(median['Sex'] == 'female')&(median['Pclass'] == 1)]['Age'].values[0]
if sex == 'female' and pclass == 2:
return median[(median['Sex'] == 'female')&(median['Pclass'] == 2)]['Age'].values[0]
if sex == 'female' and pclass == 3:
return median[(median['Sex'] == 'female')&(median['Pclass'] == 3)]['Age'].values[0]
if sex == 'male' and pclass == 1:
return median[(median['Sex'] == 'male')&(median['Pclass'] == 1)]['Age'].values[0]
if sex == 'male' and pclass == 2:
return median[(median['Sex'] == 'male')&(median['Pclass'] == 2)]['Age'].values[0]
if sex == 'male' and pclass == 3:
return median[(median['Sex'] == 'male')&(median['Pclass'] == 3)]['Age'].values[0]
else:
return age
df_train['Age'] = df_train[['Age', 'Sex', 'Pclass']].apply(fill_age, axis=1)
df_all['Age'] = df_all[['Age', 'Sex', 'Pclass']].apply(fill_age, axis=1)
print('{:=<70}'.format(''))
print('Missing age values in the combined dataset:')
print(df_all['Age'].isnull().sum())
print('{:=<70}'.format('')) | Titanic - Machine Learning from Disaster |
1,637,620 | tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=0.10, max_zoom=1.3, max_warp=0.0, max_lighting=0.2)
data =(
src.transform(tfms,size=224)
.databunch()
.normalize(imagenet_stats)
)
data<compute_test_metric> | print(df_train['Embarked'].value_counts(normalize=True)) | Titanic - Machine Learning from Disaster |
1,637,620 | def quadratic_kappa(y_hat, y):
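# fastai metric: quadratic weighted Cohen's kappa computed on rounded regression outputs, returned as a tensor on cuda:0.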
return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'),device='cuda:0' )<compute_test_metric> | df_train['Embarked'].fillna(value='S', inplace=True)
df_all['Embarked'].fillna(value='S', inplace=True ) | Titanic - Machine Learning from Disaster |
1,637,620 | kappa = KappaScore()
kappa.weights = "quadratic"
<choose_model_class> | get_chi2(summary_embarked ) | Titanic - Machine Learning from Disaster |
1,637,620 | learn = cnn_learner(data, base_arch=models.resnet152 ,
metrics=[quadratic_kappa],model_dir='/kaggle',
pretrained=True,
callback_fns=[partial(EarlyStoppingCallback, monitor='quadratic_kappa',
min_delta=0.01, patience=3)] )<train_model> | df_all[df_all['Fare'].isnull() == True] | Titanic - Machine Learning from Disaster |
1,637,620 | learn.fit_one_cycle(6, 3e-2)
<save_model> | median_fare = df_train['Fare'][df_train['Pclass']==3].median()
df_all['Fare'].fillna(value=median_fare, inplace=True ) | Titanic - Machine Learning from Disaster |
1,637,620 | learn.save('stage1' )<train_model> | d_ticket_count = dict(df_all['Ticket'].value_counts())
df_all['TicketCount'] = df_all['Ticket'].map(d_ticket_count)
df_all['AdjFare'] = df_all['Fare'] / df_all['TicketCount']
df_train['AdjFare'] = df_all[df_all['Dataset'] == 'train']['AdjFare'] | Titanic - Machine Learning from Disaster |
1,637,620 | learn.fit_one_cycle(10, max_lr=slice(1e-6,1e-3))<predict_on_test> | df_all['LastName'] = df_all['Name'].str.extract(pat= '^([^,]*),', expand=True)
| Titanic - Machine Learning from Disaster |
1,637,620 | valid_preds = learn.get_preds(ds_type=DatasetType.Valid )<import_modules> | df_all['TicketNum'] = df_all['Ticket'].str.replace(pat=r'(\D)', repl='')
| Titanic - Machine Learning from Disaster |
1,637,620 | import numpy as np
import pandas as pd
import os
import scipy as sp
from functools import partial
from sklearn import metrics
from collections import Counter
import json<compute_test_metric> | d_ticket_count = dict(df_all['Ticket'].value_counts())
df_all['TicketCount'] = df_all['Ticket'].map(d_ticket_count ) | Titanic - Machine Learning from Disaster |
1,637,620 | class OptimizedRounder(object):
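# Same threshold optimiser as above: fits four cut-points that maximise quadratic weighted kappa via Nelder-Mead.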
def __init__(self):
self.coef_ = 0
def _kappa_loss(self, coef, X, y):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic')
return -ll
def fit(self, X, y):
loss_partial = partial(self._kappa_loss, X=X, y=y)
initial_coef = [0.5, 1.5, 2.5, 3.5]
self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
print(-loss_partial(self.coef_['x']))
def predict(self, X, coef):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
return X_p
def coefficients(self):
return self.coef_['x']<train_model> | df_all['SharedTicket'] = [1 if i > 1 else 0 for i in df_all['TicketCount']]
d_shared_name = dict(df_all['LastName'].value_counts())
df_all['SharedName'] = [1 if i > 1 else 0 for i in df_all['LastName'].map(d_shared_name)]
df_all['SharedFeatures'] = df_all['SharedTicket'] + df_all['SharedName'] | Titanic - Machine Learning from Disaster |
1,637,620 | optR = OptimizedRounder()
optR.fit(valid_preds[0],valid_preds[1] )<load_from_csv> | shared_features = [
df_all['SharedFeatures'] == 0,
df_all['SharedFeatures'] == 1,
(df_all['SharedTicket'] == 0)&(df_all['SharedName'] == 1)&(df_all['FamilySize'] == 1),
(df_all['SharedTicket'] == 0)&(df_all['SharedName'] == 1)&(df_all['FamilySize'] > 1),
(df_all['SharedTicket'] == 1)&(df_all['SharedName'] == 0)
]
is_single = [1, 0, 1, 0, 0]
df_all['IsSingle'] = np.select(shared_features, is_single ) | Titanic - Machine Learning from Disaster |
1,637,620 | sample_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
sample_df.head()<save_to_csv> | from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix | Titanic - Machine Learning from Disaster |
1,637,620 | learn.data.add_test(ImageList.from_df(sample_df,'../input/aptos2019-blindness-detection',folder='test_images',suffix='.png'))
preds,y = learn.get_preds(ds_type=DatasetType.Test)
test_predictions = optR.predict(preds, optR.coefficients())
sample_df.diagnosis = test_predictions.astype(int)
sample_df.head()
sample_df.to_csv('submission.csv',index=False )<set_options> | df_all['Sex'] = [1 if i == 'female' else 0 for i in df_all['Sex']]
l_predictors = ['Dataset', 'Survived', 'Pclass', 'Sex', 'Embarked', 'IsSingle', 'FamilySize']
df_final = df_all[l_predictors]
df_final = pd.get_dummies(data = df_final,
columns = ['Pclass', 'Embarked', 'FamilySize'] ) | Titanic - Machine Learning from Disaster |
1,637,620 | %reload_ext autoreload
%autoreload 2
%matplotlib inline<import_modules> | predictors = df_final[df_final['Dataset'] == 'train'].drop(['Dataset', 'Survived'], axis=1)
targets = df_final[df_final['Dataset'] == 'train']['Survived']
X_train, X_test, y_train, y_test = train_test_split(predictors, targets, test_size=0.30, random_state=0 ) | Titanic - Machine Learning from Disaster |
1,637,620 | from fastai import *
from fastai.vision import *
import torch<define_variables> | rfc = RandomForestClassifier(n_estimators=100, random_state = 42)
rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
print(accuracy_score(rfc_pred, y_test)) | Titanic - Machine Learning from Disaster |
1,637,620 | path = Path('../input/aptos2019-blindness-detection')
path_train = path/'train_images'
path_test = path/'test_images'
path, path_train, path_test<load_from_csv> | param = {'n_estimators': [100, 500],
'criterion' :['gini'],
'max_features': ['auto'],
'max_depth': [3, 4, 5]}
grid = GridSearchCV(estimator=rfc, param_grid=param, refit=True, cv=3)
grid.fit(X_train,y_train)
grid_pred = grid.predict(X_test)
print(accuracy_score(grid_pred, y_test)) | Titanic - Machine Learning from Disaster |
1,637,620 | labels = pd.read_csv(path/'train.csv')
labels.head()<feature_engineering> | grid.best_params_ | Titanic - Machine Learning from Disaster |
1,637,620 | tfms = get_transforms(
do_flip=True,
flip_vert=True,
max_warp=0.1,
max_rotate=360.,
max_zoom=1.1,
max_lighting=0.1,
p_lighting=0.5
)<define_search_space> | rfc_best = RandomForestClassifier(random_state = 42,
criterion = 'gini',
max_depth = 4,
max_features = 'auto',
n_estimators = 100)
rfc_best.fit(X_train, y_train)
rfc_best_pred = rfc_best.predict(X_test)
print(accuracy_score(rfc_best_pred, y_test)) | Titanic - Machine Learning from Disaster |
1,637,620 | aptos19_stats =([0.42, 0.22, 0.075], [0.27, 0.15, 0.081] )<load_from_csv> | svc = SVC(random_state=42, gamma='scale')
svc.fit(X_train, y_train)
svc_pred = svc.predict(X_test)
print(accuracy_score(svc_pred, y_test)) | Titanic - Machine Learning from Disaster |
1,637,620 | test_labels = pd.read_csv(path/'sample_submission.csv')
test = ImageList.from_df(test_labels, path = path_test, suffix = '.png' )<split> | param = {'C': [1, 10, 100, 1000],
'gamma' :[1, 0.1, 0.001, 0.0001],
'kernel': ['linear', 'rbf']}
svm_grid = GridSearchCV(estimator=svc, param_grid=param, refit=True, cv=3, iid=False)
svm_grid.fit(X_train,y_train)
svm_grid_pred = svm_grid.predict(X_test)
print(accuracy_score(svm_grid_pred, y_test)) | Titanic - Machine Learning from Disaster |
1,637,620 | src =(ImageList.from_df(labels, path = path_train, suffix = '.png')
.split_by_rand_pct(seed = 42)
.label_from_df(cols = 'diagnosis')
.add_test(test))<normalization> | svm_grid.best_params_ | Titanic - Machine Learning from Disaster |
1,637,620 | data =(
src.transform(
tfms,
size = 512,
resize_method=ResizeMethod.SQUISH,
padding_mode='zeros'
)
.databunch(bs=8)
.normalize(aptos19_stats))<choose_model_class> | svm_best = SVC(random_state = 42,
C = 1,
gamma = 0.1,
kernel = 'rbf')
svm_best.fit(X_train, y_train)
svm_best_pred = svm_best.predict(X_test)
print(accuracy_score(svm_best_pred, y_test)) | Titanic - Machine Learning from Disaster |
1,637,620 | kappa = KappaScore()
kappa.weights = "quadratic"<choose_model_class> | X_test_submit = df_final[df_final['Dataset'] == 'test'].drop(['Dataset', 'Survived'], axis=1)
submit_pred = rfc_best.predict(X_test_submit)
file_submit = pd.DataFrame({'PassengerId': df_test['PassengerId'].values,
'Survived': submit_pred.astype(np.int32)})
file_submit.to_csv('titanic_submit_pred.csv', index=False ) | Titanic - Machine Learning from Disaster |
585,608 | learn = cnn_learner(
data,
models.resnet152,
metrics = [accuracy, kappa],
model_dir = Path('.. /kaggle/working'),
path = Path(".")
)<train_model> | X = X_train.iloc[:,[2,4,5,6,7,9,11]].copy()
Y = X_train.iloc[:, 1]
accuracy = X_test.iloc[:, [0,1]].copy()
xText = X_test.iloc[:,[2,4,5,6,7,9,11]].copy()
finalTest = test.iloc[:,[1,3,4,5,6,8,10]].copy()
print(X.head(3))
print(Y.head(3))
print(finalTest.head(3))
| Titanic - Machine Learning from Disaster |
585,608 | learn.fit_one_cycle(3 )<save_model> | def mapAgeToCategory(x):
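# Bin Age into six ordinal buckets by decade (x is a one-element row, hence x[0]).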
if x[0] < 10:
return 0
elif(x[0] >= 10 and x[0] < 20):
return 1
elif(x[0] >= 20 and x[0] < 30):
return 2
elif(x[0] >= 30 and x[0] < 40):
return 3
elif(x[0] >= 40 and x[0] < 50):
return 4
else:
return 5
def mapFareToCategory(x):
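# Bin Fare into coarse ordinal buckets (the 5-10 and 10-20 bands both map to category 1).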
if x < 5:
return 0
elif x >= 5 and x < 10:
return 1
elif x >= 10 and x < 20:
return 1
elif x >= 20 and x < 30:
return 2
elif x >= 30 and x < 50:
return 3
elif x >=50 and x < 100:
return 4
else:
return 5 | Titanic - Machine Learning from Disaster |