| kernel_id int64 24.2k–23.3M | prompt stringlengths 8–1.85M | completion stringlengths 1–182k | comp_name stringlengths 5–57 |
|---|---|---|---|
604,207 | train_df = org_train_df.copy()
test_df = org_test_df.copy()<init_hyperparams> | gridBoost.fit(X_train,y_train)
print(gridBoost.best_score_)
print(gridBoost.best_estimator_ ) | Titanic - Machine Learning from Disaster |
604,207 | lgbm_param = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
"learning_rate": 0.03,
"num_leaves": 24,
"max_depth": 6,
"colsample_bytree": 0.65,
"subsample": 0.7,
"reg_alpha": 0.1,
"reg_lambda": 0.2,
"nthread":8
}<prepare_x_and_y> | gridBoost.best_estimator_.fit(X_train,y_train)
predict=gridBoost.best_estimator_.predict(X_test)
print(accuracy_score(y_test,predict))
| Titanic - Machine Learning from Disaster |
604,207 | y_value = train_df["Cover_Type"]
del train_df["Cover_Type"], train_df["ID"]
del test_df["Cover_Type"], test_df["ID"]<split> | xgb=XGBClassifier(max_depth=2, n_estimators=700, learning_rate=0.009,nthread=-1,subsample=1,colsample_bytree=0.8)
xgb.fit(X_train,y_train)
predict=xgb.predict(X_test)
print(accuracy_score(y_test,predict))
print(confusion_matrix(y_test,predict))
print(precision_score(y_test,predict))
print(recall_score(y_test,predict)) | Titanic - Machine Learning from Disaster |
604,207 |
<predict_on_test> | lda=LinearDiscriminantAnalysis()
lda.fit(X_train,y_train)
predict=lda.predict(X_test)
print(accuracy_score(y_test,predict))
print(precision_score(y_test,predict))
print(recall_score(y_test,predict)) | Titanic - Machine Learning from Disaster |
604,207 | dtrain = lgbm.Dataset(train_df, label=y_value)
clf = lgbm.train(lgbm_param, train_set=dtrain, num_boost_round=5000)
predict = clf.predict(test_df )<save_to_csv> | dectree = DecisionTreeClassifier(criterion="entropy",
max_depth=5,
class_weight = 'balanced',
min_weight_fraction_leaf = 0.009,
random_state=2000)
dectree.fit(X_train, y_train)
y_pred = dectree.predict(X_test)
dectree_accy = round(accuracy_score(y_pred, y_test), 3)
print(dectree_accy)
print(confusion_matrix(y_test,y_pred))
print(precision_score(y_test,y_pred))
print(recall_score(y_test,y_pred))
| Titanic - Machine Learning from Disaster |
604,207 | submission = pd.read_csv('../input/kaggletutorial/sample_submission.csv')
submission['Cover_Type'] = predict
submission.to_csv('lgbm_last.csv', index=False )<compute_test_metric> | randomforest = RandomForestClassifier(n_estimators=100,max_depth=5,min_samples_split=20,max_features=0.2, min_samples_leaf=8,random_state=20)
randomforest.fit(X_train, y_train)
y_pred = randomforest.predict(X_test)
random_accy = round(accuracy_score(y_pred, y_test), 3)
print(random_accy)
print(precision_score(y_test,y_pred))
print(recall_score(y_test,y_pred))
print(confusion_matrix(y_test,y_pred))
| Titanic - Machine Learning from Disaster |
604,207 | org_train_df, org_test_df = nn_data_preprocessing(train, test )<create_dataframe> | BaggingClassifier = BaggingClassifier()
BaggingClassifier.fit(X_train, y_train)
y_pred = BaggingClassifier.predict(X_test)
bagging_accy = round(accuracy_score(y_pred, y_test), 3)
print(bagging_accy ) | Titanic - Machine Learning from Disaster |
604,207 | train_df = org_train_df.copy()
test_df = org_test_df.copy()<choose_model_class> | voting_classifier = VotingClassifier(estimators=[
('logreg',logreg),
('random_forest', randomforest),
('decision_tree',dectree),
('XGB Classifier', xgb),
('BaggingClassifier', BaggingClassifier)])
voting_classifier.fit(X_train,y_train)
y_pred = voting_classifier.predict(X_test)
voting_accy = round(accuracy_score(y_pred, y_test), 3)
print(voting_accy ) | Titanic - Machine Learning from Disaster |
604,207 | def keras_model(input_dims):
model = Sequential()
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(input_dims//5))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam', metrics=['accuracy'])
return model
<prepare_x_and_y> | model = CatBoostClassifier(verbose=False, one_hot_max_size=3)
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
acc = round(accuracy_score(y_pred, y_test), 3)
print(acc ) | Titanic - Machine Learning from Disaster |
604,207 | y_value = train_df['Cover_Type']
del train_df['Cover_Type'], train_df['ID']
del test_df['Cover_Type'], test_df['ID']
model = keras_model(train_df.shape[1])
callbacks = [
EarlyStopping(
patience=10,
verbose=10)
]<train_model> | y_predict=model.predict(testframe ) | Titanic - Machine Learning from Disaster |
604,207 |
<train_model> | temp = pd.DataFrame(pd.DataFrame({
"PassengerId": passenger_id,
"Survived": y_predict
}))
temp.to_csv(".. /working/submission3.csv", index = False ) | Titanic - Machine Learning from Disaster |
10,579,474 | history = model.fit(train_df.values, y_value.values, epochs=30, batch_size = 64, verbose=1)
predict = model.predict(test_df.values )<save_to_csv> | train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
train_df.head() | Titanic - Machine Learning from Disaster |
10,579,474 | submission_nn = pd.read_csv('../input/kaggletutorial/sample_submission.csv')
submission_nn['Cover_Type'] = predict
submission_nn.to_csv('nn_last.csv', index=False )<merge> | %matplotlib inline | Titanic - Machine Learning from Disaster |
10,579,474 | def calculate_correlation(base_df, target_df):
source = base_df.copy()
source = source.merge(target_df,on='ID')
corr_df = source.corr()
corr = corr_df.loc['Cover_Type_x', 'Cover_Type_y']
del corr_df, source
return corr<merge> | test_df = pd.read_csv('.. /input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
10,579,474 | source = submission.copy()
source = source.merge(submission_nn,on='ID')
source<compute_test_metric> | combine = [train_df, test_df] | Titanic - Machine Learning from Disaster |
10,579,474 | calculate_correlation(submission, submission_nn )<train_model> | train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape | Titanic - Machine Learning from Disaster |
10,579,474 | class SklearnWrapper(object):
def __init__(self, clf, params=None, **kwargs):
seed = kwargs.get('seed', 0)
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train, x_cross=None, y_cross=None):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict_proba(x)[:,1]<choose_model_class> | for dataset in combine:
dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0} ).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
10,579,474 | class LgbmWrapper(object):
def __init__(self, params=None, **kwargs):
seed = kwargs.get('seed', 0)
num_rounds = kwargs.get('num_rounds', 1000)
early_stopping = kwargs.get('early_stopping', 100)
eval_function = kwargs.get('eval_function', None)
verbose_eval = kwargs.get('verbose_eval', 100)
self.param = params
self.param['seed'] = seed
self.num_rounds = num_rounds
self.early_stopping = early_stopping
self.eval_function = eval_function
self.verbose_eval = verbose_eval
def train(self, x_train, y_train, x_cross=None, y_cross=None):
need_cross_validation = True
if x_cross is None:
need_cross_validation = False
if isinstance(y_train, pd.DataFrame)is True:
y_train = y_train[y_train.columns[0]]
if need_cross_validation is True:
y_cross = y_cross[y_cross.columns[0]]
if need_cross_validation is True:
dtrain = lgbm.Dataset(x_train, label=y_train, silent=True)
dvalid = lgbm.Dataset(x_cross, label=y_cross, silent=True)
self.clf = lgbm.train(self.param, train_set=dtrain, num_boost_round=self.num_rounds, valid_sets=dvalid,
feval=self.eval_function, early_stopping_rounds=self.early_stopping,
verbose_eval=self.verbose_eval)
else:
dtrain = lgbm.Dataset(x_train, label=y_train, silent= True)
self.clf = lgbm.train(self.param, dtrain, self.num_rounds)
def predict(self, x):
return self.clf.predict(x, num_iteration=self.clf.best_iteration)
def get_params(self):
return self.param<prepare_x_and_y> | for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1 | Titanic - Machine Learning from Disaster |
10,579,474 | def get_oof(clf, x_train, y_train, x_test, eval_func, **kwargs):
nfolds = kwargs.get('NFOLDS', 5)
kfold_shuffle = kwargs.get('kfold_shuffle', True)
kfold_random_state = kwargs.get('kfold_random_state', 0)
ntrain = x_train.shape[0]
ntest = x_test.shape[0]
kf = StratifiedKFold(n_splits= nfolds, shuffle=kfold_shuffle, random_state=kfold_random_state)
oof_train = np.zeros(( ntrain,))
oof_test = np.zeros(( ntest,))
oof_test_skf = np.empty(( nfolds, ntest))
cv_sum = 0
try:
if clf.clf is not None:
print(clf.clf)
except:
print(clf)
print(clf.get_params())
for i,(train_index, cross_index)in enumerate(kf.split(x_train, y_train)) :
x_tr, x_cr = None, None
y_tr, y_cr = None, None
if isinstance(x_train, pd.DataFrame):
x_tr, x_cr = x_train.iloc[train_index], x_train.iloc[cross_index]
y_tr, y_cr = y_train.iloc[train_index], y_train.iloc[cross_index]
else:
x_tr, x_cr = x_train[train_index], x_train[cross_index]
y_tr, y_cr = y_train[train_index], y_train[cross_index]
clf.train(x_tr, y_tr, x_cr, y_cr)
oof_train[cross_index] = clf.predict(x_cr)
cv_score = eval_func(y_cr, oof_train[cross_index])
print('Fold %d / ' %(i+1), 'CV-Score: %.6f' % cv_score)
cv_sum = cv_sum + cv_score
score = cv_sum / nfolds
print("Average CV-Score: ", score)
clf.train(x_train, y_train)
oof_test = clf.predict(x_test)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1 )<init_hyperparams> | train_df = train_df.drop(['Parch', 'SibSp'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
10,579,474 | lgbm_param1 = {
'boosting_type': 'dart',
'objective': 'binary',
'metric': 'binary_logloss',
"learning_rate": 0.03,
"num_leaves": 31,
"max_depth": 7,
"colsample_bytree": 0.8,
"subsample": 0.8,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
"nthread":8,
'drop_rate':0.1,
'skip_drop':0.5,
'max_drop':50,
'top_rate':0.1,
'other_rate':0.1
}
lgbm_param2 = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
"learning_rate": 0.03,
"num_leaves": 10,
"max_depth": 4,
"colsample_bytree": 0.5,
"subsample": 0.8,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
"nthread":8
}
lgbm_param3 = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
"learning_rate": 0.03,
"num_leaves": 24,
"max_depth": 6,
"colsample_bytree": 0.5,
"subsample": 0.8,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
"nthread":8
}
rf_params = {
'criterion':'gini', 'max_leaf_nodes':24, 'n_estimators':200, 'min_impurity_split':0.0000001,
'max_features':0.4, 'max_depth':6, 'min_samples_leaf':20, 'min_samples_split':2,
'min_weight_fraction_leaf':0.0, 'bootstrap':True,
'random_state':1, 'verbose':False
}
et_parmas = {
'criterion':'gini', 'max_leaf_nodes':31, 'n_estimators':200, 'min_impurity_split':0.0000001,
'max_features':0.6, 'max_depth':10, 'min_samples_leaf':20, 'min_samples_split':2,
'min_weight_fraction_leaf':0.0, 'bootstrap':True,
'random_state':1, 'verbose':False
}<merge> | freq_port = train_df.Embarked.dropna().mode() [0]
freq_port | Titanic - Machine Learning from Disaster |
10,579,474 | org_train_df, org_test_df = tree_data_preprocessing(train, test)
train_df = org_train_df.copy()
test_df = org_test_df.copy()<prepare_x_and_y> | for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port ) | Titanic - Machine Learning from Disaster |
10,579,474 | y_value = train_df["Cover_Type"]
del train_df["Cover_Type"], train_df["ID"]
del test_df["Cover_Type"], test_df["ID"]<choose_model_class> | for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
10,579,474 | et_model = SklearnWrapper(clf = ExtraTreesClassifier, params=et_parmas)
rf_model = SklearnWrapper(clf = RandomForestClassifier, params=rf_params )<categorify> | test_df['Fare'].fillna(test_df['Fare'].dropna().median() , inplace=True)
test_df.head() | Titanic - Machine Learning from Disaster |
10,579,474 | et_train, et_test = get_oof(et_model, train_df.values, y_value, test_df.values, log_loss, NFOLDS=3)
rf_train, rf_test = get_oof(rf_model, train_df.values, y_value, test_df.values, log_loss, NFOLDS=3 )<concatenate> | guess_ages = np.zeros(( 2,3))
guess_ages
for dataset in combine:
for i in range(0, 2):
for j in range(0, 3):
guess_df = dataset[(dataset['Sex'] == i)& \
(dataset['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess/0.5 + 0.5)* 0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[(dataset.Age.isnull())&(dataset.Sex == i)&(dataset.Pclass == j+1),\
'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
| Titanic - Machine Learning from Disaster |
10,579,474 | x_train_second_layer = np.concatenate(( rf_train, et_train), axis=1)
x_test_second_layer = np.concatenate(( rf_test, et_test), axis=1 )<create_dataframe> | X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1 ).copy()
X_train.shape, Y_train.shape, X_test.shape | Titanic - Machine Learning from Disaster |
10,579,474 | x_train = pd.DataFrame(x_train_second_layer)
x_test = pd.DataFrame(x_test_second_layer )<split> | from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier | Titanic - Machine Learning from Disaster |
10,579,474 | lgbm_meta_params = {
'boosting':'gbdt', 'num_leaves':28, 'learning_rate':0.03, 'min_sum_hessian_in_leaf':0.1,
'max_depth':7, 'feature_fraction':0.6, 'min_data_in_leaf':30, 'poisson_max_delta_step':0.7,
'bagging_fraction':0.8, 'min_gain_to_split':0,
'objective':'binary', 'seed':1,'metric': 'binary_logloss'
}
NFOLD = 3
folds = StratifiedKFold(n_splits= NFOLD, shuffle=True, random_state=2018)
total_score = 0
best_iteration = 0
for n_fold,(train_idx, valid_idx)in enumerate(folds.split(x_train, y_value)) :
train_x, train_y = x_train.iloc[train_idx], y_value.iloc[train_idx]
valid_x, valid_y = x_train.iloc[valid_idx], y_value.iloc[valid_idx]
evals_result_dict = {}
dtrain = lgbm.Dataset(train_x, label=train_y)
dvalid = lgbm.Dataset(valid_x, label=valid_y)
clf = lgbm.train(lgbm_meta_params, train_set=dtrain, num_boost_round=5000, valid_sets=[dtrain, dvalid],
early_stopping_rounds=200, evals_result=evals_result_dict, verbose_eval=500)
predict = clf.predict(valid_x)
cv_score = log_loss(valid_y, predict)
total_score += cv_score
best_iteration = max(best_iteration, clf.best_iteration)
print('Fold {} LogLoss : {}'.format(n_fold + 1, cv_score))
lgbm.plot_metric(evals_result_dict)
plt.show()
print("Best Iteration", best_iteration)
print("Total LogLoss", total_score / NFOLD)
print("Baseline model Score Diff", total_score / NFOLD - baseline_tree_score )<train_model> | random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train)* 100, 2)
acc_random_forest | Titanic - Machine Learning from Disaster |
10,579,474 | dtrain = lgbm.Dataset(x_train, label=y_value)
clf = lgbm.train(lgbm_meta_params, train_set=dtrain, num_boost_round=5000)
predict_stacking = clf.predict(x_test )<save_to_csv> | scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
10,579,474 | submission_stacking = pd.read_csv('../input/kaggletutorial/sample_submission.csv')
submission_stacking['Cover_Type'] = predict_stacking
submission_stacking.to_csv('submission_stacking.csv', index=False )<save_to_csv> | from sklearn.neural_network import MLPClassifier | Titanic - Machine Learning from Disaster |
10,579,474 | submission_et = pd.read_csv('../input/kaggletutorial/sample_submission.csv')
submission_et['Cover_Type'] = et_test
submission_et.to_csv('submission_et.csv', index=False )<save_to_csv> | clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X_train, Y_train)
MLPClassifier(alpha=1e-05, hidden_layer_sizes=(5, 2), random_state=1, solver='lbfgs')
Y_pred = clf.predict(X_test)
Y_pred | Titanic - Machine Learning from Disaster |
10,579,474 | submission_rf = pd.read_csv('../input/kaggletutorial/sample_submission.csv')
submission_rf['Cover_Type'] = rf_test
submission_rf.to_csv('submission_rf.csv', index=False )<load_from_csv> | submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
} ) | Titanic - Machine Learning from Disaster |
10,579,474 | <load_from_csv><EOS> | submission.to_csv('my_submission1.csv', index=False ) | Titanic - Machine Learning from Disaster |
9,494,798 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering> | pd.options.mode.chained_assignment = None | Titanic - Machine Learning from Disaster |
9,494,798 | train_df['label'] = train_df['label'].map(lambda x: 1 if x else 0)
dev_df['label'] = dev_df['label'].map(lambda x: 1 if x else 0 )<define_variables> | train=pd.read_csv('/kaggle/input/titanic/train.csv')
train.head() | Titanic - Machine Learning from Disaster |
9,494,798 | train_sentences = [list(d)for d in train_df['comments'].to_list() ]
dev_sentences = [list(d)for d in dev_df['comments'].to_list() ]<categorify> | test=pd.read_csv('/kaggle/input/titanic/test.csv')
pid=test.PassengerId | Titanic - Machine Learning from Disaster |
9,494,798 | train_labels = train_df['label'].to_list()
dev_labels = dev_df['label'].to_list()<count_unique_values> | print('Train Set')
print(train.isnull().sum())
print('\nTest Set')
print(test.isnull().sum() ) | Titanic - Machine Learning from Disaster |
9,494,798 | def make_dictionary(sentences, vocabulary_size=None, initial_words=['<UNK>', '<PAD>', '<SOS>', '<EOS>']):
counter = Counter()
for words in sentences:
counter.update(words)
if vocabulary_size is None:
vocabulary_size = len(counter.keys())
vocab_words = counter.most_common(vocabulary_size)
for initial_word in initial_words:
vocab_words.insert(0,(initial_word, 0))
word2idx = {word:idx for idx,(word, count)in enumerate(vocab_words)}
idx2word = {idx:word for word, idx in word2idx.items() }
return word2idx, idx2word
def process_sentences(sentences, word2idx, sentence_length=20, padding='<PAD>'):
sentences_processed = []
for sentence in sentences:
if len(sentence)> sentence_length:
fixed_sentence = sentence[:sentence_length]
else:
fixed_sentence = sentence + [padding]*(sentence_length - len(sentence))
sentence_idx = [word2idx[word] if word in word2idx.keys() else word2idx['<UNK>'] for word in fixed_sentence]
sentences_processed.append(sentence_idx)
return sentences_processed
def make_mask(sentences, sentence_length):
masks = []
for sentence in sentences:
words_count = len(sentence[:sentence_length])
sentence_mask = np.concatenate([np.ones(words_count-1), np.ones(1), np.zeros(sentence_length-words_count)])
masks.append(sentence_mask)
mask = np.array(masks)
return mask<categorify> | train['Age']=train['Age'].fillna(train['Age'].median())
train['Embarked']=train['Embarked'].fillna(train['Embarked'].mode() [0])
test['Age']=test['Age'].fillna(test['Age'].median())
test['Fare']=test['Fare'].fillna(test['Fare'].mean() ) | Titanic - Machine Learning from Disaster |
9,494,798 | word2idx, idx2word = make_dictionary(train_sentences, initial_words=['<UNK>', '<PAD>'] )<string_transform> | print('Train Set')
print(train.isnull().sum())
print('\nTest Set')
print(test.isnull().sum() ) | Titanic - Machine Learning from Disaster |
9,494,798 | SENTENCE_LENGTH = 150
train_sentences_processed = process_sentences(train_sentences, word2idx, sentence_length=SENTENCE_LENGTH)
dev_sentences_processed = process_sentences(dev_sentences, word2idx, sentence_length=SENTENCE_LENGTH )<categorify> | for data in [train,test]:
data['FamilySize']=data['SibSp']+data['Parch']+1
data['IsAlone']=1
data['IsAlone'].loc[data['FamilySize']>1]=0
data['Title'] = data['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0] | Titanic - Machine Learning from Disaster |
9,494,798 | train_mask = make_mask(train_sentences, sentence_length=SENTENCE_LENGTH)
dev_mask = make_mask(dev_sentences, sentence_length=SENTENCE_LENGTH )<import_modules> | train['Title'].value_counts() | Titanic - Machine Learning from Disaster |
9,494,798 | import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from torch.optim import Adam<feature_engineering> | train['Title']=train['Title'].replace('Ms','Miss')
train['Title']=train['Title'].replace('Mlle','Miss')
train['Title']=train['Title'].replace('the Countess','Mrs')
train['Title']=train['Title'].replace('Mme','Mrs')
test['Title']=test['Title'].replace('Dona','Mrs')
test['Title']=test['Title'].replace('Ms','Miss')
names=(train['Title'].value_counts() < 10)
train['Title']=train['Title'].apply(lambda x: 'Misc' if names.loc[x] == True else x)
names=(test['Title'].value_counts() < 10)
test['Title']=test['Title'].apply(lambda x: 'Misc' if names.loc[x] == True else x)
print('Train Set\n',train['Title'].value_counts())
print('\nTest Set\n',test['Title'].value_counts() ) | Titanic - Machine Learning from Disaster |
9,494,798 | class DatasetLoader(Dataset):
def __init__(self, sentences_processed, labels):
assert len(sentences_processed)== len(labels)
self.sentences_processed = sentences_processed
self.labels = torch.LongTensor(labels)
def __getitem__(self, index):
return torch.LongTensor(self.sentences_processed[index]), self.labels[index]
def __len__(self):
return len(self.sentences_processed )<create_dataframe> | columns=['PassengerId','Cabin','Ticket','Name']
train=train.drop(columns,axis=1)
test=test.drop(columns,axis=1 ) | Titanic - Machine Learning from Disaster |
9,494,798 | train_dataset = DatasetLoader(train_sentences_processed, train_labels)
dev_dataset = DatasetLoader(dev_sentences_processed, dev_labels )<load_pretrained> | label=LabelEncoder()
for data in [train,test]:
data['Sex']=label.fit_transform(data['Sex'])
data['Embarked']=label.fit_transform(data['Embarked'])
data['Title']=label.fit_transform(data['Title'])
data['Age']=data['Age'].astype('int64')
train.head() | Titanic - Machine Learning from Disaster |
9,494,798 | train_dataloader = DataLoader(train_dataset, batch_size=128)
dev_dataloader = DataLoader(dev_dataset, batch_size=256 )<import_modules> | target=['Survived']
selected=['Sex','Pclass','Embarked','Title','SibSp','Parch','FamilySize','IsAlone']
for x in selected:
print('Survival Percentage By',x)
print(train[[x, target[0]]].groupby(x,as_index=False ).mean() ,'\n')
| Titanic - Machine Learning from Disaster |
9,494,798 | import torch
import torch.nn as nn
import torch.nn.functional as F<choose_model_class> | target=train['Survived']
train.drop(['Survived'],axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
9,494,798 | class CharCNN(nn.Module):
def __init__(self, vocab_size, embedding_size, output_size, kernel_num, kernel_sizes):
super().__init__()
self.embedding = nn.Embedding(
vocab_size, embedding_size, padding_idx=0
)
self.convs = nn.ModuleList([
nn.Conv1d(embedding_size, kernel_num, kernel_size=kernel_size)
for kernel_size in kernel_sizes
])
self.maxpools = nn.ModuleList([
nn.MaxPool1d(kernel_size)
for kernel_size in kernel_sizes
])
self.linear = nn.Linear(1140, output_size)
self.softmax = nn.LogSoftmax(dim=1)
self.sigmoid = nn.Sigmoid()
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
batch_size = x.size(0)
embedded = self.embedding(x)
embedded = embedded.transpose(1,2)
pools = []
for conv, maxpool in zip(self.convs, self.maxpools):
feature_map = conv(embedded)
pooled = maxpool(feature_map)
pools.append(pooled)
conv_concat = torch.cat(pools, dim=-1 ).view(batch_size, -1)
conv_concat = self.dropout(conv_concat)
logits = self.linear(conv_concat)
return self.softmax(logits )<choose_model_class> | X_train, X_val, y_train, y_val = train_test_split(train, target, test_size=0.2, random_state=1, stratify=target)
print('Mean Absolute Errors:')
model = RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=4, max_features='auto',
max_leaf_nodes=5, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=15,
min_weight_fraction_leaf=0.0, n_estimators=350,
n_jobs=None, oob_score=True, random_state=1, verbose=0,
warm_start=False)
model.fit(X_train, y_train)
predict = model.predict(X_val)
print('Random Forest: ' + str(mean_absolute_error(predict, y_val)))
model = XGBRegressor(learning_rate=0.01, n_estimators=3460, max_depth=3, min_child_weight=0,
gamma=0, subsample=0.7,colsample_bytree=0.7,objective='reg:squarederror',
nthread=-1,scale_pos_weight=1, seed=27, reg_alpha=0.00006)
model.fit(X_train, y_train)
predict = model.predict(X_val)
print('XGBoost: ' + str(mean_absolute_error(predict, y_val)))
model = LassoCV(max_iter=1e7, random_state=14, cv=10)
model.fit(X_train, y_train)
predict = model.predict(X_val)
print('Lasso: ' + str(mean_absolute_error(predict, y_val)))
model = GradientBoostingRegressor(n_estimators=300, learning_rate=0.05, max_depth=4, random_state=5)
model.fit(X_train, y_train)
predict = model.predict(X_val)
print('GradientBoosting: ' + str(mean_absolute_error(predict, y_val)) ) | Titanic - Machine Learning from Disaster |
9,494,798 | model = CharCNN(
vocab_size=len(word2idx),
embedding_size=300,
output_size=2,
kernel_num=10,
kernel_sizes=[3,4,5]
)<set_options> | model=RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=4, max_features='auto',
max_leaf_nodes=5, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=15,
min_weight_fraction_leaf=0.0, n_estimators=350,
n_jobs=None, oob_score=True, random_state=1, verbose=0,
warm_start=False)
model.fit(X_train, y_train)
predict = model.predict(X_val)
print('Random Forest MAE: ' + str(mean_absolute_error(predict, y_val)))
print("Out of Bag Score: %.4f" % model.oob_score_)
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_val)
print("Training accuracy: ", accuracy_score(y_train, y_pred_train))
print("Testing accuracy: ", accuracy_score(y_val, y_pred_test))
print("
Confusion Matrix
")
print('[[True Positive False Positive]
[False Negative True Negative]]
')
print(confusion_matrix(y_val, y_pred_test))
fpr, tpr, _ = roc_curve(y_val, y_pred_test)
roc_auc = auc(fpr, tpr)
print("
ROC AUC on evaluation set",roc_auc)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve(area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc="lower right")
plt.show() | Titanic - Machine Learning from Disaster |
9,494,798 | if torch.cuda.is_available() :
model.to('cuda' )<choose_model_class> | prediction=model.predict(test)
prediction | Titanic - Machine Learning from Disaster |
9,494,798 | <choose_model_class><EOS> | output = pd.DataFrame({'PassengerId': pid, 'Survived': prediction})
output.to_csv('my_submission.csv', index=False)
print("Submission successfully saved!" ) | Titanic - Machine Learning from Disaster |
2,509,935 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<find_best_params> | import numpy as np
import pandas as pd | Titanic - Machine Learning from Disaster |
2,509,935 | for epoch in range(6):
losses = []
for i,(batch_data, batch_label)in enumerate(train_dataloader):
batch_data, batch_label = Variable(batch_data), Variable(batch_label)
if torch.cuda.is_available() :
batch_data = batch_data.to('cuda')
batch_label = batch_label.to('cuda')
log_probs = model(batch_data)
loss = loss_function(log_probs, batch_label.transpose(0, -1))
model.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.item())
corrects = log_probs.data.cpu().numpy().argmax(axis=1)== batch_label.data.cpu().numpy()
train_accuracy = corrects.astype(np.float64 ).mean()
val_losses = []
val_accuracies = []
for test_data, test_label in dev_dataloader:
test_data, test_label = Variable(test_data), Variable(test_label)
if torch.cuda.is_available() :
test_data = test_data.to('cuda')
test_label = test_label.to('cuda')
val_log_probs = model(test_data)
val_loss = loss_function(val_log_probs, test_label)
val_corrects = val_log_probs.data.cpu().numpy().argmax(axis=1)== test_label.data.cpu().numpy()
val_accuracy = val_corrects.astype(np.float64 ).mean()
val_losses.append(val_loss.item())
val_accuracies.append(val_accuracy)
message = "Epoch: {epoch:<5d} Iteration: {iteration:<5d} Loss: {loss:<.3} Val Loss: {val_loss:<.3} Train Accuracy: {train_acc:<.3} Test Accuracy: {test_acc:<.3}".format(
epoch=epoch,
iteration= i,
loss=sum(losses)/len(losses),
val_loss=sum(val_losses)/len(val_losses),
train_acc=train_accuracy,
test_acc=sum(val_accuracies)/len(val_accuracies)
)
print(message )<load_from_csv> | data_train = pd.read_csv('.. /input/train.csv')
data_train.head() | Titanic - Machine Learning from Disaster |
2,509,935 | test_df = pd.read_csv('/kaggle/input/koreangenderbiasdetection/test.gender_bias.no_label.csv' )<define_variables> | data_test = pd.read_csv('../input/test.csv')
data_test.head() | Titanic - Machine Learning from Disaster |
2,509,935 | test_sentences = [list(d)for d in test_df['comments'].to_list() ]<create_dataframe> | df = data_train.append(data_test, sort = True)
df.shape | Titanic - Machine Learning from Disaster |
2,509,935 | test_sentences_processed = process_sentences(test_sentences, word2idx, sentence_length=SENTENCE_LENGTH)
test_mask = make_mask(test_sentences, sentence_length=SENTENCE_LENGTH)
test_dataset = DatasetLoader(test_sentences_processed, [0 for i in range(len(test_sentences_processed)) ])
test_dataloader = DataLoader(test_dataset, batch_size=1 )<feature_engineering> | %matplotlib inline
plt.style.use('bmh')
plt.rc('font', family='DejaVu Sans', size=13 ) | Titanic - Machine Learning from Disaster |
2,509,935 | test_df['label'] = res<count_values> | cat_list = ['Cabin', 'Embarked', 'Name', 'Parch', 'Pclass', 'Sex', 'SibSp', 'Ticket']
for n, i in enumerate(cat_list):
cat_num = len(df[i].value_counts().index)
print('The feature "%s" has %d values.' %(i, cat_num)) | Titanic - Machine Learning from Disaster |
2,509,935 | test_df['label'].value_counts()<feature_engineering> | df.isnull().sum() | Titanic - Machine Learning from Disaster |
2,509,935 | test_df['label'] = test_df['label'].map(lambda x: True if x else False )<save_to_csv> | df[df.Fare.isnull() ] | Titanic - Machine Learning from Disaster |
2,509,935 | test_df.to_csv('/kaggle/working/prediction.csv', index=None, header=True )<set_options> | df[(df.Pclass == 3)&(df.Age > 60)&(df.Sex == 'male')] | Titanic - Machine Learning from Disaster |
2,509,935 | sns.set()
%matplotlib inline
warnings.filterwarnings('ignore')
%load_ext tensorboard.notebook
pd.set_option('display.max_columns', None)
<load_from_csv> | fare_mean = df[(df.Pclass == 3)&(df.Age > 60)&(df.Sex == 'male')].Fare.mean()
df.loc[df.PassengerId == 1044, 'Fare'] = fare_mean
df[df.PassengerId == 1044] | Titanic - Machine Learning from Disaster |
2,509,935 | odf=pd.read_csv('/kaggle/input/equipfails/equip_failures_training_set.csv',index_col=0)
odft=pd.read_csv('/kaggle/input/equipfails/equip_failures_test_set.csv',index_col=0 )<data_type_conversions> | df[df.Embarked.isnull() ] | Titanic - Machine Learning from Disaster |
2,509,935 | df=odf.replace({'na':-999999})
Xt=odft.replace({'na':-999999})
Xt=Xt.astype(float)
df=df.astype(float)
df['target']=df['target'].astype(int )<prepare_x_and_y> | df.Embarked = df.Embarked.fillna('C' ) | Titanic - Machine Learning from Disaster |
2,509,935 | X=df.iloc[:,1:]
y=df.iloc[:,0]
Xn=normalize(X)
Xtn=normalize(Xt )<split> | df.Cabin = df.Cabin.fillna('0')
len(df.Cabin.value_counts().index ) | Titanic - Machine Learning from Disaster |
2,509,935 | X_train, X_test, y_train, y_test = train_test_split(Xn, y, test_size=0.2, random_state=8 )<compute_train_metric> | df['CabinCat'] = pd.Categorical(df.Cabin.apply(lambda x : x[0])).codes | Titanic - Machine Learning from Disaster |
2,509,935 | my_class = ExtraTreesClassifier(random_state=0)
my_class.fit(X_train, y_train)
y_pred= my_class.predict(X_test)
print('accuracy: {}'.format(accuracy_score(y_test, y_pred)))
print(f'F1: {f1_score(y_test,y_pred)}')
confusion_matrix(y_test,y_pred )<compute_test_metric> | import re
from sklearn.preprocessing import LabelEncoder | Titanic - Machine Learning from Disaster |
2,509,935 | my_class = AdaBoostClassifier(random_state=0)
my_class.fit(X_train, y_train)
y_pred= my_class.predict(X_test)
print('accuracy: {}'.format(accuracy_score(y_test, y_pred)))
print(f'F1: {f1_score(y_test,y_pred)}')
confusion_matrix(y_test,y_pred )<compute_train_metric> | df['Title'] = df.Name.apply(lambda x : re.search('([a-zA-Z]+)\.', x ).group(1))
title_mapping = {'Mr' : 1, 'Miss' : 2, 'Mrs' : 3, 'Master' : 4, 'Dr' : 5, 'Rev' : 6, 'Major' : 7, 'Col' : 7, 'Mlle' : 2, 'Mme' : 3, 'Don' : 9, 'Dona' : 9, 'Lady' : 10, 'Countess' : 10, 'Jonkheer' : 10, 'Sir' : 9, 'Capt' : 7, 'Ms' : 2}
df['TitleCat'] = df.Title.map(title_mapping)
df['FamilySize'] = df.SibSp + df.Parch + 1
df['FamilyName'] = df.Name.apply(lambda x : str.split(x, ',')[0])
df['IsAlone'] = 0
df.loc[df.FamilySize == 1, 'IsAlone'] = 1
le = LabelEncoder()
df['NameLength'] = df.Name.apply(lambda x : len(x))
df['NameLengthBin'] = pd.qcut(df.NameLength, 5)
df['NameLengthBinCode'] = le.fit_transform(df.NameLengthBin)
df['Embarked'] = pd.Categorical(df.Embarked ).codes
df = pd.concat([df, pd.get_dummies(df.Sex)], axis = 1)
table_ticket = pd.DataFrame(df.Ticket.value_counts())
table_ticket.rename(columns = {'Ticket' : 'TicketNum'}, inplace = True)
table_ticket['TicketId'] = pd.Categorical(table_ticket.index ).codes
table_ticket.loc[table_ticket.TicketNum < 3, 'TicketId'] = -1
df = pd.merge(left = df, right = table_ticket, left_on = 'Ticket', right_index = True, how = 'left', sort = False)
df['TicketCode'] = list(pd.cut(df.TicketId, bins = [-2, 0, 500, 1000], labels = [0, 1, 2]))
regex = re.compile('\s*(\w+)\s*')
df['CabinNum'] = df.Cabin.apply(lambda x : len(regex.findall(x)) ) | Titanic - Machine Learning from Disaster |
2,509,935 | lg = LogisticRegression(solver='lbfgs', random_state=18)
lg.fit(X_train, y_train)
logistic_prediction = lg.predict(X_test)
score = metrics.accuracy_score(y_test, logistic_prediction)
print(score)
confusion_matrix(y_test,logistic_prediction )<train_model> | from sklearn.ensemble import ExtraTreesRegressor | Titanic - Machine Learning from Disaster |
2,509,935 | data_dmatrix = xgb.DMatrix(data=Xn,label=y)
xgc = xgb.XGBClassifier(objective ='reg:logistic', colsample_bytree = 0.15,
learning_rate = 0.1,
max_depth = 20, alpha = 12, n_estimators = 700)
xgc.fit(X,y )<predict_on_test> | classers = ['Fare','Parch','Pclass','SibSp','TitleCat', 'CabinCat','female','male', 'Embarked', 'FamilySize', 'IsAlone', 'NameLengthBinCode','TicketNum','TicketCode']
etr = ExtraTreesRegressor(n_estimators = 200, random_state = 0)
age_X_train = df[classers][df.Age.notnull() ]
age_y_train = df.Age[df.Age.notnull() ]
age_X_test = df[classers][df.Age.isnull() ]
etr.fit(age_X_train, np.ravel(age_y_train))
age_pred = etr.predict(age_X_test)
df.loc[df.Age.isnull() , 'Age'] = age_pred | Titanic - Machine Learning from Disaster |
2,509,935 | pred_train=xgc.predict(X)
pred_train.sum()<predict_on_test> | childAge = 18
def getIdentity(passenger):
age, sex = passenger
if age < childAge:
return 'child'
elif sex == 'male':
return 'male_adult'
else:
return 'female_adult'
df = pd.concat([df, pd.DataFrame(df[['Age', 'Sex']].apply(getIdentity, axis = 1), columns = ['Identity'])], axis = 1)
df = pd.concat([df, pd.get_dummies(df.Identity)], axis = 1 ) | Titanic - Machine Learning from Disaster |
2,509,935 | pred_test=xgc.predict(Xt)
pred_test.sum()<create_dataframe> | DEFAULT_SURVIVAL_VALUE = 0.5
df['FamilySurvival'] = DEFAULT_SURVIVAL_VALUE
for _, grp_df in df.groupby(['FamilyName', 'Fare']):
if len(grp_df)!= 1 :
for index, row in grp_df.iterrows() :
smax = grp_df.drop(index ).Survived.max()
smin = grp_df.drop(index ).Survived.min()
pid = row.PassengerId
if smax == 1:
df.loc[df.PassengerId == pid, 'FamilySurvival'] = 1.0
elif smin == 0:
df.loc[df.PassengerId == pid, 'FamilySurvival'] = 0.0
for _, grp_df in df.groupby(['Ticket']):
if len(grp_df)!= 1:
for index, row in grp_df.iterrows() :
if(row.FamilySurvival == 0.0 or row.FamilySurvival == 0.5):
smax = grp_df.drop(index ).Survived.max()
smin = grp_df.drop(index ).Survived.min()
pid = row.PassengerId
if smax == 1:
df.loc[df.PassengerId == pid, 'FamilySurvival'] = 1.0
elif smin == 0:
df.loc[df.PassengerId == pid, 'FamilySurvival'] = 0.0
df.FamilySurvival.value_counts() | Titanic - Machine Learning from Disaster |
2,509,935 | yt=pd.DataFrame(pred_test)
yt.index=yt.index+1
yt<save_to_csv> | df['FareBin'] = pd.qcut(df.Fare, 5)
le = LabelEncoder()
df['FareBinCode'] = le.fit_transform(df.FareBin ) | Titanic - Machine Learning from Disaster |
2,509,935 | test=pd.read_csv('../input/equipfails/equip_failures_test_set.csv',na_values='na')
df= pd.DataFrame()
df['id'] = test['id']
df['target'] = pred_test
df.to_csv('submission2.csv', index=False )<save_to_csv> | df['AgeBin'] = pd.qcut(df.Age, 4)
le = LabelEncoder()
df['AgeBinCode'] = le.fit_transform(df.AgeBin ) | Titanic - Machine Learning from Disaster |
2,509,935 | file_name='submission.csv'
yt.to_csv(file_name,index=True)
<load_pretrained> | from sklearn.preprocessing import MinMaxScaler, StandardScaler | Titanic - Machine Learning from Disaster |
2,509,935 | filename = 'Final_Model.mod'
pickle.dump(xgc, open(filename, 'wb'))<set_options> | target = data_train['Survived'].values
select_features = ['AgeBinCode', 'Embarked', 'FareBinCode', 'Parch', 'Pclass', 'SibSp', 'CabinCat', 'TitleCat', 'FamilySize', 'IsAlone', 'FamilySurvival', 'NameLengthBinCode', 'female', 'male', 'TicketNum', 'TicketCode', 'CabinNum', 'child', 'female_adult', 'male_adult'] | Titanic - Machine Learning from Disaster |
2,509,935 | sns.set()
%matplotlib inline
warnings.filterwarnings('ignore')
%load_ext tensorboard.notebook
pd.set_option('display.max_columns', None)
<load_from_csv> | scaler = StandardScaler()
df_scaled = scaler.fit_transform(df[select_features])
train = df_scaled[0:891].copy()
test = df_scaled[891:].copy() | Titanic - Machine Learning from Disaster |
2,509,935 | odf=pd.read_csv('/kaggle/input/equipfails/equip_failures_training_set.csv',index_col=0)
odft=pd.read_csv('/kaggle/input/equipfails/equip_failures_test_set.csv',index_col=0 )<data_type_conversions> | from sklearn.feature_selection import SelectKBest, f_classif | Titanic - Machine Learning from Disaster |
2,509,935 | df=odf.replace({'na':-999999})
Xt=odft.replace({'na':-999999})
Xt=Xt.astype(float)
df=df.astype(float)
df['target']=df['target'].astype(int )<prepare_x_and_y> | selector = SelectKBest(f_classif, len(select_features))
selector.fit(train, target)
scores = -np.log10(selector.pvalues_)
indices = np.argsort(scores)[::-1]
print('Features importance:')
for i in range(len(scores)) :
print('%.2f %s' %(scores[indices[i]], select_features[indices[i]])) | Titanic - Machine Learning from Disaster |
2,509,935 | X=df.iloc[:,1:]
y=df.iloc[:,0]
Xn=normalize(X)
Xtn=normalize(Xt )<split> | kf = KFold(n_splits = 5, random_state = 1 ) | Titanic - Machine Learning from Disaster |
2,509,935 | X_train, X_test, y_train, y_test = train_test_split(Xn, y, test_size=0.2, random_state=8 )<compute_train_metric> | rfc_parameters = {'max_depth' : [5], 'n_estimators' : [500], 'min_samples_split' : [9], 'random_state' : [1], 'n_jobs' : [-1]}
rfc = RandomForestClassifier()
clf_rfc = GridSearchCV(rfc, rfc_parameters, n_jobs = -1, cv = kf, scoring = 'roc_auc')
clf_rfc.fit(train, target ) | Titanic - Machine Learning from Disaster |
2,509,935 | my_class = ExtraTreesClassifier(random_state=0)
my_class.fit(X_train, y_train)
y_pred= my_class.predict(X_test)
print('accuracy: {}'.format(accuracy_score(y_test, y_pred)))
print(f'F1: {f1_score(y_test,y_pred)}')
confusion_matrix(y_test,y_pred )<compute_test_metric> | rfc2_parameters = {'max_depth' : [2, 5, 8, 10, 20, 50], 'n_estimators' : [10, 50, 100, 200, 500, 1000, 2000], 'min_samples_split' : [2, 3, 5, 9, 20]}
rfc2 = RandomForestClassifier(random_state = 1, n_jobs = -1)
clf_rfc2 = RandomizedSearchCV(rfc2, rfc2_parameters, n_jobs = -1, cv = kf, scoring = 'roc_auc')
clf_rfc2.fit(train, target ) | Titanic - Machine Learning from Disaster |
2,509,935 | my_class = AdaBoostClassifier(random_state=0)
my_class.fit(X_train, y_train)
y_pred= my_class.predict(X_test)
print('accuracy: {}'.format(accuracy_score(y_test, y_pred)))
print(f'F1: {f1_score(y_test,y_pred)}')
confusion_matrix(y_test,y_pred )<compute_train_metric> | importance = clf_rfc.best_estimator_.feature_importances_
indices = np.argsort(importance)[::-1]
print(clf_rfc.best_score_)
print(clf_rfc.score(train, target))
print(clf_rfc.best_params_)
print('\nFeature importance:')
for i in range(len(select_features)) :
print('%.2f %s' %(importance[indices[i]], select_features[indices[i]])) | Titanic - Machine Learning from Disaster |
2,509,935 | lg = LogisticRegression(solver='lbfgs', random_state=18)
lg.fit(X_train, y_train)
logistic_prediction = lg.predict(X_test)
score = metrics.accuracy_score(y_test, logistic_prediction)
print(score)
confusion_matrix(y_test,logistic_prediction )<train_model> | lr_paramaters = {'C' : [0.05, 0.1, 0.2], 'random_state' : [1]}
lr = LogisticRegression()
clf_lr = GridSearchCV(lr, lr_paramaters, n_jobs = -1, cv = kf, scoring = 'roc_auc')
clf_lr.fit(train, target ) | Titanic - Machine Learning from Disaster |
2,509,935 | data_dmatrix = xgb.DMatrix(data=Xn,label=y)
xgc = xgb.XGBClassifier(objective ='reg:logistic', colsample_bytree = 0.2,
learning_rate = 0.1,
max_depth = 20, alpha = 10, n_estimators = 700)
xgc.fit(X,y )<predict_on_test> | print(clf_lr.best_score_)
print(clf_lr.score(train, target))
print(clf_lr.best_params_ ) | Titanic - Machine Learning from Disaster |
2,509,935 | pred_train=xgc.predict(X)
pred_train.sum()<predict_on_test> | svc_paramaters = {'C' : [5.5, 6, 6.5], 'kernel' : ['linear', 'rbf'], 'gamma' : ['auto', 'scale'], 'random_state' : [1]}
svc = SVC()
clf_svc = GridSearchCV(svc, svc_paramaters, n_jobs = -1, cv = kf, scoring = 'roc_auc')
clf_svc.fit(train, target ) | Titanic - Machine Learning from Disaster |
2,509,935 | pred_test=xgc.predict(Xt)
pred_test.sum()<create_dataframe> | print(clf_svc.best_score_)
print(clf_svc.score(train, target))
print(clf_svc.best_params_ ) | Titanic - Machine Learning from Disaster |
2,509,935 | yt=pd.DataFrame(pred_test)
yt.index=yt.index+1
yt<save_to_csv> | gbdt_parameters = {'subsample' : [1], 'min_samples_leaf' : [3], 'learning_rate' : [0.1], 'n_estimators' : [50], 'min_samples_split' : [2], 'max_depth' : [3], 'random_state' : [1]}
gbdt = GradientBoostingClassifier()
clf_gbdt = GridSearchCV(gbdt, gbdt_parameters, n_jobs = -1, cv = kf, scoring = 'roc_auc')
clf_gbdt.fit(train, target ) | Titanic - Machine Learning from Disaster |
2,509,935 | test=pd.read_csv('../input/equipfails/equip_failures_test_set.csv',na_values='na')
df= pd.DataFrame()
df['id'] = test['id']
df['target'] = pred_test
df.to_csv('submission2.csv', index=False )<save_to_csv> | print(clf_gbdt.best_score_)
print(clf_gbdt.score(train, target))
print(clf_gbdt.best_params_ ) | Titanic - Machine Learning from Disaster |
2,509,935 | file_name='submission.csv'
yt.to_csv(file_name,index=True)
<load_pretrained> | xgb_paramaters = {'subsample' : [0.7], 'min_child_weight' : [1], 'max_depth' : [3], 'learning_rate' : [0.1], 'n_estimators' : [100], 'n_jobs' : [-1], 'random_state' : [1]}
xgb = XGBClassifier()
clf_xgb = GridSearchCV(xgb, xgb_paramaters, n_jobs = -1, cv = kf, scoring = 'roc_auc')
clf_xgb.fit(train, target ) | Titanic - Machine Learning from Disaster |
2,509,935 | filename = 'Final_Model.mod'
pickle.dump(xgc, open(filename, 'wb'))<load_from_csv> | print(clf_xgb.best_score_)
print(clf_xgb.score(train, target))
print(clf_xgb.best_params_ ) | Titanic - Machine Learning from Disaster |
2,509,935 | df2 = pd.read_csv(".. /input/train.csv", parse_dates=[0])
test = pd.read_csv(".. /input/test.csv", parse_dates=[0] )<create_dataframe> | prediction = clf_rfc2.predict(test ) | Titanic - Machine Learning from Disaster |
2,509,935 | df3 = df2.copy()
test3 = test.copy()<data_type_conversions> | submission = pd.DataFrame({'Survived' : prediction}, index = data_test.PassengerId)
submission.to_csv('submission.csv', index_label = ['PassengerId'] ) | Titanic - Machine Learning from Disaster |
715,277 | df['populacao']= df['populacao'].str.replace(',','', regex=False)
df['populacao']= df['populacao'].str.replace('(2)','', regex=False)
df['populacao']= df['populacao'].str.replace('(1)','', regex=False)
df['populacao']= df['populacao'].str.replace('() ','', regex=False)
df['populacao']= df.populacao.astype(float )<data_type_conversions> | print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | df['area']= df['area'].str.replace(',','', regex=False)
df['area']= df.area.astype(float )<data_type_conversions> | def read_data(train = 'train', test = 'test'):
train = pd.read_csv(".. /input/" + train + ".csv")
test = pd.read_csv(".. /input/" + test + ".csv")
return train, test
def write_df(df, filename):
df.to_csv(filename + '.csv', index = False)
print(filename, 'written to csv.')
print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | df['densidade_dem']= df['densidade_dem'].str.replace(',','', regex=False)
df['densidade_dem']= df.densidade_dem.astype(float )<data_type_conversions> | train, test = read_data()
print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | for col in df.columns:
if df[col].dtype == 'object':
df[col] = df[col].astype('category' ).cat.codes<count_missing_values> | print('Train data:', train.columns.values)
print('\n')
print('Test data:', test.columns.values ) | Titanic - Machine Learning from Disaster |
715,277 | col_nan=df.isnull().any(axis=0)
col_nan<feature_engineering> | predictor_cols = test.columns.values
data = pd.concat([train[predictor_cols], test])
data.is_copy = False
print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | df['servidores']= np.where(df['servidores'].isna() , round(df['populacao']*df.servidores.mean() /df.populacao.mean() , 0),
df['servidores'] )<feature_engineering> | data['Travelling_alone'] = data['SibSp'] + data['Parch']
data['Travelling_alone'] = np.where(data['Travelling_alone'] > 0, 0, 1)
data.drop(['SibSp', 'Parch'], axis=1, inplace=True)
print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | df['gasto_pc_educacao'] = np.where(df['gasto_pc_educacao'].isna() , round(df['populacao']*df.gasto_pc_educacao.mean() /df.populacao.mean() , 2),
df['gasto_pc_educacao'] )<feature_engineering> | data.drop(['PassengerId', 'Ticket', 'Name'], axis=1, inplace=True)
print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | df['comissionados_por_servidor']= df['comissionados']/df['servidores']<feature_engineering> | def na_count(data):
na_cols = []
for i in data.columns:
if data[i].isnull().sum() != 0:
na_cols.append([i, data[i].isnull().sum() , round(data[i].isnull().sum() / len(data[i])* 100, 4)])
return na_cols
print('All Good!' ) | Titanic - Machine Learning from Disaster |
715,277 | df['densidade_dem'] = np.where(df['densidade_dem'].isna() , round(df['populacao']/df['area'], 1),
df['densidade_dem'] )<feature_engineering> | na_cols = na_count(data)
print('Missing Values:')
for i in na_cols:
print(i ) | Titanic - Machine Learning from Disaster |
715,277 | df['hab_p_medico'] = np.where(df['hab_p_medico'].isna() , -1, df['hab_p_medico'])
<feature_engineering> | data['Age'].fillna(data['Age'].median(skipna=True), inplace=True)
print('All Good!' ) | Titanic - Machine Learning from Disaster |