kernel_id: int64, values 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
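These four columns describe a prompt/completion dump of Kaggle notebook cells; the figures above are the viewer's per-column minimum and maximum. A minimal sketch of loading and inspecting such a dump, assuming a local copy exists; the file name below is hypothetical, not taken from the source:

import pandas as pd

df = pd.read_parquet('kaggle_cells.parquet')  # hypothetical local path to this dump
print(df.dtypes)  # expected: kernel_id int64; prompt, completion, comp_name object
print(df['comp_name'].nunique(), 'distinct competitions')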
21,208,150
cm = LogisticRegression(penalty='l1', solver='liblinear', C=5, random_state=11) pred_train, pred_test, loss_train, loss_test, acc_test, matrix_test = build_model(X_train_vect, X_test_vect, y_train['energy'], y_test['energy'], cm) ll_scores.append(loss_test) ac_scores.append(acc_test) print('Energy:') print('Log loss on training set: ',loss_train) print('Log loss on test set: ', loss_test) print('Accuracy on test set:', acc_test) print('Test set confusion matrix: ', matrix_test )<train_model>
train.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
cm = LogisticRegression(penalty='l1', solver='liblinear', C=5, random_state=11) pred_train, pred_test, loss_train, loss_test, acc_test, matrix_test = build_model(X_train_vect, X_test_vect, y_train['nature'], y_test['nature'], cm) ll_scores.append(loss_test) ac_scores.append(acc_test) print('Nature:') print('Log loss on training set: ',loss_train) print('Log loss on test set: ', loss_test) print('Accuracy on test set:', acc_test) print('Test set confusion matrix: ', matrix_test )<train_model>
train = train.drop(["Cabin"],axis = 1) print(train.shape) train.head()
Titanic - Machine Learning from Disaster
21,208,150
cm = LogisticRegression(penalty='l1', solver='liblinear', C=3, random_state=11) pred_train, pred_test, loss_train, loss_test, acc_test, matrix_test = build_model(X_train_vect, X_test_vect, y_train['tactics'], y_test['tactics'], cm) ll_scores.append(loss_test) ac_scores.append(acc_test) print('Tactics:') print('Log loss on training set: ',loss_train) print('Log loss on test set: ', loss_test) print('Accuracy on test set:', acc_test) print('Test set confusion matrix: ', matrix_test )<compute_test_metric>
train.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
print('Average log loss across personality categories: ', round(np.mean(ll_scores), 3)) print('Average accuracy across personality categories: ', round(np.mean(ac_scores), 3))<prepare_x_and_y>
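The per-axis training cells above repeat one pattern per MBTI category; a hedged, loop-based equivalent is sketched below. It assumes build_model, the vectorised splits, and the y_train/y_test frames are defined as above, and borrows the per-axis C values [4, 5, 5, 3] from the test-set prediction cell further down.

categories = ['mind', 'energy', 'nature', 'tactics']
c_vals = [4, 5, 5, 3]  # per-axis C values, as in the test-set prediction cell
ll_scores, ac_scores = [], []
for cat, c in zip(categories, c_vals):
    cm = LogisticRegression(penalty='l1', solver='liblinear', C=c, random_state=11)
    (pred_train, pred_test, loss_train, loss_test,
     acc_test, matrix_test) = build_model(X_train_vect, X_test_vect,
                                          y_train[cat], y_test[cat], cm)
    ll_scores.append(loss_test)
    ac_scores.append(acc_test)
    print(cat.capitalize() + ':')
    print('Log loss on training set: ', loss_train)
    print('Log loss on test set: ', loss_test)
    print('Accuracy on test set:', acc_test)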
train.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
results = test[['id']] X_train_vect, X_test_vect = vectorise(train_clean['posts'], test_clean['posts']) X_train_vect.shape<predict_on_test>
train['Embarked'].fillna(method = 'backfill', inplace = True )
Titanic - Machine Learning from Disaster
21,208,150
c_vals = [4, 5, 5, 3] class1_proba = [] for i in range(4): cm = LogisticRegression(penalty='l1', solver='liblinear', C=c_vals[i], random_state=11) cm.fit(X_train_vect, train[categories[i]].values) pred_test = cm.predict(X_test_vect) results[categories[i]] = pred_test probability = cm.predict_proba(X_test_vect) class1_proba.append(probability[:,1] )<save_to_csv>
train['Age'].fillna(train['Age'].median() , inplace = True )
Titanic - Machine Learning from Disaster
21,208,150
results.to_csv('MBTI_l1_liblinear_C4553_split_tt_all_pp_emoji_lib_handles.csv', index=False) <feature_engineering>
train.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
for i in range(4): results[categories[i]] = class1_proba[i]<save_to_csv>
train.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
results.to_csv('MBTI_l1_liblinear_C4553_split_tt_all_pp_emoji_lib_handles_proba.csv', index=False) <set_options>
def str_to_cat(training_df): for p,q in training_df.items() : if is_string_dtype(q): training_df[p] = q.astype('category' ).cat.as_ordered() return training_df
Titanic - Machine Learning from Disaster
21,208,150
sns.set_style('whitegrid') <load_from_csv>
def mydf_to_nums(training_df, feature, null_status): if not is_numeric_dtype(feature): training_df[null_status] = feature.cat.codes + 1 def mydf_imputer(training_df, feature, null_status, null_table): if is_numeric_dtype(feature): if pd.isnull(feature ).sum() or(null_status in null_table): filler = null_table[null_status] if null_status in null_table else feature.median() training_df[null_status] = feature.fillna(filler) null_table[null_status] = filler return null_table def mydf_preprocessor(training_df, null_table): if null_table is None: null_table = dict() for p,q in training_df.items() : null_table = mydf_imputer(training_df, q, p, null_table) for p,q in training_df.items() : mydf_to_nums(training_df, q, p) training_df = pd.get_dummies(training_df, dummy_na = True) res = [training_df, null_table] return res
Titanic - Machine Learning from Disaster
21,208,150
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') <drop_column>
train = str_to_cat(train) train_df,my_table = mydf_preprocessor(train,null_table = None) print(train_df.shape) train_df.head()
Titanic - Machine Learning from Disaster
21,208,150
ntrain = train.shape[0] ntest = test.shape[0] all_data = pd.concat(( train, test), sort=False ).reset_index(drop=True) all_data.drop("id", axis=1, inplace=True) <groupby>
train.duplicated().sum()
Titanic - Machine Learning from Disaster
21,208,150
grouped_wordclouds = all_data.groupby('type' ).sum() grouped_wordclouds <create_dataframe>
test.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
<groupby>
test.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
print("There are {} characters in the combination of all posts.".format(len(grouped_wordclouds['posts'].sum())))<feature_engineering>
test = test.drop(["Cabin"],axis = 1) print(test.shape )
Titanic - Machine Learning from Disaster
21,208,150
pattern_url = r'http[s]?://(?:[A-Za-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9A-Fa-f][0-9A-Fa-f]))+' subs_url = r'url-web' all_data['posts'] = all_data['posts'].replace(to_replace=pattern_url, value=subs_url, regex=True) all_data['posts'] = all_data['posts'].str.lower() <feature_engineering>
test['Age'].fillna(test['Age'].median() , inplace = True )
Titanic - Machine Learning from Disaster
21,208,150
def remove_punctuation_numbers(post): punc_numbers = string.punctuation + '0123456789' return ''.join([l for l in post if l not in punc_numbers]) all_data['posts'] = all_data['posts'].apply(remove_punctuation_numbers) <feature_engineering>
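As a hedged aside on the cell above: the same character filtering can be done with a precomputed translation table, avoiding a Python-level loop over every character; pandas exposes the same operation via Series.str.translate.

# Equivalent to remove_punctuation_numbers, via str.translate.
drop_table = str.maketrans('', '', string.punctuation + '0123456789')
all_data['posts'] = all_data['posts'].str.translate(drop_table)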
class_fares = test.groupby('Pclass')['Fare'].median() test['median_fare'] = test['Pclass'].apply(lambda x: class_fares[x]) test['Fare'].fillna(test['median_fare'], inplace = True,) del test['median_fare']
Titanic - Machine Learning from Disaster
21,208,150
tokeniser = TreebankWordTokenizer() all_data['tokens'] = all_data['posts'].apply(tokeniser.tokenize) <feature_engineering>
test.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
stemmer = SnowballStemmer('english') def train_stemmer(words, stemmer): return [stemmer.stem(word)for word in words] all_data['stem'] = all_data['tokens'].apply(train_stemmer, args=(stemmer,)) <categorify>
test.isnull().sum()
Titanic - Machine Learning from Disaster
21,208,150
lemmatizer = WordNetLemmatizer() def train_lemma(words, lemmatizer): return [lemmatizer.lemmatize(word)for word in words] all_data['lemma'] = all_data['tokens'].apply(train_lemma, args=(lemmatizer,)) <feature_engineering>
test = str_to_cat(test) test,my_table = mydf_preprocessor(test,null_table = None) print(test.shape) test.head(5 )
Titanic - Machine Learning from Disaster
21,208,150
detokenizer = TreebankWordDetokenizer() all_data['detoken'] = all_data['lemma'].apply(lambda x: detokenizer.detokenize(x)) <split>
test.duplicated().sum()
Titanic - Machine Learning from Disaster
21,208,150
train = all_data[:ntrain] test = all_data[ntrain:] <split>
train = train.drop('Name', axis = 1) test = test.drop(columns=['Name'], axis = 1 )
Titanic - Machine Learning from Disaster
21,208,150
X = train.detoken y = train.type X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X = pd.concat([X_train, y_train], axis=1) ESTP = X[X.type == 'ESTP'] ESTJ = X[X.type == 'ESTJ'] ESFP = X[X.type == 'ESFP'] ESFJ = X[X.type == 'ESFJ'] ENTP = X[X.type == 'ENTP'] ENTJ = X[X.type == 'ENTJ'] ENFP = X[X.type == 'ENFP'] ENFJ = X[X.type == 'ENFJ'] INFP = X[X.type == 'INFP'] mbti_upsampledESTP = resample(ESTP, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledESFP = resample(ESFP, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledESFJ = resample(ESFJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledESTJ = resample(ESTJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledENFJ = resample(ENFJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledENTJ = resample(ENTJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledENTP = resample(ENTP, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledENFP = resample(ENFP, replace=True, n_samples=len(INFP), random_state=27) upsampled_E = pd.concat([INFP, mbti_upsampledESTP,mbti_upsampledESTJ,mbti_upsampledESFP,mbti_upsampledESFJ,mbti_upsampledENFJ,mbti_upsampledENTJ,mbti_upsampledENTP,mbti_upsampledENFP]) upsampled_E.type.value_counts() <filter>
X_train = train.drop('Survived', axis=1) X_train = pd.get_dummies(X_train,columns=['Embarked','Pclass']) Y_train = train['Survived'] print(X_train.shape,Y_train.shape )
Titanic - Machine Learning from Disaster
21,208,150
ISTP = X[X.type == 'ISTP'] ISTJ = X[X.type == 'ISTJ'] ISFP = X[X.type == 'ISFP'] ISFJ = X[X.type == 'ISFJ'] INTP = X[X.type == 'INTP'] INTJ = X[X.type == 'INTJ'] INFJ = X[X.type == 'INFJ'] INFP = X[X.type == 'INFP'] mbti_upsampledISTP = resample(ISTP, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledISFP = resample(ISFP, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledISFJ = resample(ISFJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledISTJ = resample(ISTJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledINFJ = resample(INFJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledINTJ = resample(INTJ, replace=True, n_samples=len(INFP), random_state=27) mbti_upsampledINTP = resample(INTP, replace=True, n_samples=len(INFP), random_state=27) upsampled_I = pd.concat([mbti_upsampledISTP, mbti_upsampledISTJ, mbti_upsampledISFP, mbti_upsampledISFJ, mbti_upsampledINFJ, mbti_upsampledINTJ, mbti_upsampledINTP]) upsampled_I.type.value_counts() <concatenate>
scaler = StandardScaler().fit(X_train) scaled_X = scaler.transform(X_train) print(scaled_X )
Titanic - Machine Learning from Disaster
21,208,150
upsampled_data = pd.concat([upsampled_E,upsampled_I]) upsampled_data.head()<split>
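The two upsampling cells above enumerate all sixteen types by hand; a hedged, loop-based equivalent is sketched below, assuming X is the training frame with a 'type' column and resample is sklearn.utils.resample as used above.

majority_n = (X.type == 'INFP').sum()  # INFP is the largest class in this data
parts = []
for mbti_type, group in X.groupby('type'):
    if mbti_type == 'INFP':
        parts.append(group)  # keep the majority class as-is
    else:
        parts.append(resample(group, replace=True, n_samples=majority_n,
                              random_state=27))
upsampled_data = pd.concat(parts)
upsampled_data.type.value_counts()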
x_test = pd.get_dummies(test,columns=['Embarked','Pclass'] )
Titanic - Machine Learning from Disaster
21,208,150
X_train = upsampled_data.detoken y_train = upsampled_data.type <predict_on_test>
sc = StandardScaler().fit(x_test) sc_X = sc.transform(x_test) print(sc_X )
Titanic - Machine Learning from Disaster
21,208,150
clf = DummyClassifier(strategy='most_frequent' ).fit(X_train, y_train) y_pred = clf.predict(X_test) print(' Our baseline accuracy is %s' % accuracy_score(y_pred, y_test)) <train_model>
model_1 = RandomForestClassifier(n_jobs = -1, n_estimators = 10, bootstrap = True) model_1.fit(scaled_X,Y_train )
Titanic - Machine Learning from Disaster
21,208,150
nb = Pipeline([('vect', CountVectorizer(stop_words='english', ngram_range=(1, 2))), ('tfidf', TfidfTransformer(sublinear_tf=True, norm='l2')) , ('clf', MultinomialNB())]) nb.fit(X_train, y_train) <predict_on_test>
pred = model_1.predict(scaled_X )
Titanic - Machine Learning from Disaster
21,208,150
y_predNB = nb.predict(X_test) print('accuracy %s' % accuracy_score(y_predNB, y_test)) <predict_on_test>
parameters = {'n_estimators': [20, 50, 60, 80, 90, 100, 120, 150, 200], 'max_features': ["auto", "sqrt", "log2"]} rfc = RandomForestClassifier(random_state = 1) cls = GridSearchCV(estimator = rfc, param_grid = parameters) cls.fit(scaled_X, Y_train) cls.best_params_
Titanic - Machine Learning from Disaster
21,208,150
y_probsNB = nb.predict_proba(X_test) print("The log loss error for our model is: ", log_loss(y_test, y_probsNB))<train_model>
model_2 = RandomForestClassifier(n_estimators=80, max_features="auto") model_2.fit(scaled_X, Y_train) y_pred = model_2.predict(sc_X )
Titanic - Machine Learning from Disaster
21,208,150
logreg = Pipeline([('vect', CountVectorizer(stop_words='english', ngram_range=(1, 2))), ('tfidf', TfidfTransformer(norm='l2')) , ('clf', LogisticRegression(n_jobs=1, C=1e5)) ]) logreg.fit(X_train, y_train) <predict_on_test>
sub_df = pd.DataFrame({'PassengerId' : test["PassengerId"],'Survived' : y_pred}) submission = sub_df.to_csv('submission.csv',index=False )
Titanic - Machine Learning from Disaster
21,763,900
y_predLogRegTrain = logreg.predict(X_test) <predict_on_test>
sns.set(style="ticks", context="talk") warnings.filterwarnings('ignore' )
Titanic - Machine Learning from Disaster
21,763,900
y_predLogReg = logreg.predict(test.posts) <compute_test_metric>
testing = pd.read_csv('/kaggle/input/titanic/test.csv') train = pd.read_csv('/kaggle/input/titanic/train.csv') test = testing.copy() target = 'Survived' data = pd.concat([train, test], axis = 0) data.info() data.head(5 )
Titanic - Machine Learning from Disaster
21,763,900
print('accuracy %s' % accuracy_score(y_predLogRegTrain, y_test)) <predict_on_test>
data['Embarked'] = data['Embarked'].fillna(data['Embarked'].mode() [0] )
Titanic - Machine Learning from Disaster
21,763,900
y_probslogreg = logreg.predict_proba(X_test) print("The log loss error for our model is: ", log_loss(y_test, y_probslogreg))<compute_test_metric>
data[data['Fare'].isna() ]
Titanic - Machine Learning from Disaster
21,763,900
print(classification_report(y_test, y_predLogRegTrain))<feature_engineering>
data_corr = data.corr().abs().unstack().sort_values(kind = "quicksort", ascending = False ).reset_index() data_corr.loc[data_corr['level_0'] == 'Fare']
Titanic - Machine Learning from Disaster
21,763,900
DecisionTree = Pipeline([('vect', CountVectorizer(stop_words='english', ngram_range=(1, 2))), ('tfidf', TfidfTransformer()), ('clf', DecisionTreeClassifier())]) clf = DecisionTree.fit(X_train, y_train) <predict_on_test>
data['Fare'] = data['Fare'].fillna(data.groupby(['Pclass'])['Fare'].transform('median'))
Titanic - Machine Learning from Disaster
21,763,900
y_predTree = clf.predict(X_test) print('accuracy %s' % accuracy_score(y_predTree, y_test)) <predict_on_test>
data.isna().sum()
Titanic - Machine Learning from Disaster
21,763,900
y_probsTree = clf.predict_proba(X_test) print("The log loss error for our model is: ", log_loss(y_test, y_probsTree))<compute_test_metric>
data['Title'] = data.Name.apply(lambda x: re.search('([A-Z][a-z]+)\.', x ).group(1)) data['Title'].unique()
Titanic - Machine Learning from Disaster
21,763,900
print(classification_report(y_test, y_predTree)) <predict_on_test>
data['Title'].value_counts(normalize = True ).round(3 )
Titanic - Machine Learning from Disaster
21,763,900
clf = Pipeline([('vect', CountVectorizer(stop_words='english')), ('tfidf', TfidfTransformer()), ('clf', SVC(kernel='poly'))]) clf.fit(X_train, y_train) y_predSVC = clf.predict(X_test) print('accuracy %s' % accuracy_score(y_predSVC, y_test)) <load_from_csv>
data['Age'].isna().sum()
Titanic - Machine Learning from Disaster
21,763,900
sample = pd.read_csv('../input/random_example.csv')<create_dataframe>
data[data['Age'] < 1]
Titanic - Machine Learning from Disaster
21,763,900
submission = pd.DataFrame(data = sample['id'], columns= ['id']) submission['Type'] = y_predLogReg<data_type_conversions>
data.loc[data['Age'] < 1, 'Age'] = None data['Age'].isna().sum()
Titanic - Machine Learning from Disaster
21,763,900
submission['mind'] = submission['Type'].apply(lambda x: x[0] == 'E' ).astype('int') submission['energy'] = submission['Type'].apply(lambda x: x[1] == 'N' ).astype('int') submission['nature'] = submission['Type'].apply(lambda x: x[2] == 'T' ).astype('int') submission['tactics'] = submission['Type'].apply(lambda x: x[3] == 'J' ).astype('int') submission = submission.drop(['Type'],axis=1 )<save_to_csv>
data_corr = data.corr().abs().unstack().sort_values(kind = "quicksort", ascending = False ).reset_index() data_corr.loc[data_corr['level_0'] == 'Age']
Titanic - Machine Learning from Disaster
21,763,900
submission.to_csv('submit_2.csv', index=False )<set_options>
data['Age'] = data['Age'].fillna(data.groupby(['Pclass', 'Title'])['Age'].transform('median')) data['Age'].isna().sum()
Titanic - Machine Learning from Disaster
21,763,900
warnings.filterwarnings('ignore') class bold: START = '\033[1m' END = '\033[0m' print('\t'+'\t'+bold.START+ 'What Is Your Myers Briggs Personality Type?' + bold.END) HTML("<iframe width='560' height='315' src='https://www.youtube.com/embed/M4YLO-2Tb2w/' frameborder='0' allowfullscreen></iframe>") <set_options>
data_corr = data.corr().abs().unstack().sort_values(kind = "quicksort", ascending = False ).reset_index() data_corr.loc[data_corr['level_0'] == 'Age']
Titanic - Machine Learning from Disaster
21,763,900
warnings.filterwarnings('ignore' )<load_from_csv>
data[['Pclass', 'Embarked', 'Survived']].groupby(['Pclass', 'Embarked'] ).mean().round(2 )
Titanic - Machine Learning from Disaster
21,763,900
df_train = pd.read_csv('../input/train.csv')<categorify>
print('Pclass: ', data[data['Cabin_group'] == 'T']['Pclass'].values) data.loc[data['Cabin'] == 'T', 'Cabin_group'] = 'A'
Titanic - Machine Learning from Disaster
21,763,900
df_train['Mind'] = df_train['type'].str[0] df_train['Energy'] = df_train['type'].str[1] df_train['Nature'] = df_train['type'].str[2] df_train['Tactics'] = df_train['type'].str[3] df_train = df_train.replace({'Mind' : {'I':0, 'E':1}, 'Energy' : {'S':0, 'N':1}, 'Nature' : {'F':0, 'T':1}, 'Tactics' : {'P':0, 'J':1}}) df_train = df_train.drop('type', axis = 1 )<load_from_csv>
data[['Cabin_group', 'Pclass', 'Survived']].groupby(['Cabin_group', 'Pclass'] ).mean().round(2 )
Titanic - Machine Learning from Disaster
21,763,900
df_test = pd.read_csv('../input/test.csv') len_train = len(df_train) df_all = pd.concat([df_train, df_test], sort=False)<feature_engineering>
data['Sex_int'] = data['Sex'].replace({'male': 1, 'female': 0}) data['Embarked_int'] = data['Embarked'].replace({'S': 0, 'C': 1, 'Q':2}) data['Title_int'] = data['Title'].replace({'Mr': 0, 'Mrs': 1, 'Miss':2, 'Master':3, 'Special':4}) data['age_group_int'] = data['age_group'].replace({'Adults': 0, 'Middle age': 1, 'Infants':2, 'Adolescents':3, 'Preschool':4, 'Children':5, 'Seniors':6}) data['Cabin_group_int'] = data['Cabin_group'].replace({'unkown': 0, 'ABC': 1, 'DE':2, 'FG':3} )
Titanic - Machine Learning from Disaster
21,763,900
tfv = TfidfVectorizer(stop_words='english',max_features=10000,lowercase=True,max_df=0.75,ngram_range=(1,2)) X = tfv.fit_transform(df_all['posts'] )<split>
data = data.loc[:,~data.columns.str.endswith('_int')]
Titanic - Machine Learning from Disaster
21,763,900
train_X = X[:len_train]<prepare_x_and_y>
dummy_features = ['Sex' , 'Pclass' , 'Embarked' , 'Cabin_group' , 'Title' , 'age_group' ] for col in dummy_features: data[col] = data[col].astype(object) drop_features = ['PassengerId', 'Ticket', 'Name', 'Cabin' ,'small_family_size' ,'Alone' ,'SibSp' ,'Parch' ,'Age' ] data = pd.concat([data, pd.get_dummies(data[dummy_features], drop_first = True)], axis = 1, sort = False) data.drop(columns = data[dummy_features], inplace = True) data.drop(columns = data[drop_features], inplace = True) data.tail()
Titanic - Machine Learning from Disaster
21,763,900
y_mind = df_train['Mind'] y_energy = df_train['Energy'] y_nature = df_train['Nature'] y_tactics = df_train['Tactics']<choose_model_class>
train.dropna(inplace = True) test.dropna(inplace = True )
Titanic - Machine Learning from Disaster
21,763,900
lr = LogisticRegression(n_jobs = -1) rf = RandomForestClassifier(n_jobs=-1) adb = AdaBoostClassifier() knn = KNeighborsClassifier(n_jobs=-1 )<compute_train_metric>
y = train['Survived'] x = train.drop(columns = target) x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 42 , stratify = y )
Titanic - Machine Learning from Disaster
21,763,900
print('Logistic Regression ') scores = cross_val_score(lr, train_X, y_mind, cv=5) print("Accuracy(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_energy, cv=5) print("Accuracy(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_nature, cv=5) print("Accuracy(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_tactics, cv=5) print("Accuracy(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('Random Forest ') scores = cross_val_score(rf, train_X, y_mind, cv=5) print("Accuracy(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_energy, cv=5) print("Accuracy(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_nature, cv=5) print("Accuracy(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_tactics, cv=5) print("Accuracy(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('AdaBoost ') scores = cross_val_score(adb, train_X, y_mind, cv=5) print("Accuracy(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_energy, cv=5) print("Accuracy(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_nature, cv=5) print("Accuracy(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_tactics, cv=5) print("Accuracy(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('KNN ') scores = cross_val_score(knn, train_X, y_mind, cv=5) print("Accuracy(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_energy, cv=5) print("Accuracy(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_nature, cv=5) print("Accuracy(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_tactics, cv=5) print("Accuracy(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2))<compute_train_metric>
RF = ensemble.RandomForestClassifier() params = { 'n_estimators':[n for n in range(100, 250, 50)] ,'max_depth':[n for n in range(3, 8)] ,'min_samples_leaf': [n for n in range(3, 6, 1)] ,'max_features' : [None] ,'random_state' : [42] } RF_model = GridSearchCV(RF, param_grid = params, cv = 5, n_jobs = -1 ).fit(x_train, y_train) print("Best Hyperparameters:",RF_model.best_params_) RF_probs = RF_model.predict_proba(x_test) RF_probs = RF_probs[:, 1] RF_auc = roc_auc_score(y_test, RF_probs) print('AUC: %.3f' % RF_auc) RF_predictions = RF_model.predict(x_test ).astype(int) RF_accuracy = accuracy_score(y_test, RF_predictions) print("RF accuracy: %.3f" % RF_accuracy) print("RF Recall: " + '%.3f' % recall_score(y_test, RF_predictions)) print("RF Precision: " + '%.3f' % precision_score(y_test, RF_predictions)) print("RF cohen_kappa_score: %.3f" % cohen_kappa_score(y_test, RF_predictions)) plt.figure(figsize =(8, 6)) RF_fpr, RF_tpr, RF_thresholds = roc_curve(y_test, RF_probs) plt.plot([0, 1], [0, 1], linestyle = '--') plt.plot(RF_fpr, RF_tpr, color = 'tab:green') plt.show()
Titanic - Machine Learning from Disaster
21,763,900
print('Logistic Regression ') scores = cross_val_score(lr, train_X, y_mind, cv=5, scoring='f1_macro') print("F1(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_energy, cv=5, scoring='f1_macro') print("F1(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_nature, cv=5, scoring='f1_macro') print("F1(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_tactics, cv=5, scoring='f1_macro') print("F1(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('Random Forest ') scores = cross_val_score(rf, train_X, y_mind, cv=5, scoring='f1_macro') print("F1(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_energy, cv=5, scoring='f1_macro') print("F1(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_nature, cv=5, scoring='f1_macro') print("F1(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_tactics, cv=5, scoring='f1_macro') print("F1(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('AdaBoost ') scores = cross_val_score(adb, train_X, y_mind, cv=5, scoring='f1_macro') print("F1(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_energy, cv=5, scoring='f1_macro') print("F1(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_nature, cv=5, scoring='f1_macro') print("F1(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_tactics, cv=5, scoring='f1_macro') print("F1(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('KNN ') scores = cross_val_score(knn, train_X, y_mind, cv=5, scoring='f1_macro') print("F1(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_energy, cv=5, scoring='f1_macro') print("F1(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_nature, cv=5, scoring='f1_macro') print("F1(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_tactics, cv=5, scoring='f1_macro') print("F1(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2))<compute_train_metric>
data['churn_proba'] = RF_model.best_estimator_.predict_proba(data[x.columns])[:,1]
Titanic - Machine Learning from Disaster
21,763,900
print('Logistic Regression ') scores = cross_val_score(lr, train_X, y_mind, cv=5, scoring='neg_log_loss') print("Log Loss(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_energy, cv=5, scoring='neg_log_loss') print("Log Loss(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_nature, cv=5, scoring='neg_log_loss') print("Log Loss(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_tactics, cv=5, scoring='neg_log_loss') print("Log Loss(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('Random Forest ') scores = cross_val_score(rf, train_X, y_mind, cv=5, scoring='neg_log_loss') print("Log Loss(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_energy, cv=5, scoring='neg_log_loss') print("Log Loss(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_nature, cv=5, scoring='neg_log_loss') print("Log Loss(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_tactics, cv=5, scoring='neg_log_loss') print("Log Loss(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('AdaBoost ') scores = cross_val_score(adb, train_X, y_mind, cv=5, scoring='neg_log_loss') print("Log Loss(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_energy, cv=5, scoring='neg_log_loss') print("Log Loss(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_nature, cv=5, scoring='neg_log_loss') print("Log Loss(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_tactics, cv=5, scoring='neg_log_loss') print("Log Loss(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('KNN ') scores = cross_val_score(knn, train_X, y_mind, cv=5, scoring='neg_log_loss') print("Log Loss(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_energy, cv=5, scoring='neg_log_loss') print("Log Loss(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_nature, cv=5, scoring='neg_log_loss') print("Log Loss(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_tactics, cv=5, scoring='neg_log_loss') print("Log Loss(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2))<compute_train_metric>
predict_RF = RF_model.predict(test ).astype(int) submit_RF = pd.DataFrame({'PassengerId': testing['PassengerId'], 'Survived': predict_RF}) filename_RF = 'Titanic Prediction RF.csv' submit_RF.to_csv(filename_RF,index=False) print('Saved file: ' + filename_RF )
Titanic - Machine Learning from Disaster
21,749,285
print('Logistic Regression ') scores = cross_val_score(lr, train_X, y_mind, cv=5, scoring='roc_auc') print("AUC(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_energy, cv=5, scoring='roc_auc') print("AUC(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_nature, cv=5, scoring='roc_auc') print("AUC(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(lr, train_X, y_tactics, cv=5, scoring='roc_auc') print("AUC(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('Random Forest ') scores = cross_val_score(rf, train_X, y_mind, cv=5, scoring='roc_auc') print("AUC(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_energy, cv=5, scoring='roc_auc') print("AUC(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_nature, cv=5, scoring='roc_auc') print("AUC(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(rf, train_X, y_tactics, cv=5, scoring='roc_auc') print("AUC(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('AdaBoost ') scores = cross_val_score(adb, train_X, y_mind, cv=5, scoring='roc_auc') print("AUC(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_energy, cv=5, scoring='roc_auc') print("AUC(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_nature, cv=5, scoring='roc_auc') print("AUC(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(adb, train_X, y_tactics, cv=5, scoring='roc_auc') print("AUC(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) print('-'*30) print('KNN ') scores = cross_val_score(knn, train_X, y_mind, cv=5, scoring='roc_auc') print("AUC(Mind): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_energy, cv=5, scoring='roc_auc') print("AUC(Energy): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_nature, cv=5, scoring='roc_auc') print("AUC(Nature): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2)) scores = cross_val_score(knn, train_X, y_tactics, cv=5, scoring='roc_auc') print("AUC(Tactics): %0.2f(+/- %0.2f)" %(scores.mean() , scores.std() * 2))<split>
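The four scoring cells above repeat one pattern across models, targets, and metrics; a hedged consolidation is sketched below, assuming lr, rf, adb, knn, train_X, and the four y_* targets are defined as above. scoring=None falls back to the classifier's default accuracy score.

models = [('Logistic Regression', lr), ('Random Forest', rf), ('AdaBoost', adb), ('KNN', knn)]
targets = [('Mind', y_mind), ('Energy', y_energy), ('Nature', y_nature), ('Tactics', y_tactics)]
scorings = [('Accuracy', None), ('F1', 'f1_macro'), ('Log Loss', 'neg_log_loss'), ('AUC', 'roc_auc')]
for metric_name, scoring in scorings:
    for model_name, model in models:
        print(model_name)
        for target_name, y_t in targets:
            scores = cross_val_score(model, train_X, y_t, cv=5, scoring=scoring)
            print("%s(%s): %0.2f(+/- %0.2f)" % (metric_name, target_name, scores.mean(), scores.std() * 2))
        print('-' * 30)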
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
21,749,285
test_X = X[len_train:]<choose_model_class>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
21,749,285
lr_mind = LogisticRegression(n_jobs=-1) lr_energy = LogisticRegression(n_jobs=-1) lr_nature = LogisticRegression(n_jobs=-1) lr_tactics = LogisticRegression(n_jobs=-1 )<train_model>
women = train_data.loc[train_data.Sex == 'female']["Survived"] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women )
Titanic - Machine Learning from Disaster
21,749,285
lr_mind.fit(train_X,y_mind) lr_energy.fit(train_X,y_energy) lr_nature.fit(train_X,y_nature) lr_tactics.fit(train_X,y_tactics )<predict_on_test>
men = train_data.loc[train_data.Sex == 'male']["Survived"] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men )
Titanic - Machine Learning from Disaster
21,749,285
submission = pd.DataFrame({'id': df_test.id.values, 'mind': lr_mind.predict(test_X), 'energy': lr_energy.predict(test_X), 'nature': lr_nature.predict(test_X), 'tactics': lr_tactics.predict(test_X)} )<predict_on_test>
for column in train_data.columns: print("%-15s%3d" %(column, pd.isnull(train_data[column] ).sum()))
Titanic - Machine Learning from Disaster
21,749,285
mind_prob = lr_mind.predict_proba(test_X) mind_t = mind_prob[:,0].mean() energy_prob = lr_energy.predict_proba(test_X) energy_t = energy_prob[:,0].mean() nature_prob = lr_nature.predict_proba(test_X) nature_t = nature_prob[:,0].mean() tactics_prob = lr_tactics.predict_proba(test_X) tactics_t = tactics_prob[:,0].mean() mind_threshold = np.percentile(mind_prob[:,1], mind_t*100,) energy_threshold = np.percentile(energy_prob[:,1], energy_t*100,) nature_threshold = np.percentile(nature_prob[:,1], nature_t*100,) tactics_threshold = np.percentile(tactics_prob[:,1], tactics_t*100,) mind_pred_threshold =(mind_prob[:,1] > mind_threshold ).astype(int) energy_pred_threshold =(energy_prob[:,1] > energy_threshold ).astype(int) nature_pred_threshold =(nature_prob[:,1] > nature_threshold ).astype(int) tactics_pred_threshold =(tactics_prob[:,1] > tactics_threshold ).astype(int )<create_dataframe>
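Stated once, the cell above moves each axis's decision cut-off from the default 0.5 to the percentile of the class-1 probabilities given by the mean predicted class-0 probability, so the predicted label balance matches the model's own prior estimate. A hedged helper capturing that, assuming numpy as np:

def threshold_predictions(prob):
    # prob: the (n_samples, 2) output of predict_proba
    t = prob[:, 0].mean()  # model's estimate of the class-0 share
    cutoff = np.percentile(prob[:, 1], t * 100)
    return (prob[:, 1] > cutoff).astype(int)

mind_pred_threshold = threshold_predictions(lr_mind.predict_proba(test_X))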
for column in test_data.columns: print("%-15s%3d" %(column, pd.isnull(test_data[column] ).sum()))
Titanic - Machine Learning from Disaster
21,749,285
submission = pd.DataFrame({'id': df_test.id.values, 'mind': mind_pred_threshold, 'energy': energy_pred_threshold, 'nature': nature_pred_threshold, 'tactics': tactics_pred_threshold} )<save_to_csv>
print(train_data.drop_duplicates().groupby('Embarked')['PassengerId'].count() )
Titanic - Machine Learning from Disaster
21,749,285
submission.to_csv('submission.csv', index=False )<import_modules>
train_data['Embarked'] = train_data['Embarked'].fillna("S" )
Titanic - Machine Learning from Disaster
21,749,285
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from wordcloud import WordCloud import re import string from nltk import word_tokenize from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import TruncatedSVD<set_options>
test_data['Fare'] = test_data['Fare'].fillna(0 )
Titanic - Machine Learning from Disaster
21,749,285
warnings.filterwarnings("ignore" )<load_from_csv>
train_data['TicketLength'] = train_data['Ticket'].apply(lambda x: len(x))
Titanic - Machine Learning from Disaster
21,749,285
train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv")<load_from_csv>
test_data['TicketLength'] = test_data['Ticket'].apply(lambda x: len(x))
Titanic - Machine Learning from Disaster
21,749,285
train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv")<concatenate>
y = train_data["Survived"] features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "TicketLength"] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
22,735,561
combined_data = pd.concat([train[['posts']], test[['posts']]] )<categorify>
train = pd.read_csv("/kaggle/input/titanic/train.csv") test = pd.read_csv("/kaggle/input/titanic/test.csv") train
Titanic - Machine Learning from Disaster
22,735,561
def clean_text(text): text = re.sub('https?\S+', 'URL', text) text = re.sub('[0-9]+','', text) text = re.sub('@[a-z0-9]+', 'user', text) text = re.sub('\|\|\|', ' ', text) text = re.sub('[%s]*' % string.punctuation, '',text) text = text.lower() tokenized_text = word_tokenize(text) lemmatizer = WordNetLemmatizer() text = ' '.join([lemmatizer.lemmatize(a)for a in tokenized_text]) return text<feature_engineering>
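A quick hedged check of clean_text above on a made-up post; the sample string is illustrative, not from the data, and the expected output assumes NLTK's standard tokenizer and WordNet data are available.

example = "Check https://example.com/intj ||| I scored 95% on the INTJ test!!!"
print(clean_text(example))  # expected: 'check url i scored on the intj test'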
submission=pd.read_csv("/kaggle/input/titanic/gender_submission.csv") print(train.columns) print(test.shape) train.drop(['Name', 'Ticket', 'Fare', 'Embarked'], axis=1, inplace=True) test.drop(['Name', 'Ticket', 'Fare', 'Embarked'], axis=1, inplace=True) train['Sex'].replace({'male':0, 'female':1}, inplace=True) test['Sex'].replace({'male':0, 'female':1}, inplace=True) train.drop(['Cabin'], axis=1,inplace=True) test.drop(['Cabin'], axis=1,inplace=True )
Titanic - Machine Learning from Disaster
22,735,561
combined_data['clean_posts'] = combined_data['posts'].apply(clean_text )<feature_engineering>
X = train.drop(['Survived', 'PassengerId'], axis=1) y = train['Survived'] X_test = test.drop(['PassengerId'], axis=1)
Titanic - Machine Learning from Disaster
22,735,561
vectorizer = TfidfVectorizer() combined_X = vectorizer.fit_transform(combined_data['clean_posts'] )<feature_engineering>
model=RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X,y) prediction=model.predict(X_test)
Titanic - Machine Learning from Disaster
22,735,561
<split><EOS>
result=pd.DataFrame() result['PassengerId'] = test['PassengerId'] result['Survived']=prediction result.to_csv("submission.csv", index=False )
Titanic - Machine Learning from Disaster
21,768,867
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<prepare_x_and_y>
pd.set_option("display.precision", 4) sns.set(style="darkgrid") warnings.filterwarnings('ignore') TRAIN_LEN = 891 RNG_SEED = 343 COLS_TO_DROP = [] CHILD_AGE_END = 18 DECKS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'N'] DEFAULT_SURVIVAL = 0.5 def get_deck_class_count(df, T_deck): deck_count = {'A': {}, 'B': {}, 'C': {}, 'D': {}, 'E': {}, 'F': {}, 'G': {}, 'N': {}, 'T': {}} if not T_deck: deck_count.pop('T', None) deck_percent = {} decks = df.transpose().columns.levels[0] for deck in decks: for pclass in range(1, 4): try: count = int(df.loc[deck, pclass]) deck_count[deck][pclass] = count except KeyError: deck_count[deck][pclass] = 0 deck_percent[deck] = [(count / sum(deck_count[deck].values())) * 100 for count in deck_count[deck].values() ] return deck_count, deck_percent def get_surv_prop(deck, pclass): return deck_class_surv_prop[deck][pclass] def get_corr(df): corr = df.corr().abs().unstack().sort_values(kind="quicksort", ascending=False ).reset_index() cols_map = {"level_0": "Feature 1", "level_1": "Feature 2", 0: 'Correlation Coefficient'} corr.drop(corr.iloc[1::2].index, inplace=True) corr.rename(columns=cols_map, inplace=True) return corr.drop(corr[corr['Correlation Coefficient'] == 1.0].index) def combine_df(df1, df2): return pd.concat([df1, df2], sort=True ).reset_index(drop=True) def divide_df(df, first_len): return df.loc[:first_len - 1], df.loc[first_len:].drop(['Survived'], axis=1) def drop_cols(cols): for col in cols: train.drop([col], inplace=True, axis=1) test.drop([col], inplace=True, axis=1) return def display_class_dist(percentages, y_label, title): df_percent = pd.DataFrame(percentages ).transpose() deck_names = percentages.keys() bar_count = np.arange(len(deck_names)) bar_width = 0.75 plt.figure(figsize=(16, 8)) plt.bar(bar_count, df_percent[0], color='red', edgecolor='black', width=bar_width, label='Passenger Class 1') plt.bar(bar_count, df_percent[1], bottom=df_percent[0], color='lime', edgecolor='black', width=bar_width, label='Passenger Class 2') plt.bar(bar_count, df_percent[2], bottom=df_percent[0] + df_percent[1], color='blue', edgecolor='black', width=bar_width, label='Passenger Class 3') plt.xlabel('Deck', size=25) plt.ylabel(y_label, size=25) plt.xticks(bar_count, deck_names) plt.tick_params(axis='x', labelsize=15) plt.tick_params(axis='y', labelsize=15) plt.legend(loc='upper right', prop={'size': 15}) plt.title(title, size=30, y=1) plt.show() return def group_survivors(df, group, new_feature_name): df[new_feature_name] = DEFAULT_SURVIVAL for _, group_df in df.groupby(group): if len(group_df)> 1: surv_max = group_df['Survived'].max() surv_min = group_df['Survived'].min() if isnan(surv_max)and isnan(surv_min): continue for _, row in group_df.iterrows() : passId = row['PassengerId'] if(surv_max == 1.0): df.loc[df['PassengerId'] == passId, new_feature_name] = 1.0 elif(surv_min==0.0): df.loc[df['PassengerId'] == passId, new_feature_name] = 0.0 return df
Titanic - Machine Learning from Disaster
21,768,867
y = train['type'] y_mind = y.apply(lambda x: 0 if x[0] == 'I' else 1) y_energy = y.apply(lambda x: 0 if x[1] == 'S' else 1) y_nature = y.apply(lambda x: 0 if x[2] == 'F' else 1) y_tactics = y.apply(lambda x: 0 if x[3] == 'P' else 1 )<import_modules>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') combined = combine_df(train, test )
Titanic - Machine Learning from Disaster
21,768,867
from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from xgboost import XGBClassifier from mlxtend.classifier import StackingCVClassifier from sklearn.model_selection import cross_val_score, GridSearchCV<choose_model_class>
train[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
21,768,867
LogR_model = LogisticRegression() RFC_model = RandomForestClassifier() SVC_model = SVC(probability = True) XGB_model = XGBClassifier() category = [(y_mind, "Mind"),(y_energy, "Energy"),(y_nature, "Nature"),(y_tactics, "Tactics")] models = [(LogR_model, "LOG"),(RFC_model, "RFC"),(SVC_model, 'SVC'),(XGB_model, 'XGB')]<choose_model_class>
adults = train[train['Age'] >= CHILD_AGE_END] children = train[train['Age'] < CHILD_AGE_END] print('Proportion of passengers <{} who survived: {:.4f}'.format(CHILD_AGE_END, children['Survived'].mean())) print('Proportion of passengers >={} who survived: {:.4f}'.format(CHILD_AGE_END, adults['Survived'].mean()))
Titanic - Machine Learning from Disaster
21,768,867
LogR_model = LogisticRegression(C = 10 )<choose_model_class>
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
21,768,867
SVC_model = SVC(gamma = 5, probability = True )<choose_model_class>
combined[combined['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
21,768,867
XGB_model = XGBClassifier(n_estimators=150, learning_rate=0.15,gamma= 0.5 )<choose_model_class>
combined['Fare'] = combined['Fare'].fillna(med_fare[3][0][0] )
Titanic - Machine Learning from Disaster
21,768,867
Meta_model = XGBClassifier() Stack_model = StackingCVClassifier(classifiers = [SVC_model, XGB_model, LogR_model], meta_classifier = Meta_model )<save_to_csv>
combined[combined['Embarked'].isnull() ]
Titanic - Machine Learning from Disaster
21,768,867
models = [(LogR_model, "log_submission.csv"),(SVC_model, 'svc_submission.csv'),(XGB_model, 'xgb_submission.csv')] for model, file_name in models: submission_file(model, file_name )<load_from_csv>
combined['Embarked'] = combined['Embarked'].fillna('S' )
Titanic - Machine Learning from Disaster
21,768,867
train_data=pd.read_csv("/kaggle/input/predice-el-futuro/train_csv.csv") display(train_data.head()) test_data=pd.read_csv("/kaggle/input/predice-el-futuro/test_csv.csv") display(test_data.head()) <feature_engineering>
train, test = divide_df(combined, TRAIN_LEN) corr_train = get_corr(train) corr_train[(corr_train['Feature 1'] == 'Age')| (corr_train['Feature 2'] == 'Age')]
Titanic - Machine Learning from Disaster
21,768,867
train_data['time'] = pd.to_datetime(train_data['time'],format='%Y-%m-%d %H:%M:%S') train_data['year']=train_data['time'].dt.year train_data['month']=train_data['time'].dt.month train_data['day']=train_data['time'].dt.day train_data['dayofweek_num']=train_data['time'].dt.dayofweek train_data['dayofweek_name']=train_data['time'].dt.day_name() train_data['Hour'] = train_data['time'].dt.hour train_data['minute'] = train_data['time'].dt.minute train_data['second'] = train_data['time'].dt.second train_data.head()<feature_engineering>
age_pclass_sex = train.groupby(['Pclass', 'Sex'] ).median() ['Age'] print('Median ages for the following groups(training data):') for pclass in range(1, train['Pclass'].nunique() + 1): for sex in ['female', 'male']: print('Pclass {} {}s: {}'.format(pclass, sex, age_pclass_sex[pclass][sex])) print('All passengers: {}'.format(train['Age'].median()))
Titanic - Machine Learning from Disaster
21,768,867
train_data['lag_1'] = train_data['feature'].shift(1) train_data = train_data[['time', 'lag_1', 'feature']] train_data.head()<feature_engineering>
combined['Age'] = combined.groupby(['Pclass', 'Sex'])['Age'].apply(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
21,768,867
train_data['lag_1'] = train_data['feature'].shift(1) train_data['lag_2'] = train_data['feature'].shift(2) train_data['lag_3'] = train_data['feature'].shift(3) train_data['lag_4'] = train_data['feature'].shift(4) train_data['lag_5'] = train_data['feature'].shift(5) train_data['lag_6'] = train_data['feature'].shift(6) train_data['lag_7'] = train_data['feature'].shift(7) data = train_data[['time', 'lag_1', 'lag_2', 'lag_3', 'lag_4', 'lag_5', 'lag_6', 'lag_7', 'feature']] data.head(10 )<train_model>
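A hedged, loop-based version of the lag cell above, assuming train_data carries the 'time' and 'feature' columns created earlier.

n_lags = 7
for lag in range(1, n_lags + 1):
    train_data['lag_%d' % lag] = train_data['feature'].shift(lag)
cols = ['time'] + ['lag_%d' % lag for lag in range(1, n_lags + 1)] + ['feature']
data = train_data[cols]
data.head(10)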
combined['Deck'] = combined['Cabin'].apply(lambda s: s[0] if pd.notnull(s)else 'N') train, test = divide_df(combined, TRAIN_LEN) deck_class_count = train.groupby(['Deck', 'Pclass'] ).count().rename(columns={'Name': 'Count'}) deck_class_count = deck_class_count[['Count']] print('Passenger counts for each Deck, Pclass combination(training data)where N deck denotes null values:', deck_class_count, sep=' ' )
Titanic - Machine Learning from Disaster
21,768,867
model = Prophet() model.fit(train_data.reset_index().rename(columns={'time':'ds', 'feature':'y'}))<predict_on_test>
combined = combine_df(train, test) i = combined[combined['Deck'] == 'T'].index combined.loc[i, 'Deck'] = 'A' deck_class_count.pop('T', None) deck_class_count['A'][1] += 1 COLS_TO_DROP.extend(['Cabin'] )
Titanic - Machine Learning from Disaster
21,768,867
test_fcst = model.predict(df=test_data.reset_index().rename(columns={'time':'ds'}))<split>
train, test = divide_df(combined, TRAIN_LEN) corr_train = get_corr(train) high_corr_train = corr_train['Correlation Coefficient'] > 0.1 print('Training set correlations(coefficient > 0.1):') display(corr_train[high_corr_train]) corr_test = get_corr(test) high_corr_test = corr_test['Correlation Coefficient'] > 0.1 print('Testing set correlations(coefficient > 0.1):') display(corr_test[high_corr_test] )
Titanic - Machine Learning from Disaster
21,768,867
data=test_fcst["yhat_upper"] data.loc[1:40]<predict_on_test>
deck_class_surv_count = train[['Deck', 'Pclass', 'Survived']] deck_class_surv_count = deck_class_surv_count.groupby(['Deck', 'Pclass'] ).sum() deck_class_surv_count, _ = get_deck_class_count(deck_class_surv_count, False) deck_class_surv_prop = deck_class_surv_count.copy() for deck in DECKS: for pclass in deck_class_count[deck].keys() : try: deck_class_surv_prop[deck][pclass] = round(( deck_class_surv_count[deck][pclass] / deck_class_count[deck][pclass]), 2) except ZeroDivisionError: pass print("Decimal percent of passengers survived for each 'Deck', 'Pclass' combination (training set):") display(deck_class_surv_prop )
Titanic - Machine Learning from Disaster
21,768,867
train_fcst = model.predict(df=train_data.reset_index().rename(columns={'time':'ds'}))<data_type_conversions>
combined = combine_df(train, test) combined['DeckPclassSurvProp'] = combined.apply(lambda x: get_surv_prop(x['Deck'], x['Pclass']),axis=1 )
Titanic - Machine Learning from Disaster
21,768,867
def parserToTimeDatatype(time): return datetime.strptime(time,"%Y-%m-%d %H:%M:%S" )<load_from_csv>
combined['FamilySize'] = combined['SibSp'] + combined['Parch'] + 1 COLS_TO_DROP.extend(['SibSp', 'Parch']) train, test = divide_df(combined, TRAIN_LEN )
Titanic - Machine Learning from Disaster
21,768,867
df= pd.read_csv('/kaggle/input/predice-el-futuro/train_csv.csv', parse_dates = [1], index_col = 1, date_parser=parserToTimeDatatype) <filter>
combined = combine_df(train, test) combined['Title'] = combined['Name'].apply(lambda name: name.split(',')[1].split('.')[0].strip()) print('Count of passenger titles aboard the Titanic:') combined['Title'].value_counts()
Titanic - Machine Learning from Disaster
21,768,867
df= df.iloc[:,[1]] df<correct_missing_values>
normalized_titles = { "Capt": "Officer", "Col": "Officer", "Don": "Royalty", "Dona": "Royalty", "Dr": "Officer", "Jonkheer": "Royalty", "Lady" : "Royalty", "Major": "Officer", "Master" : "Master", "Miss" : "Miss", "Mlle": "Miss", "Mme": "Mrs", "Mr" : "Mr", "Mrs" : "Mrs", "Ms": "Mrs", "Rev": "Officer", "Sir" : "Royalty", "the Countess": "Royalty"} combined['Title'] = combined['Title'].map(normalized_titles) print('Count of updated passenger titles aboard the Titanic:') combined['Title'].value_counts()
Titanic - Machine Learning from Disaster
21,768,867
modifiedDF = df.diff(periods=1) modifiedDF.dropna(inplace=True) modifiedDF.head()<train_model>
print('Decimal percentages for survival based on passenger title:') combined[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean().sort_values(by='Survived', kind="quicksort", ascending=False )
Titanic - Machine Learning from Disaster
21,768,867
model = Prophet() model.fit(modifiedDF.reset_index().rename(columns={'time':'ds', 'feature':'y'}))<drop_column>
combined['Surname'] = combined['Name'].apply(lambda x: str.split(x, ",")[0] )
Titanic - Machine Learning from Disaster
21,768,867
train_data=train_data.drop(index = 79 )<predict_on_test>
display(combined.loc[combined['Surname'] == 'Davies'] )
Titanic - Machine Learning from Disaster
21,768,867
test_fcst = model.predict(df=modifiedDF.reset_index().rename(columns={'time':'ds'}))<drop_column>
combined.loc[combined['FamilySize'] == 11]
Titanic - Machine Learning from Disaster
21,768,867
del test_data['time']<compute_test_metric>
combined = group_survivors(combined, ['Surname', 'Fare', 'FamilySize'], 'FamilySurvival') print('Count of passengers with family survival data: ', combined.loc[combined['FamilySurvival']!=0.5].shape[0] )
Titanic - Machine Learning from Disaster
21,768,867
mean_squared_error(y_true=modifiedDF['feature'], y_pred=test_fcst['yhat'] )<compute_test_metric>
combined = group_survivors(combined, ['Ticket'], 'GroupSurvival') print('Count of passenger with group survival data: ', combined[combined['GroupSurvival']!=0.5].shape[0] )
Titanic - Machine Learning from Disaster