Column        Type    Range (values or string lengths)
kernel_id     int64   24.2k – 23.3M
prompt        string  lengths 8 – 1.85M
completetion  string  lengths 1 – 182k
comp_name     string  lengths 5 – 57
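Each record below follows the column order above: a kernel_id, a prompt cell, a completion cell, and the comp_name. For orientation, a minimal loading sketch, assuming a hypothetical parquet export of this dump (the file name is illustrative):

import pandas as pd

rows = pd.read_parquet('titanic_kernels.parquet')  # hypothetical file name
print(rows[['kernel_id', 'comp_name']].head())
print(rows['completetion'].str.len().describe())   # column name as spelled in the schema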
10,785,592
submission = pd.read_csv('../input/sample_submission.csv')<predict_on_test>
data.Age[data.Survived==1][data.Age<=18].count()
Titanic - Machine Learning from Disaster
10,785,592
prediction = linear_svc.predict(X_test)
submission['Survived'] = prediction<save_to_csv>
data.Embarked.fillna(data.Embarked.mode()[0], inplace=True)
Titanic - Machine Learning from Disaster
10,785,592
submission.to_csv('./submission7.csv', index=False)<import_modules>
data.Embarked.mode()
Titanic - Machine Learning from Disaster
10,785,592
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression, Perceptron
from sklearn.svm import SVC
from sklearn.ensemble import (RandomForestClassifier, GradientBoostingClassifier,
                              VotingClassifier, AdaBoostClassifier)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import (train_test_split, KFold, cross_val_score,
                                     cross_val_predict, cross_validate, GridSearchCV)
from sklearn.metrics import confusion_matrix
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from sklearn import feature_selection, model_selection, metrics
from pandas.plotting import scatter_matrix<load_from_csv>
data['total_family_size'] = data.SibSp + data.Parch
data.drop(['SibSp', 'Parch'], axis=1, inplace=True)
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
data_df = pd.concat([train_df, test_df])<feature_engineering>
data['familysize-fare+age+pclass'] = data.total_family_size - data.Fare + data.Age + data.Pclass
data.drop('total_family_size', axis=1, inplace=True)
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
data_df['Title'] = data_df['Name'].str.extract(r'([A-Za-z]+)\.', expand=True)<define_variables>
data['age_fare'] = data.Fare - data.Age
data['agegrp_fare'] = data.Fare * data.age_grp
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
           'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
data_df.replace({'Title': mapping}, inplace=True)
titles = ['Dr', 'Master', 'Miss', 'Mr', 'Mrs', 'Rev']<feature_engineering>
data.isna().sum()
Titanic - Machine Learning from Disaster
10,785,592
for title in titles:
    age_to_impute = data_df.groupby('Title')['Age'].median()[titles.index(title)]
    data_df.loc[(data_df['Age'].isnull()) & (data_df['Title'] == title), 'Age'] = age_to_impute
train_df['Age'] = data_df['Age'][:891]
test_df['Age'] = data_df['Age'][891:]
data_df.drop('Title', axis=1, inplace=True)<feature_engineering>
dict(list(zip(['Mr', 'Mrs', 'Miss', 'Master', 'rare'], [1, 2, 3, 4, 5])))
Titanic - Machine Learning from Disaster
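The imputation loop above keys group medians by list position, which silently mismatches if `titles` and the groupby order ever diverge. A minimal order-independent sketch, assuming `data_df` as built above:

# fill each missing Age with the median Age of that passenger's Title group
data_df['Age'] = data_df['Age'].fillna(data_df.groupby('Title')['Age'].transform('median'))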
10,785,592
data_df['Family_Size'] = data_df['Parch'] + data_df['SibSp']
train_df['Family_Size'] = data_df['Family_Size'][:891]
test_df['Family_Size'] = data_df['Family_Size'][891:]<feature_engineering>
data['suff+fare'] = data.suffix + data.Fare
data['suff+Pclass'] = data.suffix - data.Pclass
data['suff-fs-fare+age+cls'] = data.suffix - data['familysize-fare+age+pclass']
data['suff+age_fare'] = data.suffix + data.age_fare
data['suff+agegrp_fare'] = data.suffix + data.agegrp_fare
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
data_df['Last_Name'] = data_df['Name'].apply(lambda x: str.split(x, ",")[0])
data_df['Fare'].fillna(data_df['Fare'].mean(), inplace=True)<groupby>
data.isnull().sum()
Titanic - Machine Learning from Disaster
10,785,592
train_size = len(train_df)
test_size = len(test_df)
fare_df = data_df.loc[data_df['Family_Size'] > 1, ["Last_Name", "Fare", "Family_Size"]].iloc[:train_size]
fare_diff = (((fare_df.groupby(['Last_Name', 'Family_Size']).max()
               - fare_df.groupby(['Last_Name', 'Family_Size']).min()) != 0).sum() / train_size * 100)
print(("Percentage of families with different fares is: %.1f" % (fare_diff.values[0])) + '%')<data_type_conversions>
data.isnull().sum()
Titanic - Machine Learning from Disaster
10,785,592
train_temp_df = data_df.iloc[:train_size]
family_df_grpby = train_temp_df[train_temp_df['Family_Size'] > 1][
    ['Last_Name', 'Fare', 'Family_Size', 'Survived']].groupby(['Last_Name', 'Fare'])
family_df = pd.DataFrame(data=family_df_grpby.size(), columns=['Size in train'])
family_df['Survived total'] = family_df_grpby['Survived'].sum().astype(int)
family_df['Family_Size'] = family_df_grpby['Family_Size'].mean().astype(int)
print("Whole family survived: %.1f" % (100 * len(family_df[family_df['Size in train'] == family_df['Survived total']]) / len(family_df)) + '%')
print("Whole family perished: %.1f" % (100 * len(family_df[family_df['Survived total'] == 0]) / len(family_df)) + '%')<feature_engineering>
data.Embarked = data.Embarked.map({'C': 1, 'S': 2, 'Q': 3})
Titanic - Machine Learning from Disaster
10,785,592
DEFAULT_SURVIVAL_VALUE = 0.5
data_df['Family_Survival'] = DEFAULT_SURVIVAL_VALUE
for grp, grp_df in data_df[['Survived', 'Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
                            'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
    if len(grp_df) != 1:
        for ind, row in grp_df.iterrows():
            smax = grp_df.drop(ind)['Survived'].max()
            smin = grp_df.drop(ind)['Survived'].min()
            passID = row['PassengerId']
            if smax == 1.0:
                data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 1
            elif smin == 0.0:
                data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:",
      data_df.loc[data_df['Family_Survival'] != 0.5].shape[0])<train_model>
data['embarked_suffix'] = data.Embarked / data.suffix
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
for _, grp_df in data_df.groupby('Ticket'):
    if len(grp_df) != 1:
        for ind, row in grp_df.iterrows():
            if (row['Family_Survival'] == 0) | (row['Family_Survival'] == 0.5):
                smax = grp_df.drop(ind)['Survived'].max()
                smin = grp_df.drop(ind)['Survived'].min()
                passID = row['PassengerId']
                if smax == 1.0:
                    data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 1
                elif smin == 0.0:
                    data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family/group survival information: "
      + str(data_df[data_df['Family_Survival'] != 0.5].shape[0]))
train_df['Family_Survival'] = data_df['Family_Survival'][:891]
test_df['Family_Survival'] = data_df['Family_Survival'][891:]<feature_engineering>
data['pclass-suff+age'] = data.Pclass - data['suff+age_fare']
data['pclass/suff'] = data.Pclass / data.suffix
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
data_df['FareBin'] = pd.qcut(data_df['Fare'], 5)<categorify>
data.Sex = data.Sex.map({'male': 0, 'female': 1})
data.corr()
Titanic - Machine Learning from Disaster
10,785,592
label = LabelEncoder()
data_df['FareBin_Code'] = label.fit_transform(data_df['FareBin'])<drop_column>
v0 = data.copy()
Titanic - Machine Learning from Disaster
10,785,592
train_df['FareBin_Code'] = data_df['FareBin_Code'][:891]
test_df['FareBin_Code'] = data_df['FareBin_Code'][891:]
train_df.drop(['Fare'], axis=1, inplace=True)
test_df.drop(['Fare'], axis=1, inplace=True)<categorify>
data = v0.copy()
Titanic - Machine Learning from Disaster
10,785,592
data_df['AgeBin'] = pd.qcut(data_df['Age'], 4)
label = LabelEncoder()
data_df['AgeBin_Code'] = label.fit_transform(data_df['AgeBin'])
train_df['AgeBin_Code'] = data_df['AgeBin_Code'][:891]
test_df['AgeBin_Code'] = data_df['AgeBin_Code'][891:]
train_df.drop(['Age'], axis=1, inplace=True)
test_df.drop(['Age'], axis=1, inplace=True)<drop_column>
v1 = data.copy()
Titanic - Machine Learning from Disaster
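The FareBin and AgeBin cells above run `pd.qcut` and then a LabelEncoder; `labels=False` yields the same integer codes in one step. A sketch assuming the same `data_df`:

data_df['FareBin_Code'] = pd.qcut(data_df['Fare'], 5, labels=False)
data_df['AgeBin_Code'] = pd.qcut(data_df['Age'], 4, labels=False)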
10,785,592
train_df['Sex'].replace(['male', 'female'], [0, 1], inplace=True)
test_df['Sex'].replace(['male', 'female'], [0, 1], inplace=True)
train_df.drop(['Name', 'PassengerId', 'SibSp', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
test_df.drop(['Name', 'PassengerId', 'SibSp', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)<prepare_x_and_y>
alldata = data.copy()
x = alldata.drop('Survived', axis=1)
x = x[sorted(x)]
y = alldata.Survived
Titanic - Machine Learning from Disaster
10,785,592
X = train_df.drop('Survived', axis=1)
y = train_df['Survived']
X_test = test_df.copy()<normalization>
trainx, testx, trainy, testy = train_test_split(x, y, test_size=0.1, random_state=4355)
xgb = XGBClassifier(n_estimators=8, max_depth=3, random_state=345)  # assumes: from xgboost import XGBClassifier; the bare module name is not callable
xgb.fit(trainx, trainy)
print(xgb.score(trainx, trainy))
print(xgb.score(testx, testy))
impcol = x.columns[xgb.feature_importances_.astype(bool)]
x = x[impcol]
impx = x.copy()
impy = y.copy()
model(x, y)
trainx, testx, trainy, testy = train_test_split(x, y, test_size=0.1, random_state=4355)
xgb = XGBClassifier(n_estimators=8, max_depth=3, random_state=345)
xgb.fit(trainx, trainy)
print(xgb.score(trainx, trainy))
print(xgb.score(testx, testy))
Titanic - Machine Learning from Disaster
10,785,592
std_scaler = StandardScaler()
X = std_scaler.fit_transform(X)
X_test = std_scaler.transform(X_test)<import_modules>
alldata1 = corrdata.copy()
x = alldata1.drop('Survived', axis=1)
x = x[sorted(x)]
y = alldata.Survived
corx = x.copy()
cory = y.copy()
Titanic - Machine Learning from Disaster
10,785,592
from sklearn.model_selection import GridSearchCV<define_search_space>
model(x, y)
Titanic - Machine Learning from Disaster
10,785,592
<train_on_grid>
trainx, testx, trainy, testy = train_test_split(x, y, test_size=0.1, random_state=4355)
xgb = XGBClassifier(n_estimators=8, max_depth=3, random_state=345)
xgb.fit(trainx, trainy)
print(xgb.score(trainx, trainy))
print(xgb.score(testx, testy))
Titanic - Machine Learning from Disaster
10,785,592
<choose_model_class>
cor_impcol = x.columns[xgb.feature_importances_.astype(bool)]
x = x[cor_impcol]
cor_impcol
Titanic - Machine Learning from Disaster
10,785,592
<init_hyperparams>
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
Titanic - Machine Learning from Disaster
10,785,592
n_neighbors = [6, 7, 8, 9, 10, 11, 12, 14, 16, 18, 20, 22]
algorithm = ['auto']
weights = ['uniform', 'distance']
leaf_size = list(range(1, 50, 5))
hyperparams = {'algorithm': algorithm, 'weights': weights, 'leaf_size': leaf_size, 'n_neighbors': n_neighbors}
gd = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=hyperparams,
                  verbose=True, cv=10, scoring="accuracy")
gd.fit(X, y)
print(gd.best_score_)
print(gd.best_estimator_)<predict_on_test>
trainx, testx, trainy, testy = train_test_split(corx[cor_impcol], cory, test_size=0.1, random_state=4355)
inpshape = trainx.shape
inpshape
Titanic - Machine Learning from Disaster
10,785,592
gd.best_estimator_.fit(X, y)
y_pred = gd.best_estimator_.predict(X_test)<save_to_csv>
model = Sequential([
    Dense(10, activation='relu', input_shape=inpshape[1:]),  # feature axis only; the full shape includes the sample axis
    Dense(13, activation='relu'),
    Dropout(0.3),
    Dense(17, activation='relu'),
    Dropout(0.3),
    Dense(13, activation='relu'),
    Dense(10, activation='relu'),
    Dropout(0.3),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(metrics=['accuracy'], loss='binary_crossentropy')
Titanic - Machine Learning from Disaster
10,785,592
submission = pd.read_csv("../input/sample_submission.csv")
submission['Survived'] = y_pred
submission.to_csv("../working/submission0615_9.csv", index=False)<set_options>
history = model.fit(trainx, trainy, batch_size=32, epochs=40, validation_split=0.1)
Titanic - Machine Learning from Disaster
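The History object returned by `model.fit` holds per-epoch metrics, useful for spotting over- or underfitting. A sketch assuming the `history` variable and the 'accuracy' metric name used above:

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()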
10,785,592
import warnings
plt.style.use('seaborn')
sns.set(font_scale=2.5)
warnings.filterwarnings('ignore')
%matplotlib inline<load_from_csv>
model.evaluate(testx, testy)
Titanic - Machine Learning from Disaster
10,785,592
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.head()<feature_engineering>
import pandas as pd
import numpy as np
Titanic - Machine Learning from Disaster
10,785,592
df_train['FamilySize'] = df_train['SibSp'] + df_train['Parch'] + 1
df_test['FamilySize'] = df_test['SibSp'] + df_test['Parch'] + 1<feature_engineering>
import pandas as pd
import numpy as np
Titanic - Machine Learning from Disaster
10,785,592
df_test.loc[df_test.Fare.isnull(), 'Fare'] = df_test['Fare'].mean()
df_train['Fare'] = df_train['Fare'].map(lambda i: np.log(i) if i > 0 else 0)
df_test['Fare'] = df_test['Fare'].map(lambda i: np.log(i) if i > 0 else 0)<feature_engineering>
test_data = Test(pd.read_csv('../input/titanic/test.csv'))
test_data = test_data.get_data()
Titanic - Machine Learning from Disaster
10,785,592
df_train['Fare_r'] = 0
df_train.loc[df_train['Fare'] < 2.065, 'Fare_r'] = 0
df_train.loc[(2.065 <= df_train['Fare']) & (df_train['Fare'] < 2.44), 'Fare_r'] = 1
df_train.loc[(2.44 <= df_train['Fare']) & (df_train['Fare'] < 3.18), 'Fare_r'] = 2
df_train.loc[(3.18 <= df_train['Fare']) & (df_train['Fare'] < 3.75), 'Fare_r'] = 3
df_train.loc[3.75 <= df_train['Fare'], 'Fare_r'] = 4<feature_engineering>
for i in impx.columns:
    if i not in test_data.columns:
        print(i)
Titanic - Machine Learning from Disaster
10,785,592
df_test['Fare_r'] = 0
df_test.loc[df_test['Fare'] < 2.065, 'Fare_r'] = 0
df_test.loc[(2.065 <= df_test['Fare']) & (df_test['Fare'] < 2.44), 'Fare_r'] = 1
df_test.loc[(2.44 <= df_test['Fare']) & (df_test['Fare'] < 3.18), 'Fare_r'] = 2
df_test.loc[(3.18 <= df_test['Fare']) & (df_test['Fare'] < 3.75), 'Fare_r'] = 3
df_test.loc[3.75 <= df_test['Fare'], 'Fare_r'] = 4<feature_engineering>
for i in test_data.columns:
    if i not in impx.columns:
        print(i)
Titanic - Machine Learning from Disaster
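The five hand-written threshold assignments above reduce to a single `pd.cut` call over the same edges. A sketch assuming the log-transformed `Fare` columns built earlier:

fare_bins = [-np.inf, 2.065, 2.44, 3.18, 3.75, np.inf]
# right=False keeps the same half-open intervals as the .loc assignments above
df_train['Fare_r'] = pd.cut(df_train['Fare'], bins=fare_bins, labels=False, right=False)
df_test['Fare_r'] = pd.cut(df_test['Fare'], bins=fare_bins, labels=False, right=False)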
10,785,592
df_train['Initial'] = df_train.Name.str.extract(r'([A-Za-z]+)\.')
df_test['Initial'] = df_test.Name.str.extract(r'([A-Za-z]+)\.')<categorify>
test_data = test_data[sorted(test_data)]
Titanic - Machine Learning from Disaster
10,785,592
df_train['Initial'].replace(['Mlle', 'Mme', 'Ms', 'Dr', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don', 'Dona'],
                            ['Miss', 'Miss', 'Miss', 'Mr', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr', 'Mr'], inplace=True)
df_test['Initial'].replace(['Mlle', 'Mme', 'Ms', 'Dr', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don', 'Dona'],
                           ['Miss', 'Miss', 'Miss', 'Mr', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr', 'Mr'], inplace=True)<feature_engineering>
Fmodel = XGBClassifier(n_estimators=8, max_depth=3, random_state=345)
Fmodel.fit(impx, impy)
print(Fmodel.score(impx, impy))
Titanic - Machine Learning from Disaster
10,785,592
<data_type_conversions><EOS>
predS(Fmodel, test_data[impx.columns])
Titanic - Machine Learning from Disaster
12,512,656
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering>
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
import re
import xgboost as xgb
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
Titanic - Machine Learning from Disaster
12,512,656
df_train['Age_cat'] = 0
df_train.loc[df_train['Age'] < 21, 'Age_cat'] = 0
df_train.loc[(21 <= df_train['Age']) & (df_train['Age'] < 27), 'Age_cat'] = 1
df_train.loc[(27 <= df_train['Age']) & (df_train['Age'] < 33), 'Age_cat'] = 2
df_train.loc[(33 <= df_train['Age']) & (df_train['Age'] < 39), 'Age_cat'] = 3
df_train.loc[39 <= df_train['Age'], 'Age_cat'] = 4<feature_engineering>
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
print("Train set size:", train.shape)
print("Test set size:", test.shape)
Titanic - Machine Learning from Disaster
12,512,656
df_test['Age_cat'] = 0
df_test.loc[df_test['Age'] < 21, 'Age_cat'] = 0
df_test.loc[(21 <= df_test['Age']) & (df_test['Age'] < 27), 'Age_cat'] = 1
df_test.loc[(27 <= df_test['Age']) & (df_test['Age'] < 33), 'Age_cat'] = 2
df_test.loc[(33 <= df_test['Age']) & (df_test['Age'] < 39), 'Age_cat'] = 3
df_test.loc[39 <= df_test['Age'], 'Age_cat'] = 4<count_values>
test_ID = test['PassengerId']
train = train.drop(columns=['PassengerId', 'Ticket', 'Cabin'])
test = test.drop(columns=['PassengerId', 'Ticket', 'Cabin'])
Titanic - Machine Learning from Disaster
12,512,656
print(df_train['Age_cat'].value_counts())<categorify>
train['FamilySize'] = train['SibSp'] + train['Parch'] + 1
test['FamilySize'] = test['SibSp'] + test['Parch'] + 1
Titanic - Machine Learning from Disaster
12,512,656
df_train = pd.get_dummies(df_train, columns=['Age_cat'], prefix='Age_cat')
df_test = pd.get_dummies(df_test, columns=['Age_cat'], prefix='Age_cat')<drop_column>
train = train.drop(columns=["SibSp", "Parch"])
test = test.drop(columns=["SibSp", "Parch"])
Titanic - Machine Learning from Disaster
12,512,656
df_train.drop(['Age'], axis=1, inplace=True)
df_test.drop(['Age'], axis=1, inplace=True)
df_train.head()<categorify>
for dataset in [train, test]:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
Titanic - Machine Learning from Disaster
12,512,656
df_train['Initial'] = df_train['Initial'].map({'Master': 0, 'Miss': 1, 'Mr': 2, 'Mrs': 3, 'Other': 4})
df_test['Initial'] = df_test['Initial'].map({'Master': 0, 'Miss': 1, 'Mr': 2, 'Mrs': 3, 'Other': 4})<count_values>
train['Embarked'] = train['Embarked'].fillna('S')
train['Embarked'] = train['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
test['Embarked'] = test['Embarked'].fillna('S')
test['Embarked'] = test['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train['Sex'] = train['Sex'].map({'male': 0, 'female': 1})
test['Sex'] = test['Sex'].map({'male': 0, 'female': 1})
Titanic - Machine Learning from Disaster
12,512,656
df_train['Embarked'].unique()
df_train['Embarked'].value_counts()<categorify>
def get_title(name):
    title_search = re.search(r'([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""

for dataset in [train, test]:
    dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in [train, test]:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr',
                                                 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
    title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
Titanic - Machine Learning from Disaster
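`get_title` applies `re.search` row by row; pandas' vectorized `str.extract` produces the same titles with the identical pattern. A sketch assuming `train` and `test` as above:

for dataset in [train, test]:
    dataset['Title'] = dataset['Name'].str.extract(r'([A-Za-z]+)\.', expand=False)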
12,512,656
df_train['Embarked'] = df_train['Embarked'].map({'C': 0, 'Q': 1, 'S': 2})
df_test['Embarked'] = df_test['Embarked'].map({'C': 0, 'Q': 1, 'S': 2})<count_missing_values>
train = train.drop(columns=["Name"])
test = test.drop(columns=["Name"])
Titanic - Machine Learning from Disaster
12,512,656
df_train['Embarked'].isnull().any()<categorify>
y = train.Survived
train = train.drop(columns=["Survived"])
all_data = pd.concat((train, test))
Titanic - Machine Learning from Disaster
12,512,656
df_train['Sex'] = df_train['Sex'].map({'female': 0, 'male': 1})
df_test['Sex'] = df_test['Sex'].map({'female': 0, 'male': 1})<categorify>
all_data = all_data.fillna(all_data.mean())
Titanic - Machine Learning from Disaster
12,512,656
df_train = pd.get_dummies(df_train, columns=['Initial'], prefix='Initial')
df_test = pd.get_dummies(df_test, columns=['Initial'], prefix='Initial')<categorify>
all_data.columns[all_data.isnull().any()]
Titanic - Machine Learning from Disaster
12,512,656
df_train = pd.get_dummies(df_train, columns=['Embarked'], prefix='Embarked')
df_test = pd.get_dummies(df_test, columns=['Embarked'], prefix='Embarked')<drop_column>
Ntrain = all_data[:train.shape[0]]
test = all_data[train.shape[0]:]
Titanic - Machine Learning from Disaster
12,512,656
df_train.drop(['PassengerId', 'Name', 'SibSp', 'Parch'], axis=1, inplace=True)
df_test.drop(['PassengerId', 'Name', 'SibSp', 'Parch'], axis=1, inplace=True)<set_options>
def cv(model, cv=10):
    # mean CV accuracy; accuracy needs no square-root transform (that idiom belongs to RMSE)
    cvx = cross_val_score(model, Ntrain, y, scoring="accuracy", cv=cv)
    return cvx
Titanic - Machine Learning from Disaster
12,512,656
%matplotlib inline<prepare_x_and_y>
ran_for = RandomForestClassifier(n_estimators=50)
cv(ran_for, cv=10).mean()
Titanic - Machine Learning from Disaster
12,512,656
X_train = df_train.drop('Survived', axis=1).values
target_label = df_train['Survived'].values
X_test = df_test.values<split>
xgb_cl = xgb.XGBClassifier(colsample_bytree=0.70, learning_rate=0.03, max_depth=5,
                           min_child_weight=4, n_estimators=50, nthread=4, subsample=0.7)
cv(xgb_cl).mean()
Titanic - Machine Learning from Disaster
12,512,656
X_tr, X_vld, y_tr, y_vld = train_test_split(X_train, target_label, test_size=0.3, random_state=2018)<compute_train_metric>
gbm = xgb.XGBClassifier(n_estimators=200, max_depth=4, min_child_weight=2, gamma=0.9,
                        subsample=0.8, colsample_bytree=0.8, objective='binary:logistic',
                        nthread=-1, scale_pos_weight=1)
cv(gbm).mean()
Titanic - Machine Learning from Disaster
12,512,656
<compute_train_metric><EOS>
sub = pd.DataFrame()
sub['PassengerId'] = test_ID
xgb_cl.fit(Ntrain, y)
ran_for.fit(Ntrain, y)
gbm.fit(Ntrain, y)
sub['Survived'] = (0.6 * xgb_cl.predict(test) + 0.3 * gbm.predict(test) + 0.1 * ran_for.predict(test))
sub['Survived'] = sub['Survived'].apply(lambda f: 1 if f >= 0.5 else 0)
sub.to_csv('ensbleX.csv', index=False)
Titanic - Machine Learning from Disaster
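The hand-rolled 0.6/0.3/0.1 vote above is essentially a weighted majority vote, which sklearn's VotingClassifier expresses directly. A sketch under the same assumptions (`Ntrain`, `y`, `test` as defined earlier):

from sklearn.ensemble import VotingClassifier

vote = VotingClassifier(
    estimators=[('xgb', xgb_cl), ('gbm', gbm), ('rf', ran_for)],
    voting='hard',  # weighted majority over the hard 0/1 predictions
    weights=[0.6, 0.3, 0.1])
vote.fit(Ntrain, y)
ensemble_pred = vote.predict(test)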
12,004,261
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_train_metric>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.neural_network import MLPClassifier
from hyperopt import hp, tpe, Trials, fmin, STATUS_OK, space_eval
Titanic - Machine Learning from Disaster
12,004,261
random_forest = RandomForestClassifier()
random_forest.fit(X_tr, y_tr)
prediction = random_forest.predict(X_vld)
print('Random Forest2: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_random_forest2 = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.head()
Titanic - Machine Learning from Disaster
12,004,261
logreg = LogisticRegression()
logreg.fit(X_tr, y_tr)
prediction = logreg.predict(X_vld)
print('Logistic Regression: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_log = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.head()
Titanic - Machine Learning from Disaster
12,004,261
svc = SVC()
svc.fit(X_tr, y_tr)
prediction = svc.predict(X_vld)
print('Support Vector Machines: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_svc = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
Titanic - Machine Learning from Disaster
12,004,261
knn = KNeighborsClassifier(n_neighbors=26)
knn.fit(X_tr, y_tr)
prediction = knn.predict(X_vld)
print('k-Nearest Neighbor: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_knn = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
print('---- train null ----', train.isnull().sum())
print('---- test null ----', test.isnull().sum())
Titanic - Machine Learning from Disaster
12,004,261
gaussian = GaussianNB()
gaussian.fit(X_tr, y_tr)
prediction = gaussian.predict(X_vld)
print('Naive Bayes: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_gaussian = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
sns.set_palette('rainbow')
sns.set_style('darkgrid')
Titanic - Machine Learning from Disaster
12,004,261
perceptron = Perceptron()
perceptron.fit(X_tr, y_tr)
prediction = perceptron.predict(X_vld)
print('Perceptron: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_perceptron = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
combine = pd.concat([train, test], sort=False)
Titanic - Machine Learning from Disaster
12,004,261
linear_svc = LinearSVC()
linear_svc.fit(X_tr, y_tr)
prediction = linear_svc.predict(X_vld)
print('Linear SVC: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_linear_svc = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
combine['Sex'].replace(['male', 'female'], [0, 1], inplace=True)
Titanic - Machine Learning from Disaster
12,004,261
sgd = SGDClassifier()
sgd.fit(X_tr, y_tr)
prediction = sgd.predict(X_vld)
print('Stochastic Gradient Descent: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_sgd = 100 * metrics.accuracy_score(prediction, y_vld)<compute_train_metric>
combine['Age'].fillna(combine['Age'].median(), inplace=True)
Titanic - Machine Learning from Disaster
12,004,261
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_tr, y_tr)
prediction = decision_tree.predict(X_vld)
print('Decision Tree: predicted survival for {} passengers with {:.2f}% accuracy'.format(
    y_vld.shape[0], 100 * metrics.accuracy_score(prediction, y_vld)))
acc_decision_tree = 100 * metrics.accuracy_score(prediction, y_vld)<train_model>
standard = StandardScaler()
combine[['Age']] = standard.fit_transform(combine[['Age']])
Titanic - Machine Learning from Disaster
12,004,261
gbk = GradientBoostingClassifier()
gbk.fit(X_tr, y_tr)
prediction = gbk.predict(X_vld)
acc_gbk = round(accuracy_score(prediction, y_vld) * 100, 2)
print(acc_gbk)<create_dataframe>
combine['Fare'].fillna(combine['Fare'].median(), inplace=True)
Titanic - Machine Learning from Disaster
12,004,261
models = pd.DataFrame({
    'Model': ['XGB', 'Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest1',
              'Random Forest2', 'Naive Bayes', 'Perceptron', 'Stochastic Gradient Decent',
              'Linear SVC', 'Decision Tree'],
    'Score': [acc_xgb, acc_svc, acc_knn, acc_log, acc_random_forest1, acc_random_forest2,
              acc_gaussian, acc_perceptron, acc_sgd, acc_linear_svc, acc_decision_tree]})
print(models.sort_values(by='Score', ascending=False))<load_from_csv>
rscaler = RobustScaler(quantile_range=(25., 75.))
combine[['Fare']] = rscaler.fit_transform(combine[['Fare']])
Titanic - Machine Learning from Disaster
12,004,261
submission = pd.read_csv('../input/sample_submission.csv')<predict_on_test>
combine['Pclass'] = (combine['Pclass'] - 1) / 2
Titanic - Machine Learning from Disaster
12,004,261
prediction = logreg.predict(X_test)
submission['Survived'] = prediction<save_to_csv>
combine['FamilySize'] = combine['SibSp'] + combine['Parch']
Titanic - Machine Learning from Disaster
12,004,261
submission.to_csv('./my_first_submission.csv', index=False)<load_from_csv>
standard = StandardScaler()
combine[['FamilySize']] = standard.fit_transform(combine[['FamilySize']])
Titanic - Machine Learning from Disaster
12,004,261
df_train = pd.read_csv('/kaggle/input/1056lab-diabetes-diagnosis/train.csv', index_col=0)
df_test = pd.read_csv('/kaggle/input/1056lab-diabetes-diagnosis/test.csv', index_col=0)<categorify>
combine['Embarked'].fillna(combine['Embarked'].mode()[0], inplace=True)
Titanic - Machine Learning from Disaster
12,004,261
df_train_dummies = pd.get_dummies(df_train, columns=['Gender'], drop_first=True)
df_test_dummies = pd.get_dummies(df_test, columns=['Gender'], drop_first=True)<train_model>
combine = pd.get_dummies(combine, columns=['Embarked'], drop_first=True)
Titanic - Machine Learning from Disaster
12,004,261
X_train_dummies = df_train_dummies.drop(columns='Diabetes').values
y_train_dummies = df_train_dummies['Diabetes'].values
X_train, X_valid, y_train, y_valid = train_test_split(X_train_dummies, y_train_dummies, test_size=0.2, random_state=0)
rfc = RandomForestClassifier(random_state=0)
rfc.fit(X_train, y_train)<compute_train_metric>
combine = pd.get_dummies(combine, columns=['Title'], drop_first=True)
Titanic - Machine Learning from Disaster
12,004,261
from sklearn.metrics import roc_curve, auc
y_pred = rfc.predict_proba(X_valid)[:, 1]
fpr, tpr, thresholds = roc_curve(y_valid, y_pred)
auc(fpr, tpr)<find_best_params>
tmp = pd.DataFrame()
tmp['Cabin'] = combine['Cabin']
tmp['Cabin'].loc[tmp['Cabin'].notnull()] = 1
combine['Cabin'] = tmp['Cabin']
combine['Cabin'].replace(np.nan, 0, inplace=True)
Titanic - Machine Learning from Disaster
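`roc_curve` followed by `auc` is equivalent to a single `roc_auc_score` call for a binary target. A sketch assuming `y_valid` and the predicted probabilities above:

from sklearn.metrics import roc_auc_score

roc_auc_score(y_valid, y_pred)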
12,004,261
import optuna

def objective(trial):
    criterion = trial.suggest_categorical('criterion', ['gini', 'entropy'])
    max_depth = trial.suggest_int('max_depth', 1, 30)
    n_estimators = trial.suggest_int('n_estimators', 10, 300)
    model = RandomForestClassifier(criterion=criterion, max_depth=max_depth,
                                   n_estimators=n_estimators, random_state=0, n_jobs=-1)
    model.fit(X_train, y_train)
    y_pred = model.predict_proba(X_valid)[:, 1]
    fpr, tpr, thresholds = roc_curve(y_valid, y_pred)
    return auc(fpr, tpr)

study = optuna.create_study(direction='maximize')  # the objective returns AUC, which should be maximized
study.optimize(objective, n_trials=100)
study.best_params<find_best_params>
combine_cleaned = combine.drop(['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket'], axis=1)
combine_cleaned.head()
Titanic - Machine Learning from Disaster
12,004,261
criterion = study.best_params['criterion']
max_depth = study.best_params['max_depth']
n_estimators = study.best_params['n_estimators']
model = RandomForestClassifier(criterion=criterion, max_depth=max_depth,
                               n_estimators=n_estimators, random_state=0, n_jobs=-1)
model.fit(X_train, y_train)
y_pred = model.predict_proba(X_valid)[:, 1]
fpr, tpr, thresholds = roc_curve(y_valid, y_pred)
auc(fpr, tpr)<train_model>
def hyperopt_and_pred(models, max_evals=100):
    preds = []
    for model in models:
        clf = model['classifier']
        space = model['space']

        def objective(space):
            classifier = clf(**space)
            classifier.fit(X_train, y_train)
            skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
            acc = cross_validate(estimator=classifier, X=X_train, y=y_train, cv=skf)
            mean_acc = np.mean(acc['test_score'])
            return {'loss': 1 - mean_acc, 'status': STATUS_OK}

        print('=' * 10, str(clf).split('.')[-1].replace("'>", ''), '=' * 10)
        trials = Trials()
        best = fmin(objective, space, algo=tpe.suggest, max_evals=max_evals, trials=trials, verbose=1)
        best_params = space_eval(space, best)
        predict = clf(**best_params).fit(X_train, y_train).predict(X_test)
        acc = 1 - trials.best_trial['result']['loss']
        preds.append(predict)
        print('best parameters:', best_params)
        print('accuracy:', f'{acc:.04f}')
    return preds
Titanic - Machine Learning from Disaster
12,004,261
model.fit(X_train_dummies, y_train_dummies)
X_test = df_test_dummies.values
y_pred = model.predict_proba(X_test)[:, 1]<save_to_csv>
models = [
    {'classifier': LogisticRegression,
     'space': {'C': hp.uniform('C', 0, 100),
               'max_iter': hp.choice('max_iter', [2000]),
               'solver': hp.choice('solver', ['lbfgs', 'liblinear', 'sag', 'saga'])}},
    {'classifier': RandomForestClassifier,
     'space': {'n_estimators': hp.choice('n_estimators', np.arange(10, 401, 10)),
               'max_depth': hp.uniform('max_depth', 1, 5),
               'criterion': hp.choice('criterion', ['gini', 'entropy'])}},
    {'classifier': KNeighborsClassifier,
     'space': {'n_neighbors': hp.choice('n_neighbors', np.arange(1, 15))}},
    {'classifier': AdaBoostClassifier,
     'space': {'n_estimators': hp.choice('n_estimators', [30, 50, 100, 200, 300]),
               'learning_rate': hp.uniform('learning_rate', 0.8, 1.4)}},
    {'classifier': SVC,
     'space': {'C': hp.uniform('C', 0, 2),
               'gamma': hp.loguniform('gamma', -8, 2),
               'kernel': hp.choice('kernel', ['rbf', 'poly', 'sigmoid'])}},
    {'classifier': LGBMClassifier,
     'space': {'objective': hp.choice('objective', ['binary']),
               'max_bin': hp.choice('max_bin', np.arange(64, 513, 1)),
               'num_leaves': hp.choice('num_leaves', np.arange(30, 201, 10)),
               'max_depth': hp.choice('max_depth', np.arange(3, 10, 1)),
               'learning_rate': hp.uniform('learning_rate', 0.03, 0.2)}},
    {'classifier': MLPClassifier,
     'space': {'hidden_layer_sizes': hp.choice('hidden_layer_sizes', [8, 16, 32, (8, 8), (16, 16)]),
               'activation': hp.choice('activation', ['relu', 'tanh']),
               'max_iter': hp.choice('max_iter', [3000])}},
]
Titanic - Machine Learning from Disaster
12,004,261
submit = pd.read_csv('/kaggle/input/1056lab-diabetes-diagnosis/sampleSubmission.csv')
submit['Diabetes'] = y_pred
submit.to_csv('submission.csv', index=False)<load_from_csv>
predictions = hyperopt_and_pred(models)
Titanic - Machine Learning from Disaster
12,004,261
train = pd.read_csv('../input/gl-hack-landmarks/train.csv')
sample_submission = pd.read_csv('../input/gl-hack-landmarks/sample_submission.csv')
coord_list = list(sample_submission.columns)[1:]
for c in coord_list:
    sample_submission[c] = train[c].mean()
sample_submission.to_csv('submission.csv', index=False)<set_options>
ensembled_pred = np.round(sum(predictions) / len(predictions)).astype('int')
Titanic - Machine Learning from Disaster
12,004,261
<load_from_csv><EOS>
results = pd.Series(ensembled_pred, name='Survived')
submission = pd.concat([submission['PassengerId'], results], axis=1)
submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
10,598,995
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
%matplotlib inline
pd.options.display.float_format = '{:.2f}'.format
Titanic - Machine Learning from Disaster
10,598,995
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')<categorify>
train = pd.read_csv('../input/titanic/train.csv')
train.head()
Titanic - Machine Learning from Disaster
10,598,995
pres = {'deKlerk': 0, 'Mandela': 1, 'Mbeki': 2, 'Motlanthe': 3, 'Zuma': 4, 'Ramaphosa': 5}
train.replace({'president': pres}, inplace=True)<define_variables>
train.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
starts = {0: 1, 1: 1, 2: 1, 3: 12, 4: 12, 5: 5, 6: 1, 7: 1, 8: 8, 9: 9, 10: 12,
          11: 14, 12: 14, 13: 15, 14: 15, 15: 15, 16: 15, 17: 15, 18: 15, 19: 15,
          20: 20, 21: 1, 22: 15, 23: 20, 24: 20, 25: 15, 26: 15, 27: 20, 28: 20,
          29: 15, 30: 18}<string_transform>
train.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
def divide_on(df, char):
    sentences = []
    for i, row in df.iterrows():
        for sentence in row['text'].split(char)[starts[i]:]:
            sentences.append([row['president'], sentence])
    df = pd.DataFrame(sentences, columns=['president', 'text'])
    return df[df['text'] != '']<string_transform>
sur = train['Survived'].value_counts()
survival_rate = [sur[0] / len(train) * 100, sur[1] / len(train) * 100]
Titanic - Machine Learning from Disaster
10,598,995
train = divide_on(train, '.')<count_values>
survived_male = len(train[(train['Sex'] == 'male') & (train['Survived'] == 1)]) / len(train[train['Sex'] == 'male']) * 100
survived_female = len(train[(train['Sex'] == 'female') & (train['Survived'] == 1)]) / len(train[train['Sex'] == 'female']) * 100
male = [survived_male, 100 - survived_male]
female = [survived_female, 100 - survived_female]
Titanic - Machine Learning from Disaster
10,598,995
train['president'].value_counts()<count_values>
survived_class_1 = len(train[(train['Pclass'] == 1) & (train['Survived'] == 1)]) / len(train[train['Pclass'] == 1]) * 100
survived_class_2 = len(train[(train['Pclass'] == 2) & (train['Survived'] == 1)]) / len(train[train['Pclass'] == 2]) * 100
survived_class_3 = len(train[(train['Pclass'] == 3) & (train['Survived'] == 1)]) / len(train[train['Pclass'] == 3]) * 100
class_1 = [survived_class_1, 100 - survived_class_1]
class_2 = [survived_class_2, 100 - survived_class_2]
class_3 = [survived_class_3, 100 - survived_class_3]
Titanic - Machine Learning from Disaster
10,598,995
train['president'].value_counts() / train.shape[0]<concatenate>
survived_S = len(train[(train['Embarked'] == 'S') & (train['Survived'] == 1)]) / len(train[train['Embarked'] == 'S']) * 100
survived_C = len(train[(train['Embarked'] == 'C') & (train['Survived'] == 1)]) / len(train[train['Embarked'] == 'C']) * 100
survived_Q = len(train[(train['Embarked'] == 'Q') & (train['Survived'] == 1)]) / len(train[train['Embarked'] == 'Q']) * 100
S = [survived_S, 100 - survived_S]
C = [survived_C, 100 - survived_C]
Q = [survived_Q, 100 - survived_Q]
Titanic - Machine Learning from Disaster
10,598,995
train['sentence'] = None
test['president'] = None
df = pd.concat([train, test], axis=0, sort=False)<drop_column>
train['Age_Mode'] = train['Age'].fillna(value=24)
train['Age_Med'] = train['Age'].fillna(train['Age'].median())
train['Age_Mean'] = train['Age'].fillna(train['Age'].mean())
Titanic - Machine Learning from Disaster
10,598,995
df = df[['sentence', 'text', 'president']]<feature_engineering>
train['Age'] = train['Age'].fillna(train['Age'].median())
train = train.drop(columns=['Age_Mode', 'Age_Med', 'Age_Mean'])
train.head()
Titanic - Machine Learning from Disaster
10,598,995
import re
import string

def fixup(text):
    text = ''.join([char for char in text if char == '-' or char not in string.punctuation])
    text = re.sub(r'^[*-]', '', text)  # str.replace treats the pattern literally; a regex needs re.sub
    text = ''.join([char for char in text if not char.isdigit()])
    text = text.lower()
    text = " ".join(text.split())
    return text

df['text'] = df['text'].apply(fixup)<feature_engineering>
def get_age_group(dataframe, column_name):
    dataframe[column_name] = dataframe[column_name].apply(np.ceil)
    age_group = {0: list(range(0, 21)), 1: list(range(21, 41)),
                 2: list(range(41, 61)), 3: list(range(61, 81))}
    col = list(dataframe.columns)
    index = col.index(column_name)
    age = []
    for j in range(len(dataframe)):
        for k in age_group.keys():
            for i in range(len(age_group[k])):
                if age_group[k][i] == dataframe.iloc[j, index]:
                    age.append(k)
    dataframe['Age_Group'] = age
Titanic - Machine Learning from Disaster
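`get_age_group` scans every age against every bucket member; `pd.cut` over the bucket edges produces the same 0 to 3 codes in one pass. A sketch assuming `train['Age']` has already been filled:

# buckets: 0 -> ceil(age) in 0..20, 1 -> 21..40, 2 -> 41..60, 3 -> 61..80
train['Age_Group'] = pd.cut(np.ceil(train['Age']), bins=[-1, 20, 40, 60, 80], labels=False)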
10,598,995
df['length'] = df['text'].apply(len)<sort_values>
def get_initials(dataframe, column_name):
    sub = []
    initials = ['Mrs.', 'Ms.', 'Mr.', 'Miss.', 'Master.', 'Lady.', 'Don.', 'Rev.', 'Dr.', 'Mme.',
                'Major.', 'Sir.', 'Mlle.', 'Col.', 'Capt.', 'Countess.', 'Jonkheer.', 'Dona.']
    name = dataframe[column_name]
    for i in range(len(name)):
        split_names = name[i].split()
        for j in range(len(split_names)):
            if split_names[j] in initials:
                sub.append(split_names[j])
    dataframe[column_name] = sub
Titanic - Machine Learning from Disaster
10,598,995
df.sort_values(by='length', ascending=False).head(10)<filter>
get_initials(train, 'Name')
train['Name'].value_counts()
Titanic - Machine Learning from Disaster
10,598,995
df.loc[3930][1]<filter>
le = LabelEncoder()
train['Name'] = le.fit_transform(train['Name'])
encoded_values = train['Name'].unique()
decoded_values = le.inverse_transform(encoded_values)
initials = {}
for i in range(len(encoded_values)):
    initials.setdefault(decoded_values[i], encoded_values[i])
Titanic - Machine Learning from Disaster
10,598,995
df = df[df['length'] > 10]<sort_values>
train['Name'] = le.fit_transform(train['Name'])
train['Sex'] = le.fit_transform(train['Sex'])
embarked = {0: 'S', 1: 'C', 2: 'Q'}
train['Embarked'] = train['Embarked'].fillna('Q')
train['Embarked'] = le.fit_transform(train['Embarked'])
Titanic - Machine Learning from Disaster
10,598,995
df.sort_values(by='length').head(5)<count_values>
train = train.drop(columns=['PassengerId', 'Ticket', 'Cabin', 'Age_Group'])
train.head()
Titanic - Machine Learning from Disaster
10,598,995
df['president'].value_counts()<import_modules>
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
Titanic - Machine Learning from Disaster
10,598,995
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer<categorify>
features = train.iloc[:, 1:]
target = train.iloc[:, 0]
Titanic - Machine Learning from Disaster
10,598,995
tfidf = TfidfVectorizer(strip_accents='unicode', ngram_range=(1, 3), stop_words='english', min_df=6)
X = tfidf.fit_transform(df['text']).todense()
X.shape<something_strange>
best_features = SelectKBest(score_func=chi2, k=8)
fit = best_features.fit(features, target)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(features.columns)
featureScores = pd.concat([dfcolumns, dfscores], axis=1)
featureScores.columns = ['Column', 'Score']
print(featureScores.nlargest(8, 'Score'))
Titanic - Machine Learning from Disaster
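To act on the chi-squared ranking, the top-k column names can be pulled back out of `featureScores`. A sketch assuming the frame built above:

selected = featureScores.nlargest(8, 'Score')['Column'].tolist()
features_selected = features[selected]  # keep only the 8 highest-scoring features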
10,598,995
tfidf.get_feature_names()<create_dataframe>
train = train.drop(columns=['Embarked', 'SibSp', 'Parch'])
train.head()
Titanic - Machine Learning from Disaster