kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
1,981,782 | p['clean'] = p['posts'].apply(lambda x: ' '.join(x.split('|||')))
p.head()<feature_engineering> | le = LabelEncoder()
titanic_train = train.copy(deep=True)
titanic_test = test.copy(deep=True)
titanic_train['Age'] = titanic_train['AgeBand']
titanic_train['Fare'] = titanic_train['FareBand']
titanic_test['Age'] = titanic_test['AgeBand']
titanic_test['Fare'] = titanic_test['FareBand']
column_transformed = ['Sex', 'Embarked', 'Title', 'AgeBand', 'FareBand']
column_transform = ['Sex', 'Embarked', 'Title', 'Age', 'Fare']
for d in datasets:
for i in range(len(column_transform)) :
d[column_transform[i]] = le.fit_transform(d[column_transformed[i]])
datasets.append(titanic_train)
datasets.append(titanic_test ) | Titanic - Machine Learning from Disaster |
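A note on the LabelEncoder loop above: calling fit_transform separately on each dataset can assign different integer codes to the same category when the two frames contain different value sets. A minimal, self-contained sketch (toy frames, not the notebook's variables) that fits one encoder on the combined values instead:
import pandas as pd
from sklearn.preprocessing import LabelEncoder

train = pd.DataFrame({'Embarked': ['S', 'C', 'S']})
test = pd.DataFrame({'Embarked': ['Q', 'S', 'C']})
le = LabelEncoder().fit(pd.concat([train['Embarked'], test['Embarked']]))
train['Embarked'] = le.transform(train['Embarked'])  # C=0, Q=1, S=2 in both frames
test['Embarked'] = le.transform(test['Embarked'])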
1,981,782 | z['clean'] = z['posts'].apply(lambda x: ' '.join(x.split('|||')))
z.head()<feature_engineering> | drop_column = ['Name', 'SibSp', 'Parch', 'FareBand', 'AgeBand']
for d in datasets:
d.drop(drop_column, axis=1, inplace = True)
train.drop('PassengerId', axis=1, inplace = True)
titanic_train.drop('PassengerId', axis=1, inplace = True ) | Titanic - Machine Learning from Disaster |
1,981,782 | pattern_url = r'http[s]?://(?:[A-Za-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9A-Fa-f][0-9A-Fa-f]))+'
subs_url = r'url-web'
p['clean'] = p['clean'].replace(to_replace = pattern_url, value = subs_url, regex = True)
p.head()<feature_engineering> | feature_names = ['Pclass', 'Sex', 'Age', 'Fare', 'Embarked', 'FamilySize', 'IsAlone', 'Title']
corr_matrix = train.corr()
print(corr_matrix["Survived"].sort_values(ascending=False))
print("-"*30)
for feature in feature_names:
print('Correlation between Survived and', feature)
print(titanic_train[[feature, 'Survived']].groupby([feature], as_index=False ).mean())
print("-"*30 ) | Titanic - Machine Learning from Disaster |
1,981,782 | z['clean'] = z['clean'].replace(to_replace = pattern_url, value = subs_url, regex = True)
z.tail()<feature_engineering> | X_train = train.drop("Survived", axis=1)
Y_train = train["Survived"]
X_test = test.drop("PassengerId", axis=1 ).copy()
X_train.shape, Y_train.shape, X_test.shape | Titanic - Machine Learning from Disaster |
1,981,782 | p['clean_2'] = p['clean'].str.lower()
p.head()<feature_engineering> | models = [
LinearRegression() ,
LogisticRegressionCV() ,
Perceptron() ,
GaussianNB() ,
KNeighborsClassifier() ,
SVC(probability=True),
DecisionTreeClassifier() ,
AdaBoostClassifier() ,
RandomForestClassifier() ,
XGBClassifier()
]
models_columns = ['Name', 'Parameters','Train Accuracy', 'Validation Accuracy', 'Execution Time']
models_df = pd.DataFrame(columns = models_columns)
predictions = pd.DataFrame(columns = ['Survived'])
cv_split = ShuffleSplit(n_splits = 10, test_size =.2, train_size =.8, random_state = 0)
index = 0
for model in models:
models_df.loc[index, 'Name'] = model.__class__.__name__
models_df.loc[index, 'Parameters'] = str(model.get_params())
scores = cross_validate(model, X_train, Y_train, cv=cv_split, return_train_score=True)
models_df.loc[index, 'Execution Time'] = scores['fit_time'].mean()
models_df.loc[index, 'Train Accuracy'] = scores['train_score'].mean()
models_df.loc[index, 'Validation Accuracy'] = scores['test_score'].mean()
index += 1
models_df.sort_values(by = ['Validation Accuracy'], ascending = False, inplace = True)
models_df | Titanic - Machine Learning from Disaster |
1,981,782 | p['clean_2'] = p['clean_2'].apply(lambda x : x.translate(str.maketrans(' ',' ',string.punctuation)))
p.head()<feature_engineering> | param_grid = {
'criterion': ['gini', 'entropy'],
'max_depth': [2,4,6,8,10,None],
'random_state': [0]
}
tree = DecisionTreeClassifier(random_state = 0)
score = cross_validate(tree, X_train, Y_train, cv=cv_split, return_train_score=True)
tree.fit(X_train, Y_train)
grid_search = GridSearchCV(DecisionTreeClassifier(), param_grid=param_grid, scoring='roc_auc', cv=cv_split, return_train_score=True)
grid_search.fit(X_train, Y_train)
print('Before GridSearch:')
print('Parameters:', tree.get_params())
print("Training score:", score['train_score'].mean())
print("Validation score", score['test_score'].mean())
print('-'*30)
print('After GridSearch:')
print('Parameters:', grid_search.best_params_)
print("Training score:", grid_search.cv_results_['mean_train_score'][grid_search.best_index_])
print("Validation score", grid_search.cv_results_['mean_test_score'][grid_search.best_index_] ) | Titanic - Machine Learning from Disaster |
1,981,782 | <feature_engineering><EOS> | final_tree = GridSearchCV(DecisionTreeClassifier() , param_grid=param_grid, scoring = 'roc_auc', cv = cv_split)
final_tree.fit(X_train, Y_train)
test['Survived'] = final_tree.predict(X_test)
submission = test[['PassengerId','Survived']]
submission.to_csv("submission.csv", index=False ) | Titanic - Machine Learning from Disaster |
893,910 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering> | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GridSearchCV
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn import feature_selection
from sklearn import model_selection
import matplotlib as mpl
import matplotlib.pylab as pylab
from pandas.plotting import scatter_matrix | Titanic - Machine Learning from Disaster |
893,910 | tokeniser = TreebankWordTokenizer()
p['tokens'] = p['clean_2'].apply(tokeniser.tokenize)
p.head()<string_transform> | train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
train_view = pd.read_csv("../input/train.csv") | Titanic - Machine Learning from Disaster |
893,910 | stemmer = SnowballStemmer('english')
def mbti_stemmer(words, stemmer):
return [stemmer.stem(word) for word in words]<feature_engineering> | train_view.loc[train_view['Age'] <= 15, 'Age'] = 1
train_view.loc[(train_view['Age'] > 15) & (train_view['Age'] <= 25), 'Age'] = 2
train_view.loc[(train_view['Age'] > 25) & (train_view['Age'] <= 35), 'Age'] = 3
train_view.loc[(train_view['Age'] > 35) & (train_view['Age'] <= 48), 'Age'] = 4
train_view.loc[(train_view['Age'] > 48) & (train_view['Age'] <= 65), 'Age'] = 5
train_view.loc[(train_view['Age'] > 65) & (train_view['Age'] <= 75), 'Age'] = 6
train_view.loc[train_view['Age'] > 75, 'Age'] = 7 | Titanic - Machine Learning from Disaster |
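The sequential .loc assignments above implement fixed-edge banding; pd.cut expresses the same bands in one call. A sketch with the same edges on toy data:
import pandas as pd

ages = pd.Series([4.0, 22.0, 40.0, 70.0, 80.0])
bins = [0, 15, 25, 35, 48, 65, 75, float('inf')]
banded = pd.cut(ages, bins=bins, labels=[1, 2, 3, 4, 5, 6, 7])  # right-inclusive edges match the <= comparisons
print(banded.tolist())  # [1, 2, 4, 6, 7]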
893,910 | p['stem'] = p['tokens'].apply(mbti_stemmer, args=(stemmer,))
p.head()<string_transform> | train_view.loc[train_view['Fare'] <= 14, 'Fare'] = 1
train_view.loc[(train_view['Fare'] > 14) & (train_view['Fare'] <= 20), 'Fare'] = 2
train_view.loc[(train_view['Fare'] > 20) & (train_view['Fare'] <= 40), 'Fare'] = 3
train_view.loc[(train_view['Fare'] > 40) & (train_view['Fare'] <= 60), 'Fare'] = 4
train_view.loc[(train_view['Fare'] > 60) & (train_view['Fare'] <= 80), 'Fare'] = 5
train_view.loc[(train_view['Fare'] > 80) & (train_view['Fare'] <= 100), 'Fare'] = 6
train_view.loc[train_view['Fare'] > 100, 'Fare'] = 7 | Titanic - Machine Learning from Disaster |
893,910 | Stops = set(stopwords.words('english'))<feature_engineering> | train_view["Title"] = train_view["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
train_view["Title"] = train_view["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
train_view["Title"] = train_view["Title"].astype(int ) | Titanic - Machine Learning from Disaster |
893,910 | p['no_stop'] = p['stem'].apply(lambda x: [word for word in list(x) if word not in Stops])
p.head()<feature_engineering> | data_df = pd.concat([train_df, test_df]) | Titanic - Machine Learning from Disaster |
893,910 | p['no_stop'] = p['no_stop'].apply(lambda x: ' '.join(x))
p.head()<categorify> | data_df['Title'] = data_df['Name'].str.extract(r'([A-Za-z]+)\.', expand=True)
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
data_df.replace({'Title': mapping}, inplace=True)
titles = ['Dr', 'Master', 'Miss', 'Mr', 'Mrs', 'Rev']
for title in titles:
age_to_impute = data_df.groupby('Title')['Age'].median()[title]
data_df.loc[(data_df['Age'].isnull())&(data_df['Title'] == title), 'Age'] = age_to_impute
train_df['Age'] = data_df['Age'][:891]
test_df['Age'] = data_df['Age'][891:]
data_df.drop('Title', axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
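An alternative to the per-title loop above: groupby(...).transform('median') fills every title's median in one vectorised step. A toy sketch:
import pandas as pd

df = pd.DataFrame({'Title': ['Mr', 'Mr', 'Miss', 'Miss'],
                   'Age': [30.0, None, 20.0, None]})
df['Age'] = df['Age'].fillna(df.groupby('Title')['Age'].transform('median'))
print(df['Age'].tolist())  # [30.0, 30.0, 20.0, 20.0]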
893,910 | unique_type = list(p['type'].unique())
encoder = LabelEncoder().fit(unique_type)
codes = []
for i in range(0, len(p)) :
codes.append(p['type'][i])
coder = encoder.transform(codes )<categorify> | data_df['Fare'].fillna(data_df['Fare'].median() , inplace = True)
data_df['FareBin'] = pd.qcut(data_df['Fare'], 5)
label = LabelEncoder()
data_df['FareBin_Code'] = label.fit_transform(data_df['FareBin'])
train_df['FareBin_Code'] = data_df['FareBin_Code'][:891]
test_df['FareBin_Code'] = data_df['FareBin_Code'][891:]
train_df.drop(['Fare'], axis=1, inplace=True)
test_df.drop(['Fare'], axis=1, inplace=True) | Titanic - Machine Learning from Disaster |
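qcut followed by LabelEncoder, as above, yields ordinal bin codes; pd.qcut(..., labels=False) returns those codes directly, because the quantile intervals already sort in ascending order. A toy sketch:
import pandas as pd

fares = pd.Series([5.0, 8.0, 15.0, 31.0, 100.0])
codes = pd.qcut(fares, 5, labels=False)  # one ordinal code per quintile
print(codes.tolist())  # [0, 1, 2, 3, 4]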
893,910 | p['codes'] = coder
p.head()<prepare_x_and_y> | data_df['AgeBin'] = pd.qcut(data_df['Age'], 4)
label = LabelEncoder()
data_df['AgeBin_Code'] = label.fit_transform(data_df['AgeBin'])
train_df['AgeBin_Code'] = data_df['AgeBin_Code'][:891]
test_df['AgeBin_Code'] = data_df['AgeBin_Code'][891:]
train_df.drop(['Age'], axis=1, inplace=True)
test_df.drop(['Age'], axis=1, inplace=True) | Titanic - Machine Learning from Disaster |
893,910 | X = p.clean
y = p.codes<split> | data_df['Family_Size'] = data_df['Parch'] + data_df['SibSp']
train_df['Family_Size'] = data_df['Family_Size'][:891]
test_df['Family_Size'] = data_df['Family_Size'][891:] | Titanic - Machine Learning from Disaster |
893,910 | X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.2, random_state=14 )<choose_model_class> | data_df["Alone"] = np.where(data_df['SibSp'] + data_df['Parch'] + 1 == 1, 1,0)
data_df["Alone"] = np.where(data_df['SibSp'] + data_df['Parch'] + 1 == 1, 1,0)
train_df['Alone'] = data_df['Alone'][:891]
test_df['Alone'] = data_df['Alone'][891:] | Titanic - Machine Learning from Disaster |
893,910 | Vect = CountVectorizer(ngram_range=(1, 1), stop_words='english', lowercase = True, max_features = 5000 )<choose_model_class> | data_df['Last_Name'] = data_df['Name'].apply(lambda x: str.split(x, ",")[0])
data_df['Fare'].fillna(data_df['Fare'].mean() , inplace=True)
DEFAULT_SURVIVAL_VALUE = 0.5
data_df['Family_Survival'] = DEFAULT_SURVIVAL_VALUE
for grp, grp_df in data_df[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:",
data_df.loc[data_df['Family_Survival']!=0.5].shape[0] ) | Titanic - Machine Learning from Disaster |
893,910 | LRmodel = LogisticRegression(class_weight="balanced", C=0.005, penalty = "l2")
pipe = Pipeline([('vec', Vect),('model', LRmodel)] )<train_model> | for _, grp_df in data_df.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passenger with family/group survival information: "
+str(data_df[data_df['Family_Survival']!=0.5].shape[0]))
train_df['Family_Survival'] = data_df['Family_Survival'][:891]
test_df['Family_Survival'] = data_df['Family_Survival'][891:] | Titanic - Machine Learning from Disaster |
893,910 | pipe.fit(X_train, y_train )<predict_on_test> | label = LabelEncoder()
data_df['Embarked'] = data_df['Embarked'].fillna('None')
data_df['Embarked_code'] = label.fit_transform(data_df['Embarked'])
train_df['Embarked_code'] = data_df['Embarked_code'][:891]
test_df['Embarked_code'] = data_df['Embarked_code'][891:] | Titanic - Machine Learning from Disaster |
893,910 | y_pred_train= pipe.predict(X_train )<compute_test_metric> | train_df['Sex'].replace(['male','female'],[0,1],inplace=True)
test_df['Sex'].replace(['male','female'],[0,1],inplace=True ) | Titanic - Machine Learning from Disaster |
893,910 | print('Accuracy: '+ str(metrics.accuracy_score(y_train, y_pred_train)))
print('Precision: '+ str(metrics.precision_score(y_train, y_pred_train, average='macro')))
print('Recall: '+ str(metrics.recall_score(y_train, y_pred_train, average='macro')))
print('F1_Score: '+ str(metrics.f1_score(y_train, y_pred_train, average='macro')) )<predict_on_test> | train_df = train_df.drop(columns=['PassengerId','Pclass','Name','SibSp','Parch','Ticket','Cabin','Embarked'])
test_df = test_df.drop(columns=['PassengerId','Pclass','Name','SibSp','Parch','Ticket','Cabin','Embarked'] ) | Titanic - Machine Learning from Disaster |
893,910 | y_pred_test = pipe.predict(X_test )<compute_test_metric> | train_df = train_df.replace(np.inf, np.nan)
train_df_na =(train_df.isnull().sum() / len(train_df))
train_df_na = train_df_na.drop(train_df_na[train_df_na == 0].index ).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :train_df_na})
missing_data.head() | Titanic - Machine Learning from Disaster |
893,910 | print('Accuracy: '+ str(metrics.accuracy_score(y_test, y_pred_test)))
print('Precision: '+ str(metrics.precision_score(y_test, y_pred_test, average='macro')))
print('Recall: '+ str(metrics.recall_score(y_test, y_pred_test, average='macro')))
print('F1_Score: '+ str(metrics.f1_score(y_test, y_pred_test, average='macro')) )<create_dataframe> | test_df = test_df.replace(np.inf, np.nan)
test_df_na =(test_df.isnull().sum() / len(test_df))
test_df_na = test_df_na.drop(test_df_na[test_df_na == 0].index ).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :test_df_na})
missing_data.head() | Titanic - Machine Learning from Disaster |
893,910 | cm_logistic_reg = np.array(metrics.confusion_matrix(y_test, y_pred_test))
cm_logistic = pd.DataFrame(cm_logistic_reg, index=['ENFJ', 'ENFP', 'ENTJ', 'ENTP', 'ESFJ', 'ESFP',
'ESTJ', 'ESTP', 'INFJ', 'INFP', 'INTJ', 'INTP',
'ISFJ', 'ISFP', 'ISTJ', 'ISTP'],
columns=['predict_ENFJ','predict_ENFP','predict_ENTJ',
'predict_ENTP','predict_ESFJ','predict_ESFP',
'predict_ESTJ','predict_ESTP','predict_INFJ',
'predict_INFP','predict_INTJ','predict_INTP',
'predict_ISFJ','predict_ISFP','predict_ISTJ',
'predict_ISTP'])
cm_logistic<compute_test_metric> | X_train = train_df.drop('Survived', axis=1)
Y_train = train_df['Survived']
X_test = test_df.copy() | Titanic - Machine Learning from Disaster |
893,910 | print(metrics.classification_report(y_test, y_pred_test, target_names=unique_type))<predict_on_test> | std_scaler = StandardScaler()
X_train = std_scaler.fit_transform(X_train)
X_test = std_scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
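The scaler above is correctly fit on the training matrix only and reused on the test matrix. When cross-validating, wrapping the scaler and estimator in a Pipeline repeats that discipline inside every fold; a sketch (the estimator choice is illustrative, not from this notebook):
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

pipe = Pipeline([('scale', StandardScaler()),
                 ('knn', KNeighborsClassifier(n_neighbors=18))])
# cross_val_score(pipe, X, y, cv=5) now refits the scaler on each
# training fold, so the validation fold never leaks into the scaling.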
893,910 | test_preds = pipe.predict(z['clean'] )<categorify> | from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier | Titanic - Machine Learning from Disaster |
893,910 | z['type'] = encoder.inverse_transform(test_preds)
z.head()<data_type_conversions> | xgb = XGBClassifier(booster='gbtree',
gamma=0.1,
learning_rate = 0.01,
max_depth = 6,
min_child_weight= 1,
n_estimators= 100,
subsample= 0.5 ) | Titanic - Machine Learning from Disaster |
893,910 | z['E or I'] = z.apply(lambda x: x['type'][0], axis = 1)
z['N or S'] = z.apply(lambda x: x['type'][1], axis = 1)
z['T or F'] = z.apply(lambda x: x['type'][2], axis = 1)
z['J or P'] = z.apply(lambda x: x['type'][3], axis = 1)
mind = z['E or I'].astype(str ).apply(lambda x: x[0] == 'E' ).astype('int')
energy = z['N or S'].astype(str ).apply(lambda x: x[0] == 'N' ).astype('int')
nature = z['T or F'].astype(str ).apply(lambda x: x[0] == 'T' ).astype('int')
tactics = z['J or P'].astype(str ).apply(lambda x: x[0] == 'J' ).astype('int')
z.head()<prepare_output> | rf=RandomForestClassifier(random_state=42,
min_samples_split=2,
max_leaf_nodes=10,
max_features='auto',
n_estimators= 500,
max_depth=5,
criterion='gini' ) | Titanic - Machine Learning from Disaster |
893,910 | df_LogReg = pd.DataFrame({"id":test_df['id'], "mind":mind, "energy":energy, "nature":nature, 'tactics':tactics})
df_LogReg.head()<save_to_csv> | ada = AdaBoostClassifier(random_state=40,
algorithm = 'SAMME.R',
learning_rate= 0.01,
n_estimators = 200
) | Titanic - Machine Learning from Disaster |
893,910 | df_LogReg.to_csv('EDSA_Team_8_Classification1.csv', index = False )<choose_model_class> | dt = DecisionTreeClassifier(random_state=40,
criterion = 'gini',
max_depth= 5,
max_features= 'auto',
max_leaf_nodes= 12,
min_samples_split= 2 ) | Titanic - Machine Learning from Disaster |
893,910 | LRmodel2 = LogisticRegression(class_weight="balanced", C=0.004, penalty = "l2")
pipe2 = Pipeline([('vec', Vect),('model', LRmodel2)] )<train_model> | knn = KNeighborsClassifier(algorithm='auto',
leaf_size=26,
metric='minkowski',
metric_params=None,
n_jobs=-1,
n_neighbors=18,
p=2,
weights='uniform' ) | Titanic - Machine Learning from Disaster |
893,910 | pipe2.fit(X_train, y_train )<predict_on_test> | optimal = VotingClassifier(estimators=[('knn',knn),('ada',ada),('rf',rf),('xgb',xgb),('dt',dt)],voting='hard')
optimal | Titanic - Machine Learning from Disaster |
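voting='hard' above takes a majority of predicted labels; voting='soft' instead averages predict_proba, which requires every member to expose probabilities (hence SVC(probability=True) earlier). A self-contained toy sketch of the soft variant:
import numpy as np
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
soft = VotingClassifier(estimators=[('lr', LogisticRegression()),
                                    ('nb', GaussianNB()),
                                    ('dt', DecisionTreeClassifier())],
                        voting='soft').fit(X, y)
print(soft.predict([[2.5]]))  # class with the highest averaged probability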
893,910 | y_pred_train2= pipe2.predict(X_train )<compute_test_metric> | optimal.fit(X_train, Y_train)
Y_pred=optimal.predict(X_test ) | Titanic - Machine Learning from Disaster |
893,910 | <predict_on_test><EOS> | sub = pd.DataFrame(pd.read_csv("../input/test.csv")['PassengerId'])
sub['Survived'] = Y_pred
sub.to_csv("../working/submission.csv", index=False) | Titanic - Machine Learning from Disaster |
889,688 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric> | pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
TRAIN_FILE = ".. /input/train.csv"
TEST_FILE = ".. /input/test.csv"
SUBMISSION_FILE = ".. /input/gender_submission.csv" | Titanic - Machine Learning from Disaster |
889,688 | print('Accuracy: '+ str(metrics.accuracy_score(y_test, y_pred_test2)))
print('Precision: '+ str(metrics.precision_score(y_test, y_pred_test2, average='macro')))
print('Recall: '+ str(metrics.recall_score(y_test, y_pred_test2, average='macro')))
print('F1_Score: '+ str(metrics.f1_score(y_test, y_pred_test2, average='macro')) )<compute_test_metric> | train_data = pd.read_csv(TRAIN_FILE)
train_data.info() | Titanic - Machine Learning from Disaster |
889,688 | con_matrix_test2= metrics.confusion_matrix(y_test, y_pred_test2)
cm_logistic_df = pd.DataFrame(con_matrix_test2, index=['ENFJ', 'ENFP', 'ENTJ', 'ENTP', 'ESFJ', 'ESFP',
'ESTJ', 'ESTP', 'INFJ', 'INFP', 'INTJ', 'INTP',
'ISFJ', 'ISFP', 'ISTJ', 'ISTP'],
columns=['predict_ENFJ','predict_ENFP','predict_ENTJ',
'predict_ENTP','predict_ESFJ','predict_ESFP',
'predict_ESTJ','predict_ESTP','predict_INFJ',
'predict_INFP','predict_INTJ','predict_INTP',
'predict_ISFJ','predict_ISFP','predict_ISTJ',
'predict_ISTP'])
<compute_test_metric> | train_data = pd.read_csv(TRAIN_FILE)
print("
print(train_data[1:6].to_csv(index=False))
print("
train_data.info() | Titanic - Machine Learning from Disaster |
889,688 | print(metrics.classification_report(y_test, y_pred_test2, target_names=unique_type))<predict_on_test> | test_data = pd.read_csv(TEST_FILE)
print("
print(test_data[1:6].to_csv(index=False))
print("
test_data.info() | Titanic - Machine Learning from Disaster |
889,688 | test_preds2 = pipe2.predict(z['clean'] )<categorify> | submission_data = pd.read_csv(SUBMISSION_FILE)
print("
print(submission_data[1:6].to_csv(index=False)) | Titanic - Machine Learning from Disaster |
889,688 | z['type'] = encoder.inverse_transform(test_preds2)
z.head()<data_type_conversions> | from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelBinarizer
from sklearn import preprocessing | Titanic - Machine Learning from Disaster |
889,688 | z['E or I'] = z.apply(lambda x: x['type'][0], axis = 1)
z['N or S'] = z.apply(lambda x: x['type'][1], axis = 1)
z['T or F'] = z.apply(lambda x: x['type'][2], axis = 1)
z['J or P'] = z.apply(lambda x: x['type'][3], axis = 1)
mind = z['E or I'].astype(str ).apply(lambda x: x[0] == 'E' ).astype('int')
energy = z['N or S'].astype(str ).apply(lambda x: x[0] == 'N' ).astype('int')
nature = z['T or F'].astype(str ).apply(lambda x: x[0] == 'T' ).astype('int')
tactics = z['J or P'].astype(str ).apply(lambda x: x[0] == 'J' ).astype('int')
z.head()
<prepare_output> | def create_useless_column(data):
data["UselessColumn"] = 0
data_new = data[["UselessColumn"]]
return data_new
def extract_survived(data):
return data["Survived"]
def apply_model(data, data_label):
model = KNeighborsClassifier(n_neighbors=2)
scores = cross_val_score(model, data, data_label, cv=5, verbose=1, scoring='accuracy')
print(scores.mean())
data_label = extract_survived(train_data)
data = create_useless_column(train_data)
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | df_LogReg2 = pd.DataFrame({"id":test_df['id'], "mind":mind, "energy":energy, "nature":nature, 'tactics':tactics})
df_LogReg2.head()<save_to_csv> | def drop_survived(data):
return data.drop("Survived", axis=1, errors="ignore")
def drop_passenger_id(data):
return data.drop("PassengerId", axis=1, errors="ignore")
train_data = pd.read_csv(TRAIN_FILE)
data = train_data
data = drop_survived(data)
data = drop_passenger_id(data)[["Fare"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | df_LogReg2.to_csv('EDSA_Team_8_Classification2.csv', index = False )<train_model> | print("Train: {} null (on {})".format(train_data["Pclass"].isnull().sum(), len(train_data)))
print("Test: {} null (on {})".format(test_data["Pclass"].isnull().sum(), len(test_data)))
print("")
print("Correlation:")
pclass = train_data[["Pclass", "Survived"]]
pclass["Class1"] =(pclass["Pclass"] == 1 ).astype(int)
pclass["Class2"] =(pclass["Pclass"] == 2 ).astype(int)
pclass["Class3"] =(pclass["Pclass"] == 3 ).astype(int)
pclass = pclass.drop("Pclass", axis=1)
corr = pclass.corr()
print(corr)
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values, center=0 ) | Titanic - Machine Learning from Disaster |
889,688 | random= RandomForestClassifier(min_samples_leaf=2, min_samples_split=3, n_estimators=79,
criterion='entropy', bootstrap=False, n_jobs=-1, random_state=123)
random_pipe= Pipeline([('vec', Vect),('model', random)] )<train_model> | data = train_data.copy()
data["Pclass"] = data["Pclass"] - 1
data["Pclass"] = preprocessing.maxabs_scale(data["Pclass"])
print(data["Pclass"].value_counts() ) | Titanic - Machine Learning from Disaster |
889,688 | random_pipe.fit(X_train, y_train )<predict_on_test> | def handle_pclass(data):
new_data = data
new_data["Pclass"] = new_data["Pclass"] -1
new_data["Pclass"] = preprocessing.maxabs_scale(data["Pclass"])
return new_data
data = train_data.copy()
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)[["Pclass"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | y_pred_random=random_pipe.predict(X_train )<compute_test_metric> | names = train_data["Name"]
print(names[1:10])
print("Train: {} null(on {})".format(test_data["Name"].isnull().sum() , len(test_data)))
print("Test: {} null(on {})".format(train_data["Name"].isnull().sum() , len(train_data)))
print("" ) | Titanic - Machine Learning from Disaster |
889,688 | print('Accuracy: '+ str(metrics.accuracy_score(y_train, y_pred_random)))
print('Precision: '+ str(metrics.precision_score(y_train, y_pred_random, average='macro')))
print('Recall: '+ str(metrics.recall_score(y_train, y_pred_random, average='macro')))
print('F1_Score: '+ str(metrics.f1_score(y_train, y_pred_random, average='macro')) )<compute_test_metric> | data = train_data.copy()
data["Name"] = data["Name"].str.replace(".",";")
data["Name"] = data["Name"].str.replace(",",";")
data["Name"] = data["Name"].str.split(';', expand=True)[1]
unique1 = data["Name"].unique()
print(unique1)
data2 = test_data.copy()
data2["Name"] = data2["Name"].str.replace(".",";")
data2["Name"] = data2["Name"].str.replace(",",";")
data2["Name"] = data2["Name"].str.split(';', expand=True)[1]
unique2 = data2["Name"].unique()
print(unique2)
for i in unique2:
    if i not in unique1:
        print("Missing:" + i)
corr = data[["Name","Survived"]].corr()
print(corr)
dumies = pd.get_dummies(data["Name"])
dumies["Survived"] = data["Survived"]
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values, center=0)
data[["Survived"]].groupby([data["Name"], data["Pclass"], data["Sex"], data["Survived"]] ).count() | Titanic - Machine Learning from Disaster |
889,688 | print("Classification Report:")
print(metrics.classification_report(y_train, y_pred_random, target_names=unique_type))<predict_on_test> | data = test_data.copy()
data["Name"] = test_data["Name"].str.replace(".",";")
data["Name"] = data["Name"].str.replace(",",";")
data["Name"] = data["Name"].str.split(';', expand=True)[1]
data["Name"] = data["Name"].str.replace("Capt","Mr")
data["Name"] = data["Name"].str.replace("Col","Mr")
data["Name"] = data["Name"].str.replace("Don","Mr")
data["Name"] = data["Name"].str.replace("Dr","Mr")
data["Name"] = data["Name"].str.replace("Jonkheer","Mr")
data["Name"] = data["Name"].str.replace("Rev","Mr")
data["Name"] = data["Name"].str.replace("Sir","Mr")
data["Name"] = data["Name"].str.replace("Mme","Mrs")
data["Name"] = data["Name"].str.replace("Mlle","Miss")
data["Name"] = data["Name"].str.replace("the Countess","Mme")
data["Name"] = data["Name"].str.replace("Mme","Mrs")
data["Name"] = data["Name"].str.replace("Ms","Mrs")
data["Name"] = data["Name"].str.replace("Major","Mr")
data["Name"] = data["Name"].str.replace("Master","Mr")
data["Name"] = data["Name"].str.replace("Lady","Miss")
groups = data.groupby("Name" ).size()
groups.plot.bar() | Titanic - Machine Learning from Disaster |
889,688 | y_pred_random_test = random_pipe.predict(X_test )<compute_test_metric> | def handle_name(data):
new_data = data
new_data["Name"] = data["Name"]
new_data["Name"] = new_data["Name"].str.replace(".",";")
new_data["Name"] = new_data["Name"].str.replace(",",";")
new_data["Name"] = new_data["Name"].str.split(';', expand=True)[1]
new_data["Name"] = new_data["Name"].str.replace("Capt","Mr")
new_data["Name"] = new_data["Name"].str.replace("Col","Mr")
new_data["Name"] = new_data["Name"].str.replace("Don","Mr")
new_data["Name"] = new_data["Name"].str.replace("Dr","Mr")
new_data["Name"] = new_data["Name"].str.replace("Jonkheer","Mr")
new_data["Name"] = new_data["Name"].str.replace("Rev","Mr")
new_data["Name"] = new_data["Name"].str.replace("Sir","Mr")
new_data["Name"] = new_data["Name"].str.replace("Mme","Mrs")
new_data["Name"] = new_data["Name"].str.replace("Mlle","Miss")
new_data["Name"] = new_data["Name"].str.replace("the Countess","Mme")
new_data["Name"] = new_data["Name"].str.replace("Mme","Mrs")
new_data["Name"] = new_data["Name"].str.replace("Ms","Mrs")
new_data["Name"] = new_data["Name"].str.replace("Major","Mr")
new_data["Name"] = new_data["Name"].str.replace("Master","Mr")
new_data["Name"] = new_data["Name"].str.replace("Lady","Miss")
new_data["Miss"] = new_data["Name"].str.contains("Miss" ).astype(int)
new_data["Mr"] = new_data["Name"].str.contains("Mr" ).astype(int)
new_data["Mrs"] = new_data["Name"].str.contains("Mrs" ).astype(int)
new_data = new_data.drop("Name", axis=1)
return new_data
data = train_data
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data= handle_name(data)[["Pclass", "Miss", "Mr", "Mrs"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
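A caution on the chained str.replace calls in handle_name above: with pandas' default regex=True a pattern like "." matches any character, and substring flags such as str.contains("Mr") also fire on "Mrs". Extracting the whole title once and mapping it through a dict sidesteps both; a toy sketch:
import pandas as pd

names = pd.Series(['Braund, Mr. Owen', 'Cumings, Mrs. John', 'Behr, Capt. K.'])
titles = names.str.extract(r',\s*([^.]+)\.', expand=False).str.strip()
mapping = {'Capt': 'Mr', 'Col': 'Mr', 'Mme': 'Mrs', 'Mlle': 'Miss'}
titles = titles.replace(mapping)  # whole-value match, no substring surprises
print(titles.tolist())  # ['Mr', 'Mrs', 'Mr']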
889,688 | print('Accuracy: '+ str(metrics.accuracy_score(y_test, y_pred_random_test)))
print('Precision: '+ str(metrics.precision_score(y_test, y_pred_random_test, average='macro')))
print('Recall: '+ str(metrics.recall_score(y_test, y_pred_random_test, average='macro')))
print('F1_Score: '+ str(metrics.f1_score(y_test, y_pred_random_test, average='macro')) )<compute_test_metric> | data = train_data
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data= handle_name(data)
data= handle_sex(data)[["Pclass", "Sex", "Miss", "Mr", "Mrs"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | con_matrix_random= metrics.confusion_matrix(y_test, y_pred_random_test)
cm_logistic_rf = pd.DataFrame(con_matrix_random, index=['ENFJ', 'ENFP', 'ENTJ', 'ENTP', 'ESFJ', 'ESFP',
'ESTJ', 'ESTP', 'INFJ', 'INFP', 'INTJ', 'INTP',
'ISFJ', 'ISFP', 'ISTJ', 'ISTP'],
columns=['predict_ENFJ','predict_ENFP','predict_ENTJ',
'predict_ENTP','predict_ESFJ','predict_ESFP',
'predict_ESTJ','predict_ESTP','predict_INFJ',
'predict_INFP','predict_INTJ','predict_INTP',
'predict_ISFJ','predict_ISFP','predict_ISTJ',
'predict_ISTP'])
fig, ax = plt.subplots(figsize=(10,8))
sns.heatmap(cm_logistic_rf, robust=True, annot=True, linewidth=0.3,
fmt='', cmap='BrBG', vmax=303, ax=ax)
plt.title('Confusion Matrix for Random Forest Classifier ', fontsize=10,
fontweight='bold', y=1.00)
plt.xticks(fontsize=10)
plt.yticks(rotation=0, fontsize=10)
plt.show()<compute_test_metric> | data = train_data.copy()
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data= handle_name(data)
data= handle_sex(data)
data= handle_age(data)[["Pclass", "Miss", "Mr", "Mrs", "Sex", "Age"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | print("Classification Report:")
print(metrics.classification_report(y_test, y_pred_random_test, target_names=unique_type))<predict_on_test> | def handle_sibsp(data):
new_data = data
new_data["SibSp"] = preprocessing.maxabs_scale(data["SibSp"])
return new_data
def handle_parch(data):
new_data = data
new_data["Parch"] = preprocessing.maxabs_scale(data["Parch"])
return new_data
data = train_data.copy()
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = data[["Pclass", "Miss", "Mr", "Mrs", "Sex", "Age", "SibSp", "Parch"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | test_preds_random = random_pipe.predict(z['clean'] )<categorify> | data = train_data.copy()
print(data["Ticket"].head(10)) | Titanic - Machine Learning from Disaster |
889,688 | z['type'] = encoder.inverse_transform(test_preds_random)
z.head()<data_type_conversions> | def drop_ticket(data):
return data.drop(["Ticket"], axis=1)
data = train_data
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = drop_ticket(data)
data = data[["Pclass", "Miss", "Mr", "Mrs", "Sex", "Age", "SibSp", "Parch"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | z['E or I'] = z.apply(lambda x: x['type'][0], axis = 1)
z['N or S'] = z.apply(lambda x: x['type'][1], axis = 1)
z['T or F'] = z.apply(lambda x: x['type'][2], axis = 1)
z['J or P'] = z.apply(lambda x: x['type'][3], axis = 1)
mind = z['E or I'].astype(str ).apply(lambda x: x[0] == 'E' ).astype('int')
energy = z['N or S'].astype(str ).apply(lambda x: x[0] == 'N' ).astype('int')
nature = z['T or F'].astype(str ).apply(lambda x: x[0] == 'T' ).astype('int')
tactics = z['J or P'].astype(str ).apply(lambda x: x[0] == 'J' ).astype('int')
z.head()<prepare_output> | def handle_fare(data):
new_data = data
new_data["Fare"] = new_data["Fare"].fillna(new_data["Fare"].mean())
new_data["Fare"] = new_data["Fare"]/ 20
new_data["Fare"] = new_data["Fare"].astype(int)
new_data["Fare"] = preprocessing.maxabs_scale(data["Fare"])
return new_data
fare_view = pd.read_csv(TRAIN_FILE)
groups = handle_fare(fare_view).groupby("Fare").size()
groups.plot.bar() | Titanic - Machine Learning from Disaster |
889,688 | df_random = pd.DataFrame({"id":test_df['id'], "mind":mind, "energy":energy, "nature":nature, 'tactics':tactics})
df_random.head()<save_to_csv> | data = train_data
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = drop_ticket(data)
data = handle_fare(data)
data = data[["Pclass", "Mr", "Mrs", "Miss", "Sex", "Age", "SibSp", "Parch", "Fare"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | df_random.to_csv('EDSA_Team_8_Classification_random.csv', index = False )<set_options> | data = train_data.copy()
print(data["Cabin"].head(15))
| Titanic - Machine Learning from Disaster |
889,688 | %matplotlib inline
warnings.filterwarnings('ignore' )<load_from_csv> | def handle_cabin(data):
new_data = data
new_data["Cabin"] = new_data["Cabin"].isna().astype(int)
return new_data
data = train_data
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = drop_ticket(data)
data = handle_fare(data)
data = handle_cabin(data)
data = data[["Pclass", "Mr", "Mrs", "Miss", "Sex", "Age", "SibSp", "Parch","Fare", "Cabin"]]
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | train_df = pd.read_csv('../input/train.csv')<load_from_csv> | print("Train: {} null (on {})".format(train_data["Embarked"].isnull().sum(), len(train_data)))
print("Test: {} null (on {})".format(test_data["Embarked"].isnull().sum(), len(test_data)))
print("")
print("Correlation:")
embarked = train_data[["Embarked", "Survived"]]
embarked["NotEmbarked"] = embarked["Embarked"].isna().astype(int)
embarked["Embarked"] = embarked["Embarked"].fillna("")
embarked['Southampton'] = embarked["Embarked"].str.contains("S" ).astype(int)
embarked['Queenstown'] = embarked["Embarked"].str.contains("Q" ).astype(int)
embarked['Cherbourg'] = embarked["Embarked"].str.contains("C" ).astype(int)
embarked = embarked.drop("Embarked", axis=1)
corr = embarked.corr()
print(corr)
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values ) | Titanic - Machine Learning from Disaster |
889,688 | test_df = pd.read_csv('../input/test.csv')<concatenate> | def handle_embarked(data):
new_data = data
new_data["NotEmbarked"] = new_data["Embarked"].isna().astype(int)
new_data["Embarked"] = new_data["Embarked"].fillna("")
new_data['Southampton'] = new_data["Embarked"].str.contains("S" ).astype(int)
new_data['Queenstown'] = new_data["Embarked"].str.contains("Q" ).astype(int)
new_data['Cherbourg'] = new_data["Embarked"].str.contains("C" ).astype(int)
new_data = new_data.drop("Embarked", axis=1)
return new_data
print(handle_embarked(train_data)[["NotEmbarked", "Southampton", "Queenstown", "Cherbourg"]].head() ) | Titanic - Machine Learning from Disaster |
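handle_embarked above hand-rolls one-hot flags with str.contains; pd.get_dummies builds the same indicator columns, and dummy_na=True covers the NotEmbarked case. A toy sketch:
import pandas as pd

emb = pd.Series(['S', 'Q', 'C', None])
dummies = pd.get_dummies(emb, prefix='Embarked', dummy_na=True)
print(dummies.columns.tolist())
# ['Embarked_C', 'Embarked_Q', 'Embarked_S', 'Embarked_nan']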
889,688 | combined=pd.concat([train_df[['posts']],test_df[['posts']]] )<feature_engineering> | def process_data(data):
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = drop_ticket(data)
data = handle_fare(data)
data = handle_cabin(data)
data = handle_embarked(data)
return data
data = train_data.copy()
data = process_data(data)
print(data.head())
apply_model(data, data_label ) | Titanic - Machine Learning from Disaster |
889,688 | vectorizer = CountVectorizer(stop_words='english',analyzer = "word",tokenizer = None,preprocessor = None,max_features = 10000)
combined=vectorizer.fit_transform(combined['posts'] )<prepare_x_and_y> | model = KNeighborsClassifier(n_neighbors=2)
X_train = pd.read_csv(TRAIN_FILE)
y_train = X_train["Survived"]
X_train = process_data(X_train)
X_test = pd.read_csv(TEST_FILE)
test_labels = X_test[["PassengerId"]]
X_test = process_data(X_test)
model.fit(X_train, y_train)
result = model.predict(X_test)
print(len(result))
df = pd.DataFrame()
df['PassengerId'] = test_labels['PassengerId'].astype(int)
df['Survived'] = result.astype(int)
print(df.head())
df.to_csv("submission.csv", index=False)
print("Done")
| Titanic - Machine Learning from Disaster |
889,688 | train = combined[:train_df.shape[0]]
test = combined[train_df.shape[0]:]
y=train_df['type']<split> | pd.options.mode.chained_assignment = None
TRAIN_FILE = ".. /input/train.csv"
TEST_FILE = ".. /input/test.csv"
train_data = pd.read_csv(TRAIN_FILE)
test_data = pd.read_csv(TEST_FILE)
def drop_survived(data):
return data.drop("Survived", axis=1, errors="ignore")
def drop_passenger_id(data):
return data.drop("PassengerId", axis=1, errors="ignore")
def apply_model(data, data_label):
model = KNeighborsClassifier(n_neighbors=2)
scores = cross_val_score(model, data, data_label, cv=5, verbose=1, scoring='accuracy')
print(scores.mean())
def extract_survived(data):
return data["Survived"]
def handle_pclass(data):
new_data = data
new_data["Pclass"] = new_data["Pclass"] -1
new_data["Pclass"] = preprocessing.maxabs_scale(data["Pclass"])
return new_data
def handle_name(data):
new_data = data
new_data["Name"] = data["Name"]
new_data["Name"] = new_data["Name"].str.replace(".",";")
new_data["Name"] = new_data["Name"].str.replace(",",";")
new_data["Name"] = new_data["Name"].str.split(';', expand=True)[1]
new_data["Name"] = new_data["Name"].str.replace("Capt","Mr")
new_data["Name"] = new_data["Name"].str.replace("Col","Mr")
new_data["Name"] = new_data["Name"].str.replace("Don","Mr")
new_data["Name"] = new_data["Name"].str.replace("Dr","Mr")
new_data["Name"] = new_data["Name"].str.replace("Jonkheer","Mr")
new_data["Name"] = new_data["Name"].str.replace("Rev","Mr")
new_data["Name"] = new_data["Name"].str.replace("Sir","Mr")
new_data["Name"] = new_data["Name"].str.replace("Mme","Mrs")
new_data["Name"] = new_data["Name"].str.replace("Mlle","Miss")
new_data["Name"] = new_data["Name"].str.replace("the Countess","Mme")
new_data["Name"] = new_data["Name"].str.replace("Mme","Mrs")
new_data["Name"] = new_data["Name"].str.replace("Ms","Mrs")
new_data["Name"] = new_data["Name"].str.replace("Major","Mr")
new_data["Name"] = new_data["Name"].str.replace("Master","Mr")
new_data["Name"] = new_data["Name"].str.replace("Lady","Miss")
new_data["Miss"] = new_data["Name"].str.contains("Miss" ).astype(int)
new_data["Mr"] = new_data["Name"].str.contains("Mr" ).astype(int)
new_data["Mrs"] = new_data["Name"].str.contains("Mrs" ).astype(int)
new_data = new_data.drop("Name", axis=1)
return new_data
def handle_sex(data):
new_data = data
new_data["Sex"] = data["Sex"].str.contains("female" ).astype(int)
return new_data
def handle_age(data):
new_data = data
new_data["Age"] = new_data["Age"].fillna(new_data["Age"].mean())
new_data["Age"] = new_data["Age"]/15
new_data["Age"] = new_data["Age"].astype(int)
new_data["Age"] = preprocessing.maxabs_scale(data["Age"])
return new_data
def handle_sibsp(data):
new_data = data
new_data["SibSp"] = preprocessing.maxabs_scale(data["SibSp"])
return new_data
def handle_parch(data):
new_data = data
new_data["Parch"] = preprocessing.maxabs_scale(data["Parch"])
return new_data
def drop_ticket(data):
return data.drop(["Ticket"], axis=1)
def handle_fare(data):
new_data = data
new_data["Fare"] = new_data["Fare"].fillna(new_data["Fare"].mean())
new_data["Fare"] = new_data["Fare"]/ 20
new_data["Fare"] = new_data["Fare"].astype(int)
new_data["Fare"] = preprocessing.maxabs_scale(data["Fare"])
return new_data
def handle_cabin(data):
new_data = data
new_data["Cabin"] = new_data["Cabin"].isna().astype(int)
return new_data
def handle_embarked(data):
new_data = data
new_data["NotEmbarked"] = new_data["Embarked"].isna().astype(int)
new_data["Embarked"] = new_data["Embarked"].fillna("")
new_data['Southampton'] = new_data["Embarked"].str.contains("S" ).astype(int)
new_data['Queenstown'] = new_data["Embarked"].str.contains("Q" ).astype(int)
new_data['Cherbourg'] = new_data["Embarked"].str.contains("C" ).astype(int)
new_data = new_data.drop("Embarked", axis=1)
return new_data
def process_data(data):
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = drop_ticket(data)
data = handle_fare(data)
data = handle_cabin(data)
data = handle_embarked(data)
return data
data = train_data.copy()
data = process_data(data)
apply_model(data, data_label)
model = KNeighborsClassifier(n_neighbors=2)
X_train = pd.read_csv(TRAIN_FILE)
y_train = X_train["Survived"]
X_train = process_data(X_train)
X_test = pd.read_csv(TEST_FILE)
test_labels = X_test[["PassengerId"]]
X_test = process_data(X_test)
model.fit(X_train, y_train)
result = model.predict(X_test)
print(len(result))
df = pd.DataFrame()
df['PassengerId'] = test_labels.astype(int)
df['Survived'] = result.astype(int)
print(df.head())
df.to_csv("submission.csv", index=False)
| Titanic - Machine Learning from Disaster |
889,688 | <import_modules><EOS> | warnings.filterwarnings("ignore")
pd.options.mode.chained_assignment = None
TRAIN_FILE = ".. /input/train.csv"
TEST_FILE = ".. /input/test.csv"
train_data = pd.read_csv(TRAIN_FILE)
test_data = pd.read_csv(TEST_FILE)
def extract_survived(data):
return data["Survived"]
def drop_survived(data):
return data.drop("Survived", axis=1, errors="ignore")
def drop_passenger_id(data):
return data.drop("PassengerId", axis=1, errors="ignore")
def handle_pclass(data):
new_data = data
new_data["Pclass"] = new_data["Pclass"] -1
new_data["Pclass"] = preprocessing.maxabs_scale(data["Pclass"])
return new_data
def replace_multi(string, separators, new_separator):
    for s in separators:
        string = string.replace(s, new_separator)
    return string
def filter_data_contains(data, column, contain, target_column=None):
new_data = data
if target_column == None:
new_data[column] = new_data[column].str.contains(contain ).astype(int)
else:
new_data[target_column] = new_data[column].str.contains(contain ).astype(int)
return new_data
def extract_title(x):
return x.replace(".",";" ).replace(",",";" ).split(";")[1]
def handle_name(data):
new_data = data
new_data["Name"] = new_data["Name"].apply(extract_title)
to_replace = {
"Mr": ["Capt", "Col", "Don", "Dr", "Jonkheer", "Rev", "Sir", "Major", "Master" ],
"Miss": ["Mlle", "Lady" ],
"Mrs" : ["Mme", "the Countess", "Ms" ]
}
for title in to_replace.keys() :
for t in to_replace[title]:
new_data["Name"] = new_data["Name"].str.replace(t, title)
new_data = filter_data_contains(new_data, "Name", title, title)
new_data = new_data.drop("Name", axis=1)
return new_data
def fill_na_with_mean(data, column):
new_data = data
new_data[column] = new_data[column].fillna(new_data[column].mean())
return new_data
def is_na(data, column):
new_data = data
new_data[column] = new_data[column].isna().astype(int)
return new_data
def handle_sex(data):
return filter_data_contains(data, "Sex", "female")
def handle_age(data):
new_data = data
new_data = fill_na_with_mean(new_data, "Age")
new_data["Age"] =new_data["Age"] / 15
new_data["Age"] = preprocessing.maxabs_scale(data["Age"])
return new_data
def handle_sibsp(data):
new_data = data
new_data["SibSp"] = preprocessing.maxabs_scale(data["SibSp"])
return new_data
def handle_parch(data):
new_data = data
new_data["Parch"] = preprocessing.maxabs_scale(data["Parch"])
return new_data
def drop_ticket(data):
return data.drop(["Ticket"], axis=1)
def handle_fare(data):
new_data = data
new_data["Fare"] = fill_na_with_mean(new_data, "Fare")
new_data["Fare"] = new_data["Fare"]/ 20
new_data["Fare"] = preprocessing.maxabs_scale(data["Fare"])
return new_data
def handle_cabin(data):
new_data = data
new_data = is_na(new_data,"Cabin")
return new_data
def handle_embarked(data):
new_data = data
new_data["NotEmbarked"] = new_data["Embarked"].isna().astype(int)
new_data["Embarked"] = new_data["Embarked"].fillna("")
new_data = filter_data_contains(new_data, "Embarked", "S", "Southampton")
new_data = filter_data_contains(new_data, "Embarked", "Q", "Queenstown")
new_data = filter_data_contains(new_data, "Embarked", "C", "Cherbourg")
new_data = new_data.drop("Embarked", axis=1)
return new_data
def process_data(data):
data = drop_survived(data)
data = drop_passenger_id(data)
data = handle_pclass(data)
data = handle_name(data)
data = handle_sex(data)
data = handle_age(data)
data = handle_sibsp(data)
data = handle_parch(data)
data = drop_ticket(data)
data = handle_fare(data)
data = handle_cabin(data)
data = handle_embarked(data)
return data
data = train_data.copy()
data_label = extract_survived(data)
data = process_data(data)
classifiers = {
"Nearest Neighbors" : KNeighborsClassifier(3),
"LinearRegression": linear_model.LinearRegression() ,
"Ridge": linear_model.Ridge(alpha =.5),
"Lasso": linear_model.Lasso(alpha = 0.1),
"ElasticNet": linear_model.ElasticNet(random_state=0),
"Lars": linear_model.Lars(n_nonzero_coefs=1),
"LassoLars": linear_model.LassoLars(alpha=.1),
"Omp": linear_model.OrthogonalMatchingPursuit(n_nonzero_coefs=1),
"BayesianRidge":linear_model.BayesianRidge() ,
"ARDRegression":linear_model.ARDRegression() ,
"LogisitcRegression":linear_model.LogisticRegression() ,
"SGDClassifier":linear_model.SGDClassifier() ,
"Perceptron": linear_model.Perceptron() ,
"PassiveAggressiveClassifier": linear_model.PassiveAggressiveClassifier() ,
"Theil-Sen": linear_model.TheilSenRegressor(random_state=42),
"RANSAC": linear_model.RANSACRegressor(random_state=42),
"Huber": linear_model.HuberRegressor() ,
"SVC linear": SVC(kernel="linear", C=0.025),
"SVC": SVC(gamma=2, C=1, probability=True),
"GuassianProcess":GaussianProcessClassifier(1.0 * RBF(1.0)) ,
"DecisionTree":DecisionTreeClassifier(max_depth=5),
"RandomForest":RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
"NeutraNet":MLPClassifier(alpha=1),
"ADABoost":AdaBoostClassifier() ,
"GaussianNB":GaussianNB() ,
"QDA":QuadraticDiscriminantAnalysis()
}
best_model_names = {}
for model_name in classifiers.keys() :
try:
model = classifiers[model_name]
scores = cross_val_score(model, data, data_label, cv=5, verbose=1, scoring='accuracy')
score = scores.mean()
if score >.8:
best_model_names[model_name] = scores.mean()
print("{} {}".format(model_name, scores.mean()))
except:
pass
print(best_model_names)
res = pd.DataFrame()
X_train = pd.read_csv(TRAIN_FILE)
y_train = X_train["Survived"]
X_train = process_data(X_train)
X_test = pd.read_csv(TEST_FILE)
test_labels = X_test[["PassengerId"]]
X_test = process_data(X_test)
res["PassengerId"] = test_labels["PassengerId"]
for model_name in best_model_names.keys() :
model = classifiers[model_name]
model.fit(X_train, y_train)
result = model.predict_proba(X_test)[:,1]
print("{}: {} rows".format(model_name, len(result)))
res[model_name] = result
models_list = list(best_model_names.keys())
res['ProbaMin'] = res[models_list].min(axis=1)
res['ProbaMax'] = res[models_list].max(axis=1)
res['Accurate'] =(res['ProbaMin']<.20)|(res['ProbaMax']>.80)
res['Survived'] =(res['ProbaMax']-0.5)>(0.5-res['ProbaMin'])
res['Survived'] = res['Survived'].astype(int)
res.to_csv("submission_detail.csv", index=False)
res_filtered = res[["PassengerId","Survived"]]
res_filtered.to_csv("submission.csv", index=False)
res.head(20 ) | Titanic - Machine Learning from Disaster |
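The ensemble above keeps each selected model's class-1 probability and decides by whichever extreme sits farther from 0.5. A more common alternative is a plain average of the probabilities (a soft vote); a toy sketch:
import pandas as pd

probs = pd.DataFrame({'knn': [0.9, 0.2, 0.6], 'rf': [0.7, 0.4, 0.3]})
avg = probs.mean(axis=1)            # mean class-1 probability per row
survived = (avg > 0.5).astype(int)
print(survived.tolist())  # [1, 0, 0]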
604,207 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class> | %matplotlib inline
label=LabelEncoder()
| Titanic - Machine Learning from Disaster |
604,207 | clf = GradientBoostingClassifier(n_estimators=200,learning_rate=0.15, random_state=0)
kfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
scoring = {'acc': 'accuracy', 'f1': 'f1_micro'}<compute_train_metric> | train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
train_df.head() | Titanic - Machine Learning from Disaster |
604,207 | result = cross_validate(clf,train, y_mind, scoring=scoring,
cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric> | test_df.Fare.fillna(test_df.Fare.mean() , inplace=True)
data_df = pd.concat([train_df, test_df])
passenger_id=test_df['PassengerId']
train_df.drop(['PassengerId'], axis=1, inplace=True)
test_df.drop(['PassengerId'], axis=1, inplace=True)
test_df.shape | Titanic - Machine Learning from Disaster |
604,207 | print('Y_mind model performance:')
pprint(result)
for key in result:
print(key + ' : ', result[key].mean() )<compute_train_metric> | train_df=train_df[train_df['Fare']<400] | Titanic - Machine Learning from Disaster |
604,207 | result = cross_validate(clf,train, y_energy, scoring=scoring,
cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric> | train_df['Sex'] = train_df.Sex.apply(lambda x: 0 if x == "female" else 1)
test_df['Sex'] = test_df.Sex.apply(lambda x: 0 if x == "female" else 1 ) | Titanic - Machine Learning from Disaster |
604,207 | print('Y_energy model performance:')
pprint(result)
for key in result:
print(key + ' : ', result[key].mean() )<compute_train_metric> | pd.options.display.max_columns = 99
test_df['Fare'].fillna(test_df['Fare'].mean() ,inplace=True)
train_df.head()
| Titanic - Machine Learning from Disaster |
604,207 | result = cross_validate(clf,train, y_nature, scoring=scoring,
cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric> | data_df['Title'] = data_df['Name'].str.extract(r'([A-Za-z]+)\.', expand=True)
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
data_df.replace({'Title': mapping}, inplace=True ) | Titanic - Machine Learning from Disaster |
604,207 | print('Y_nature model performance:')
pprint(result)
for key in result:
print(key + ' : ', result[key].mean() )<compute_train_metric> | data_df['Title'] = data_df['Name'].str.extract(r'([A-Za-z]+)\.', expand=True)
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
data_df.replace({'Title': mapping}, inplace=True)
data_df['Title'].value_counts()
train_df['Title']=data_df['Title'][:891]
test_df['Title']=data_df['Title'][891:]
titles=['Mr','Miss','Mrs','Master','Rev','Dr']
for title in titles:
age_to_impute = data_df.groupby('Title')['Age'].mean()[title]
data_df.loc[(data_df['Age'].isnull())&(data_df['Title'] == title), 'Age'] = age_to_impute
data_df.isnull().sum()
train_df['Age']=data_df['Age'][:891]
test_df['Age']=data_df['Age'][891:]
test_df.isnull().sum() | Titanic - Machine Learning from Disaster |
604,207 | result = cross_validate(clf,train, y_tactics, scoring=scoring,
cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric> | train_df.groupby('Survived' ).mean() | Titanic - Machine Learning from Disaster |
604,207 | print('Y_tactics model performance:')
pprint(result)
for key in result:
print(key + ' : ', result[key].mean() )<predict_on_test> | train_df.groupby('Sex' ).mean() | Titanic - Machine Learning from Disaster |
604,207 | clf.fit(train,y_mind)
X_t = test
predictions_Mind = pd.DataFrame(clf.predict(X_t))<predict_on_test> | train_df['family_size'] = train_df.SibSp + train_df.Parch+1
test_df['family_size'] = test_df.SibSp + test_df.Parch+1
| Titanic - Machine Learning from Disaster |
604,207 | clf.fit(train,y_energy)
predictions_Energy = pd.DataFrame(clf.predict(X_t))<predict_on_test> | def family_group(size):
a = ''
if(size <= 1):
a = 'loner'
elif(size <= 4):
a = 'small'
else:
a = 'large'
return a
train_df['family_group'] = train_df['family_size'].map(family_group)
test_df['family_group'] = test_df['family_size'].map(family_group ) | Titanic - Machine Learning from Disaster |
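family_group above maps sizes through an if/elif chain; pd.cut with string labels encodes the same thresholds declaratively. A toy sketch:
import pandas as pd

sizes = pd.Series([1, 3, 6])
groups = pd.cut(sizes, bins=[0, 1, 4, float('inf')],
                labels=['loner', 'small', 'large'])
print(groups.tolist())  # ['loner', 'small', 'large']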
604,207 | clf.fit(train,y_tactics)
predictions_Tactic = pd.DataFrame(clf.predict(X_t))<predict_on_test> | train_df['is_alone'] = [1 if i<2 else 0 for i in train_df.family_size]
test_df['is_alone'] = [1 if i<2 else 0 for i in test_df.family_size] | Titanic - Machine Learning from Disaster |
604,207 | clf.fit(train,y_nature)
predictions_Nature = pd.DataFrame(clf.predict(X_t))<concatenate> | train_df['child'] = [1 if i<16 else 0 for i in train_df.Age]
test_df['child'] = [1 if i<16 else 0 for i in test_df.Age]
train_df.child.value_counts() | Titanic - Machine Learning from Disaster |
604,207 | submission = pd.concat([predictions_Mind,predictions_Energy,predictions_Nature,predictions_Tactic], axis=1 )<prepare_output> | train_df['calculated_fare'] = train_df.Fare/train_df.family_size
test_df['calculated_fare'] = test_df.Fare/test_df.family_size
| Titanic - Machine Learning from Disaster |
604,207 | submission['index'] = submission['index'] +1
submission.columns = ['id', 'mind', 'energy', 'nature', 'tactics']
submission.head()<save_to_csv> | train_df.calculated_fare.mode() | Titanic - Machine Learning from Disaster |
604,207 | submission.to_csv('submission_Gradient.csv', index=False )<import_modules> | train_df['fare_group'] = train_df['calculated_fare'].map(fare_group)
test_df['fare_group'] = test_df['calculated_fare'].map(fare_group ) | Titanic - Machine Learning from Disaster |
604,207 | if 'sklearn' not in sys.modules:
    pip.main(['install', 'sklearn'])
<load_from_csv> | train_df = pd.get_dummies(train_df, columns=['Title',"Pclass",'Embarked', 'family_group', 'fare_group'], drop_first=True)
test_df = pd.get_dummies(test_df, columns=['Title',"Pclass",'Embarked', 'family_group', 'fare_group'], drop_first=True)
train_df.drop(['Cabin', 'family_size','Ticket','Name', 'Fare'], axis=1, inplace=True)
test_df.drop(['Ticket','Name','family_size',"Fare",'Cabin'], axis=1, inplace=True)
| Titanic - Machine Learning from Disaster |
604,207 | train = pd.read_csv('../input/train.csv')
train_y = train.Quality
predictor_cols = ['fixed.acidity','volatile.acidity','citric.acid','residual.sugar','chlorides','free.sulfur.dioxide','total.sulfur.dioxide','density','pH','sulphates','alcohol']
train_X = train[predictor_cols]
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp = imp.fit(train_X )<train_model> | pd.options.display.max_columns = 99
| Titanic - Machine Learning from Disaster |
604,207 | train_X_imp = imp.transform(train_X)
my_model = RandomForestClassifier(n_estimators=100)
my_model.fit(train_X_imp, train_y )<create_dataframe> | train_df['age_group'] = train_df['Age'].map(age_group_fun)
test_df['age_group'] = test_df['Age'].map(age_group_fun ) | Titanic - Machine Learning from Disaster |
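Similarly, age_group_fun is mapped above without a visible definition. A sketch whose labels match the dummy columns referenced later (age_group_old, age_group_teenager, age_group_senior_citizen); the exact edges are assumptions:
def age_group_fun(age):
    # hypothetical age bands; labels chosen to be consistent with the dummies dropped later
    if age <= 4:
        return 'toddler'
    elif age <= 13:
        return 'child'
    elif age <= 18:
        return 'teenager'
    elif age <= 35:
        return 'young_adult'
    elif age <= 55:
        return 'adult'
    elif age <= 65:
        return 'senior_citizen'
    return 'old'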
604,207 | feature_importances = pd.DataFrame(my_model.feature_importances_,
index = train_X.columns,
columns=['importance'] ).sort_values('importance', ascending=False)
feature_importances<predict_on_test> | train_df = pd.get_dummies(train_df,columns=['age_group'], drop_first=True)
test_df = pd.get_dummies(test_df,columns=['age_group'], drop_first=True)
train_df.drop(['Age','calculated_fare'],axis=1,inplace=True)
test_df.drop(['Age','calculated_fare'],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
604,207 | test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
test_X_imp = imp.transform(test_X)
predicted_q = my_model.predict(test_X_imp)
print(predicted_q)
<save_to_csv> | train_df.head()
train_df.drop(['Title_Rev','age_group_old','age_group_teenager','age_group_senior_citizen','Embarked_Q'],axis=1,inplace=True)
test_df.drop(['Title_Rev','age_group_old','age_group_teenager','age_group_senior_citizen','Embarked_Q'],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
604,207 | my_submission = pd.DataFrame({'Id': test.Id, 'Quality': predicted_q})
my_submission.to_csv('submission.csv', index=False )<import_modules> | X = train_df.drop('Survived', axis=1)
y = train_df['Survived']
| Titanic - Machine Learning from Disaster |
604,207 | warnings.filterwarnings('ignore')
print(os.listdir("../input"))
<define_variables> | classifiers = [
KNeighborsClassifier(3),
svm.SVC(probability=True),
DecisionTreeClassifier() ,
CatBoostClassifier() ,
XGBClassifier() ,
RandomForestClassifier() ,
AdaBoostClassifier() ,
GradientBoostingClassifier() ,
GaussianNB() ,
LinearDiscriminantAnalysis() ,
QuadraticDiscriminantAnalysis() ,
LogisticRegression() ]
log_cols = ["Classifier", "Accuracy"]
log= pd.DataFrame(columns=log_cols)
| Titanic - Machine Learning from Disaster |
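The two cells above only build the classifier list and an empty log frame. A minimal sketch of the scoring loop such a setup typically feeds; the names X and y (from the Survived split) and the split sizes are assumptions here:
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedShuffleSplit

sss = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
X_arr, y_arr = np.asarray(X), np.asarray(y)  # assumed feature matrix and target
acc_sum = {}
for train_index, test_index in sss.split(X_arr, y_arr):
    X_tr, X_te = X_arr[train_index], X_arr[test_index]
    y_tr, y_te = y_arr[train_index], y_arr[test_index]
    for clf in classifiers:
        clf.fit(X_tr, y_tr)
        name = clf.__class__.__name__
        acc_sum[name] = acc_sum.get(name, 0.0) + accuracy_score(y_te, clf.predict(X_te))
# average the accuracy over the ten splits and append one row per classifier
for name, total in acc_sum.items():
    log = pd.concat([log, pd.DataFrame([[name, total / 10]], columns=log_cols)],
                    ignore_index=True)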
604,207 | baseline_tree_score = 0.23092278864723115
baseline_neuralnetwork_score = 0.5480561937041435<load_from_csv> | from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split,StratifiedShuffleSplit
| Titanic - Machine Learning from Disaster |
604,207 | train = pd.read_csv('../input/kaggletutorial/covertype_train.csv')
test = pd.read_csv('../input/kaggletutorial/covertype_test.csv' )<define_variables> | from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score, classification_report, precision_recall_curve, confusion_matrix, precision_score, recall_score | Titanic - Machine Learning from Disaster |
604,207 | train_index = train.shape[0]<init_hyperparams> | std_scaler = StandardScaler()
X = std_scaler.fit_transform(X)
testframe = std_scaler.transform(test_df)  # transform only: reuse the training-set statistics rather than refitting the scaler on test
testframe.shape
| Titanic - Machine Learning from Disaster |
604,207 | lgbm_param = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
"learning_rate": 0.06,
"num_leaves": 16,
"max_depth": 6,
"colsample_bytree": 0.7,
"subsample": 0.8,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
"nthread":8
}<choose_model_class> | X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=1000 ) | Titanic - Machine Learning from Disaster |
604,207 | def keras_model(input_dims):
model = Sequential()
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam', metrics=['accuracy'])
return model
def keras_history_plot(history):
plt.plot(history.history['loss'], 'y', label='train loss')
plt.plot(history.history['val_loss'], 'r', label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right')
plt.show()<split> | logreg = LogisticRegression(solver='liblinear', penalty='l1')
logreg.fit(X_train,y_train)
predict=logreg.predict(X_test)
print(accuracy_score(y_test,predict))
print(confusion_matrix(y_test,predict))
print(precision_score(y_test,predict))
print(recall_score(y_test,predict)) | Titanic - Machine Learning from Disaster |
604,207 | def baseline_tree_cv(train):
train_df = train.copy()
y_value = train_df["Cover_Type"]
del train_df["Cover_Type"], train_df["ID"]
NFOLD = 5
folds = StratifiedKFold(n_splits= NFOLD, shuffle=True, random_state=2018)
total_score = 0
best_iteration = 0
for n_fold,(train_idx, valid_idx)in enumerate(folds.split(train_df, y_value)) :
train_x, train_y = train_df.iloc[train_idx], y_value.iloc[train_idx]
valid_x, valid_y = train_df.iloc[valid_idx], y_value.iloc[valid_idx]
evals_result_dict = {}
dtrain = lgbm.Dataset(train_x, label=train_y)
dvalid = lgbm.Dataset(valid_x, label=valid_y)
clf = lgbm.train(lgbm_param, train_set=dtrain, num_boost_round=3000, valid_sets=[dtrain, dvalid],
early_stopping_rounds=200, evals_result=evals_result_dict, verbose_eval=500)
predict = clf.predict(valid_x)
cv_score = log_loss(valid_y, predict)
total_score += cv_score
best_iteration = max(best_iteration, clf.best_iteration)
print('Fold {} LogLoss : {}'.format(n_fold + 1, cv_score))
lgbm.plot_metric(evals_result_dict)
plt.show()
print("Best Iteration", best_iteration)
print("Total LogLoss", total_score / NFOLD)
print("Baseline model Score Diff", total_score / NFOLD - baseline_tree_score)
del train_df
return best_iteration
def baseline_keras_cv(train):
train_df = train.copy()
y_value = train_df['Cover_Type']
del train_df['Cover_Type'], train_df['ID']
    NFOLD = 5
    folds = StratifiedKFold(n_splits=NFOLD, shuffle=True, random_state=2018)
    total_score = 0
    best_epoch = 0
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, y_value)):
        train_x, train_y = train_df.iloc[train_idx], y_value.iloc[train_idx]
        valid_x, valid_y = train_df.iloc[valid_idx], y_value.iloc[valid_idx]
        # build a fresh model per fold so weights do not leak from one fold to the next
        model = keras_model(train_df.shape[1])
        callbacks = [EarlyStopping(patience=10, verbose=10)]
        # Keras 2 renamed the removed 'nb_epoch' argument to 'epochs'
        history = model.fit(train_x.values, train_y.values, epochs=30, batch_size=64,
                            validation_data=(valid_x.values, valid_y.values),
                            verbose=1, callbacks=callbacks)
keras_history_plot(history)
predict = model.predict(valid_x.values)
null_count = np.sum(pd.isnull(predict))
if null_count > 0:
print("Null Prediction Error: ", null_count)
predict[pd.isnull(predict)] = predict[~pd.isnull(predict)].mean()
cv_score = log_loss(valid_y, predict)
total_score += cv_score
best_epoch = max(best_epoch, np.max(history.epoch))
print('Fold {} LogLoss : {}'.format(n_fold + 1, cv_score))
print("Best Epoch: ", best_epoch)
print("Total LogLoss", total_score/NFOLD)
print("Baseline model Score Diff", total_score/NFOLD - baseline_neuralnetwork_score )<categorify> | C_vals = [0.0001, 0.001, 0.01, 0.1,0.13,0.2,.15,.25,.275,.33, 0.5,.66, 0.75, 1.0, 2.5, 4.0,4.5,5.0,5.1,5.5,6.0, 10.0, 100.0, 1000.0]
penalties = ['l1','l2']
param = {'penalty': penalties, 'C': C_vals, }
grid = GridSearchCV(logreg, param,verbose=False, cv = StratifiedKFold(n_splits=5,random_state=10,shuffle=True), n_jobs=1,scoring='accuracy' ) | Titanic - Machine Learning from Disaster |
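Neither baseline helper above is invoked in the cells shown. A minimal usage sketch, assuming the covertype frame's object-dtype columns have already been label-encoded (both helpers copy the frame and drop Cover_Type and ID internally):
best_iteration = baseline_tree_cv(train)   # LightGBM CV; returns the best boosting round
baseline_keras_cv(train)                   # Keras CV; prints per-fold log-loss and best epoch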
604,207 | def outlier_binary(frame, col, outlier_range):
outlier_feature = col + '_Outlier'
frame[outlier_feature] = 0
frame.loc[frame[col] > outlier_range, outlier_feature] = 1
return frame
def outlier_divide_ratio(frame, col, outlier_range):
outlier_index = frame[col] >= outlier_range
outlier_median = frame.loc[outlier_index, col].median()
normal_median = frame.loc[frame[col] < outlier_range, col].median()
outlier_ratio = outlier_median / normal_median
frame.loc[outlier_index, col] = frame.loc[outlier_index, col]/outlier_ratio
return frame
def frequency_encoding(frame, col):
freq_encoding = frame.groupby([col] ).size() /frame.shape[0]
freq_encoding = freq_encoding.reset_index().rename(columns={0:'{}_Frequncy'.format(col)})
return frame.merge(freq_encoding, on=col, how='left')
def binning_category_combine_feature(frame, col1, col2, col1_quantile, col2_quantile):
print(col1, ' ', col2, 'Bining Combine')
col1_quantile = np.arange(0,1.1,col1_quantile)
col2_quantile = np.arange(0,1.1,col2_quantile)
col1_label = '{}_quantile_label'.format(col1)
frame[col1_label] = pd.qcut(frame[col1], q=col1_quantile, labels = ['{}_quantile_{:.1f}'.format(col1, col)for col in col1_quantile][1:])
col2_label = '{}_quantile_label'.format(col2)
frame[col2_label] = pd.qcut(frame[col2], q=col2_quantile, labels = ['{}_quantile_{:.1f}'.format(col2, col)for col in col2_quantile][1:])
combine_label = 'Binnig_{}_{}_Combine'.format(col1, col2)
frame[combine_label] = frame[[col1_label, col2_label]].apply(lambda row: row[col1_label] +'_'+ row[col2_label] ,axis=1)
for col in [col1_label, col2_label, combine_label]:
frame[col] = frame[col].factorize() [0]
gc.collect()
return frame, [col1_label, col2_label, combine_label]<feature_engineering> | grid.fit(X_train,y_train)
print(grid.best_params_)
print(grid.best_score_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
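As a quick sanity check of frequency_encoding above: it only appends a share-of-rows column via a left merge. A toy run (the toy frame is illustrative only; the 'Frequncy' spelling comes from the helper itself):
import pandas as pd
toy = pd.DataFrame({'Soil_Type': ['a', 'a', 'b', 'c']})
print(frequency_encoding(toy, 'Soil_Type'))
#   Soil_Type  Soil_Type_Frequncy
# 0         a                0.50
# 1         a                0.50
# 2         b                0.25
# 3         c                0.25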
604,207 | def tree_data_preprocessing(train, test):
train_index = train.shape[0]
all_data = pd.concat([train, test])
del all_data['oil_Type']
all_column_set = set(all_data.columns)
category_feature = []
for col in all_data.loc[:, all_data.dtypes=='object'].columns:
all_data[col] = all_data[col].factorize() [0]
category_feature.append(col)
numerical_feature = list(all_column_set - set(category_feature)- set(['Cover_Type','ID']))
all_data['Elevation'] = np.log1p(all_data['Elevation'])
all_data = outlier_binary(all_data, 'Horizontal_Distance_To_Fire_Points', 10000)
all_data = outlier_binary(all_data, 'Horizontal_Distance_To_Roadways', 10000)
all_data = outlier_divide_ratio(all_data, 'Horizontal_Distance_To_Fire_Points', 10000)
all_data = outlier_divide_ratio(all_data, 'Horizontal_Distance_To_Roadways', 10000)
all_data = frequency_encoding(all_data, 'Soil_Type')
all_data = frequency_encoding(all_data, 'Wilderness_Area')
    aspect_train = all_data.loc[all_data['Aspect'].notnull()].copy()  # copy so the dels below act on a frame, not a view
    aspect_test = all_data.loc[all_data['Aspect'].isnull()].copy()
    del aspect_train['Cover_Type'], aspect_train['ID']
    del aspect_test['Cover_Type'], aspect_test['ID']
numerical_feature_woaspect = numerical_feature[:]
numerical_feature_woaspect.remove('Aspect')
sc = StandardScaler()
aspect_train[numerical_feature_woaspect] = sc.fit_transform(aspect_train[numerical_feature_woaspect])
aspect_test[numerical_feature_woaspect] = sc.transform(aspect_test[numerical_feature_woaspect])
y_value = aspect_train['Aspect']
del aspect_train['Aspect'], aspect_test['Aspect']
knn = KNeighborsRegressor(n_neighbors=7)
knn.fit(aspect_train,y_value)
predict = knn.predict(aspect_test)
sns.distplot(predict)
sns.distplot(all_data['Aspect'].dropna())
plt.title('KNN Aspect Null Imputation')
plt.show()
all_data.loc[all_data['Aspect'].isnull() ,'Aspect'] = predict
all_data['Horizontal_Distance_To_Hydrology'] = all_data['Horizontal_Distance_To_Hydrology']/1000
all_data['HF1'] = all_data['Horizontal_Distance_To_Hydrology'] + all_data['Horizontal_Distance_To_Fire_Points']
all_data['HF2'] = all_data['Horizontal_Distance_To_Hydrology'] - all_data['Horizontal_Distance_To_Fire_Points']
all_data['HF3'] = np.log1p(all_data['Horizontal_Distance_To_Hydrology'] * all_data['Horizontal_Distance_To_Fire_Points'])
all_data['HF4'] = all_data['Horizontal_Distance_To_Hydrology'] / all_data['Horizontal_Distance_To_Fire_Points']
all_data['HR1'] = all_data['Horizontal_Distance_To_Hydrology'] + all_data['Horizontal_Distance_To_Roadways']
all_data['HR2'] = all_data['Horizontal_Distance_To_Hydrology'] - all_data['Horizontal_Distance_To_Roadways']
all_data['HR3'] = np.log1p(all_data['Horizontal_Distance_To_Hydrology'] * all_data['Horizontal_Distance_To_Roadways'])
all_data['HR4'] = all_data['Horizontal_Distance_To_Hydrology'] / all_data['Horizontal_Distance_To_Roadways']
all_data['HH1'] = all_data['Horizontal_Distance_To_Hydrology'] + all_data['Vertical_Distance_To_Hydrology']
all_data['HH2'] = all_data['Horizontal_Distance_To_Hydrology'] - all_data['Vertical_Distance_To_Hydrology']
all_data['HH3'] = np.log1p(abs(all_data['Horizontal_Distance_To_Hydrology'] * all_data['Vertical_Distance_To_Hydrology']))
all_data['HH4'] = all_data['Horizontal_Distance_To_Hydrology'] / all_data['Vertical_Distance_To_Hydrology']
all_data['FR1'] = all_data['Horizontal_Distance_To_Fire_Points'] + all_data['Horizontal_Distance_To_Roadways']
all_data['FR2'] = all_data['Horizontal_Distance_To_Fire_Points'] - all_data['Horizontal_Distance_To_Roadways']
all_data['FR3'] = np.log1p(all_data['Horizontal_Distance_To_Fire_Points'] * all_data['Horizontal_Distance_To_Roadways'])
all_data['FR4'] = all_data['Horizontal_Distance_To_Fire_Points'] / all_data['Horizontal_Distance_To_Roadways']
all_data['Direct_Distance_Hydrology'] =(all_data['Horizontal_Distance_To_Hydrology']**2+all_data['Vertical_Distance_To_Hydrology']**2)**0.5
all_data.loc[np.isinf(all_data['HF4']),'HF4'] = 0
all_data.loc[np.isinf(all_data['HR4']),'HR4'] = 0
all_data.loc[np.isinf(all_data['HH4']),'HH4'] = 0
all_data.loc[np.isinf(all_data['FR4']),'FR4'] = 0
all_data[['HF4','HH4']] = all_data[['HF4','HH4']].fillna(0)
all_data, new_col = binning_category_combine_feature(all_data, 'Elevation', 'Aspect', 0.1, 0.1)
for col in new_col:
all_data = frequency_encoding(all_data, col)
train_df = all_data.iloc[:train_index]
test_df = all_data.iloc[train_index:]
    # named aggregation; the dict-of-renames form of .agg() was removed in pandas 1.0
    soil_mean_encoding = train_df.groupby('Soil_Type')['Cover_Type'].agg(
        Soil_Type_Mean='mean', Soil_Type_Std='std',
        Soil_Type_Size='size', Soil_Type_Sum='sum').reset_index()
train_df = train_df.merge(soil_mean_encoding, on='Soil_Type', how='left')
test_df = test_df.merge(soil_mean_encoding, on='Soil_Type', how='left')
    wildness_mean_encoding = train_df.groupby('Wilderness_Area')['Cover_Type'].agg(
        Wilderness_Area_Mean='mean', Wilderness_Area_Std='std',
        Wilderness_Area_Size='size', Wilderness_Area_Sum='sum').reset_index()
train_df = train_df.merge(wildness_mean_encoding, on='Wilderness_Area', how='left')
test_df = test_df.merge(wildness_mean_encoding, on='Wilderness_Area', how='left')
del all_data, predict, aspect_train, aspect_test
gc.collect()
return train_df, test_df<feature_engineering> | logreg_grid = LogisticRegression(penalty=grid.best_params_['penalty'], C=grid.best_params_['C'])
logreg_grid.fit(X_train,y_train)
y_pred = logreg_grid.predict(X_test)
logreg_accy = round(accuracy_score(y_test, y_pred), 3)
print(logreg_accy)
print(confusion_matrix(y_test,y_pred))
print(precision_score(y_test,y_pred))
print(recall_score(y_test,y_pred)) | Titanic - Machine Learning from Disaster |
604,207 | def nn_data_preprocessing(train, test):
train_index = train.shape[0]
all_data = pd.concat([train, test])
del all_data['oil_Type']
all_column_set = set(all_data.columns)
category_feature = []
for col in all_data.loc[:, all_data.dtypes=='object'].columns:
all_data[col] = all_data[col].factorize() [0]
category_feature.append(col)
numerical_feature = list(all_column_set - set(category_feature)- set(['Cover_Type','ID']))
all_data['Elevation'] = np.log1p(all_data['Elevation'])
all_data = outlier_binary(all_data, 'Horizontal_Distance_To_Fire_Points', 10000)
all_data = outlier_binary(all_data, 'Horizontal_Distance_To_Roadways', 10000)
all_data = outlier_divide_ratio(all_data, 'Horizontal_Distance_To_Fire_Points', 10000)
all_data = outlier_divide_ratio(all_data, 'Horizontal_Distance_To_Roadways', 10000)
all_data = frequency_encoding(all_data, 'Soil_Type')
all_data = frequency_encoding(all_data, 'Wilderness_Area')
    aspect_train = all_data.loc[all_data['Aspect'].notnull()].copy()  # copy so the dels below act on a frame, not a view
    aspect_test = all_data.loc[all_data['Aspect'].isnull()].copy()
    del aspect_train['Cover_Type'], aspect_train['ID']
    del aspect_test['Cover_Type'], aspect_test['ID']
numerical_feature_woaspect = numerical_feature[:]
numerical_feature_woaspect.remove('Aspect')
sc = StandardScaler()
aspect_train[numerical_feature_woaspect] = sc.fit_transform(aspect_train[numerical_feature_woaspect])
aspect_test[numerical_feature_woaspect] = sc.transform(aspect_test[numerical_feature_woaspect])
y_value = aspect_train['Aspect']
del aspect_train['Aspect'], aspect_test['Aspect']
knn = KNeighborsRegressor(n_neighbors=7)
knn.fit(aspect_train,y_value)
predict = knn.predict(aspect_test)
sns.distplot(predict)
sns.distplot(all_data['Aspect'].dropna())
plt.title('KNN Aspect Null Imputation')
plt.show()
all_data.loc[all_data['Aspect'].isnull() ,'Aspect'] = predict
all_data['Horizontal_Distance_To_Hydrology'] = all_data['Horizontal_Distance_To_Hydrology']/1000
all_data['HF1'] = all_data['Horizontal_Distance_To_Hydrology'] + all_data['Horizontal_Distance_To_Fire_Points']
all_data['HF2'] = all_data['Horizontal_Distance_To_Hydrology'] - all_data['Horizontal_Distance_To_Fire_Points']
all_data['HF3'] = np.log1p(all_data['Horizontal_Distance_To_Hydrology'] * all_data['Horizontal_Distance_To_Fire_Points'])
all_data['HF4'] = all_data['Horizontal_Distance_To_Hydrology'] / all_data['Horizontal_Distance_To_Fire_Points']
all_data['HR1'] = all_data['Horizontal_Distance_To_Hydrology'] + all_data['Horizontal_Distance_To_Roadways']
all_data['HR2'] = all_data['Horizontal_Distance_To_Hydrology'] - all_data['Horizontal_Distance_To_Roadways']
all_data['HR3'] = np.log1p(all_data['Horizontal_Distance_To_Hydrology'] * all_data['Horizontal_Distance_To_Roadways'])
all_data['HR4'] = all_data['Horizontal_Distance_To_Hydrology'] / all_data['Horizontal_Distance_To_Roadways']
all_data['HH1'] = all_data['Horizontal_Distance_To_Hydrology'] + all_data['Vertical_Distance_To_Hydrology']
all_data['HH2'] = all_data['Horizontal_Distance_To_Hydrology'] - all_data['Vertical_Distance_To_Hydrology']
all_data['HH3'] = np.log1p(abs(all_data['Horizontal_Distance_To_Hydrology'] * all_data['Vertical_Distance_To_Hydrology']))
all_data['HH4'] = all_data['Horizontal_Distance_To_Hydrology'] / all_data['Vertical_Distance_To_Hydrology']
all_data['FR1'] = all_data['Horizontal_Distance_To_Fire_Points'] + all_data['Horizontal_Distance_To_Roadways']
all_data['FR2'] = all_data['Horizontal_Distance_To_Fire_Points'] - all_data['Horizontal_Distance_To_Roadways']
all_data['FR3'] = np.log1p(all_data['Horizontal_Distance_To_Fire_Points'] * all_data['Horizontal_Distance_To_Roadways'])
all_data['FR4'] = all_data['Horizontal_Distance_To_Fire_Points'] / all_data['Horizontal_Distance_To_Roadways']
all_data['Direct_Distance_Hydrology'] =(all_data['Horizontal_Distance_To_Hydrology']**2+all_data['Vertical_Distance_To_Hydrology']**2)**0.5
all_data.loc[np.isinf(all_data['HF4']),'HF4'] = 0
all_data.loc[np.isinf(all_data['HR4']),'HR4'] = 0
all_data.loc[np.isinf(all_data['HH4']),'HH4'] = 0
all_data.loc[np.isinf(all_data['FR4']),'FR4'] = 0
all_data[['HF4','HH4']] = all_data[['HF4','HH4']].fillna(0)
all_data, new_col = binning_category_combine_feature(all_data, 'Elevation', 'Aspect', 0.1, 0.1)
for col in new_col:
all_data = frequency_encoding(all_data, col)
all_data.drop(columns=new_col,axis=1,inplace=True)
before_one_hot = set(all_data.columns)
for col in category_feature:
all_data = pd.concat([all_data,pd.get_dummies(all_data[col],prefix=col)],axis=1)
one_hot_feature = set(all_data.columns)- before_one_hot
train_df = all_data.iloc[:train_index]
test_df = all_data.iloc[train_index:]
    # named aggregation; the dict-of-renames form of .agg() was removed in pandas 1.0
    soil_mean_encoding = train_df.groupby('Soil_Type')['Cover_Type'].agg(
        Soil_Type_Mean='mean', Soil_Type_Std='std',
        Soil_Type_Size='size', Soil_Type_Sum='sum').reset_index()
train_df = train_df.merge(soil_mean_encoding, on='Soil_Type', how='left')
test_df = test_df.merge(soil_mean_encoding, on='Soil_Type', how='left')
    wildness_mean_encoding = train_df.groupby('Wilderness_Area')['Cover_Type'].agg(
        Wilderness_Area_Mean='mean', Wilderness_Area_Std='std',
        Wilderness_Area_Size='size', Wilderness_Area_Sum='sum').reset_index()
train_df = train_df.merge(wildness_mean_encoding, on='Wilderness_Area', how='left')
test_df = test_df.merge(wildness_mean_encoding, on='Wilderness_Area', how='left')
train_df.drop(columns=category_feature, axis=1, inplace=True)
test_df.drop(columns=category_feature, axis=1, inplace=True)
scale_feature = list(set(train_df.columns)-one_hot_feature-set(['Cover_Type','ID']))
sc = StandardScaler()
train_df[scale_feature] = sc.fit_transform(train_df[scale_feature])
test_df[scale_feature] = sc.transform(test_df[scale_feature])
return train_df, test_df<merge> | ABC=AdaBoostClassifier()
ABC.fit(X_train,y_train)
predict=ABC.predict(X_test)
print(accuracy_score(y_test,predict))
print(confusion_matrix(y_test,predict))
print(precision_score(y_test,predict))
| Titanic - Machine Learning from Disaster |
604,207 | org_train_df, org_test_df = tree_data_preprocessing(train, test )<create_dataframe> | n_estimator=[50,60,100,150,200,300]
learning_rate=[0.001,0.01,0.1,0.2,]
hyperparam={'n_estimators':n_estimator,'learning_rate':learning_rate}
gridBoost=GridSearchCV(ABC,param_grid=hyperparam,verbose=False, cv = StratifiedKFold(n_splits=5,random_state=15,shuffle=True), n_jobs=1,scoring='accuracy' ) | Titanic - Machine Learning from Disaster |
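The AdaBoost grid search is constructed above but never fitted in the cells shown. A minimal sketch of the usual follow-up, mirroring the earlier logistic-regression grid:
gridBoost.fit(X_train, y_train)
print(gridBoost.best_params_)
print(gridBoost.best_score_)
# refit with the selected hyperparameters and score the holdout split
ada_grid = AdaBoostClassifier(n_estimators=gridBoost.best_params_['n_estimators'],
                              learning_rate=gridBoost.best_params_['learning_rate'])
ada_grid.fit(X_train, y_train)
print(accuracy_score(y_test, ada_grid.predict(X_test)))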