kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
9,888,019 | train_data['seperated_ingredients'] = train_data['ingredients'].apply(','.join)
test_data['seperated_ingredients'] = test_data['ingredients'].apply(','.join)<import_modules> |
class_cat=FunctionTransformer(class_categorizer)
quantile_transformer = FunctionTransformer(quantile_transform)
cat_features=['Title', 'Deck' ,'Pclass' , 'Sex' , 'Embarked' , 'Age_Grp' , 'Fare_Bin']
num_features = list(x.select_dtypes(include=['int64','float64'] ).columns)
categorical_transformer = pip.Pipeline(steps=[('class_cat',class_cat),
('enc', CatBoostEncoder())
])
numerical_transformer = pip.Pipeline([('just','passthrough')])
preprocessor = ColumnTransformer(transformers=[('cat', categorical_transformer , cat_features),
('num' , numerical_transformer , num_features)
])
pipeline_xgb =pip.Pipeline(steps=[('preprocessor', preprocessor),
('feature_select', SelectKBest(chi2 , k = 15)) ,
('classifier',XGBClassifier(learning_rate=0.01 ,
n_estimators=860,
max_depth=3,
subsample=1,
colsample_bytree=1,
gamma=6,
reg_alpha = 14,
reg_lambda = 3))
])
cv = StratifiedKFold(5, shuffle=True, random_state=42)
accuracies = cross_val_score(pipeline_xgb, x , y , cv = cv)
print("5 fold cross validation accuracies {}".format(accuracies)) | Titanic - Machine Learning from Disaster |
9,888,019 | import nltk
from collections import Counter<feature_engineering> |
params=[{
'feature_select' : [SelectKBest(chi2)],
'feature_select__k' : [i for i in range(5,19)]
}]
cv = StratifiedKFold(5, shuffle=True, random_state=42)
search=GridSearchCV(estimator=pipeline_xgb,
param_grid=params,
n_jobs=-1,
cv=cv)
search.fit(x, y)
print("best score : {} , best params : {} ".format(search.best_score_ , search.best_params_))
| Titanic - Machine Learning from Disaster |
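The `feature_select__k` key above uses scikit-learn's `step__param` convention: the text before the double underscore names a pipeline step, the text after it names that step's parameter. The chosen value can then be fixed back onto the pipeline; `best_k` below is illustrative:
best_k = search.best_params_['feature_select__k']  # e.g. 15
pipeline_xgb.set_params(feature_select__k=best_k)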
9,888,019 | train_data['for ngrams']=train_data['seperated_ingredients'].str.replace(',', ' ')<define_search_model> |
cat_features=['Title', 'Deck' ,'Pclass' , 'Sex' , 'Embarked' , 'Age_Grp' , 'Fare_Bin']
num_features = list(x.select_dtypes(include=['int64','float64'] ).columns)
categorical_transformer = pip.Pipeline(steps=[('class_cat',class_cat),
('enc', CatBoostEncoder())
])
numerical_transformer = pip.Pipeline([('normal_trans',quantile_transformer)])
preprocessor = ColumnTransformer(transformers=[('cat', categorical_transformer , cat_features),
('num' , numerical_transformer , num_features)])
pipeline_log =pip.Pipeline(steps=[('preprocessor', preprocessor),
('feature_select',SelectKBest(chi2, k = 17)) ,
('classifier',LogisticRegression(penalty = 'l2',
solver = 'liblinear',
C = 0.25))
])
cv = StratifiedKFold(5, shuffle=True, random_state=42)
accuracies = cross_val_score(pipeline_log, x , y , cv = cv)
print("5 fold cross validation accuracies {}".format(accuracies))
| Titanic - Machine Learning from Disaster |
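The `quantile_transformer` step matters here because `chi2` scoring requires non-negative features, and `quantile_transform` maps each numeric column into [0, 1]. A standalone sketch of that property on synthetic data (not the kernel's code):
import numpy as np
from sklearn.preprocessing import quantile_transform
X_num = np.random.randn(100, 3)                    # raw features, possibly negative
X_q = quantile_transform(X_num, n_quantiles=100)   # ranks mapped into [0, 1]
assert X_q.min() >= 0                              # now valid input for SelectKBest(chi2)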
9,888,019 | def generate_ngrams(text, n):
words = text.split(' ')
iterations = len(words)- n + 1
for i in range(iterations):
yield words[i:i + n]
def net_diagram(*cuisines):
ngrams = {}
for title in train_data[train_data.cuisine==cuisines[0]]['for ngrams']:
for ngram in generate_ngrams(title, 2):
ngram = ','.join(ngram)
if ngram in ngrams:
ngrams[ngram] += 1
else:
ngrams[ngram] = 1
ngrams_mws_df = pd.DataFrame.from_dict(ngrams, orient='index')
ngrams_mws_df.columns = ['count']
ngrams_mws_df['cusine'] = cuisines[0]
ngrams_mws_df.reset_index(level=0, inplace=True)
ngrams = {}
for title in train_data[train_data.cuisine==cuisines[1]]['for ngrams']:
for ngram in generate_ngrams(title, 2):
ngram = ','.join(ngram)
if ngram in ngrams:
ngrams[ngram] += 1
else:
ngrams[ngram] = 1
ngrams_mws_df1 = pd.DataFrame.from_dict(ngrams, orient='index')
ngrams_mws_df1.columns = ['count']
ngrams_mws_df1['cusine'] = cuisines[1]
ngrams_mws_df1.reset_index(level=0, inplace=True)
cuisine1=ngrams_mws_df.sort_values('count',ascending=False)[:25]
cuisine2=ngrams_mws_df1.sort_values('count',ascending=False)[:25]
df_final=pd.concat([cuisine1,cuisine2])
g = nx.from_pandas_edgelist(df_final,source='cusine',target='index')
cmap = plt.cm.RdYlGn
colors = [n for n in range(len(g.nodes())) ]
k = 0.35
pos=nx.spring_layout(g, k=k)
nx.draw_networkx(g,pos, node_size=df_final['count'].values*8, cmap = cmap, node_color=colors, edge_color='grey', font_size=15, width=3)
plt.title("Top 25 Bigrams for %s and %s" %(cuisines[0],cuisines[1]), fontsize=30)
plt.gcf().set_size_inches(30,30)
plt.savefig('network.png')
plt.show()<define_variables> | classifier = VotingClassifier(estimators=[('XGB', pipeline_xgb),('LOG', pipeline_log)])
cv = StratifiedKFold(5, shuffle=True, random_state=42)
accuracies = cross_val_score(classifier, x , y , cv = cv)
print("5 fold cross validation accuracies {}".format(accuracies)) | Titanic - Machine Learning from Disaster |
9,888,019 | <define_variables><EOS> | classifier.fit(x,y)
y_submit=pd.Series(( classifier.predict(pred_set)))
y_submit=y_submit.astype(int)
y_1=pred_set_original.PassengerId
y_submit_f=pd.concat([y_1,y_submit],axis=1)
y_submit_f.rename(columns={ 0 :'Survived'}, inplace=True)
y_submit_f.to_csv('submission.csv',index=False ) | Titanic - Machine Learning from Disaster |
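The submission assembly above (predict, cast, concat, rename) can be compressed; an equivalent sketch using the same assumed `classifier`, `pred_set`, and `pred_set_original` objects:
submission = pd.DataFrame({
    'PassengerId': pred_set_original.PassengerId,
    'Survived': classifier.predict(pred_set).astype(int)})
submission.to_csv('submission.csv', index=False)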
1,083,209 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model> | import numpy as np
import pandas as pd
import os
from typing import Tuple | Titanic - Machine Learning from Disaster |
1,083,209 | count_m = []
for recipe in features_processed:
if not recipe:
count_m.append([recipe])
else: pass
print("Empty instances in the preprocessed training sample: " + str(len(count_m)) )<define_variables> | pd.options.mode.chained_assignment = None | Titanic - Machine Learning from Disaster |
1,083,209 | count_m = []
for recipe in features_test_processed:
if not recipe:
count_m.append([recipe])
else: pass
print("Empty instances in the preprocessed test sample: " + str(len(count_m)) )<choose_model_class> | class DataHelper() :
def __init__(self, directory: str = '../input/', train_file: str = 'train.csv', test_file: str = 'test.csv'):
self.directory =directory
self.test_file = test_file
self.train_file = train_file
pass
def __read_csv(self, filename: str)-> pd.DataFrame:
return pd.read_csv(self.directory + filename)
def get_train_data(self, y_columns)-> Tuple[pd.DataFrame, pd.DataFrame]:
train_data = self.__read_csv(self.train_file)
return train_data.drop(y_columns, axis=1), train_data[y_columns]
def get_test_data(self)-> pd.DataFrame:
return self.__read_csv(self.test_file)
def __format_prediction_before_save(self, index, prediction)-> pd.DataFrame:
reshaped = np.array(prediction).reshape(418, 1)
return pd.DataFrame(reshaped, index=index, columns=['Survived'])
def save_prediction(self, index, prediction: pd.DataFrame, filename: str = 'submission.csv'):
self.__format_prediction_before_save(index, prediction).to_csv(path_or_buf=filename, sep=',', header=True) | Titanic - Machine Learning from Disaster |
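Later cells call methods on a `dataHelper` instance that is never constructed in this excerpt; presumably it is created from the class above, along the lines of:
dataHelper = DataHelper(directory='../input/')
X_train, y_train = dataHelper.get_train_data(['Survived'])
X_test = dataHelper.get_test_data()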
1,083,209 | vectorizer = CountVectorizer(analyzer = "word",
ngram_range =(1,1),
binary = True,
tokenizer = None,
preprocessor = None,
stop_words = None,
max_df = 0.99 )<feature_engineering> | from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score | Titanic - Machine Learning from Disaster |
1,083,209 | train_X = vectorizer.fit_transform([str(i)for i in features_processed])
test_X = vectorizer.transform([str(i)for i in features_test_processed] )<prepare_x_and_y> | class CabinExtractor(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def __get_deck(self, cabin)-> int:
return 0 if cabin == 0 else 1
def transform(self, X, y=None):
X.Cabin.fillna(0, inplace=True)
X.Cabin = X.Cabin.apply(self.__get_deck)
return X | Titanic - Machine Learning from Disaster |
1,083,209 | target = train_data['cuisine']<categorify> | class TitleExtractor(BaseEstimator, TransformerMixin):
def __init__(self, regex: str):
self.regex = regex
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X['Title'] = X.Name.str.extract(self.regex)[0].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
return X.drop('Name', axis=1 ) | Titanic - Machine Learning from Disaster |
1,083,209 | lb = LabelEncoder()
train_Y = lb.fit_transform(target )<split> | class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, column_names):
self.column_names = column_names
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.column_names] | Titanic - Machine Learning from Disaster |
1,083,209 | X_train, X_test, y_train, y_test = train_test_split(train_X, train_Y , random_state = 0 )<train_on_grid> | class DataFrameToValueConverter(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X.values | Titanic - Machine Learning from Disaster |
1,083,209 | clfs = []
rfc = RandomForestClassifier(n_estimators=50, random_state=42, n_jobs=-1)
rfc.fit(X_train, y_train)
print('RFC LogLoss {score}'.format(score=log_loss(y_test, rfc.predict_proba(X_test))))
clfs.append(rfc)
logreg = LogisticRegression(random_state = 42)
logreg.fit(X_train, y_train)
print('LogisticRegression LogLoss {score}'.format(score=log_loss(y_test, logreg.predict_proba(X_test))))
clfs.append(logreg)
svc = SVC(random_state=42,probability=True, kernel='linear')
svc.fit(X_train, y_train)
print('SVC LogLoss {score}'.format(score=log_loss(y_test, svc.predict_proba(X_test))))
clfs.append(svc)
<find_best_params> | class FamilyAttributeCombiner(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def get_val(self, amount):
if amount > 0:
return 1
else:
return 0
def transform(self, X, y=None):
X['HasFamily'] = X[['SibSp','Parch']].sum(axis=1 ).apply(self.get_val)
return X.drop(['SibSp','Parch'], axis=1 ) | Titanic - Machine Learning from Disaster |
1,083,209 | predictions = []
for clf in clfs:
predictions.append(clf.predict_proba(X_test))
def log_loss_func(weights):
final_prediction = 0
for weight, prediction in zip(weights, predictions):
final_prediction += weight*prediction
return log_loss(y_test, final_prediction)
starting_values = [0.5]*len(predictions)
cons =({'type':'eq','fun':lambda w: 1-sum(w)})
bounds = [(0,1)]*len(predictions)
res = minimize(log_loss_func, starting_values, method='SLSQP', bounds=bounds, constraints=cons)
print('Ensemble Score: {best_score}'.format(best_score=res['fun']))
print('Best Weights: {weights}'.format(weights=res['x']))<choose_model_class> | class GridSearchHelper() :
def __init__(self, preprocesing_pipeline: Pipeline, X: pd.DataFrame, y: pd.DataFrame):
self.preprocesing_pipeline = preprocesing_pipeline
self.X = X
self.y = y
pass
def __get_grid_search(self, clf, param_grid: list, kwargs)-> GridSearchCV:
return GridSearchCV(clf, param_grid, **kwargs)
def __get_best_estimator(self, clf, param_grid: list, kwargs)-> BaseEstimator:
grid_search = self.__get_grid_search(clf, param_grid, kwargs)
grid_search.fit(self.preprocesing_pipeline.fit_transform(self.X), self.y)
return grid_search.best_estimator_
def get_best_classifier(self, clf, param_grid, **kwargs):
return self.__get_best_estimator(clf, param_grid, kwargs ) | Titanic - Machine Learning from Disaster |
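The ensemble-weighting cell above calls `minimize` and `log_loss` without showing their imports; the standard ones (assumed, not visible in this excerpt) are:
from scipy.optimize import minimize   # SLSQP weight optimisation
from sklearn.metrics import log_loss  # objective being minimised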
1,083,209 | vclf=VotingClassifier(estimators=[('clf1',RandomForestClassifier(n_estimators = 50,random_state = 42)) ,
('clf2',LogisticRegression(random_state = 42)) ,
('clf3',SVC(kernel='linear',random_state = 42,probability=True))
],
voting='soft', weights = [0.05607363, 0.70759724, 0.23632913])
vclf.fit(train_X, train_Y )<compute_train_metric> | not_needed_attribs = ['PassengerId', 'Name', 'Ticket','Cabin', 'Embarked']
cat_attribs = ['Sex', 'Pclass']
num_attribs = ['SibSp', 'Parch', 'Age', 'Fare'] | Titanic - Machine Learning from Disaster |
1,083,209 | kfold = model_selection.KFold(n_splits=5, random_state=42)
valscores = model_selection.cross_val_score(vclf, train_X, train_Y, cv=kfold)
print('Mean accuracy on 5-fold cross validation: ' + str(np.mean(valscores)) )<save_to_csv> | title_regex = '([A-Za-z]+)\.' | Titanic - Machine Learning from Disaster |
1,083,209 | predictions = vclf.predict(test_X)
predictions = lb.inverse_transform(predictions)
predictions_final = pd.DataFrame({'cuisine' : predictions , 'id' : test_data.id }, columns=['id', 'cuisine'])
predictions_final.to_csv('Final_submission.csv', index = False )<set_options> | num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)) ,
('df_to_value_converter',DataFrameToValueConverter()),
('imputer', SimpleImputer(strategy='most_frequent')) ,
('std_scaler', StandardScaler()),
] ) | Titanic - Machine Learning from Disaster |
1,083,209 | %matplotlib inline
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
warnings.filterwarnings('ignore' )<set_options> | cat_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attribs)) ,
('df_to_value_converter',DataFrameToValueConverter()),
('one_hot_encoder', OneHotEncoder()),
] ) | Titanic - Machine Learning from Disaster |
1,083,209 | np.set_printoptions(precision=6 )<define_variables> | feature_union = FeatureUnion(transformer_list=[
('num_pipeline', num_pipeline),
('cat_pipeline', cat_pipeline),
] ) | Titanic - Machine Learning from Disaster |
1,083,209 | DATA_DIR='../input'
TRAIN_FILE=DATA_DIR + '/train.json'
TEST_FILE=DATA_DIR + '/test.json'
SUBMIT_FILE= 'sample_submission.csv'
<categorify> | preprocessing_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs + cat_attribs)) ,
('feature_union', feature_union),
] ) | Titanic - Machine Learning from Disaster |
1,083,209 | def clean_data(X):
X['ingredients'] = X['ingredients'].map(lambda l: [x.lower() for x in l])
X['ingredients'] = X['ingredients'].map(lambda l: [re.sub(r'\(\s*[^\s]*\s+oz\.\s*\)\s*', '', x).strip() for x in l])
X['ingredients'] = X['ingredients'].map(lambda l: [x.replace("-", " ")for x in l])
X['ingredients'] = X['ingredients'].map(lambda l: [x.replace("half & half", "half milk cream")for x in l])
X['ingredients'] = X['ingredients'].map(lambda l: [x.replace(" & ", " ")for x in l])
X['ingredients'] = X['ingredients'].map(lambda l: [re.sub(r'\d+%\s+less\s+sodium ', '', x).strip() for x in l])
lemmatizer = WordNetLemmatizer()
X['ingredients'] = X['ingredients'].map(lambda l: [lemmatizer.lemmatize(x)for x in l])
return X<categorify> | X_train, y_train = dataHelper.get_train_data(['Survived'])
X_test = dataHelper.get_test_data() | Titanic - Machine Learning from Disaster |
1,083,209 | def words_to_text(list_of_words, add_separate_words=True):
if isinstance(list_of_words, list):
l = list_of_words
else:
l = eval(list_of_words)
s = ''
for i, w_0 in enumerate(l):
if i > 0:
s = s + ' '
w_0 = w_0.strip()
w_1 = w_0.replace(' ', '_' ).replace(',','_vir_')
s = s + w_1
if add_separate_words and ' ' in w_0:
s = s + ' ' + w_0
return s
def vectorize(X_words, sublinear_tf=True, max_df=0.75):
print("Extracting vectorized feats from the training data using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(sublinear_tf=True,
max_df=max_df,
stop_words='english')
X_vectorized = vectorizer.fit_transform(X_words)
duration = time() - t0
print("done in %fs " %(duration))
print("n_samples: %d, n_features: %d" % X_vectorized.shape)
print()
return X_vectorized, vectorizer, duration
<compute_train_metric> | preprocessing_pipeline.fit(X_train)
gridSearchHelper = GridSearchHelper(preprocessing_pipeline, X_train, y_train.values.ravel() ) | Titanic - Machine Learning from Disaster |
1,083,209 | def benchmark(clf, X_train, y_train, X_test, y_test):
print('_' * 80)
print("Training {}: ".format(str(type(clf))))
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
y_pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, y_pred)
print("accuracy score: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, y_pred)
clf_descr = str(clf ).split('(')[0]
ret = {
'classifier': clf,
'accuracy_score': score,
'confusion_matrix': cm,
'train_time': train_time,
'test_time': test_time,
'y_pred': y_pred
}
return ret<load_from_disk> | def get_estimator_and_print_score(clf, param_grid):
X_prepared = preprocessing_pipeline.transform(X_train)
estimator = gridSearchHelper.get_best_classifier(clf, param_grid, cv=2, scoring='accuracy', refit=True)
estimator.fit(X_prepared, y_train.values.ravel())
scores = cross_val_score(estimator, X_prepared, y_train.values.ravel() , cv=2, scoring='accuracy')
print(type(clf ).__name__)
print('Crossval:', scores.mean())
print('Accuracy:',accuracy_score(y_train, estimator.predict(X_prepared)))
print('F1-score:', f1_score(y_train, estimator.predict(X_prepared)))
return estimator | Titanic - Machine Learning from Disaster |
1,083,209 | df = pd.read_json(TRAIN_FILE, encoding='iso-8859-1')
clean_data(df)
df.head(3 )<categorify> | svc_param_grid = [
{
'C':[0.9,1.0,1.1],
'gamma':[1]
},
] | Titanic - Machine Learning from Disaster |
1,083,209 | X_words = df['ingredients'].map(words_to_text)
X_words.head(3 )<feature_engineering> | rfc_param_grid = [
{
'max_depth' : [10], 'max_features' : ['log2'], 'max_leaf_nodes' : [None],
'min_impurity_decrease' : [0.0], 'min_impurity_split' : [None],
'min_samples_leaf' : [1], 'min_samples_split' : [30],
'min_weight_fraction_leaf' : [0.0], 'n_estimators' : [100], 'n_jobs' : [1],
'oob_score' : [False], 'random_state' : [None], 'verbose' : [0], 'warm_start' : [False]
}
] | Titanic - Machine Learning from Disaster |
1,083,209 | X_vects, Vectorizer_inst, Vectorizer_duration = vectorize(X_words)
X_column_names = Vectorizer_inst.get_feature_names()<categorify> | dtc_param_grid = [
{
'max_depth': [4, 5],
}
] | Titanic - Machine Learning from Disaster |
1,083,209 | encoder = LabelEncoder()
y_encoded = encoder.fit_transform(df['cuisine'])
y_encoded_classes_names = encoder.inverse_transform(range(20))<split> | sgdc_param_grid = [
{
'shuffle': [True],
'max_iter': [np.ceil(10**6 / len(X_train))],
},
] | Titanic - Machine Learning from Disaster |
1,083,209 | Xs_train, Xs_test, y_train, y_test = train_test_split(X_vects, y_encoded, random_state=0 )<choose_model_class> | knc_param_grid = [
{
'n_neighbors':[2,3,4,5,7],
'weights':['uniform','distance'],
},
{
}
] | Titanic - Machine Learning from Disaster |
1,083,209 | BEST_PARAMS = {
'C': 80,
'kernel': 'rbf',
'gamma': 1.7,
'coef0': 1,
'cache_size': 500
}
BEST_ESTIMATOR = OneVsRestClassifier(
SVC(**BEST_PARAMS))<compute_test_metric> | final_pipeline = Pipeline([
('preprocessing', preprocessing_pipeline),
('estimator', current_estimator)
] ) | Titanic - Machine Learning from Disaster |
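`current_estimator` is not defined anywhere in this excerpt; presumably it holds the winner of one of the grid searches above, e.g. (illustrative, not the kernel's exact line):
current_estimator = get_estimator_and_print_score(SVC(), svc_param_grid)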
1,083,209 | RESULT = benchmark(BEST_ESTIMATOR, Xs_train, y_train, Xs_test, y_test )<categorify> | final_pipeline.fit(X_train, y_train.values.ravel() ) | Titanic - Machine Learning from Disaster |
1,083,209 | <filter><EOS> | dataHelper.save_prediction(X_test['PassengerId'], final_pipeline.predict(X_test)) | Titanic - Machine Learning from Disaster |
943,976 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_disk> | %matplotlib inline
warnings.filterwarnings('ignore')
print(os.listdir("../input"))
df_train=pd.read_csv('../input/train.csv',sep=',')
df_test=pd.read_csv('../input/test.csv',sep=',')
df_data = df_train.append(df_test)
PassengerId = df_test['PassengerId']
Submission=pd.DataFrame()
Submission['PassengerId'] = df_test['PassengerId'] | Titanic - Machine Learning from Disaster |
943,976 | df_test_subm = pd.read_json(TEST_FILE, encoding='iso-8859-1')
df_test_subm.set_index('id', inplace=True)
df_test_subm.head(20 )<feature_engineering> | NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Fare']
test = df_test[NUMERIC_COLUMNS].fillna(-1000)
data_to_train = df_train[NUMERIC_COLUMNS].fillna(-1000)
y=df_train['Survived']
X_train, X_test, y_train, y_test = train_test_split(data_to_train, y, test_size=0.3,random_state=21, stratify=y)
clf = SVC()
clf.fit(X_train, y_train)
print("Accuracy: {}".format(clf.score(X_test, y_test)) ) | Titanic - Machine Learning from Disaster |
943,976 | clean_data(df_test_subm)
X_test_subm_words = df_test_subm['ingredients'].map(words_to_text)
X_test_subm_vects = Vectorizer_inst.transform(X_test_subm_words )<predict_on_test> | Submission['Survived']=clf.predict(test)
print(Submission.head())
print('predictions generated' ) | Titanic - Machine Learning from Disaster |
943,976 | y_test_subm_encoded = BEST_ESTIMATOR.predict(X_test_subm_vects )<categorify> | print('file created' ) | Titanic - Machine Learning from Disaster |
943,976 | y_test_subm = [y_encoded_classes_names[i] for i in y_test_subm_encoded]
y_test_subm[:3]<feature_engineering> | embarked = ['S', 'C', 'Q']
for port in embarked:
fare_to_impute = df_data.groupby('Embarked')['Fare'].median()[embarked.index(port)]
df_data.loc[(df_data['Fare'].isnull())&(df_data['Embarked'] == port), 'Fare'] = fare_to_impute
df_train["Fare"] = df_data['Fare'][:891]
df_test["Fare"] = df_data['Fare'][891:]
print('Missing Fares Estimated' ) | Titanic - Machine Learning from Disaster |
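The per-port median imputation loop above has a one-line `groupby().transform` equivalent (an alternative idiom, not the kernel's code):
df_data['Fare'] = df_data['Fare'].fillna(
    df_data.groupby('Embarked')['Fare'].transform('median'))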
943,976 | df_test_subm['cuisine'] = y_test_subm
df_test_subm.head(20 )<save_to_csv> | for x in range(len(df_train["Fare"])) :
if pd.isnull(df_train["Fare"][x]):
pclass = df_train["Pclass"][x]
df_train["Fare"][x] = round(df_train[df_train["Pclass"] == pclass]["Fare"].mean() , 8)
for x in range(len(df_test["Fare"])) :
if pd.isnull(df_test["Fare"][x]):
pclass = df_test["Pclass"][x]
df_test["Fare"][x] = round(df_test[df_test["Pclass"] == pclass]["Fare"].mean() , 8)
df_data["FareBand"] = pd.qcut(df_data['Fare'], 8, labels = [1, 2, 3, 4,5,6,7,8] ).astype('int')
df_train["FareBand"] = pd.qcut(df_train['Fare'], 8, labels = [1, 2, 3, 4,5,6,7,8] ).astype('int')
df_test["FareBand"] = pd.qcut(df_test['Fare'], 8, labels = [1, 2, 3, 4,5,6,7,8] ).astype('int')
df_train[["FareBand", "Survived"]].groupby(["FareBand"], as_index=False ).mean()
print('FareBand feature created' ) | Titanic - Machine Learning from Disaster |
943,976 | df_test_subm['cuisine'].to_csv(SUBMIT_FILE, header=True )<set_options> | embarked_mapping = {"S": 1, "C": 2, "Q": 3}
df_data["Embarked"] = df_data["Embarked"].map(embarked_mapping)
df_train["Embarked"] = df_data["Embarked"][:891]
df_test["Embarked"] = df_data["Embarked"][891:]
print('Embarked feature created')
df_data[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean() | Titanic - Machine Learning from Disaster |
943,976 | warnings.filterwarnings("ignore" )<load_from_disk> | fareband = [1,2,3,4]
for fare in fareband:
embark_to_impute = df_data.groupby('FareBand')['Embarked'].median()[fare]
df_data.loc[(df_data['Embarked'].isnull())&(df_data['FareBand'] == fare), 'Embarked'] = embark_to_impute
df_train["Embarked"] = df_data['Embarked'][:891]
df_test["Embarked"] = df_data['Embarked'][891:]
print('Missing Embarkation Estimated' ) | Titanic - Machine Learning from Disaster |
943,976 | train_data = pd.read_json('../input/train.json')
test_data = pd.read_json('../input/test.json')<count_unique_values> | dummies=pd.get_dummies(df_train[['Sex']], prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
testdummies=pd.get_dummies(df_test[['Sex']], prefix_sep='_')
df_test = pd.concat([df_test, testdummies], axis=1)
print('Gender Feature added ' ) | Titanic - Machine Learning from Disaster |
943,976 | print("Number of cuisine classes: {}".format(len(train_data.cuisine.unique())))
train_data.cuisine.unique()<set_options> | gender_mapping = {"female": 0, "male": 1}
df_data["Sex"] = df_data['Sex'].map(gender_mapping)
df_data["Sex"]=df_data["Sex"].astype('int')
df_train["Sex"] = df_data["Sex"][:891]
df_test["Sex"] = df_data["Sex"][891:]
print('Gender Category created' ) | Titanic - Machine Learning from Disaster |
943,976 | init_notebook_mode(connected=True )<randomize_order> | df_data['NameLen'] = df_data['Name'].apply(lambda x: len(x))
print('Name Length calculated')
df_train["NameLen"] = df_data["NameLen"][:891]
df_test["NameLen"] = df_data["NameLen"][891:]
df_train["NameBand"] = pd.cut(df_train["NameLen"], bins=5, labels = [1,2,3,4,5])
df_test["NameBand"] = pd.cut(df_test["NameLen"], bins=5, labels = [1,2,3,4,5])
dummies=pd.get_dummies(df_train[["NameBand"]].astype('category'), prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[["NameBand"]].astype('category'), prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print("Name Length categories created")
pd.qcut(df_train['NameLen'],5 ).value_counts() | Titanic - Machine Learning from Disaster |
943,976 | def random_colors(n):
colors = []
for i in range(n):
colors.append("
return colors<define_variables> | df_data["Title"] = df_data.Name.str.extract('([A-Za-z]+)\.', expand=False)
df_data["Title"] = df_data["Title"].replace('Mlle', 'Miss')
df_data["Title"] = df_data["Title"].replace('Master', 'Master')
df_data["Title"] = df_data["Title"].replace(['Mme', 'Dona', 'Ms'], 'Mrs')
df_data["Title"] = df_data["Title"].replace(['Jonkheer','Don'],'Mr')
df_data["Title"] = df_data["Title"].replace(['Capt','Major', 'Col','Rev','Dr'], 'Millitary')
df_data["Title"] = df_data["Title"].replace(['Lady', 'Countess','Sir'], 'Honor')
df_train["Title"] = df_data['Title'][:891]
df_test["Title"] = df_data['Title'][891:]
titledummies=pd.get_dummies(df_train[['Title']], prefix_sep='_')
df_train = pd.concat([df_train, titledummies], axis=1)
ttitledummies=pd.get_dummies(df_test[['Title']], prefix_sep='_')
df_test = pd.concat([df_test, ttitledummies], axis=1)
print('Title categories added' ) | Titanic - Machine Learning from Disaster |
943,976 | long_recipes = train_data[train_data['ingredients'].str.len() > 30]
print("There are {} recipes consist of more than 30 ingredients.".format(len(long_recipes)))
short_recipes = train_data[train_data['ingredients'].str.len() < 2]
print("There are {} recipes consist of less than 2 ingredients.".format(len(short_recipes)) )<count_values> | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Millitary": 5, "Honor": 6}
df_data["TitleCat"] = df_data['Title'].map(title_mapping)
df_data["TitleCat"] = df_data["TitleCat"].astype(int)
df_train["TitleCat"] = df_data["TitleCat"][:891]
df_test["TitleCat"] = df_data["TitleCat"][891:]
print('Title Category created' ) | Titanic - Machine Learning from Disaster |
943,976 | all_ingredients = []
for item in train_data['ingredients']:
for ingr in item:
all_ingredients.append(ingr)
counter = Counter()
for ingredient in all_ingredients:
counter[ingredient] += 1
print("Among {} unique ingredients in our training sample,"
"the most commonly used 20 are: ".format(len(counter)))
counter.most_common(20 )<define_variables> | titles = ['Master', 'Miss', 'Mr', 'Mrs', 'Millitary','Honor']
for title in titles:
age_to_impute = df_data.groupby('Title')['Age'].median()[title]
df_data.loc[(df_data['Age'].isnull())&(df_data['Title'] == title), 'Age'] = age_to_impute
df_train["Age"] = df_data['Age'][:891]
df_test["Age"] = df_data['Age'][891:]
print('Missing Ages Estimated' ) | Titanic - Machine Learning from Disaster |
943,976 | features = []
all_ingredients = []
for ingredient_list in train_data['ingredients']:
features.append(ingredient_list)
all_ingredients += ingredient_list
test_features = []
for ingredient_list in test_data['ingredients']:
test_features.append(ingredient_list )<categorify> | bins = [0,12,24,45,60,np.inf]
labels = ['Child', 'Young Adult', 'Adult','Older Adult','Senior']
df_train["AgeBand"] = pd.cut(df_train["Age"], bins, labels = labels)
df_test["AgeBand"] = pd.cut(df_test["Age"], bins, labels = labels)
print('Age Feature created')
dummies=pd.get_dummies(df_train[["AgeBand"]], prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[["AgeBand"]], prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print('AgeBand feature created' ) | Titanic - Machine Learning from Disaster |
943,976 | tfidf = TfidfVectorizer(
vocabulary = list(set([str(i).lower() for i in all_ingredients])),
max_df=0.99, norm='l2', ngram_range=(1, 4)
).fit([str(i)for i in features])
X_tr = tfidf.transform([str(i)for i in features])
to_predict = tfidf.transform([str(i)for i in test_features])
feature_names = tfidf.get_feature_names()<categorify> | df_train["Alone"] = np.where(df_train['SibSp'] + df_train['Parch'] + 1 == 1, 1,0)
df_test["Alone"] = np.where(df_test['SibSp'] + df_test['Parch'] + 1 == 1, 1,0)
print('Lone traveller feature created' ) | Titanic - Machine Learning from Disaster |
943,976 | def top_feats_by_class(min_tfidf=0.1, top_n=10):
dfs = []
labels = np.unique(target)
for label in labels:
ids = np.where(target==label)
D = X_tr[ids].toarray()
D[D < min_tfidf] = 0
tfidf_means = np.nanmean(D, axis=0)
topn_ids = np.argsort(tfidf_means)[::-1][:top_n]
top_feats = [(feature_names[i], tfidf_means[i])for i in topn_ids]
df = pd.DataFrame(top_feats)
df.columns = ['feature', 'tfidf']
df['cuisine'] = label
dfs.append(df)
return dfs<prepare_x_and_y> | df_data['Mother'] =(df_data['Title'] == 'Mrs')&(df_data['Parch'] > 0)
df_data['Mother'] = df_data['Mother'].astype(int)
df_train["Mother"] = df_data["Mother"][:891]
df_test["Mother"] = df_data["Mother"][891:]
print('Mother Category created' ) | Titanic - Machine Learning from Disaster |
943,976 | target = train_data['cuisine']
result_tfidf = top_feats_by_class(min_tfidf=0.1, top_n=5 )<categorify> | df_train["Family Size"] =(df_train['SibSp'] + df_train['Parch'] + 1)
df_test["Family Size"] = df_test['SibSp'] + df_test['Parch'] + 1
print('Family size feature created' ) | Titanic - Machine Learning from Disaster |
943,976 | warnings.filterwarnings("ignore")
encoder = LabelEncoder()
y_transformed = encoder.fit_transform(train_data.cuisine)
X_train, X_test, y_train, y_test = train_test_split(X_tr, y_transformed, random_state=42 )<train_model> | df_data["Last_Name"] = df_data['Name'].apply(lambda x: str.split(x, ",")[0])
DEFAULT_SURVIVAL_VALUE = 0.5
df_data["Family_Survival"] = DEFAULT_SURVIVAL_VALUE
for grp, grp_df in df_data[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:",
df_data.loc[df_data['Family_Survival']!=0.5].shape[0])
for _, grp_df in df_data.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passenger with family/group survival information: "
+str(df_data[df_data['Family_Survival']!=0.5].shape[0]))
df_train["Family_Survival"] = df_data['Family_Survival'][:891]
df_test["Family_Survival"] = df_data['Family_Survival'][891:] | Titanic - Machine Learning from Disaster |
943,976 | clf1_cv = LogisticRegression(C=10, verbose=True)
clf1_cv.fit(X_train, y_train)
y_pred = encoder.inverse_transform(clf1_cv.predict(X_train))
y_true = encoder.inverse_transform(y_train)
print("Accuracy score on train data: {}".format(accuracy_score(y_true, y_pred)))
print("Accuracy score on test data: {}".format(clf1_cv.score(X_test, y_test)) )<categorify> | df_data["HadCabin"] =(df_data["Cabin"].notnull().astype('int'))
df_train["HadCabin"] = df_data["HadCabin"][:891]
df_test["HadCabin"] = df_data["HadCabin"][891:]
print('Cabin feature created' ) | Titanic - Machine Learning from Disaster |
943,976 | _SVC = SVC(C=50, kernel='rbf', gamma=1.4, coef0=1, cache_size=3000, probability=True, verbose=True)
OvRSVC = OneVsRestClassifier(_SVC, n_jobs=-1)
OvRSVC.fit(X_train, y_train)
y_pred = encoder.inverse_transform(OvRSVC.predict(X_train))
y_true = encoder.inverse_transform(y_train)
print("Accuracy score on train data: {}".format(accuracy_score(y_true, y_pred)))
print("Accuracy score on test data: {}".format(OvRSVC.score(X_test, y_test)) )<train_model> | df_data["Deck"] = df_data.Cabin.str.extract('([A-Za-z])', expand=False)
df_data["Deck"] = df_data["Deck"].fillna("N")
deck_mapping = {"N":0,"A": 1, "B": 2, "C": 3, "D": 4, "E": 5}
df_data['Deck'] = df_data['Deck'].map(deck_mapping)
df_train["Deck"] = df_data["Deck"][:891]
df_test["Deck"] = df_data["Deck"][891:]
print('Deck feature created')
df_data["Deck"] = df_data.Cabin.str.extract('([A-Za-z])', expand=False)
deck_mapping = {"0":0,"A": 1, "B": 2, "C": 3, "D": 4, "E": 5}
df_data['Deck'] = df_data['Deck'].map(deck_mapping)
df_data["Deck"] = df_data["Deck"].fillna("0")
df_data["Deck"]=df_data["Deck"].astype('int')
df_train["Deck"] = df_data['Deck'][:891]
df_test["Deck"] = df_data['Deck'][891:]
print('Deck feature created')
dummies=pd.get_dummies(df_train[['Deck']].astype('category'), prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[['Deck']].astype('category'), prefix_sep='_')
df_test = pd.concat([df_test,dummies], axis=1)
print('Deck Categories created' ) | Titanic - Machine Learning from Disaster |
943,976 | vclf=VotingClassifier(estimators=[('clf1',clf1_cv),('clf2',OvRSVC)],voting='soft',weights=[1,2])
vclf.fit(X_train , y_train)
vclf.score(X_test, y_test )<save_to_csv> | Ticket = []
for i in list(df_data.Ticket):
if not i.isdigit():
Ticket.append(i.replace(".", "").replace("/", "").strip().split(' ')[0])
else:
Ticket.append("X")
df_data["Ticket"] = Ticket
df_data["Ticket"].head()
df_train["Ticket"] = df_data["Ticket"][:891]
df_test["Ticket"] = df_data["Ticket"][891:]
print('Ticket feature created' ) | Titanic - Machine Learning from Disaster |
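Worked examples of the ticket-prefix rule above, on hypothetical ticket strings in the Titanic format:
examples = ['A/5 21171', 'STON/O2. 3101282', '113803']
prefixes = [t.replace('.', '').replace('/', '').strip().split(' ')[0]
            if not t.isdigit() else 'X' for t in examples]
print(prefixes)  # ['A5', 'STONO2', 'X']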
943,976 | predicted_result = vclf.predict(to_predict)
predicted_result_encoded = encoder.inverse_transform(predicted_result)
result_to_submit = pd.DataFrame({'cuisine' : predicted_result_encoded , 'id' : test_data.id })
result_to_submit = result_to_submit[[ 'id' , 'cuisine']]
result_to_submit.to_csv('submit.csv', index = False )<feature_engineering> | df_data['TicketRef'] = df_data['Ticket'].apply(lambda x: str(x)[0])
df_data['TicketRef'].value_counts()
df_train["TicketRef"] = df_data["TicketRef"][:891]
df_test["TicketRef"] = df_data["TicketRef"][891:]
dummies=pd.get_dummies(df_train[["TicketRef"]].astype('category'), prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[["TicketRef"]].astype('category'), prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print("TicketBand categories created" ) | Titanic - Machine Learning from Disaster |
943,976 | train_data = pd.read_json('../input/train.json')
test_data = pd.read_json('../input/test.json')
train_data['ingredients'] = train_data['ingredients'].apply(lambda list: ','.join(list ).lower())
test_data['ingredients'] = test_data['ingredients'].apply(lambda list: ','.join(list ).lower())
vectorizer = TfidfVectorizer(binary = True)
train_X = vectorizer.fit_transform(train_data['ingredients'])
test_X = vectorizer.transform(test_data['ingredients'])<features_selection> | dummies=pd.get_dummies(df_train[["Pclass"]].astype('category'), prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[["Pclass"]].astype('category'), prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print("pclass categories created" ) | Titanic - Machine Learning from Disaster |
943,976 | idxtuple = train_X.nonzero()
for i in range(16):
row = idxtuple[0][i]
col = idxtuple[1][i]
print('Recipe {0}: {1} = {2}'.format(row, vectorizer.get_feature_names() [col], train_X[row, col]))<save_to_csv> | df_data["Free"] = np.where(df_data['Fare'] ==0, 1,0)
df_data["Free"] = df_data['Free'].astype(int)
df_train["Free"] = df_data["Free"][:891]
df_test["Free"] = df_data["Free"][891:]
print('Free Category created' ) | Titanic - Machine Learning from Disaster |
943,976 |
<save_to_csv> | Pclass = [1,2,3]
for aclass in Pclass:
fare_to_impute = df_data.groupby('Pclass')['Fare'].median()[aclass]
df_data.loc[(df_data['Fare'].isnull())&(df_data['Pclass'] == aclass), 'Fare'] = fare_to_impute
df_train["Fare"] = df_data["Fare"][:891]
df_test["Fare"] = df_data["Fare"][891:]
df_train["FareBand"] = pd.qcut(df_train['Fare'], 4, labels = [1, 2, 3, 4] ).astype('category')
df_test["FareBand"] = pd.qcut(df_test['Fare'], 4, labels = [1, 2, 3, 4] ).astype('category')
dummies=pd.get_dummies(df_train[["FareBand"]], prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[["FareBand"]], prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print("Fareband categories created" ) | Titanic - Machine Learning from Disaster |
943,976 |
<save_to_csv> | dummies=pd.get_dummies(df_train[["Embarked"]].astype('category'), prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies=pd.get_dummies(df_test[["Embarked"]].astype('category'), prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print("Embarked feature created" ) | Titanic - Machine Learning from Disaster |
943,976 |
<save_to_csv> | print(len(df_test.columns))
print(pd.isnull(df_test ).sum() ) | Titanic - Machine Learning from Disaster |
943,976 |
<load_from_disk> | SIMPLE_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
INTERESTING_COLUMNS=['Survived','Pclass','Age','SibSp','Parch','Title','Alone','Mother','Family Size','Family_Survival','Embarked','FareBand','TicketRef']
CATEGORY_COLUMNS=['Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','Pclass_1', 'Pclass_2', 'Pclass_3','HadCabin','Free','FareBand_1', 'FareBand_2', 'FareBand_3', 'FareBand_4'] | Titanic - Machine Learning from Disaster |
943,976 | train_data = pd.read_json('../input/train.json')
test_data = pd.read_json('../input/test.json')
cuisine = train_data['cuisine'].value_counts()
ingredients = {}
for idx, row in train_data.iterrows() :
for i in range(len(row['ingredients'])) :
ingr = row['ingredients'][i].lower()
if row['cuisine'] in ingredients:
if ingr in ingredients[row['cuisine']]:
ingredients[row['cuisine']][ingr] += 1
else:
ingredients[row['cuisine']][ingr] = 1
else:
ingredients[row['cuisine']] = {}
ingredients[row['cuisine']][ingr] = 1
ingredients = pd.DataFrame(ingredients)
ingredients = ingredients.fillna(0 )<categorify> | test = df_test[CATEGORY_COLUMNS].fillna(-1000)
data_to_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
X_train, X_test, y_train, y_test = train_test_split(data_to_train, df_train['Survived'], test_size=0.3,random_state=21, stratify=df_train['Survived'])
RandomForest = RandomForestClassifier(random_state = 0)
RandomForest.fit(X_train, y_train)
print('Evaluation complete')
print("Accuracy: {}".format(RandomForest.score(X_test, y_test)) ) | Titanic - Machine Learning from Disaster |
943,976 | ingredients = ingredients.apply(lambda x: x.apply(lambda y: y / cuisine[x.name]))
def unit_vec(vec):
len =(vec.apply(lambda x: x * x ).sum())** 0.5
return vec.apply(lambda x: x / len)
ingredients = ingredients.apply(unit_vec, axis = 1 )<define_variables> | RandomForest_checker = RandomForestClassifier()
RandomForest_checker.fit(X_train, y_train)
importances_df = pd.DataFrame(RandomForest_checker.feature_importances_, columns=['Feature_Importance'],
index=X_train.columns)
importances_df.sort_values(by=['Feature_Importance'], ascending=False, inplace=True)
print(importances_df ) | Titanic - Machine Learning from Disaster |
943,976 | train_data_vector = []
for idx, row in train_data.iterrows() :
train_data_vector.append({
'id': row['id'],
'cuisine': row['cuisine'],
'cuisine_vector': ingredients.loc[row['ingredients']].sum()
})
total = len(train_data_vector)
correct = 0
for i in range(total):
if train_data_vector[i]['cuisine_vector'].idxmax() == train_data_vector[i]['cuisine']:
correct += 1
print('Result on training data')
print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total}' )<save_to_csv> | Submission['Survived']=RandomForest.predict(test)
print(Submission.head())
print('Submission created' ) | Titanic - Machine Learning from Disaster |
943,976 | testing_data_vector = []
for idx, row in test_data.iterrows() :
ingredients_known = [igdt for igdt in row['ingredients'] if igdt in ingredients.index]
vector = ingredients.loc[ingredients_known].sum()
cuisine = vector.idxmax()
testing_data_vector.append({
'id': row['id'],
'cuisine_vector': vector,
'cuisine': cuisine
})
answer = pd.DataFrame(testing_data_vector)[['id','cuisine']]
answer.to_csv('answer.csv', index = False)<set_options> | Submission.to_csv('randomforestcat01.csv', sep=',', index=False)
print('file created') | Titanic - Machine Learning from Disaster |
943,976 | warnings.filterwarnings('ignore')
<load_from_disk> | REVISED_NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
SIMPLE_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
INTERESTING_COLUMNS=['Survived','Pclass','Age','SibSp','Parch','Title','Alone','Mother','Family Size','Family_Survival','Embarked','FareBand','TicketRef']
CATEGORY_COLUMNS=['Pclass','SibSp','Parch','Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','HadCabin','Free']
CATEGORY_COLUMNS=['Pclass','SibSp','Parch','Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','HadCabin','Free']
data_to_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
prediction = df_train["Survived"]
test = df_test[CATEGORY_COLUMNS].fillna(-1000)
X_train, X_val, y_train, y_val = train_test_split(data_to_train, prediction, test_size = 0.3,random_state=21, stratify=y)
print('Data split' ) | Titanic - Machine Learning from Disaster |
943,976 | df_train = pd.read_json("../input/train.json")
df_test = pd.read_json("../input/test.json")<string_transform> | adaboost=AdaBoostClassifier()
adaboost.fit(X_train, y_train)
y_pred = adaboost.predict(X_val)
acc_adaboost = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_adaboost ) | Titanic - Machine Learning from Disaster |
943,976 | non_alphabetical_or_whitespace = re.compile(r"[^a-zA-Z\s]")
multi_whitespace = re.compile(r"\s+")
lemmatizer = WordNetLemmatizer()
def clean_ingredients(ingredients: List[str])-> List[str]:
result = []
for ingredient in ingredients:
temp = ingredient.lower()
temp = non_alphabetical_or_whitespace.sub("", temp)
temp = multi_whitespace.sub(" ", temp)
temp = ' '.join([lemmatizer.lemmatize(word)for word in multi_whitespace.split(temp)])
result.append(temp)
return ",".join(result)
df_train["ingredients_cleaned"] = df_train["ingredients"].apply(lambda ingredients: clean_ingredients(ingredients))
df_test["ingredients_cleaned"] = df_test["ingredients"].apply(lambda ingredients: clean_ingredients(ingredients))<categorify> | bagging=BaggingClassifier()
bagging.fit(X_train, y_train)
y_pred = bagging.predict(X_val)
acc_bagging = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_bagging ) | Titanic - Machine Learning from Disaster |
943,976 | label_encoder = LabelEncoder()
vectorizer = TfidfVectorizer(binary=True)
X = vectorizer.fit_transform(df_train["ingredients_cleaned"])
y = label_encoder.fit_transform(df_train["cuisine"] )<categorify> | decisiontree = DecisionTreeClassifier()
decisiontree.fit(X_train, y_train)
y_pred = decisiontree.predict(X_val)
acc_decisiontree = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_decisiontree ) | Titanic - Machine Learning from Disaster |
943,976 | y = keras.utils.to_categorical(y )<split> | et = ExtraTreesClassifier()
et.fit(X_train, y_train)
y_pred = et.predict(X_val)
acc_et = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_et ) | Titanic - Machine Learning from Disaster |
943,976 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2 )<choose_model_class> | gaussian = GaussianNB()
gaussian.fit(X_train, y_train)
y_pred = gaussian.predict(X_val)
acc_gaussian = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_gaussian ) | Titanic - Machine Learning from Disaster |
943,976 | def create_model1(input_dim: int):
model = Sequential()
model.add(Dense(1024, input_dim=input_dim))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(20, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
model.summary()
return model
def create_logistic_model(input_dim: int):
model = Sequential()
model.add(Dense(20, input_dim=input_dim, activation='softmax'))
batch_size = 128
nb_epoch = 20
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
return model<train_model> | gbk = GradientBoostingClassifier()
gbk.fit(X_train, y_train)
y_pred = gbk.predict(X_val)
acc_gbk = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_gbk ) | Titanic - Machine Learning from Disaster |
943,976 | features_dimension = X_train.shape[1]
model1 = create_logistic_model(features_dimension)
model1.fit(X, y, epochs=200, batch_size=32)
model2 = create_model1(features_dimension)
model2.fit(X, y, epochs=50, batch_size=64 )<train_model> | knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_pred = knn.predict(X_val)
acc_knn = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_knn ) | Titanic - Machine Learning from Disaster |
943,976 | for model in [model1, model2]:
print(model.evaluate(X_test, y_test, batch_size=128))<prepare_x_and_y> | linear_da=LinearDiscriminantAnalysis()
linear_da.fit(X_train, y_train)
y_pred = linear_da.predict(X_val)
acc_linear_da = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_linear_da ) | Titanic - Machine Learning from Disaster |
943,976 | X_validation = df_test["ingredients_cleaned"].apply(lambda ingredients: vectorizer.transform([ingredients] ).todense() )<categorify> | linear_svc = LinearSVC()
linear_svc.fit(X_train, y_train)
y_pred = linear_svc.predict(X_val)
acc_linear_svc = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_linear_svc ) | Titanic - Machine Learning from Disaster |
943,976 | classification_results = []
for ingredients in X_validation:
classification_results.append(
label_encoder.inverse_transform(
np.argmax(
(model1.predict(ingredients)+ model2.predict(ingredients)) /2
)
)
)
df_test["cuisine"] = classification_results<save_to_csv> | logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_val)
acc_logreg = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_logreg ) | Titanic - Machine Learning from Disaster |
943,976 | df_test[["id", "cuisine"]].to_csv("results_multi_with_feature_engineering.csv", index=False )<drop_column> | MLP = MLPClassifier()
MLP.fit(X_train, y_train)
y_pred = MLP.predict(X_val)
acc_MLP= round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_MLP ) | Titanic - Machine Learning from Disaster |
943,976 | data_all = pd.concat([train,test])
data_all = data_all.drop('AnimalID',axis=1)
data_all = data_all.drop('ID',axis=1 )<categorify> | passiveaggressive = PassiveAggressiveClassifier()
passiveaggressive.fit(X_train, y_train)
y_pred = passiveaggressive.predict(X_val)
acc_passiveaggressive = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_passiveaggressive ) | Titanic - Machine Learning from Disaster |
943,976 | X = []
X_t = []
for c in data_all.columns:
le = LabelEncoder()
le.fit(data_all[c].values)
X.append(le.transform(train[c].values))
X_t.append(le.transform(test[c].values))
X = np.vstack(X ).T
X_t = np.vstack(X_t ).T
<statistical_test> | perceptron = Perceptron()
perceptron.fit(X_train, y_train)
y_pred = perceptron.predict(X_val)
acc_perceptron = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_perceptron ) | Titanic - Machine Learning from Disaster |
943,976 | def make_mf_classifier(X ,y, clf, X_test,n_folds=2, n_round=5):
n = X.shape[0]
len_y = len(np.unique(y))
mf_tr = np.zeros(( X.shape[0],len_y))
mf_te = np.zeros(( X_test.shape[0],len_y))
for i in range(n_round):
skf = StratifiedKFold(y, n_folds=n_folds, shuffle=True, random_state=42+i*1000)
for ind_tr, ind_te in skf:
X_tr = X[ind_tr]
X_te = X[ind_te]
y_tr = y[ind_tr]
y_te = y[ind_te]
clf.fit(X_tr, y_tr)
mf_tr[ind_te] += clf.predict_proba(X_te)
mf_te += clf.predict_proba(X_test)*0.5
y_pred = clf.predict_proba(X_te)
score = log_loss(y_te, y_pred)
print('pred[{}],score[{}]'.format(i,score))
return(mf_tr / n_round, mf_te / n_round)
skf = StratifiedKFold(y, n_folds=5, shuffle=True, random_state=seed)
for ind_tr, ind_te in skf:
X_train = X[ind_tr]
X_test = X[ind_te]
y_train = y[ind_tr]
y_test = y[ind_te]
break
print(X_train.shape,X_test.shape)
<train_model> | randomforest = RandomForestClassifier(random_state = 0)
randomforest.fit(X_train, y_train)
y_pred = randomforest.predict(X_val)
acc_randomforest = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_randomforest ) | Titanic - Machine Learning from Disaster |
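`make_mf_classifier` above uses the pre-0.18 `StratifiedKFold(y, n_folds=...)` API from `sklearn.cross_validation`; the modern equivalent of its fold loop (a sketch with the same logic) is:
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for ind_tr, ind_te in skf.split(X, y):
    X_tr, X_te = X[ind_tr], X[ind_te]
    y_tr, y_te = y[ind_tr], y[ind_te]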
943,976 | xgboost = xgb.XGBClassifier(
n_estimators=1000,
learning_rate = 0.03,
max_depth=6,
subsample=0.7,
colsample_bytree = 0.7,
reg_lambda = 4,
seed = seed,
)
xgboost.fit(
X_train,
y_train,
eval_metric='mlogloss',
eval_set=[(X_train,y_train),(X_test,y_test)],
early_stopping_rounds=100,
)
y_preds = xgboost.predict_proba(X_test)
res = xgboost.predict_proba(X_t)
submission = pd.DataFrame()
submission["ID"] = np.arange(res.shape[0])+1
submission["Adoption"]= res[:,0]
submission["Died"]= res[:,1]
submission["Euthanasia"]= res[:,2]
submission["Return_to_owner"]= res[:,3]
submission["Transfer"]= res[:,4]
submission.to_csv("sub10.csv",index=False )<load_from_csv> | ridge = RidgeClassifierCV()
ridge.fit(X_train, y_train)
y_pred = ridge.predict(X_val)
acc_ridge = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_ridge ) | Titanic - Machine Learning from Disaster |
943,976 | shelter_train = pd.read_csv("../input/train.csv")
shelter_train_outcome = shelter_train["OutcomeType"]
shelter_test = pd.read_csv("../input/test.csv")<drop_column> | sgd = SGDClassifier()
sgd.fit(X_train, y_train)
y_pred = sgd.predict(X_val)
acc_sgd = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_sgd ) | Titanic - Machine Learning from Disaster |
943,976 | dog_train = shelter_train[shelter_train["AnimalType"]=="Dog"]
dog_train = dog_train.reset_index()
dog_train.drop("AnimalType", axis=1, inplace=True)
dog_train.drop("index", axis=1, inplace=True)
dog_train_outcome = dog_train["OutcomeType"]
dog_train.drop("OutcomeType", axis=1, inplace=True)
dog_test = shelter_test[shelter_test["AnimalType"]=="Dog"]
dog_test = dog_test.reset_index()
dog_test.drop("AnimalType", axis=1, inplace=True)
dog_test.drop("index", axis=1, inplace=True)
cat_train = shelter_train[shelter_train["AnimalType"]=="Cat"]
cat_train = cat_train.reset_index()
cat_train.drop("AnimalType", axis=1, inplace=True)
cat_train.drop("index", axis=1, inplace=True)
cat_train_outcome = cat_train["OutcomeType"]
cat_train.drop("OutcomeType", axis=1, inplace=True)
cat_test = shelter_test[shelter_test["AnimalType"]=="Cat"]
cat_test = cat_test.reset_index()
cat_test.drop("AnimalType", axis=1, inplace=True)
cat_test.drop("index", axis=1, inplace=True)
dog_test_ID = dog_test["ID"].as_matrix()
dog_test_ID = np.array([dog_test_ID])
dog_test_ID = dog_test_ID.T
dog_test.drop("ID", axis=1, inplace=True)
cat_test_ID = cat_test["ID"].as_matrix()
cat_test_ID = np.array([cat_test_ID])
cat_test_ID = cat_test_ID.T
cat_test.drop("ID", axis=1, inplace=True)
<data_type_conversions> | clf = SVC()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_val)
acc_clf = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_clf ) | Titanic - Machine Learning from Disaster |
943,976 | def pre_processing(shelter_train, shelter_test, animal_type):
shelter_train.drop("AnimalID", axis=1, inplace=True)
shelter_train.drop("OutcomeSubtype", axis=1, inplace=True)
time_train = pd.to_datetime(shelter_train["DateTime"])
time_test = pd.to_datetime(shelter_test["DateTime"])
shelter_train["Year"] = time_train.dt.year
shelter_test["Year"] = time_test.dt.year
shelter_train["Month"] = time_train.dt.month
shelter_test["Month"] = time_test.dt.month
shelter_test["Day"] = time_test.dt.day
shelter_train["Day"] = time_train.dt.day
shelter_test["Hour"] = time_test.dt.hour
shelter_train["Hour"] = time_train.dt.hour
shelter_test["Minute"] = time_test.dt.minute
shelter_train["Minute"] = time_train.dt.minute
shelter_train.drop("DateTime", axis=1, inplace=True)
shelter_test.drop("DateTime", axis=1, inplace=True)
shelter_train["SexuponOutcome"].fillna("Spayed Female", inplace=True)
shelter_test["SexuponOutcome"].fillna("Spayed Female", inplace=True)
def intact_group(sex):
try:
intact_type = sex.split()
except:
return 0
if intact_type[0] == "Neutered" or intact_type[0] == "Spayed":
return 1
elif intact_type[0] == "Intact":
return 2
else:
return 0
shelter_train["Virginity"] = shelter_train["SexuponOutcome"].apply(intact_group)
shelter_test["Virginity"] = shelter_test["SexuponOutcome"].apply(intact_group)
def sex_group(sexs):
try:
sex_type = sexs.split()
except:
return 0
if sex_type[0] == "Unknown":
return 0
elif sex_type[1] == "Male":
return 1
elif sex_type[1] == "Female":
return 2
else:
return 0
shelter_train["Sex"] = shelter_train["SexuponOutcome"].apply(sex_group)
shelter_test["Sex"] = shelter_test["SexuponOutcome"].apply(sex_group)
shelter_train.drop("SexuponOutcome", axis=1, inplace=True)
shelter_test.drop("SexuponOutcome", axis=1, inplace=True)
def check_has_name(name):
if type(name)is str:
return 1
else:
return 0
shelter_train["has_name"] = shelter_train["Name"].apply(check_has_name)
shelter_test["has_name"] = shelter_test["Name"].apply(check_has_name)
shelter_train.drop("Name", axis=1, inplace=True)
shelter_test.drop("Name", axis=1, inplace=True)
shelter_train["AgeuponOutcome"].fillna("1 month", inplace=True)
shelter_test["AgeuponOutcome"].fillna("1 month", inplace=True)
def age_group(age):
try:
age_list = age.split()
except:
return None
ages = int(age_list[0])
if(age_list[1].find("s")) :
age_list[1] = age_list[1].replace("s","")
if age_list[1] == "day":
return ages
elif(age_list[1] == "week"):
return ages*7
elif(age_list[1] == "month"):
return ages*30
elif(age_list[1] == "year"):
return ages*365
shelter_train["AgeuponOutcome"] = shelter_train["AgeuponOutcome"].apply(age_group)
shelter_test["AgeuponOutcome"] = shelter_test["AgeuponOutcome"].apply(age_group)
def hair_group(breed):
if breed.find("Shorthair")!= -1:
return 0
elif breed.find("Longhair")!= -1:
return 1
else:
return 2
shelter_train["Hairgroup"] = shelter_train["Breed"].apply(hair_group)
shelter_test["Hairgroup"] = shelter_test["Breed"].apply(hair_group)
def aggressive(breed):
if breed.find("Pit Bull")!= -1:
return 1
elif breed.find("Rottweiler")!= -1:
return 2
elif breed.find("Husky")!= -1:
return 3
elif breed.find("Shepherd")!= -1:
return 4
elif breed.find("Malamute")!= -1:
return 5
elif breed.find("Doberman")!= -1:
return 6
elif breed.find("Chow")!= -1:
return 7
elif breed.find("Dane")!= -1:
return 8
elif breed.find("Boxer")!= -1:
return 9
elif breed.find("Akita")!= -1:
return 10
else:
return 11
if(animal_type == "Dog"):
shelter_train["Aggresiveness"] = shelter_train["Breed"].apply(aggressive)
shelter_test["Aggresiveness"] = shelter_test["Breed"].apply(aggressive)
def allergic(breed):
if breed.find("Akita")!= -1:
return 1
elif breed.find("Malamute")!= -1:
return 2
elif breed.find("Eskimo")!= -1:
return 3
elif breed.find("Corgi")!= -1:
return 4
elif breed.find("Chow")!= -1:
return 5
elif breed.find("Shepherd")!= -1:
return 6
elif breed.find("Pyrenees")!= -1:
return 7
elif breed.find("Labrador")!= -1:
return 8
elif breed.find("Retriever")!= -1:
return 9
elif breed.find("Husky")!= -1:
return 10
else:
return 11
if(animal_type == "Dog"):
shelter_train["Allergic"] = shelter_train["Breed"].apply(allergic)
shelter_test["Allergic"] = shelter_test["Breed"].apply(allergic)
def weight(breed):
if breed.find("Pit Bull")!= -1:
return 1
elif breed.find("Husky")!= -1:
return 1
elif breed.find("Doberman")!= -1:
return 1
elif breed.find("Boxer")!= -1:
return 1
elif breed.find("Akita")!= -1:
return 1
elif breed.find("Chow")!= -1:
return 1
elif breed.find("Rottweiler")!= -1:
return 2
elif breed.find("Shepherd")!= -1:
return 2
elif breed.find("Malamute")!= -1:
return 2
elif breed.find("Dane")!= -1:
return 2
else:
return 3
if(animal_type == "Dog"):
shelter_train["Weight"] = shelter_train["Breed"].apply(weight)
shelter_test["Weight"] = shelter_test["Breed"].apply(weight)
def breed_group(breed_input):
breed = str(breed_input)
if ' ' not in breed:
br = breed
else:
breed_list = breed.split()
try:
br = breed_list[2]
except:
br = breed_list[1]
if(br == "Mix"):
return 0
else:
return 1
shelter_train["Breed"] = shelter_train["Breed"].apply(breed_group)
shelter_test["Breed"] = shelter_test["Breed"].apply(breed_group)
def color_group(color):
try:
color_type = color.split()
except:
return "unknown"
return str(color_type[0])
shelter_train["Color"] = shelter_train["Color"].apply(color_group)
shelter_test["Color"] = shelter_test["Color"].apply(color_group)
intval, label = pd.factorize(shelter_train["Color"], sort=True)
shelter_train["Color"] = pd.DataFrame(intval)
del intval, label
intval, label = pd.factorize(shelter_test["Color"], sort=True)
shelter_test["Color"] = pd.DataFrame(intval)
del intval, label
print(shelter_train.head())
return shelter_train, shelter_test
dog_train, dog_test = pre_processing(dog_train, dog_test, "Dog")
cat_train, cat_test = pre_processing(cat_train, cat_test, "Cat")
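pd.factorize in pre_processing assigns each simplified color an arbitrary integer code, which tree models tolerate but which implies an ordering that does not exist. A minimal sketch of a one-hot alternative applied to the string Color column before the factorize step; this is an assumption, not what the original does:

import pandas as pd

# Hypothetical alternative to factorize: one-hot encode the simplified Color strings
# and align train/test so both frames get the same dummy columns.
color_train = pd.get_dummies(dog_train["Color"], prefix="Color")
color_test = pd.get_dummies(dog_test["Color"], prefix="Color")
color_train, color_test = color_train.align(color_test, join="outer", axis=1, fill_value=0)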
<split> | xgb = XGBClassifier(n_estimators=10)
xgb.fit(X_train, y_train)
y_pred = xgb.predict(X_val)
acc_xgb = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_xgb) | Titanic - Machine Learning from Disaster |
943,976 | dog_X_train, dog_X_val, dog_y_train, dog_y_val = train_test_split(dog_train, dog_train_outcome, test_size=0.3)
cat_X_train, cat_X_val, cat_y_train, cat_y_val = train_test_split(cat_train, cat_train_outcome, test_size=0.3)
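A plain random split can under-represent the rarer outcome classes (Died, Euthanasia) in the validation set. A minimal stratified variant; the stratify argument and the fixed random_state are additions, not from the original:

from sklearn.model_selection import train_test_split

# Keep outcome-class proportions identical in train and validation.
dog_X_train, dog_X_val, dog_y_train, dog_y_val = train_test_split(
    dog_train, dog_train_outcome, test_size=0.3,
    stratify=dog_train_outcome, random_state=42)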
<choose_model_class> | models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Ridge Classifier',
'Random Forest', 'Naive Bayes', 'Linear SVC', 'MLP','AdaBoost','Linear discriminant','Passive Aggressive',
'Decision Tree', 'Gradient Boosting Classifier','Extra Trees','Stochastic Gradient Descent','Perceptron','xgboost'],
'Score': [acc_clf, acc_knn, acc_logreg,acc_ridge,acc_randomforest, acc_gaussian,acc_linear_svc, acc_MLP,acc_adaboost,acc_linear_da,acc_passiveaggressive,acc_decisiontree,acc_gbk,acc_et,acc_sgd,acc_perceptron,acc_xgb]})
models.sort_values(by='Score', ascending=False) | Titanic - Machine Learning from Disaster |
943,976 | classifiers = [
GradientBoostingClassifier()
]
print("DOG")
for classifier in classifiers:
dog_log = classifier
dog_log.fit(dog_X_train, dog_y_train)
show_validation = True
if(show_validation == True):
dog_y_probs = dog_log.predict_proba(dog_X_val)
dog_y_pred = dog_log.predict(dog_X_val)
print(type(classifier))
print("accuracy_score:", accuracy_score(dog_y_val, dog_y_pred))
print("log_loss:", log_loss(dog_y_val, dog_y_probs))
elif(show_validation == False):
dog_y_probs = dog_log.predict_proba(dog_X_train)
dog_y_pred = dog_log.predict(dog_X_train)
print(type(classifier))
print("accuracy_score:", accuracy_score(dog_y_train, dog_y_pred))
print("log_loss:", log_loss(dog_y_train, dog_y_probs))
print("CAT")
for classifier in classifiers:
cat_log = classifier
cat_log.fit(cat_X_train, cat_y_train)
show_validation = True
if(show_validation == True):
cat_y_probs = cat_log.predict_proba(cat_X_val)
cat_y_pred = cat_log.predict(cat_X_val)
print(type(classifier))
print("accuracy_score:", accuracy_score(cat_y_val, cat_y_pred))
print("log_loss:", log_loss(cat_y_val, cat_y_probs))
elif(show_validation == False):
cat_y_probs = cat_log.predict_proba(cat_X_train)
cat_y_pred = cat_log.predict(cat_X_train)
print(type(classifier))
print("accuracy_score:", accuracy_score(cat_y_train, cat_y_pred))
print("log_loss:", log_loss(cat_y_train, cat_y_probs))
<save_to_csv> | Submission['Survived']=ridge.predict(test)
print(Submission.head(5))
print('Prediction complete') | Titanic - Machine Learning from Disaster |
943,976 | y_probs = np.append(dog_test_result, cat_test_result, axis=0)
y_probs = y_probs[y_probs[:,0].argsort()]
y_probs = y_probs[:,1:]
print(y_probs)
results = pd.read_csv("../input/sample_submission.csv")
results["Adoption"] = y_probs[:,0]
results["Died"] = y_probs[:,1]
results["Euthanasia"] = y_probs[:,2]
results["Return_to_owner"] = y_probs[:,3]
results["Transfer"] = y_probs[:,4]
results.to_csv("split_animal.csv",index = False)
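The cell above consumes dog_test_result and cat_test_result, which are never defined in the visible code. A minimal sketch of how they could be assembled, assuming each pairs the saved ID column with the fitted model's class probabilities:

import numpy as np

# Assumed construction (not shown in the original): prepend each test row's ID to
# its predicted outcome probabilities so the combined array can be sorted by ID above.
dog_test_result = np.append(dog_test_ID, dog_log.predict_proba(dog_test), axis=1)
cat_test_result = np.append(cat_test_ID, cat_log.predict_proba(cat_test), axis=1)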
<import_modules> | Submission.set_index('PassengerId', inplace=True)
Submission.to_csv('ridgesubmission02.csv',sep=',')
print('File created') | Titanic - Machine Learning from Disaster |
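The Titanic cells assign into a Submission frame whose construction is not shown anywhere in this excerpt. A minimal sketch of the assumed upstream setup, taking df_test to be the Titanic test frame:

import pandas as pd

# Assumed upstream definition: a frame keyed by PassengerId that each model's
# predictions are written into before saving.
Submission = pd.DataFrame({'PassengerId': df_test['PassengerId']})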
943,976 | tqdm.pandas(desc='Progress')
<define_variables> | REVISED_NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
SIMPLE_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
INTERESTING_COLUMNS=['Survived','Pclass','Age','SibSp','Parch','Title','Alone','Mother','Family Size','Family_Survival','Embarked','FareBand','TicketRef']
CATEGORY_COLUMNS=['Pclass','SibSp','Parch','Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','HadCabin','Free']
data_to_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
prediction = df_train["Survived"]
test = df_test[CATEGORY_COLUMNS].fillna(-1000)
X_train, X_val, y_train, y_val = train_test_split(data_to_train, prediction, test_size = 0.3,random_state=21, stratify=prediction)
print('Data split') | Titanic - Machine Learning from Disaster |
943,976 | embed_size = 300
max_features = 120000
maxlen = 70
batch_size = 512
n_epochs = 5
n_splits = 5
SEED = 1029<set_options> | param_grid = {'C':np.arange(1, 7),
'degree':np.arange(1, 7),
'max_iter':np.arange(0, 12),
'kernel':['rbf','linear'],
'shrinking':[0,1]}
clf = SVC()
svc_cv=GridSearchCV(clf, param_grid, cv=10)
svc_cv.fit(X_train, y_train)
print("Tuned SVC Parameters: {}".format(svc_cv.best_params_))
print("Best score is {}".format(svc_cv.best_score_))
acc_svc_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_svc_cv ) | Titanic - Machine Learning from Disaster |
943,976 | def seed_everything(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything()<features_selection> | param_grid = {"solver": ['newton-cg','lbfgs','liblinear','sag','saga'],'C': [0.01, 0.1, 1, 10, 100]}
logreg = LogisticRegression()
logreg_cv = GridSearchCV(logreg, param_grid, cv=30)
logreg_cv.fit(X_train, y_train)
y_pred = logreg_cv.predict(X_val)
print("Tuned Logistic Regression Parameters: {}".format(logreg_cv.best_params_))
print("Best score is {}".format(logreg_cv.best_score_))
acc_logreg_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_logreg_cv) | Titanic - Machine Learning from Disaster |
943,976 | def load_glove(word_index):
EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300]
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = -0.005838499,0.48782197
embed_size = all_embs.shape[1]
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
for word, i in word_index.items() :
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_fasttext(word_index):
EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean() , all_embs.std()
embed_size = all_embs.shape[1]
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
for word, i in word_index.items() :
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_para(word_index):
EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = -0.0053247833,0.49346462
embed_size = all_embs.shape[1]
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
for word, i in word_index.items() :
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix<load_from_csv> | param_grid = {"n_neighbors": np.arange(1, 50),
"leaf_size": np.arange(20, 40),
"algorithm": ["ball_tree","kd_tree","brute"]
}
knn = KNeighborsClassifier()
knn_cv = GridSearchCV(knn, param_grid, cv=10)
knn_cv.fit(X_train, y_train)
y_pred = knn_cv.predict(X_val)
print("Tuned knn Parameters: {}".format(knn_cv.best_params_))
print("Best score is {}".format(knn_cv.best_score_))
acc_knn_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_knn_cv) | Titanic - Machine Learning from Disaster |
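The three loaders defined above (load_glove, load_fasttext, load_para) each return a max_features x 300 matrix. A common follow-up, assumed here rather than taken from the original, is to average two of them once word_index exists (it is built further down by load_and_prec):

import numpy as np

# Hypothetical blend: the element-wise mean of two pretrained matrices often
# generalizes better than either matrix alone.
glove_matrix = load_glove(word_index)
para_matrix = load_para(word_index)
embedding_matrix = np.mean([glove_matrix, para_matrix], axis=0)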
df_train = pd.read_csv("../input/train.csv")
df_test = pd.read_csv("../input/test.csv")
df = pd.concat([df_train, df_test], sort=True)<feature_engineering> | param_dist = {"random_state" : np.arange(0, 10),
"max_depth": np.arange(1, 10),
"max_features": np.arange(1, 10),
"min_samples_leaf": np.arange(1, 10),
"criterion": ["gini","entropy"]}
tree = DecisionTreeClassifier()
tree_cv = RandomizedSearchCV(tree, param_dist, cv=30)
tree_cv.fit(X_train,y_train)
y_pred = tree_cv.predict(X_val)
print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
print("Best score is {}".format(tree_cv.best_score_))
acc_tree_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_tree_cv) | Titanic - Machine Learning from Disaster |
943,976 | def build_vocab(texts):
sentences = texts.apply(lambda x: x.split() ).values
vocab = {}
for sentence in sentences:
for word in sentence:
try:
vocab[word] += 1
except KeyError:
vocab[word] = 1
return vocab
vocab = build_vocab(df['question_text'])<define_variables> | param_dist = {"random_state" : np.arange(0, 10),
"n_estimators" : np.arange(1, 20),
"max_depth": np.arange(1, 10),
"max_features": np.arange(1, 10),
"min_samples_leaf": np.arange(1, 10),
"criterion": ["gini","entropy"]}
randomforest = RandomForestClassifier()
randomforest_cv = RandomizedSearchCV(randomforest, param_dist, cv=30)
randomforest_cv.fit(X_train,y_train)
y_pred = randomforest_cv.predict(X_val)
print("Tuned Decision Tree Parameters: {}".format(randomforest_cv.best_params_))
print("Best score is {}".format(randomforest_cv.best_score_))
acc_randomforest_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_randomforest_cv) | Titanic - Machine Learning from Disaster |
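build_vocab above only counts word occurrences. A commonly paired helper (an assumption here, not part of the original) reports how much of that vocabulary a pretrained embedding actually covers, which is what motivates the cleaning rules that follow:

import operator

def check_coverage(vocab, embeddings_index):
    # Split the vocabulary into words with and without a pretrained vector.
    known_words, unknown_words = {}, {}
    nb_known, nb_unknown = 0, 0
    for word, count in vocab.items():
        if word in embeddings_index:
            known_words[word] = count
            nb_known += count
        else:
            unknown_words[word] = count
            nb_unknown += count
    print('Found embeddings for {:.2%} of vocab'.format(len(known_words) / len(vocab)))
    print('Found embeddings for {:.2%} of all text'.format(nb_known / (nb_known + nb_unknown)))
    # Most frequent uncovered words first, to target cleaning where it matters.
    return sorted(unknown_words.items(), key=operator.itemgetter(1), reverse=True)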
943,976 | sin = len(df_train[df_train["target"]==0])
insin = len(df_train[df_train["target"]==1])
persin =(sin/(sin+insin)) *100
perinsin =(insin/(sin+insin)) *100
print("
print("<feature_engineering> | param_dist = {'max_depth':np.arange(1, 7),
'min_samples_leaf': np.arange(1, 6),
"max_features": np.arange(1, 10),
}
gbk = GradientBoostingClassifier()
gbk_cv = RandomizedSearchCV(gbk, param_dist, cv=30)
gbk_cv.fit(X_train, y_train)
y_pred = gbk_cv.predict(X_val)
print("Tuned Gradient Boost Parameters: {}".format(gbk_cv.best_params_))
print("Best score is {}".format(gbk_cv.best_score_))
acc_gbk_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_gbk_cv) | Titanic - Machine Learning from Disaster |
943,976 | def build_vocab(texts):
sentences = texts.apply(lambda x: x.split() ).values
vocab = {}
for sentence in sentences:
for word in sentence:
try:
vocab[word] += 1
except KeyError:
vocab[word] = 1
return vocab
def known_contractions(embed):
known = []
for contract in contraction_mapping:
if contract in embed:
known.append(contract)
return known
def clean_contractions(text, mapping):
specials = ["’", "‘", "´", "`"]
for s in specials:
text = text.replace(s, "'")
text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")])
return text
def correct_spelling(x, dic):
for word in dic.keys() :
x = x.replace(word, dic[word])
return x
def unknown_punct(embed, punct):
unknown = ''
for p in punct:
if p not in embed:
unknown += p
unknown += ' '
return unknown
def clean_numbers(x):
x = re.sub('[0-9]{5,}', '#####', x)
x = re.sub('[0-9]{4}', '####', x)
x = re.sub('[0-9]{3}', '###', x)
x = re.sub('[0-9]{2}', '##', x)
return x
def clean_special_chars(text, punct, mapping):
for p in mapping:
text = text.replace(p, mapping[p])
for p in punct:
text = text.replace(p, f' {p} ')
specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''}
for s in specials:
text = text.replace(s, specials[s])
return text
def add_lower(embedding, vocab):
count = 0
for word in vocab:
if word in embedding and word.lower() not in embedding:
embedding[word.lower() ] = embedding[word]
count += 1
print(f"Added {count} words to embedding" )<define_variables> | param_dist = {'learning_rate': [.01,.03,.05,.1,.25],
'max_depth': np.arange(1, 10),
'n_estimators': [10, 50, 100, 300],
'booster':['gbtree','gblinear','dart']
}
xgb = XGBClassifier()
xgb_cv = RandomizedSearchCV(xgb, param_dist, cv=20)
xgb_cv.fit(X_train, y_train)
y_pred = xgb_cv.predict(X_val)
print("xgBoost Parameters: {}".format(xgb_cv.best_params_))
print("Best score is {}".format(xgb_cv.best_score_))
acc_xgb_cv = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_xgb_cv) | Titanic - Machine Learning from Disaster |
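A quick hedged example of chaining the cleaning helpers defined earlier on one string; contraction_mapping is referenced by clean_contractions but defined upstream of this excerpt, so it is assumed available:

# Hypothetical end-to-end cleaning of a single question.
sample = "isn't it 2018? what's the weather"
sample = clean_contractions(sample, contraction_mapping)  # "isn't" -> "is not", assuming the mapping holds it
sample = clean_numbers(sample)                            # "2018" -> "####"
print(sample)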
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
def clean_text(x):
x = str(x)
for punct in puncts:
x = x.replace(punct, f' {punct} ')
return x
def clean_numbers(x):
x = re.sub('[0-9]{5,}', '#####', x)
x = re.sub('[0-9]{4}', '####', x)
x = re.sub('[0-9]{3}', '###', x)
x = re.sub('[0-9]{2}', '##', x)
return x
mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 
'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'}
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace_typical_misspell(text):
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)<feature_engineering> | optmodels = pd.DataFrame({
'optModel': ['Support Vector Machines','KNearestNeighbours','Decision Tree','Gradient Boost','Logistic Regression','xgboost'],
'optScore': [acc_svc_cv,acc_knn_cv,acc_tree_cv,acc_gbk_cv,acc_logreg_cv,acc_xgb_cv]})
optmodels.sort_values(by='optScore', ascending=False) | Titanic - Machine Learning from Disaster |
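A small check of replace_typical_misspell defined above; note the compiled pattern is case-sensitive, so inputs must match the keys exactly as written in mispell_dict:

print(replace_typical_misspell("Whta is demonitisation in 2k17"))
# -> "What is demonetization in 2017"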
943,976 | def add_features(df):
df['question_text'] = df['question_text'].progress_apply(lambda x:str(x))
df['total_length'] = df['question_text'].progress_apply(len)
df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper()))
df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']),
axis=1)
df['num_words'] = df.question_text.str.count(r'\S+')
df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split())))
df['words_vs_unique'] = df['num_unique_words'] / df['num_words']
return df
def load_and_prec() :
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
print("Train shape : ",train_df.shape)
print("Test shape : ",test_df.shape)
train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower())
test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower())
train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x))
train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x))
train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x))
train_X = train_df["question_text"].fillna("_##_").values
test_X = test_df["question_text"].fillna("_##_").values
train = add_features(train_df)
test = add_features(test_df)
features = train[['caps_vs_length', 'words_vs_unique']].fillna(0)
test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0)
ss = StandardScaler()
ss.fit(np.vstack((features, test_features)))
features = ss.transform(features)
test_features = ss.transform(test_features)
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(train_X))
train_X = tokenizer.texts_to_sequences(train_X)
test_X = tokenizer.texts_to_sequences(test_X)
train_X = pad_sequences(train_X, maxlen=maxlen)
test_X = pad_sequences(test_X, maxlen=maxlen)
train_y = train_df['target'].values
np.random.seed(SEED)
trn_idx = np.random.permutation(len(train_X))
train_X = train_X[trn_idx]
train_y = train_y[trn_idx]
features = features[trn_idx]
return train_X, test_X, train_y, features, test_features, tokenizer.word_index
<train_model> | X_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
y_train = df_train["Survived"]
X_test = df_test[CATEGORY_COLUMNS].fillna(-1000)
test = df_test[REVISED_NUMERIC_COLUMNS].fillna(-1000)
randomf=RandomForestClassifier(random_state= 7,n_estimators=17,min_samples_leaf= 4, max_features=9,max_depth=5, criterion='gini')
randomf.fit(data_to_train, prediction)
Submission['Survived']=randomf.predict(X_test)
Submission.to_csv('randomforestcats01.csv',sep=',')
print(Submission.head(5))
print('File created') | Titanic - Machine Learning from Disaster |
943,976 | x_train, x_test, y_train, features, test_features, word_index = load_and_prec()
<save_model> | REVISED_NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
SIMPLE_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
INTERESTING_COLUMNS=['Survived','Pclass','Age','SibSp','Parch','Title','Alone','Mother','Family Size','Family_Survival','Embarked','FareBand','TicketRef']
CATEGORY_COLUMNS=['Pclass','SibSp','Parch','Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','HadCabin','Free']
data_to_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
X_test2= df_test[CATEGORY_COLUMNS].fillna(-1000)
prediction = df_train["Survived"]
X_train, X_test, y_train, y_test = train_test_split(data_to_train, prediction, test_size = 0.3,random_state=21, stratify=prediction)
print('Data Split')
hyperparams = {'algorithm': ['auto'], 'weights': ['uniform', 'distance'] ,'leaf_size': list(range(1,50,5)) ,
'n_neighbors':[6,7,8,9,10,11,12,14,16,18,20,22]}
gd=GridSearchCV(estimator = KNeighborsClassifier() , param_grid = hyperparams, verbose=True, cv=10, scoring = "roc_auc")
gd.fit(X_train, y_train)
gd.best_estimator_.fit(X_train,y_train)
y_pred=gd.best_estimator_.predict(X_test)
Submission['Survived']=gd.best_estimator_.predict(X_test2)
print('Best Score')
print(gd.best_score_)
print('Best Estimator')
print(gd.best_estimator_)
acc_gd_cv = round(accuracy_score(y_pred, y_test)* 100, 2)
print('Accuracy')
print(acc_gd_cv)
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred))
print('Classification_report')
print(classification_report(y_test, y_pred))
print('Sample Prediction')
print(Submission.head(10))
print('KNN prediction created') | Titanic - Machine Learning from Disaster |
943,976 | np.save("x_train",x_train)
np.save("x_test",x_test)
np.save("y_train",y_train)
np.save("features",features)
np.save("test_features",test_features)
np.save("word_index.npy",word_index )<load_pretrained> | REVISED_NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
SIMPLE_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
INTERESTING_COLUMNS=['Survived','Pclass','Age','SibSp','Parch','Title','Alone','Mother','Family Size','Family_Survival','Embarked','FareBand','TicketRef']
CATEGORY_COLUMNS=['Pclass','SibSp','Parch','Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','HadCabin','Free']
data_to_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
X_test2= df_test[CATEGORY_COLUMNS].fillna(-1000)
prediction = df_train["Survived"]
X_train, X_test, y_train, y_test = train_test_split(data_to_train, prediction, test_size = 0.3,random_state=21, stratify=prediction)
print('Data Split')
hyperparams = {"random_state" : np.arange(0, 10),
"max_depth": np.arange(1, 10),
"max_features": np.arange(1, 10),
"min_samples_leaf": np.arange(1, 10),
"criterion": ["gini","entropy"]}
gd=GridSearchCV(estimator = DecisionTreeClassifier() , param_grid = hyperparams, verbose=True, cv=10, scoring = "roc_auc")
gd.fit(X_train, y_train)
gd.best_estimator_.fit(X_train,y_train)
y_pred=gd.best_estimator_.predict(X_test)
Submission['Survived']=gd.best_estimator_.predict(X_test2)
print('Best Score')
print(gd.best_score_)
print('Best Estimator')
print(gd.best_estimator_)
acc_gd_cv = round(accuracy_score(y_pred, y_test)* 100, 2)
print('Accuracy')
print(acc_gd_cv)
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred))
print('Classification_report')
print(classification_report(y_test, y_pred))
print(Submission.head(10))
Submission.to_csv('Treegridsearch03.csv',sep=',')
print('Decision Tree prediction created') | Titanic - Machine Learning from Disaster |
943,976 | x_train = np.load("x_train.npy")
x_test = np.load("x_test.npy")
y_train = np.load("y_train.npy")
features = np.load("features.npy")
test_features = np.load("test_features.npy")
word_index = np.load("word_index.npy", allow_pickle=True).item()<normalization> | REVISED_NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
SIMPLE_COLUMNS=['Pclass','Age','SibSp','Parch','Family_Survival','Alone','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked']
INTERESTING_COLUMNS=['Survived','Pclass','Age','SibSp','Parch','Title','Alone','Mother','Family Size','Family_Survival','Embarked','FareBand','TicketRef']
CATEGORY_COLUMNS=['Pclass','SibSp','Parch','Family Size','Family_Survival','Alone','Mother','Sex_female','Sex_male','AgeBand_Child',
'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
'AgeBand_Senior','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','NameBand_1',
'NameBand_2', 'NameBand_3', 'NameBand_4', 'NameBand_5','Embarked','TicketRef_A', 'TicketRef_C', 'TicketRef_F', 'TicketRef_L',
'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X','HadCabin','Free']
data_to_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
X_test2= df_test[CATEGORY_COLUMNS].fillna(-1000)
prediction = df_train["Survived"]
X_train, X_test, y_train, y_test = train_test_split(data_to_train, prediction, test_size = 0.3,random_state=21, stratify=prediction)
print('Data Split')
hyperparams = {'solver':["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
'C': [0.01, 0.1, 1, 10, 100]}
gd=GridSearchCV(estimator = LogisticRegression() , param_grid = hyperparams, verbose=True, cv=10, scoring = "roc_auc")
gd.fit(X_train, y_train)
gd.best_estimator_.fit(X_train,y_train)
y_pred=gd.best_estimator_.predict(X_test)
Submission['Survived']=gd.best_estimator_.predict(X_test2)
print('Best Score')
print(gd.best_score_)
print('Best Estimator')
print(gd.best_estimator_)
acc_gd_cv = round(accuracy_score(y_pred, y_test)* 100, 2)
print('Accuracy')
print(acc_gd_cv)
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred))
print('Classification_report')
print(classification_report(y_test, y_pred))
print(Submission.head(10))
Submission.to_csv('Logregwithconfusion01.csv',sep=',')
print('Logistic Regression prediction created') | Titanic - Machine Learning from Disaster |