kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57) |
|---|---|---|---|
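Each row pairs a code cell from a Kaggle kernel (prompt), suffixed with a tag naming the operation it performs (e.g. `<load_from_disk>`), with the cell that follows it (completion) and the source competition (comp_name). A minimal sketch of loading such a dump for inspection, assuming a hypothetical JSON-lines export with these columns:

```python
import pandas as pd

# "kernel_cells.jsonl" is a hypothetical path/format; the page does not state
# how the underlying rows are actually stored.
rows = pd.read_json("kernel_cells.jsonl", lines=True)
print(rows.columns.tolist())             # ['kernel_id', 'prompt', 'completion', 'comp_name']
print(rows.groupby("kernel_id").size())  # prompt/completion pairs per kernel
```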
13,911,451 | def open_json_file(file):
json_opened = json.load(open(file))
    return json_opened<load_from_disk> | titanictest = pd.read_csv('../input/titanic/test.csv')
titanictest.info() | Titanic - Machine Learning from Disaster |
13,911,451 | train = open_json_file('../input/train.json')
test = open_json_file('../input/test.json')<define_variables> | titanictest['Sex_label'] = titanictest['Sex'].astype('category').cat.codes
onehots = pd.get_dummies(titanictest['Embarked'], prefix='Embarked')
titanictest = titanictest.join(onehots)
titanictest = titanictest[['Pclass',
'Sex_label',
'Age',
'SibSp',
'Parch',
'Fare',
'Embarked_C',
'Embarked_Q',
'Embarked_S']]
titanictest.info() | Titanic - Machine Learning from Disaster |
13,911,451 | print("--- sample of training data set ---")
train[0:1]<define_variables> | titanictest.isna().sum() | Titanic - Machine Learning from Disaster |
13,911,451 | print("--- sample of testing data set ---")
test[0:1]<train_model> | titanictest['Age'] = titanictest['Age'].fillna(30)
titanictest.isna().sum() | Titanic - Machine Learning from Disaster |
13,911,451 | print("Training data set size:",len(train))
print("Testing data set size:",len(test))<string_transform> | titanictest['Fare'] = titanictest['Fare'].fillna(35)
titanictest.isna().sum() | Titanic - Machine Learning from Disaster |
13,911,451 | def pre_process_data(data):
new_data = []
for recipe in data:
new_recipe = []
for ingredient in recipe:
new_ingredient_list = []
for word in ingredient.split() :
word = re.sub('[^a-zA-Z -]+', '', word)
new_ingredient_list.append(wn.morphy(word.lower().strip(",.!:?;' ")) or word.strip(",.!:?;' "))
new_recipe.append(' '.join(new_ingredient_list))
new_data.append(new_recipe)
return new_data<prepare_x_and_y> | X = titanic[['Pclass',
'Sex_label',
'Age',
'SibSp',
'Parch',
'Fare',
'Embarked_C',
'Embarked_Q',
'Embarked_S']]
y = titanic['Survived']
X_train, X_test,y_train,y_test = train_test_split(X,
y,
test_size = 0.3,
random_state = 789 ) | Titanic - Machine Learning from Disaster |
13,911,451 | x_train = [train[i]['ingredients'] for i in range(len(train))]
y_train_labels = [train[i]['cuisine'] for i in range(len(train))]<prepare_x_and_y> | logreg = LogisticRegression(random_state=789)
logreg.fit(X_train, y_train)
y_predicted = logreg.predict(titanictest)
THRESHOLD = 0.58
y_predicted = np.where(logreg.predict_proba(titanictest)[:,1] > THRESHOLD, 1, 0) | Titanic - Machine Learning from Disaster |
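The completion above overrides predict()'s implicit 0.5 cutoff by thresholding predict_proba at 0.58. A minimal self-contained sketch of that pattern on synthetic data (not the kernel's Titanic features):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

# Synthetic stand-in data; the cell above uses the prepared Titanic columns.
X, y = make_classification(n_samples=200, random_state=789)
clf = LogisticRegression(random_state=789).fit(X, y)

THRESHOLD = 0.58                            # custom cutoff; predict() would use 0.5
proba = clf.predict_proba(X)[:, 1]          # probability of the positive class
y_hat = np.where(proba > THRESHOLD, 1, 0)   # same pattern as y_predicted above
print(y_hat[:10])
```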
13,911,451 | x_test = [test[i]['ingredients'] for i in range(len(test))]<prepare_x_and_y> | pd.DataFrame(y_predicted).rename(columns={0: 'Survived'}) | Titanic - Machine Learning from Disaster |
13,911,451 | new_x_train = pre_process_data(x_train)
new_x_test = pre_process_data(x_test)<categorify> | dfkagglesubmission = pd.read_csv('../input/titanic/test.csv')
dfid = dfkagglesubmission[['PassengerId']]
dfkagglesubmission = pd.concat([dfid, pd.DataFrame(y_predicted).rename(columns={0: 'Survived'})], axis=1)
dfkagglesubmission | Titanic - Machine Learning from Disaster |
13,911,451 | print("--- Label Encode the Target Variable ---")
lb_enc = LabelEncoder()
y_enc_labels = lb_enc.fit_transform(y_train_labels)
print(y_enc_labels )<string_transform> | dfkagglesubmission['Survived'].value_counts() | Titanic - Machine Learning from Disaster |
13,911,451 | def bag_of_words(data):
    text_data = [' '.join(recipe).lower() for recipe in data]
    return text_data
def concatenated_words(data):
    text_data = [' '.join(word.replace(" ", "_").lower() for word in recipe) for recipe in data]
    return text_data<string_transform> | dfkagglesubmission.to_csv('Titanic_submission.csv', index=False) | Titanic - Machine Learning from Disaster |
14,577,228 | print("--- Preparing text data ---")
train_text = bag_of_words(x_train)
submission_text = bag_of_words(x_test)
prep_train_text = bag_of_words(new_x_train)
prep_submission_text = bag_of_words(new_x_test)
prep_train_text_underscore = concatenated_words(new_x_train)
prep_submission_text_underscore = concatenated_words(new_x_test)
<categorify> | warnings.filterwarnings('ignore')
%matplotlib inline
pio.templates.default = 'ggplot2' | Titanic - Machine Learning from Disaster |
14,577,228 | tfidf_enc = TfidfVectorizer()
def tf_idf_features(text, flag):
if flag == "train":
x = tfidf_enc.fit_transform(text)
else:
x = tfidf_enc.transform(text)
x = x.astype('float16')
return x<feature_engineering> | dpath = Path('/kaggle/input/titanic')
train_raw = pd.read_csv(dpath / 'train.csv')
test_raw = pd.read_csv(dpath / 'test.csv' ) | Titanic - Machine Learning from Disaster |
14,577,228 | train_text_features = tf_idf_features(train_text, flag="train")
submission_text_features = tf_idf_features(submission_text, flag="submission")
prep_train_text_features = tf_idf_features(prep_train_text, flag="train")
prep_submission_text_features = tf_idf_features(prep_submission_text, flag="submission")
prep_train_text_underscore_features = tf_idf_features(prep_train_text_underscore, flag="train")
prep_submission_text_underscore_features = tf_idf_features(prep_submission_text_underscore, flag="submission" )<compute_train_metric> | full = pd.concat([train_raw.copy(deep=True), test_raw.copy(deep=True)], ignore_index=True ) | Titanic - Machine Learning from Disaster |
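tf_idf_features routes flag="train" text through fit_transform and everything else through transform, so the submission matrices reuse the vocabulary and IDF weights learned on the training text. A toy sketch of why the columns then line up:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

enc = TfidfVectorizer()
X_train = enc.fit_transform(["salt pepper garlic", "flour sugar butter"])  # learns the vocabulary
X_sub = enc.transform(["garlic butter"])                                   # reuses it
print(X_train.shape[1] == X_sub.shape[1])  # True: identical feature columns
```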
14,577,228 | RANDOM_SEED = 1
names = ['DecisionTree','RandomForestClassifier','Logistic_Regression', "SVC"]
regressors = [DecisionTreeClassifier(random_state = RANDOM_SEED),
RandomForestClassifier(n_estimators = 10, random_state = RANDOM_SEED),
LogisticRegression(random_state = RANDOM_SEED),
SVC(C=10, gamma = 1, decision_function_shape=None, random_state = RANDOM_SEED),
]
def kfold_cross_validation(X,y, col_names):
N_FOLDS = 3
    cv_results = np.zeros((N_FOLDS, len(names)))
    kf = KFold(n_splits = N_FOLDS, shuffle = True, random_state = RANDOM_SEED)
    index_for_fold = 0
    for train_index, test_index in kf.split(X,y):
        print('\nFold index:', index_for_fold + 1,
              '------------------------------------------')
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        print('\nShape of input data for this fold:',
              '\nData Set: (Observations, Variables)')
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)
index_for_method = 0
for name, reg_model in zip(names, regressors):
            print('\nRegression model evaluation for:', name)
print(' Scikit Learn method:', reg_model)
reg_model.fit(X_train, y_train)
y_test_predict = reg_model.predict(X_test)
fold_method_result = reg_model.score(X_test, y_test)
cv_results[index_for_fold, index_for_method] = fold_method_result
index_for_method += 1
index_for_fold += 1
cv_results_df = pd.DataFrame(cv_results)
cv_results_df.columns = col_names
return cv_results_df
<compute_train_metric> | def split_data(df):
df = df.reset_index(drop=True)
passengerId = df['PassengerId']
df = df.drop(columns=['PassengerId'])
if 'Survived' in df.columns:
df['Survived'] = df['Survived'].astype('category')
    return df, passengerId | Titanic - Machine Learning from Disaster |
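Note that kfold_cross_validation above iterates kf.split but then draws a fresh random train_test_split inside each fold instead of indexing with train_index/test_index. For contrast, a minimal sketch (synthetic data, not the kernel's) that uses the fold indices directly:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=150, random_state=1)
kf = KFold(n_splits=3, shuffle=True, random_state=1)
scores = []
for train_idx, test_idx in kf.split(X):
    model = LogisticRegression().fit(X[train_idx], y[train_idx])  # fit on the fold's train part
    scores.append(model.score(X[test_idx], y[test_idx]))          # score on its held-out part
print(np.mean(scores))
```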
14,577,228 | starttime = time.monotonic()
print("--- Performing 3-fold cross validation ---
")
print("---------Standard Results---------")
train_text_cv = kfold_cross_validation(train_text_features,
y_enc_labels,
col_names = ["DecisionTree",
"RandomForest",
"LogisticRegression",
"SVM"])
print("
---------Removing Special Characters & Punctuations Results---------")
prep_train_text_cv = kfold_cross_validation(prep_train_text_features,
y_enc_labels,
col_names = ["DecisionTree",
"RandomForest",
"LogisticRegression",
"SVM"])
print("
---------Concatenated Words Results---------")
prep_train_text_underscore_cv = kfold_cross_validation(prep_train_text_underscore_features,
y_enc_labels,
col_names = ["DecisionTree",
'RandomForest',
'LogisticRegression',
"SVM"])
print("
That took ",(time.monotonic() -starttime)/60, " minutes" )<create_dataframe> | def remove_wordy_cols(df):
df = df.drop(columns=['Name', 'Ticket'])
return df | Titanic - Machine Learning from Disaster |
14,577,228 | baseline_models = pd.DataFrame(prep_train_text_cv.mean(), columns=["Avg Accuracy"])
baseline_models = baseline_models.reset_index()
baseline_models.columns = ["Model","Baseline Score"]<train_on_grid> | def fill_fare_mean(df):
df['Fare'] = df['Fare'].fillna(df['Fare'].mean())
return df | Titanic - Machine Learning from Disaster |
14,577,228 | def grid_search_function(func_X_train, func_X_test, func_y_train, func_y_test, parameters, model):
model = model(random_state=RANDOM_SEED)
grid_search = GridSearchCV(model, parameters)
classifier= grid_search.fit(func_X_train,func_y_train)
return classifier<split> | def apply_all(df, funs, debug=False):
for fun in funs:
if debug:
print(f'Apply {fun.__name__}')
df = fun(df)
return df | Titanic - Machine Learning from Disaster |
14,577,228 | def train_test_split_function(X,y, test_size_percent):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size_percent, random_state=RANDOM_SEED)
return X_train, X_test, y_train, y_test<split> | prep_nn_1 = lambda x : apply_all(x, [remove_wordy_cols, fill_fare_mean, split_data])
train = train_raw.copy(deep=True)
train, train_pId = prep_nn_1(train)
display(train.head(3))
display(train_pId.head(3 ).to_frame() ) | Titanic - Machine Learning from Disaster |
14,577,228 | starttime = time.monotonic()
parameters = {'n_estimators':[10,100,300]}
X_train, X_test, y_train, y_test = train_test_split_function(prep_train_text_features,
y_enc_labels,
test_size_percent = 0.20)
rf_classifier = grid_search_function(X_train, X_test, y_train, y_test,
parameters,
model = RandomForestClassifier)
print("
That took ",(time.monotonic() -starttime)/60, " minutes" )<find_best_params> | dls.show_batch() , learn.loss_func | Titanic - Machine Learning from Disaster |
14,577,228 | rf_classifier.best_estimator_<train_model> | learn.lr_find() | Titanic - Machine Learning from Disaster |
14,577,228 | def training_info_and_classification_report(X,y,test_size_percent, model):
starttime = time.monotonic()
X_train, X_test, y_train, y_test = train_test_split_function(X,y, test_size_percent)
print("
--- Fitting Model ---")
model.fit(X_train, y_train)
print("
--- Predicting Cuisines ---")
y_predict = model.predict(X_test)
print("
--- Scoring Model ---")
model_training_score = model.score(X_train,y_train)
model_testing_score = model.score(X_test,y_test)
print("
--- Creating Classification Report ---")
clf_report = classification_report(y_test,
y_predict,
target_names = lb_enc.inverse_transform(model.classes_ ).tolist())
array = confusion_matrix(y_test, y_predict)
row_sums = array.sum(axis=1,keepdims=True)
norm_conf_mx = array/row_sums
error_matrix_df = pd.DataFrame(norm_conf_mx, index = [i for i in lb_enc.inverse_transform(model.classes_ ).tolist() ],
columns = [i for i in lb_enc.inverse_transform(model.classes_ ).tolist() ])
print("
That took ",(time.monotonic() -starttime)/60, " minutes")
return model_training_score, model_testing_score, clf_report, array, norm_conf_mx, error_matrix_df<train_model> | learn.fit_one_cycle(5, lr_max=slice(0.004, 5e-2, 0.005))
learn.fit_one_cycle(25, lr_max=slice(0.004, 1e-2, 0.005), cbs=callbacks ) | Titanic - Machine Learning from Disaster |
14,577,228 | training_score_rf,testing_score_rf,rf_classification_report, rf_array, rf_norm_conf_mx, rf_error_matrix_df = training_info_and_classification_report(prep_train_text_features,
y_enc_labels,
test_size_percent = 0.20,
model = rf_classifier.best_estimator_ )<compute_test_metric> | test, test_pId = prep_nn_1(test_raw.copy(deep=True))
test_dl = learn.dls.test_dl(test)
preds, _ = learn.get_preds(dl=test_dl)
preds = np.argmax(preds, 1 ).numpy()
submission = pd.DataFrame(
{'PassengerId': test_pId,
'Survived': preds}
)
submission.to_csv('submission_trivial.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,577,228 | print(" Random Forest Classification Report ")
print(rf_classification_report )<split> | full['Title'] = full['Name'].apply(title)
full['Title'] = full['Title'].replace({'Ms': 'Miss', 'Mlle': 'Miss', 'Mme': 'Mrs'})
RARE_TITLE = list((full['Title'].value_counts() < 8).replace({False: np.nan}).dropna().index)
full['Title'] = full['Title'].replace(RARE_TITLE, 'Rare')
def extract_title(df, reduce=True):
df['Title'] = df['Name'].apply(title)
if reduce:
df['Title'] = df['Title'].replace({'Ms': 'Miss', 'Mlle': 'Miss', 'Mme': 'Mrs'})
df['Title'] = df['Title'].replace(RARE_TITLE, 'Rare')
df['Title'] = df['Title'].fillna('Unknown')
return df | Titanic - Machine Learning from Disaster |
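Both cells above call a helper named title on Name, but its definition is not among the rows shown. A hypothetical reconstruction, consistent with Titanic names such as "Braund, Mr. Owen Harris":

```python
import re

def title(name):
    # Hypothetical: pull the honorific between the comma and the period.
    m = re.search(r',\s*([^.]+)\.', name)
    return m.group(1).strip() if m else None

print(title("Braund, Mr. Owen Harris"))  # -> Mr
```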
14,577,228 | starttime = time.monotonic()
parameters = {'C':[1,5,10,100,1000]}
X_train, X_test, y_train, y_test = train_test_split_function(prep_train_text_features,
y_enc_labels,
test_size_percent = 0.20)
logreg_classifier = grid_search_function(X_train, X_test, y_train, y_test, parameters, model = LogisticRegression)
print("
That took ",(time.monotonic() -starttime)/60, " minutes" )<find_best_params> | def extract_name_length(df):
df['Name_Length'] = df['Name'].str.len()
return df | Titanic - Machine Learning from Disaster |
14,577,228 | logreg_classifier.best_estimator_<train_model> | AGE_TITLE = full.groupby(['Title'])['Age'].agg(['mean', 'std'])
AGE_TITLE.columns = [f'AGE_{i}' for i in AGE_TITLE.columns]
def fill_age_with_normal_rand(df):
df = df.merge(AGE_TITLE,
how='left',
left_on='Title',
right_index=True)
df['RAND_AGE'] = df.apply(lambda x: np.random.normal(x['AGE_mean'], x['AGE_std']/2.0), axis=1)
df['Age'] = df['Age'].fillna(df['RAND_AGE'])
df = df.drop(columns=['RAND_AGE', 'AGE_mean', 'AGE_std'])
return df | Titanic - Machine Learning from Disaster |
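fill_age_with_normal_rand imputes each missing Age with a draw from a normal distribution whose mean and (halved) standard deviation come from the passenger's title group. A toy illustration with assumed statistics:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"Title": ["Mr", "Mr", "Miss"], "Age": [30.0, None, None]})
stats = pd.DataFrame({"AGE_mean": {"Mr": 32.0, "Miss": 22.0},   # assumed values,
                      "AGE_std": {"Mr": 12.0, "Miss": 10.0}})   # not from the data
df = df.merge(stats, how="left", left_on="Title", right_index=True)
rand = df.apply(lambda r: np.random.normal(r["AGE_mean"], r["AGE_std"] / 2.0), axis=1)
df["Age"] = df["Age"].fillna(rand)   # only the missing rows get the random draw
print(df[["Title", "Age"]])
```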
14,577,228 | training_score_lgr,testing_score_lgr,lgr_classification_report, lgr_array, lgr_norm_conf_mx, lgr_error_matrix_df = training_info_and_classification_report(prep_train_text_features,
y_enc_labels,
test_size_percent = 0.20,
model = logreg_classifier.best_estimator_ )<prepare_x_and_y> | def add_family_size(df):
df['FamilySize'] = df['Parch'] + df['SibSp'] + 1
df['Single'] =(df['FamilySize'] == 1 ).astype(int)
df['FamilySize'] = df['FamilySize'].astype(str)
return df | Titanic - Machine Learning from Disaster |
14,577,228 | print("\nThat took ", (time.monotonic() - starttime)/60, " minutes")
<choose_model_class> | full['HasCabin'] = full['Cabin'].notna()
full.groupby('HasCabin')['Survived'].mean() | Titanic - Machine Learning from Disaster |
14,577,228 | svm_clf = SVC(C=10, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma=1, kernel='rbf',
max_iter=-1, probability=False, random_state=1, shrinking=True,
tol=0.001, verbose=False)
svm_ovr = OneVsRestClassifier(svm_clf )<split> | def cabin_information(df):
df['HasCabin'] = df['Cabin'].notna().astype(str)
df['CabinType'] = df['Cabin'].str.split(r'(^[A-Z])',expand = True)[1]
df['CabinType'] = df['CabinType'].fillna('Unknown')
df = df.drop(columns='Cabin')
return df | Titanic - Machine Learning from Disaster |
14,577,228 | def get_training_info(model,full_X_train, full_y_train):
X_train, X_test, y_train, y_test = train_test_split_function(full_X_train,
full_y_train,
test_size_percent = 0.20)
print("
--- Fitting Model ---")
model.fit(X_train, y_train)
print("
--- Scoring Model ---")
model_training_score = model.score(X_train,y_train)
model_testing_score = model.score(X_test,y_test)
    return model_training_score, model_testing_score
<find_best_model_class> | EMBARKED_MODE = full['Embarked'].mode()[0]
def fill_embarked(df):
df['Embarked'] = df['Embarked'].fillna(EMBARKED_MODE)
return df | Titanic - Machine Learning from Disaster |
14,577,228 | def get_classification_report(X,y,test_size_percent, model):
X_train, X_test, y_train, y_test = train_test_split_function(X,y, test_size_percent)
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
clf_report = classification_report(y_test,
y_predict,
                                       target_names = lb_enc.inverse_transform(model.classes_).tolist())
    array = confusion_matrix(y_test, y_predict)
    row_sums = array.sum(axis=1, keepdims=True)
    norm_conf_mx = array / row_sums
    error_matrix_df = pd.DataFrame(norm_conf_mx,
                                   index = lb_enc.inverse_transform(model.classes_).tolist(),
                                   columns = lb_enc.inverse_transform(model.classes_).tolist())
return clf_report, array, norm_conf_mx, error_matrix_df<split> | FARE_MEDIAN = full['Fare'].median()
def fill_fare(df):
df['Fare'] = df['Fare'].fillna(FARE_MEDIAN)
return df | Titanic - Machine Learning from Disaster |
14,577,228 | training_score_svm, testing_score_svm = get_training_info(svm_ovr,prep_train_text_features,y_enc_labels )<choose_model_class> | full['TicketCat'] = full['Ticket'].str[0:2]
RARE_TICKET = list((full['TicketCat'].value_counts() < 20).replace({False: np.nan}).dropna().index) | Titanic - Machine Learning from Disaster |
14,577,228 | p_svm_clf = SVC(C=10, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma=1, kernel='rbf',
max_iter=-1, probability=True, random_state=1, shrinking=True,
tol=0.001, verbose=False )<compute_test_metric> | def ticket_cat(df, reduce=True):
df['TicketCat'] = df['Ticket'].str[0:2]
df['TicketCat'] = df['TicketCat'].replace(RARE_TICKET, 'Rare')
df['TicketCat'] = df['TicketCat'].fillna('Unknown')
return df | Titanic - Machine Learning from Disaster |
14,577,228 | print(" SVM Classification Report ")
print(svm_classification_report )<find_best_params> | torch.device('cuda')
preps = [extract_title,
extract_name_length,
fill_age_with_normal_rand,
add_family_size,
cabin_information,
fill_embarked,
fill_fare,
ticket_cat,
remove_wordy_cols,
split_data]
prep_nn_2 = lambda x : apply_all(x, preps)
train = train_raw.copy(deep=True)
train, train_pId = prep_nn_2(train)
cont_names = ['Pclass', 'Age', 'Name_Length', 'Parch', 'SibSp']
cat_names = ['Sex', 'TicketCat', 'CabinType', 'HasCabin', 'Single', 'Title', 'Embarked', 'Fare', 'FamilySize']
procs = [Categorify, Normalize]
dep_var = 'Survived'
splits = RandomSplitter(valid_pct=0.2, seed=42 )(train.index)
cfg = tabular_config(embed_p=0.25, ps=0.05)
dls = TabularPandas(train,
cont_names=cont_names,
cat_names=cat_names,
procs=procs,
y_names=dep_var,
splits=splits ).dataloaders(bs=64)
callbacks = [SaveModelCallback(min_delta=0.005, monitor='accuracy', fname='model_feat01_best')]
learn = tabular_learner(dls, layers=[500,200,100], metrics=[accuracy], config=cfg)
learn.lr_find() | Titanic - Machine Learning from Disaster |
14,577,228 | rf_clf = rf_classifier.best_estimator_
lgr_clf = logreg_classifier.best_estimator_
print(rf_clf,"
")
print(lgr_clf,"
")
print(svm_clf,"
" )<define_variables> | learn.fit_one_cycle(5, lr_max=slice(0.004, 5e-2, 0.05))
learn.fit_one_cycle(45, lr_max=slice(0.005, 1e-2, 0.005), cbs=callbacks ) | Titanic - Machine Learning from Disaster |
14,577,228 | model_names = [
"Random Forest Classifier",
"Logistic Regression",
"SVM",
"VotingClassifier"]
best_model_testing_score = [
testing_score_rf,
testing_score_lgr,
testing_score_svm,
"-"
]
best_model_training_score = [
training_score_rf,
training_score_lgr,
training_score_svm,
"-"
]<save_to_csv> | test, test_pId = prep_nn_2(test_raw.copy(deep=True))
test_dl = learn.dls.test_dl(test)
preds, _ = learn.get_preds(dl=test_dl)
preds = np.argmax(preds, 1 ).numpy()
submission = pd.DataFrame(
{'PassengerId': test_pId,
'Survived': preds}
)
submission.to_csv('submission_feature_eng.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,577,228 | def model_to_submission_file(model, full_X_train, full_y_train, full_X_test, nameOfcsvfile):
    print("\n--- Fitting Model ---")
    starttime = time.monotonic()
    model_clf = model.fit(full_X_train, full_y_train)
    print("\n--- Predicting Cuisines ---")
    submission_pred = model_clf.predict(full_X_test)
    test_cuisine = lb_enc.inverse_transform(submission_pred)
    test_id = [recipe['id'] for recipe in test]
    submission_df = pd.DataFrame({'id': test_id, 'cuisine': test_cuisine}, columns=['id', 'cuisine'])
    submission_df.to_csv('{}'.format(nameOfcsvfile), index=False)
    print("\n--- Results have been saved ---")
    print("\nThat took ", (time.monotonic() - starttime)/60, " minutes")<save_to_csv> | splits = RandomSplitter(valid_pct=0.01, seed=42)(train.index)
cfg = tabular_config(embed_p=0.25, ps=0.05)
dls = TabularPandas(train,
cont_names=cont_names,
cat_names=cat_names,
procs=procs,
y_names=dep_var,
splits=splits ).dataloaders(bs=64)
learn = tabular_learner(dls, layers=[500,200,100], metrics=[accuracy], config=cfg ) | Titanic - Machine Learning from Disaster |
14,577,228 | ovr_svm = OneVsRestClassifier(svm_clf, n_jobs = 4)
model_to_submission_file(model=ovr_svm,
full_X_train=prep_train_text_features,
full_y_train=y_enc_labels,
full_X_test=prep_submission_text_features,
nameOfcsvfile="svm_model.csv")
<create_dataframe> | learn.fit_one_cycle(5, lr_max=slice(0.004, 5e-2, 0.05))
learn.fit_one_cycle(15, lr_max=slice(0.005, 1e-2, 0.005)) | Titanic - Machine Learning from Disaster |
14,577,228 | model_testing_score = [0.75663, 0.78751, 0.81999, 0.81989]
pd.DataFrame({"Model":model_names,
"Training Score":best_model_training_score,
"Testing Score": best_model_testing_score,
"Submission Score": model_testing_score},
columns=["Model","Training Score", "Testing Score", "Submission Score"] )<set_options> | test, test_pId = prep_nn_2(test_raw.copy(deep=True))
test_dl = learn.dls.test_dl(test)
preds, _ = learn.get_preds(dl=test_dl)
preds = np.argmax(preds, 1 ).numpy()
submission = pd.DataFrame(
{'PassengerId': test_pId,
'Survived': preds}
)
submission.to_csv('submission_feature_eng_full.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | %matplotlib inline<load_from_disk> | pd.options.mode.chained_assignment = None
%matplotlib inline
warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
14,418,549 | rawdf_te = pd.read_json(path_or_buf='../input/test.json')
rawdf_te.head(n=3)
rawdf_tr = pd.read_json(path_or_buf='../input/train.json')
rawdf_tr.head(n=3)<filter> | train_data = pd.read_csv("../input/titanic/train.csv", index_col="PassengerId")
test_data = pd.read_csv("../input/titanic/test.csv", index_col="PassengerId") | Titanic - Machine Learning from Disaster |
14,418,549 | print(rawdf_tr['ingredients'].loc[41935])
print(rawdf_tr['ingredients'].loc[27566])
print(rawdf_tr['ingredients'].loc[32596])
print(rawdf_tr['ingredients'].loc[8476] )<create_dataframe> | print(train_data.isna().sum() ) | Titanic - Machine Learning from Disaster |
14,418,549 | ingredients_tr = rawdf_tr['ingredients']
ingredients_te = rawdf_te['ingredients']<feature_engineering> | print(test_data.isna().sum() ) | Titanic - Machine Learning from Disaster |
14,418,549 | def sub_match(pattern, sub_pattern, ingredients):
    for i in ingredients.index.values:
        for j in range(len(ingredients[i])):
            ingredients[i][j] = re.sub(pattern, sub_pattern, ingredients[i][j].strip())
            ingredients[i][j] = ingredients[i][j].strip()
    re.purge()
    return ingredients
def regex_sub_match(series):
    p0 = re.compile(r'\s*(oz|ounc|ounce|pound|lb|inch|inches|kg|to)\s*[^a-z]')
    series = sub_match(p0, ' ', series)
    p1 = re.compile(r'\d+')
    series = sub_match(p1, ' ', series)
    p2 = re.compile(r'[^\w]')
series = sub_match(p2, ' ', series)
return series<feature_engineering> | print(train_data["Pclass"].unique())
train_data[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | lemmatizer = WordNetLemmatizer()
def lemma(series):
    for i in series.index.values:
        for j in range(len(series[i])):
            series[i][j] = series[i][j].strip()
            token = TK(series[i][j])
            for k in range(len(token)):
token[k] = lemmatizer.lemmatize(token[k])
token = ' '.join(token)
series[i][j] = token
return series<categorify> | train_data.Name[1].split() | Titanic - Machine Learning from Disaster |
14,418,549 | ingredients_tr = lemma(ingredients_tr)
ingredients_te = lemma(ingredients_te )<feature_engineering> | train_data = train_data.assign(fname = train_data.Name.str.split("," ).str[0])
train_data["title"] = pd.Series([i.split(",")[1].split(".")[0].strip() for i in train_data.Name], index=train_data.index ) | Titanic - Machine Learning from Disaster |
14,418,549 | rawdf_tr['ingredients_lemma'] = ingredients_tr
rawdf_tr['ingredients_lemma_string'] = [' '.join(_).strip() for _ in rawdf_tr['ingredients_lemma']]
rawdf_te['ingredients_lemma'] = ingredients_te
rawdf_te['ingredients_lemma_string'] = [' '.join(_).strip() for _ in rawdf_te['ingredients_lemma']]<create_dataframe> | test_data = test_data.assign(fname = test_data.Name.str.split(",").str[0])
test_data["title"] = pd.Series([i.split(",")[1].split(".")[0].strip() for i in test_data.Name], index=test_data.index)
train_data.drop("Name", axis=1, inplace=True)
test_data.drop("Name", axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,418,549 | traindf = rawdf_tr[['cuisine', 'ingredients_lemma_string']].reset_index(drop=True)
testdf = rawdf_te[['ingredients_lemma_string']]<prepare_x_and_y> | print(test_data.fname.nunique())
print(test_data.title.nunique() ) | Titanic - Machine Learning from Disaster |
14,418,549 | X_train = traindf['ingredients_lemma_string']
vectorizertr = TfidfVectorizer(stop_words='english', analyzer="word", max_df=0.65, min_df=2, binary=True)
X_train = vectorizertr.fit_transform(X_train)
y_train = traindf['cuisine']
le = LE()
y_train_ec = le.fit_transform(y_train)
X_pred = testdf['ingredients_lemma_string']
vectorizerts = TfidfVectorizer(stop_words='english')
X_pred = vectorizertr.transform(X_pred)
<save_to_csv> | train_data["title"] = train_data['title'].replace(other_titles, 'Other')
train_data["title"] = train_data["title"].map({"Mr": 0, "Miss": 1, "Ms": 1, "Mme": 1, "Mlle": 1, "Mrs": 1, "Master": 2, "Other": 3})
test_data["title"] = test_data['title'].replace(other_titles, 'Other')
test_data["title"] = test_data["title"].map({"Mr": 0, "Miss": 1, "Ms": 1, "Mme": 1, "Mlle": 1, "Mrs": 1, "Master": 2, "Other": 3}) | Titanic - Machine Learning from Disaster |
14,418,549 | clf_ovrc_svm = SVC(C=3.25, cache_size=500, class_weight=None, coef0=0.0,\
decision_function_shape='ovr', degree=3, gamma=1, kernel='rbf',\
max_iter=-1, probability=False, random_state=0, shrinking=True,\
tol=0.001, verbose=False)
clf_ovrc_svm = clf_ovrc_svm.fit(X_train, y_train)
y_pred_ovrc_svm = clf_ovrc_svm.predict(X_pred)
testdf['cuisine'] = y_pred_ovrc_svm
d = pd.DataFrame(data=testdf['cuisine'], index=testdf.index ).sort_index().reset_index().to_csv('submission_ovr_svm.csv', index=False )<prepare_x_and_y> | print(train_data.title)
print(test_data.title.isna().sum() ) | Titanic - Machine Learning from Disaster |
14,418,549 | x = pd.read_json('../input/train.json')
y = pd.read_json('../input/test.json')<import_modules> | oh = OneHotEncoder(handle_unknown="ignore", sparse = False)
train_data = train_data.join(pd.DataFrame(oh.fit_transform(train_data[["fname", "title"]]), index = train_data.index))
test_data = test_data.join(pd.DataFrame(oh.transform(test_data[["fname", "title"]]), index = test_data.index))
train_data.drop("fname", axis = 1, inplace = True)
test_data.drop("fname", axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
14,418,549 | z=x['cuisine']
<string_transform> | print(train_data["Sex"].unique())
train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | x['separated_ing']=x['ingredients'].map(lambda x: ' '.join(x))
y['separated_ing']=y['ingredients'].map(lambda x: ' '.join(x))
<feature_engineering> | interactions = train_data.assign(sex_class = train_data['Sex'] + "_" + train_data['Pclass'].astype("str"))
interactions[['sex_class', 'Survived']].groupby(['sex_class'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | def purify(f):
    f = f.lower()
    f = re.sub('[%s]' % re.escape(string.punctuation), '', f)
    f = re.sub(r'\s+', ' ', f)
    return f
x['cleared_ing'] = x['separated_ing'].map(lambda g: purify(g))
y['cleared_ing'] = y['separated_ing'].map(lambda g: purify(g))<feature_engineering> | train_data = train_data.assign(sex_class = train_data['Sex'] + "_" + train_data['Pclass'].astype("str"))
test_data = test_data.assign(sex_class = test_data['Sex'] + "_" + test_data['Pclass'].astype("str")) | Titanic - Machine Learning from Disaster |
14,418,549 | sb = SnowballStemmer('english')
def stemmer(f):
    lists = [sb.stem(c) for c in f.split(" ")]
    return lists
l = WordNetLemmatizer()
def lemmar(f):
    lists = [l.lemmatize(g) for g in f.split(" ")]
    return lists
x['separated_ing_stemmed'] = [stemmer(i) for i in x['cleared_ing']]
x['separated_ing_stemmed'] = x['separated_ing_stemmed'].map(lambda x: ' '.join(x))
x['separated_ing_lemma'] = [lemmar(i) for i in x['separated_ing_stemmed']]
x['separated_ing_lemma'] = x['separated_ing_lemma'].map(lambda x: ' '.join(x))
y['separated_ing_stemmed'] = [stemmer(i) for i in y['cleared_ing']]
y['separated_ing_stemmed'] = y['separated_ing_stemmed'].map(lambda x: ' '.join(x))
y['separated_ing_lemma'] = [lemmar(i) for i in y['separated_ing_stemmed']]
y['separated_ing_lemma'] = y['separated_ing_lemma'].map(lambda x: ' '.join(x))<prepare_x_and_y> | train_data["Sex"] = train_data["Sex"].map({"female":0, "male":1})
test_data["Sex"] = test_data["Sex"].map({"female":0, "male":1} ) | Titanic - Machine Learning from Disaster |
14,418,549 | x.columns
x=x.drop(['ingredients','separated_ing','cleared_ing','separated_ing_stemmed'],axis=1)
y=y.drop(['ingredients','separated_ing','cleared_ing','separated_ing_stemmed'],axis=1 )<string_transform> | train_data["sex_class"] = train_data["sex_class"].map({"female_1":0, "female_2":1, "female_3":2, "male_1":4, "male_2":5, "male_3":6})
test_data["sex_class"] = test_data["sex_class"].map({"female_1":0, "female_2":1, "female_3":2, "male_1":4, "male_2":5, "male_3":6} ) | Titanic - Machine Learning from Disaster |
14,418,549 | lists = list(ENGLISH_STOP_WORDS) + stopwords.words()<import_modules> | def find_similar_passengers(pid, dataset):
    subset = dataset[(dataset.title == dataset.title[pid]) &
                     (dataset.Pclass == dataset.Pclass[pid])]
    if pd.isna(subset["Age"].mean()):
        subset = dataset[dataset["sex_class"] == dataset.loc[pid, "sex_class"]]
    if pd.isna(subset["Age"].mean()):
        subset = dataset[dataset["Sex"] == dataset.loc[pid, "Sex"]]
    age = subset["Age"].mean()
    return age | Titanic - Machine Learning from Disaster |
14,418,549 | from sklearn.feature_extraction.text import TfidfVectorizer as tfidf, CountVectorizer as cv<define_variables> | no_ages = train_data[train_data["Age"].isna()].index
for pid in no_ages:
    train_data.loc[pid, "Age"] = find_similar_passengers(pid, train_data)
no_ages_test = test_data[test_data["Age"].isna()].index
for pid2 in no_ages_test:
    test_data.loc[pid2, "Age"] = find_similar_passengers(pid2, test_data) | Titanic - Machine Learning from Disaster |
14,418,549 | z=x['cuisine']<categorify> | train_data["age_group"] = pd.cut(train_data["Age"], bins=[0,5,65,100], labels=[0,1,2] ).astype("int64")
test_data["age_group"] = pd.cut(test_data["Age"], bins=[0,5,65,100], labels=[0,1,2] ).astype("int64" ) | Titanic - Machine Learning from Disaster |
14,418,549 | tfidf1=tfidf(max_df=0.9,stop_words=lists,analyzer=u'word')
train=tfidf1.fit_transform(x['separated_ing_lemma'])
test=tfidf1.transform(y['separated_ing_lemma'] )<import_modules> | train_data[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV as gsc
from sklearn.ensemble import RandomForestClassifier
from xgboost.sklearn import XGBClassifier as xgb
from lightgbm import LGBMClassifier as lgb
from sklearn.linear_model import LogisticRegression as lr<set_options> | train_data[['Parch', 'Survived']].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | svm={'C':[6]}
<train_model> | train_data["fsize"] = train_data["SibSp"] + train_data["Parch"] + 1
test_data["fsize"] = test_data["SibSp"] + test_data["Parch"] + 1 | Titanic - Machine Learning from Disaster |
14,418,549 | p=le().fit(z )<categorify> | train_data[['fsize', 'Survived']].groupby(['fsize'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | z=p.transform(z )<split> | print(train_data.Ticket.nunique())
print(train_data.Ticket.tail() ) | Titanic - Machine Learning from Disaster |
14,418,549 | xtrain, xtest, ztrain, ztest = tts(train, z, train_size=0.7)<choose_model_class> | train_data["ticket_prefix"] = pd.Series([len(i.split()) > 1 for i in train_data.Ticket], index=train_data.index)
| Titanic - Machine Learning from Disaster |
14,418,549 | r1=lgb(n_estimators=500,max_depth=7,objective='multiclass',metric='multi_logloss',num_classes=20,bagging_fraction=0.6,feature_fraction=0.6 )<set_options> | train_data[['ticket_prefix', 'Survived']].groupby(['ticket_prefix'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | a=gsc(lr() ,svm )<set_options> | train_data.drop("ticket_prefix", axis=1, inplace=True)
train_data.drop("Ticket", axis=1, inplace=True)
test_data.drop("Ticket", axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,418,549 | k={'n_neighbors':[5,7,9]}
k1=gsc(knn() ,k )<import_modules> | train_data["Embarked"] = train_data["Embarked"].fillna("S")
train_data[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,418,549 | from sklearn.multiclass import OneVsRestClassifier as orc
from sklearn.ensemble import VotingClassifier as vc
<choose_model_class> | train_data["Embarked"] = train_data["Embarked"].fillna("S")
print(train_data.Embarked.isna().sum() ) | Titanic - Machine Learning from Disaster |
14,418,549 | v=vc(estimators=[('lr',a),('k1',k1),('lg',r1)],voting='soft' )<train_model> | train_data = train_data.join(pd.get_dummies(train_data['Embarked'], prefix="Embarked_"))
test_data = test_data.join(pd.get_dummies(test_data['Embarked'], prefix="Embarked_")) | Titanic - Machine Learning from Disaster |
14,418,549 | v.fit(xtrain,ztrain )<compute_test_metric> | train_data.drop("Embarked", axis=1, inplace=True)
test_data.drop("Embarked", axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,418,549 | print(accuracy_score(ztest,v.predict(xtest)) )<predict_on_test> | train_data.drop("Cabin", axis=1, inplace=True)
test_data.drop("Cabin", axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,418,549 | z1=v.predict(test )<feature_engineering> | ss = StandardScaler()
train_y = train_data["Survived"]
train_data.drop("Survived", axis=1, inplace=True)
scoring_method = "f1"
train_scaled = ss.fit_transform(train_data)
test_scaled = ss.transform(test_data ) | Titanic - Machine Learning from Disaster |
14,418,549 | z=p.inverse_transform(z1 )<create_dataframe> | print(train_data.isna().sum())
print(test_data.isna().sum() ) | Titanic - Machine Learning from Disaster |
14,418,549 | ff=pd.DataFrame(z,index=y['id'],columns=['cuisine'] )<rename_columns> | model = LogisticRegression(random_state=10, max_iter = 1000)
logit_params = {
"C": [1, 3, 10, 20, 30, 40],
"solver": ["lbfgs", "liblinear"]
}
logit_gs = GridSearchCV(model, logit_params, scoring="f1", cv = 5, n_jobs=4)
| Titanic - Machine Learning from Disaster |
14,418,549 | ff.index.name='id'<save_to_csv> | logit_gs.fit(train_data, train_y ) | Titanic - Machine Learning from Disaster |
14,418,549 | ff.to_csv('aagya.csv' )<set_options> | print(logit_gs.best_params_)
print(logit_gs.best_score_ ) | Titanic - Machine Learning from Disaster |
14,418,549 | Image(".. /input/ratatouille/ratatouille.jpg" )<set_options> | svc_model = SVC()
test_parameters = {
"C": [1, 3, 10, 30, 100],
"kernel": ["linear", "poly", "rbf" , "sigmoid"],
}
svc_gs = GridSearchCV(svc_model, test_parameters, scoring="f1", cv=5, n_jobs=4 ) | Titanic - Machine Learning from Disaster |
14,418,549 | %matplotlib inline
plt.rcParams["figure.figsize"] =(15,10 )<load_from_disk> | svc_gs.fit(train_scaled, train_y ) | Titanic - Machine Learning from Disaster |
14,418,549 | data = pd.read_json("../input/whats-cooking-kernels-only/train.json")
test = pd.read_json("../input/whats-cooking-kernels-only/test.json")
data.head()<count_missing_values> | print(svc_gs.best_params_)
print(svc_gs.best_score_ ) | Titanic - Machine Learning from Disaster |
14,418,549 | data.isnull().values.any()
data.dropna(axis=0, how='any',inplace = True)
data.isnull().values.any()
data.isnull().sum()<string_transform> | lgb_model = LGBMClassifier()
test_parameters = {
"n_estimators": [int(x)for x in np.linspace(5, 30, 6)],
"reg_alpha": [0, 0.75, 1, 1.25],
"learning_rate": [0.5, 0.4, 0.35, 0.3, 0.25, 0.2],
"subsample": [0.5, 0.75, 1]
}
lgb_gs = GridSearchCV(lgb_model, test_parameters, scoring=scoring_method, cv=8, n_jobs=4 ) | Titanic - Machine Learning from Disaster |
14,418,549 | data.ingredients =data.ingredients.str.join(' ')
test.ingredients =test.ingredients.str.join(' ' )<feature_engineering> | lgb_gs.fit(train_data, train_y ) | Titanic - Machine Learning from Disaster |
14,418,549 | vect = HashingVectorizer()
features = vect.fit_transform(data.ingredients)
testfeatures = vect.transform(test.ingredients )<define_variables> | print(lgb_gs.best_params_)
print(lgb_gs.best_score_ ) | Titanic - Machine Learning from Disaster |
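Unlike the TfidfVectorizer used earlier, HashingVectorizer is stateless: tokens are hashed straight to column indices, so no vocabulary is fitted and transform behaves identically on train and test text (fit_transform is fit-free in effect). A minimal sketch:

```python
from sklearn.feature_extraction.text import HashingVectorizer

vect = HashingVectorizer(n_features=2**10)  # fixed output width, no fit state
X = vect.transform(["romaine lettuce black olives", "plain flour sugar"])
print(X.shape)  # (2, 1024)
```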
14,418,549 | labels = data.cuisine<split> | rf_model = RandomForestClassifier()
rf_params ={
'bootstrap': [True, False],
'max_depth': [10, None],
'max_features': ['auto', 'sqrt'],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
'n_estimators': [5, 10, 15, 20, 25, 30]}
rf_gs = GridSearchCV(rf_model, rf_params, scoring=scoring_method, cv=8, n_jobs=4 ) | Titanic - Machine Learning from Disaster |
14,418,549 | X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2 )<train_model> | rf_gs.fit(train_data, train_y ) | Titanic - Machine Learning from Disaster |
14,418,549 | start = time.time()
log_reg = LogisticRegression(C=12)
log_reg.fit(X_train,y_train)
print("Accuracy: ",log_reg.score(X_test, y_test))
print("Time: " , time.time() - start )<train_model> | print(rf_gs.best_params_)
print(rf_gs.best_score_ ) | Titanic - Machine Learning from Disaster |
14,418,549 | start = time.time()
linear_svm = LinearSVC(random_state=0, max_iter = 1500)
linear_svm.fit(X_train, y_train)
print("Accuracy: ",linear_svm.score(X_test, y_test))
print("Time: " , time.time() - start )<train_model> | ensemble_model = VotingClassifier(estimators=[
("logit", logit_gs.best_estimator_),
("rf", rf_gs.best_estimator_),
("svc", svc_gs.best_estimator_),
("lgb", lgb_gs.best_estimator_),
], voting = "hard" ) | Titanic - Machine Learning from Disaster |
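voting="hard" above means the ensemble returns the majority label across the four tuned members rather than averaging probabilities (that would be voting="soft"). A self-contained sketch on synthetic data:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

# Hard voting: each fitted member predicts a label, and the majority wins.
X, y = make_classification(n_samples=100, random_state=0)
vote = VotingClassifier(estimators=[("logit", LogisticRegression(max_iter=1000)),
                                    ("tree", DecisionTreeClassifier(random_state=0))],
                        voting="hard")
vote.fit(X, y)
print(vote.predict(X[:5]))
```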
14,418,549 | start = time.time()
rbf_svm = SVC(kernel='rbf', gamma=0.8, C=12)
rbf_svm.fit(X_train, y_train)
print("Accuracy: ",rbf_svm.score(X_test, y_test))
print("Time: " , time.time() - start )<save_to_csv> | ensemble_model.fit(train_data, train_y ) | Titanic - Machine Learning from Disaster |
14,418,549 | prediction = rbf_svm.predict(testfeatures)
sub = pd.DataFrame({'id':test.id,'cuisine':prediction})
output = sub[['id','cuisine']]
output.to_csv("sample_submission.csv",index = False )<import_modules> | ensemble_model.score(train_data, train_y ) | Titanic - Machine Learning from Disaster |
14,418,549 | import numpy as np
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
from nltk.stem import PorterStemmer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer,HashingVectorizer<load_from_disk> | preds = ensemble_model.predict(test_data ) | Titanic - Machine Learning from Disaster |
14,418,549 | df = pd.read_json(".. /input/train.json")
testset = pd.read_json(".. /input/test.json" )<count_missing_values> | output = pd.DataFrame({'PassengerId': test_data.index,
'Survived': preds})
output.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,356,555 | df.isnull().sum()<count_missing_values> | train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train['type'] = 'train'
test['type'] = 'test'
alldata = pd.concat([train,test])
alldata.head() | Titanic - Machine Learning from Disaster |
14,356,555 | testset.isnull().sum()<data_type_conversions> | alldata['titles']=alldata['titles'].str.replace('Don','Mr')
alldata['titles']=alldata['titles'].str.replace('Rev','Mr')
alldata['titles']=alldata['titles'].str.replace('Dr','Mr')
alldata['titles']=alldata['titles'].str.replace('Capt','Mr')
alldata['titles']=alldata['titles'].str.replace('Jonkheer','Mr')
alldata['titles']=alldata['titles'].str.replace('Lady','Mrs')
alldata['titles']=alldata['titles'].str.replace('Mme','Mrs')
alldata['titles']=alldata['titles'].str.replace('Ms','Mrs')
alldata['titles']=alldata['titles'].str.replace('Mlle','Mrs')
alldata['titles']=alldata['titles'].str.replace('the Countess','Mrs')
alldata['titles']=alldata['titles'].str.replace('Sir','Master')
alldata['titles']=alldata['titles'].str.replace('Major','Master')
alldata['titles']=alldata['titles'].str.replace('Col','Master') | Titanic - Machine Learning from Disaster |
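The replace chain above consumes alldata['titles'], but the cell that creates that column is not among the rows shown; a plausible, hypothetical derivation from Name:

```python
# Hypothetical reconstruction of the missing 'titles' extraction step.
alldata['titles'] = alldata['Name'].str.extract(r',\s*([^.]+)\.', expand=False).str.strip()
```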
14,356,555 | df.ingredients = df.ingredients.astype('str')
testset.ingredients = testset.ingredients.astype('str' )<define_variables> | alldata['titles'].value_counts() | Titanic - Machine Learning from Disaster |
14,356,555 | df.ingredients = df.ingredients.str.replace("[", " ", regex=False)
df.ingredients = df.ingredients.str.replace("]", " ", regex=False)
df.ingredients = df.ingredients.str.replace("'", " ", regex=False)
df.ingredients = df.ingredients.str.replace(",", " ", regex=False)<feature_engineering> | alldata.drop('Name',axis=1,inplace=True) | Titanic - Machine Learning from Disaster |
14,356,555 | testset.ingredients = testset.ingredients.str.replace("[", " ", regex=False)
testset.ingredients = testset.ingredients.str.replace("]", " ", regex=False)
testset.ingredients = testset.ingredients.str.replace("'", " ", regex=False)
testset.ingredients = testset.ingredients.str.replace(",", " ", regex=False)<string_transform> | alldata.drop('Cabin',axis=1,inplace=True) | Titanic - Machine Learning from Disaster |
14,356,555 | df.ingredients = df.ingredients.apply(lambda x: word_tokenize(x))
testset.ingredients = testset.ingredients.apply(lambda x: word_tokenize(x))<choose_model_class> | alldata.isnull().sum() | Titanic - Machine Learning from Disaster |
14,356,555 | lemmatizer = WordNetLemmatizer()<normalization> | alldata['Family_size'] = alldata['SibSp'] + alldata['Parch'] | Titanic - Machine Learning from Disaster |
14,356,555 | def lemmat(wor):
l = []
for i in wor:
l.append(lemmatizer.lemmatize(i))
return l<feature_engineering> | agegroups = alldata['Age']
agegroups
groups = []
for ages in agegroups:
if ages < 18:
groups.append('Child')
else:
groups.append('Adult')
groups = pd.DataFrame(groups,columns=['Child'])
len(groups)
| Titanic - Machine Learning from Disaster |
14,356,555 | df.ingredients = df.ingredients.apply(lemmat)
testset.ingredients = testset.ingredients.apply(lemmat )<data_type_conversions> | alldata['Child'] = groups
alldata.head()
alldata['Child'] = pd.get_dummies(alldata['Child'],drop_first=True)
alldata.head() | Titanic - Machine Learning from Disaster |