kernel_id int64 24.2k–23.3M | prompt stringlengths 8–1.85M | completion stringlengths 1–182k | comp_name stringlengths 5–57 |
|---|---|---|---|
22,340,798 | listNull = [x for x in [(row,adult[row].isnull().sum())for row in adult] if x[1]!=0]
print(listNull )<drop_column> | X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.25, random_state=1)
print(type(X_train))
| Titanic - Machine Learning from Disaster |
22,340,798 | adult.drop(adult.index[0], inplace=True )<count_values> | clf_mae=[]
for md in range(8):
clf = RandomForestClassifier(max_depth=(md+1), random_state=1)
clf.fit(X_train, y_train)
predictions = clf.predict(X_val)
mae =(1.- mean_absolute_error(predictions, y_val)) *100.
clf_mae.append(mae)
print(clf_mae ) | Titanic - Machine Learning from Disaster |
22,340,798 | adult["Workclass"].value_counts()
adult["Occupation"].value_counts()
<define_variables> | clf = RandomForestClassifier(max_depth=4, random_state=1)
clf.fit(X_train, y_train)
predictions = clf.predict(X_val)
mae =(1.- mean_absolute_error(predictions, y_val)) *100.
print(mae ) | Titanic - Machine Learning from Disaster |
22,340,798 | firstW = ["United-States",
"Germany",
"Canada",
"England",
"Italy",
"Japan",
"Poland",
"Portugal",
"Taiwan",
"France",
"Greece",
"Ireland",
"Hong",
"Yugoslavia",
"Hungary",
"Scotland",
"Holand-Netherlands"]<categorify> | logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
predictions = logreg.predict(X_val)
mae = mean_absolute_error(predictions, y_val)
print(( 1-mae)*100,"%")
| Titanic - Machine Learning from Disaster |
22,340,798 | adult["Race"] = adult["Race"].transform(lambda x: 1 if x=="White" else 0 if x==x else x)
adult["Country"] = adult["Country"].transform(lambda x: 1 if x in firstW else -1 if x == "Mexico" else 0 if x==x else x)
adult["Sex"].replace({"Male":1,"Female":0}, inplace=True )<feature_engineering> | missing_test_total = test_data.isnull().sum().sort_values(ascending= False)
missing_test_total.head() | Titanic - Machine Learning from Disaster |
22,340,798 | adult["Age"] = adult["Age"].transform(lambda x: int(x))
adult["Age"] = adult["Age"].transform(lambda x: 2*(x - x.mean())/ x.std())
adult["Workclass"] = adult["Workclass"].transform(lambda x: 1 if x in ["Without-pay", "Never-worked"] else 0 if x==x else x)
adult["Occupation"] = adult["Occupation"].transform(lambda x: 1 if x in ["Priv-house-serv","Handlers-cleaners","Transport-moving"] else 0 if x==x else x)
<feature_engineering> | Pclass_Sex_Age_median = test_data.groupby(['Pclass','Sex'] ).Age.transform('median')
test_data.Age.fillna(Pclass_Sex_Age_median, inplace = True)
Pclass_Fare_median = test_data.groupby('Pclass' ).Fare.transform('median')
test_data.Fare.fillna(Pclass_Fare_median, inplace = True)
test_data['Embarked'].replace({'S': 0, 'Q': 1, 'C': 2}, inplace = True)
test_data['Sex'].replace({'female': 0, 'male': 1}, inplace = True)
| Titanic - Machine Learning from Disaster |
22,340,798 | adult["MissingCountry"] = adult["Country"].transform(lambda x: 1 if x==x else 0)
adult["Country"].fillna(adult["Country"].mean() , inplace=True)
adult["MissingWorkclass"] = adult["Workclass"].transform(lambda x: 1 if x==x else 0)
adult["Workclass"].fillna(adult["Workclass"].mean() , inplace=True)
adult["MissingOccupation"] = adult["Occupation"].transform(lambda x:1 if x==x else 0)
adult["Occupation"].fillna(adult["Occupation"].mean() ,inplace=True )<correct_missing_values> | missing_test_total = test_data.isnull().sum().sort_values(ascending= False)
missing_test_total.head() | Titanic - Machine Learning from Disaster |
22,340,798 | nadult = adult.dropna()<count_values> | | Titanic - Machine Learning from Disaster |
22,340,798 | nadult["MissingCountry"].value_counts()<define_variables> | name_list_train = train_data.Name.to_list()
print(name_list_train ) | Titanic - Machine Learning from Disaster |
22,340,798 | testfilepath = ".. /input/adult-dataset/test_data.csv"<load_from_csv> | list_of_titles = title_retriever(name_list_train)
print(len(list_of_titles))
train_data['Title'] = list_of_titles
train_data.Title.value_counts() | Titanic - Machine Learning from Disaster |
22,340,798 | testAdult = pd.read_csv(testfilepath,
names=[
"Id","Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"],
sep=r'\s*,\s*',
engine='python',
na_values="?" )<count_values> | Title_Age_median = train_data.groupby(['Title'] ).Age.transform('median')
train_data.Age.fillna(Title_Age_median, inplace = True)
| Titanic - Machine Learning from Disaster |
22,340,798 | testAdult
testAdult["Workclass"].value_counts()<drop_column> | name_list_test = test_data.Name.to_list()
print(name_list_test ) | Titanic - Machine Learning from Disaster |
22,340,798 | testAdult.drop(testAdult.index[0], inplace=True )<drop_column> | list_of_titles_test = title_retriever(name_list_test)
print(len(list_of_titles_test))
test_data['Title'] = list_of_titles_test
test_data.Title.value_counts() | Titanic - Machine Learning from Disaster |
22,340,798 | testAdult.drop("Id",axis=1, inplace=True)
testAdult.drop("Target",axis=1, inplace=True )<count_missing_values> | Title_Age_median_test = test_data.groupby(['Title'] ).Age.transform('median')
test_data.Age.fillna(Title_Age_median_test, inplace = True)
| Titanic - Machine Learning from Disaster |
22,340,798 | listNull = [x for x in [(row,testAdult[row].isnull().sum())for row in testAdult] if x[1]!=0]
print(listNull )<feature_engineering> | X = train_data[features]
y = train_data.Survived
Pclass_Fare_median = X.groupby('Pclass' ).Fare.transform('median')
X.Fare.fillna(Pclass_Fare_median, inplace = True)
X.Embarked.fillna('S', inplace = True)
X['Embarked'].replace({'S': 0, 'Q': 1, 'C': 2}, inplace = True)
X['Sex'].replace({'female': 0, 'male': 1}, inplace = True)
X.info()
print(X.Sex)
| Titanic - Machine Learning from Disaster |
22,340,798 | testAdult["Race"] = testAdult["Race"].transform(lambda x: 1 if x=="White" else 0)
testAdult["Country"] = testAdult["Country"].transform(lambda x: 1 if x in firstW else -1 if x=="Mexico" else 0)
testAdult["Sex"] = testAdult["Sex"].transform(lambda x: 1 if x=="Male" else 0)
testAdult["Age"] = testAdult["Age"].transform(int)
testAdult["Age"] = testAdult["Age"].transform(lambda x: 2*(x - x.mean())/ x.std())
testAdult["Workclass"] = testAdult["Workclass"].transform(lambda x: 1 if x in ["Without-pay", "Never-worked"] else 0)
testAdult["Occupation"] = testAdult["Occupation"].transform(lambda x: 1 if x in ["Priv-house-serv","Handlers-cleaners","Transport-moving"] else 0)
<categorify> | X_test = test_data[features]
clf.fit(X, y)
predictions = clf.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
17,664,212 | testAdult["MissingCountry"] = testAdult["Country"].transform(lambda x: 1 if x==x else 0)
testAdult["Country"].fillna(testAdult["Country"].mean() , inplace=True)
testAdult["MissingWorkclass"] = testAdult["Workclass"].transform(lambda x: 1 if x==x else 0)
testAdult["Workclass"].fillna(testAdult["Workclass"].mean() , inplace=True)
testAdult["MissingOccupation"] = testAdult["Occupation"].transform(lambda x:1 if x==x else 0)
testAdult["Occupation"].fillna(testAdult["Occupation"].mean() ,inplace=True)
<count_values> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
17,664,212 | testAdult["Country"].value_counts()
<define_variables> | women = train_data.loc[train_data.Sex == 'female']['Survived']
print('Women survived',sum(women)/len(women))
men = train_data.loc[train_data.Sex == 'male']['Survived']
print('Men survived',sum(men)/len(men)) | Titanic - Machine Learning from Disaster |
17,664,212 | nTestAdult = testAdult<prepare_x_and_y> | train_data['female'] = pd.get_dummies(train_data['Sex'])['female']
test_data['female'] = pd.get_dummies(test_data['Sex'])['female'] | Titanic - Machine Learning from Disaster |
17,664,212 | Xadult = nadult[["Age", "Workclass","Education-Num","Occupation","Race", "Sex","Capital Gain", "Capital Loss", "Hours per week", "Country"]]
<prepare_x_and_y> | sum(train_data['Age'].isnull())
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
test_data['Age'] = test_data['Age'].fillna(test_data['Age'].mean() ) | Titanic - Machine Learning from Disaster |
17,664,212 | Yadult = nadult.Target<drop_column> | high_fare = train_data.loc[train_data.Fare > 100]['Survived']
print('High fare survivors',sum(high_fare)/len(high_fare))
low_fare = train_data.loc[train_data.Fare < 32]['Survived']
print('High fare survivors',sum(low_fare)/len(low_fare)) | Titanic - Machine Learning from Disaster |
17,664,212 | XtestAdult = nTestAdult[["Age", "Workclass","Education-Num","Occupation","Race", "Sex","Capital Gain", "Capital Loss", "Hours per week", "Country"]]
<count_values> | pclass1 = train_data.loc[train_data.Pclass == 1]['Survived']
print('Class1',sum(pclass1)/len(pclass1))
pclass2 = train_data.loc[train_data.Pclass == 2]['Survived']
print('Class2',sum(pclass2)/len(pclass2))
pclass3 = train_data.loc[train_data.Pclass == 3]['Survived']
print('Class3',sum(pclass3)/len(pclass3)) | Titanic - Machine Learning from Disaster |
17,664,212 | XtestAdult["Country"].value_counts()<import_modules> | sum(test_data.Pclass.isna() ) | Titanic - Machine Learning from Disaster |
17,664,212 | from sklearn.neighbors import KNeighborsClassifier<import_modules> | train_data['class1'] = pd.get_dummies(train_data.Pclass)[1]
test_data['class1'] = pd.get_dummies(test_data.Pclass)[1]
train_data['class2'] = pd.get_dummies(train_data.Pclass)[2]
test_data['class2'] = pd.get_dummies(test_data.Pclass)[2] | Titanic - Machine Learning from Disaster |
17,664,212 | from sklearn.neighbors import KNeighborsClassifier<import_modules> | sum(test_data.SibSp.isna() ) | Titanic - Machine Learning from Disaster |
17,664,212 | from sklearn.neighbors import KNeighborsClassifier<choose_model_class> | sibs = train_data.loc[train_data.SibSp <= 1]['Survived']
print(sum(sibs)/len(sibs))
train_data['many_sibs'] =(train_data.SibSp > 1)*1
test_data['many_sibs'] =(test_data.SibSp > 1)*1 | Titanic - Machine Learning from Disaster |
17,664,212 | knn = KNeighborsClassifier(n_neighbors=25 )<import_modules> | young = train_data.loc[train_data.Age <= 15]['Survived']
print(sum(young)/len(young))
old = train_data.loc[train_data.Age >=40]['Survived']
print(sum(old)/len(old)) | Titanic - Machine Learning from Disaster |
17,664,212 | from sklearn.model_selection import cross_val_score<compute_train_metric> | bins = [0.42, 15, 30, 50,80]
train_data['bin_age'] = pd.cut(x=train_data.Age, bins=bins)
test_data['bin_age'] = pd.cut(x=test_data.Age, bins=bins ) | Titanic - Machine Learning from Disaster |
17,664,212 | scores = cross_val_score(knn, Xadult, Yadult, cv=10 )<train_model> | train_data['young'] = pd.get_dummies(train_data.bin_age ).iloc[:,0]
test_data['young'] = pd.get_dummies(test_data.bin_age ).iloc[:,0]
train_data['senior'] = pd.get_dummies(train_data.bin_age ).iloc[:,3]
test_data['senior'] = pd.get_dummies(test_data.bin_age ).iloc[:,3] | Titanic - Machine Learning from Disaster |
17,664,212 | knn.fit(Xadult,Yadult )<predict_on_test> | from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix | Titanic - Machine Learning from Disaster |
17,664,212 | YtestPred = knn.predict(XtestAdult )<define_variables> | X = train_data[features]
y = train_data.Survived
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 0 ) | Titanic - Machine Learning from Disaster |
17,664,212 | savepath = "results.csv"<save_to_csv> | log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)
y_pred | Titanic - Machine Learning from Disaster |
17,664,212 | ypanda = pd.DataFrame(YtestPred, columns = ["income"])
ypanda.to_csv(savepath, index_label="Id")
ypanda<load_from_csv> | accuracy_score(y_pred, y_test ) | Titanic - Machine Learning from Disaster |
17,664,212 | adult = pd.read_csv(".. /input/dataset-adult/train_data.csv",sep=",", na_values="?" )<count_values> | confusion_matrix(y_pred, y_test ) | Titanic - Machine Learning from Disaster |
17,664,212 | adult['native.country'].value_counts()<drop_column> | model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test ) | Titanic - Machine Learning from Disaster |
17,664,212 | na_adult=adult.set_index("Id" ).dropna()<load_from_csv> | accuracy_score(y_pred, y_test ) | Titanic - Machine Learning from Disaster |
17,664,212 | test_adult= pd.read_csv(".. /input/dataset-adult/test_data.csv",sep=",",na_values="?" )<drop_column> | confusion_matrix(y_pred, y_test ) | Titanic - Machine Learning from Disaster |
17,664,212 | test_adult=test_adult.set_index("Id" )<prepare_x_and_y> | test_data.Fare = test_data.Fare.fillna(test_data.Fare.mean() ) | Titanic - Machine Learning from Disaster |
17,664,212 | X_adult = na_adult.iloc[:,:-1]
Y_adult = na_adult.income<prepare_x_and_y> | param_grid = {
'n_estimators': [200, 500],
'max_features': ['auto', 'sqrt', 'log2'],
'max_depth' : [4,5,6,7,8],
'criterion' :['gini', 'entropy']
}
| Titanic - Machine Learning from Disaster |
17,664,212 | X_test = test_adult.iloc[:,:]<prepare_x_and_y> | rfc1=RandomForestClassifier(random_state=42, max_features='log2', n_estimators= 200, max_depth=6, criterion='entropy')
rfc1.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
17,664,212 | <import_modules><EOS> | predictions = rfc1.predict(test_data[features])
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission3.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
22,360,789 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<find_best_model_class> | import numpy as np
import pandas as pd
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import CategoricalNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier | Titanic - Machine Learning from Disaster |
22,360,789 | knn=KNeighborsClassifier(n_neighbors=9)
scores = cross_val_score(knn,X_num,Y_num,cv=10)
knn.fit(X_num,Y_num)
Y_testpredict=knn.predict(X_test)
scores<data_type_conversions> | train_data = pd.read_csv('.. /input/titanic/train.csv')
test_data = pd.read_csv('.. /input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
22,360,789 | convert_cols=['workclass','education','marital.status','occupation','race','relationship',
'sex','native.country']
test_adult[convert_cols] = test_adult[convert_cols].astype(str )<categorify> | missing_values = train_data.isna().any()
print('Columns which have missing values:\n{0}'.format(missing_values[missing_values == True].index.tolist())) | Titanic - Machine Learning from Disaster |
22,360,789 | Xencode_adult= na_adult.iloc[:,:-1].apply(LabelEncoder().fit_transform)
Xencode_test_adult = test_adult.apply(LabelEncoder().fit_transform)
X_adult = Xencode_adult
X_test = Xencode_test_adult<categorify> | print("Percentage of missing values in `Age` column: {0:.2f}".format(100.*(train_data.Age.isna().sum() /len(train_data))))
print("Percentage of missing values in `Cabin` column: {0:.2f}".format(100.*(train_data.Cabin.isna().sum() /len(train_data))))
print("Percentage of missing values in `Embarked` column: {0:.2f}".format(100.*(train_data.Embarked.isna().sum() /len(train_data)))) | Titanic - Machine Learning from Disaster |
22,360,789 | Yfit_adult= LabelEncoder().fit(na_adult["income"])
Y_adult = Yfit_adult.transform(na_adult["income"] )<train_on_grid> | duplicates = train_data.duplicated().sum()
print('Duplicates in train data: {0}'.format(duplicates)) | Titanic - Machine Learning from Disaster |
22,360,789 | knn =KNeighborsClassifier(n_neighbors=10)
scores = cross_val_score(knn,X_adult,Y_adult,cv=10)
knn.fit(X_adult,Y_adult)
scores<predict_on_test> | categorical = train_data.nunique().sort_values(ascending=True)
print('Categorical variables in train data:\n{0}'.format(categorical)) | Titanic - Machine Learning from Disaster |
22,360,789 | Ytest_predict= knn.predict(X_test)
print(Ytest_predict )<prepare_x_and_y> | def clean_data(data):
data.drop(['Cabin'], axis=1, inplace=True)
data.drop(['Name', 'Ticket', 'Fare', 'Embarked'], axis=1, inplace=True)
return data
train_data = clean_data(train_data)
test_data = clean_data(test_data ) | Titanic - Machine Learning from Disaster |
22,360,789 | atributos=["age","workclass","education.num","occupation","sex","marital.status","capital.gain","capital.loss"]
X_adult = Xencode_adult[atributos]
X_test = Xencode_test_adult[atributos]<find_best_params> | train_data['Sex'].replace({'male':0, 'female':1}, inplace=True)
test_data['Sex'].replace({'male':0, 'female':1}, inplace=True)
all_data = pd.concat([train_data, test_data])
average = all_data.Age.median()
print("Average Age: {0}".format(average))
train_data.fillna(value={'Age': average}, inplace=True)
test_data.fillna(value={'Age': average}, inplace=True ) | Titanic - Machine Learning from Disaster |
22,360,789 | k_range=list(range(1,35))
k_scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X_adult, Y_adult, cv=10, scoring='accuracy')
k_scores.append(scores.mean())
print(k_scores )<train_model> | X = train_data.drop(['Survived', 'PassengerId'], axis=1)
y = train_data['Survived']
test_X = test_data.drop(['PassengerId'], axis=1 ) | Titanic - Machine Learning from Disaster |
22,360,789 | knn =KNeighborsClassifier(n_neighbors=27)
knn.fit(X_adult,Y_adult)
scores<predict_on_test> | best_models = {}
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
def print_best_parameters(hyperparameters, best_parameters):
value = "Best parameters: "
for key in hyperparameters:
value += str(key)+ ": " + str(best_parameters[key])+ ", "
if hyperparameters:
print(value[:-2])
def get_best_model(estimator, hyperparameters, fit_params={}):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=estimator, param_grid=hyperparameters, n_jobs=-1, cv=cv, scoring="accuracy")
best_model = grid_search.fit(train_X, train_y, **fit_params)
best_parameters = best_model.best_estimator_.get_params()
print_best_parameters(hyperparameters, best_parameters)
return best_model
def evaluate_model(model, name):
print("Accuracy score:", accuracy_score(train_y, model.predict(train_X)))
best_models[name] = model | Titanic - Machine Learning from Disaster |
22,360,789 | Ytest_predict= knn.predict(X_test )<save_to_csv> | hyperparameters = {
'solver' : ['newton-cg', 'lbfgs', 'liblinear'],
'penalty' : ['l2'],
'C' : [100, 10, 1.0, 0.1, 0.01]
}
estimator = LogisticRegression(random_state=1)
best_model_logistic = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | label_out = Yfit_adult.inverse_transform(Ytest_predict)
df_out = pd.DataFrame({'Id': X_test.index,'income':label_out})
df_out.to_csv('submission_adult.csv',index=False )<load_from_csv> | evaluate_model(best_model_logistic.best_estimator_, 'logistic' ) | Titanic - Machine Learning from Disaster |
22,360,789 | pd.read_csv("submission_adult.csv" )<set_options> | hyperparameters = {
'var_smoothing': np.logspace(0, -9, num=100)
}
estimator = GaussianNB()
best_model_gaussian_nb = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | %matplotlib inline
<load_from_csv> | evaluate_model(best_model_gaussian_nb.best_estimator_, 'gaussian_nb' ) | Titanic - Machine Learning from Disaster |
22,360,789 | adult = pd.read_csv(".. /input/mydata/train_data.csv",
names=[
"Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"],
skiprows=1,
sep=r'\s*,\s*',
engine='python',
na_values="?" )<count_values> | hyperparameters = {
'alpha' : [0.5, 1.0, 1.5, 2.0, 5],
'fit_prior' : [True, False],
}
estimator = MultinomialNB()
best_model_multinominal_nb = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | adult["Country"].value_counts()<correct_missing_values> | evaluate_model(best_model_multinominal_nb.best_estimator_, 'multinominal_nb' ) | Titanic - Machine Learning from Disaster |
22,360,789 | nadult = adult.dropna()<load_from_csv> | hyperparameters = {
'alpha' : [0.5, 1.0, 1.5, 2.0, 5],
'fit_prior' : [True, False],
'norm' : [True, False]
}
estimator = ComplementNB()
best_model_complement_nb = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | testAdult = pd.read_csv(".. /input/mydata/test_data.csv",
names=[
"ID","Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country"],
skiprows=1,
index_col=0,
sep=r'\s*,\s*',
engine='python',
na_values="?" )<prepare_x_and_y> | evaluate_model(best_model_complement_nb.best_estimator_, 'complement_nb' ) | Titanic - Machine Learning from Disaster |
22,360,789 | Yadult = nadult.Target<import_modules> | hyperparameters = {
'alpha' : [0.5, 1.0, 1.5, 2.0, 5],
'fit_prior' : [True, False],
}
estimator = BernoulliNB()
best_model_bernoulli_nb = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | from sklearn.neighbors import KNeighborsClassifier<import_modules> | evaluate_model(best_model_bernoulli_nb.best_estimator_, 'bernoulli_nb' ) | Titanic - Machine Learning from Disaster |
22,360,789 | from sklearn.neighbors import KNeighborsClassifier<import_modules> | hyperparameters = {
'n_neighbors' : list(range(1,5)) ,
'weights' : ['uniform', 'distance'],
'algorithm' : ['auto', 'ball_tree', 'kd_tree', 'brute'],
'leaf_size' : list(range(1,10)) ,
'p' : [1,2]
}
estimator = KNeighborsClassifier()
best_model_kneighbors = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | from sklearn.neighbors import KNeighborsClassifier<choose_model_class> | evaluate_model(best_model_kneighbors.best_estimator_, 'kneighbors' ) | Titanic - Machine Learning from Disaster |
22,360,789 | knn = KNeighborsClassifier(n_neighbors=3 )<import_modules> | hyperparameters = {
'penalty' : ['l1', 'l2', 'elasticnet'],
'eta0' : [0.0001, 0.001, 0.01, 0.1, 1.0],
'max_iter' : list(range(50, 200, 50))
}
estimator = Perceptron(random_state=1)
best_model_perceptron = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | from sklearn.model_selection import cross_val_score<compute_train_metric> | evaluate_model(best_model_perceptron.best_estimator_, 'perceptron' ) | Titanic - Machine Learning from Disaster |
22,360,789 | scores = cross_val_score(knn, Xadult, Yadult, cv=10 )<train_model> | hyperparameters = {
'C' : [0.1, 1, 10, 100],
'gamma' : [0.0001, 0.001, 0.01, 0.1, 1],
'kernel' : ['rbf']
}
estimator = SVC(random_state=1)
best_model_svc = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | knn.fit(Xadult,Yadult )<predict_on_test> | evaluate_model(best_model_svc.best_estimator_, 'svc' ) | Titanic - Machine Learning from Disaster |
22,360,789 | YtestPred = knn.predict(XtestAdult )<import_modules> | hyperparameters = {
'loss' : ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
'penalty' : ['l1', 'l2', 'elasticnet'],
'alpha' : [0.01, 0.1, 1, 10]
}
estimator = SGDClassifier(random_state=1, early_stopping=True)
best_model_sgd = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | from sklearn.metrics import accuracy_score<load_from_csv> | evaluate_model(best_model_sgd.best_estimator_, 'sgd' ) | Titanic - Machine Learning from Disaster |
22,360,789 | UCITest = pd.read_csv(".. /input/mydata/adult.test",
names=[
"Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"],
sep=r'\s*,\s*',
engine='python',
na_values="?" )<correct_missing_values> | hyperparameters = {
'loss' : ['deviance', 'exponential'],
'learning_rate' : [0.01, 0.1, 0.2, 0.3],
'n_estimators' : [50, 100, 200],
'subsample' : [0.1, 0.2, 0.5, 1.0],
'max_depth' : [2, 3, 4, 5]
}
estimator = GradientBoostingClassifier(random_state=1)
best_model_gbc = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | nUCITest = UCITest.dropna()<define_variables> | evaluate_model(best_model_gbc.best_estimator_, 'gbc' ) | Titanic - Machine Learning from Disaster |
22,360,789 | XUCITest = nUCITest[["Age","Education-Num","Capital Gain", "Capital Loss", "Hours per week"]]<prepare_x_and_y> | hyperparameters = {
'n_estimators' : [10, 50, 100, 500],
'learning_rate' : [0.001, 0.01, 0.1, 1.0]
}
estimator = AdaBoostClassifier(random_state=1)
best_model_adaboost = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | YUCITest = nUCITest.Target<predict_on_test> | evaluate_model(best_model_adaboost.best_estimator_, 'adaboost' ) | Titanic - Machine Learning from Disaster |
22,360,789 | YUCIPred = knn.predict(XUCITest)
YUCIPred<compute_test_metric> | hyperparameters = {
'criterion' : ['gini', 'entropy'],
'splitter' : ['best', 'random'],
'max_depth' : [None, 1, 2, 3, 4, 5],
'min_samples_split' : list(range(2,5)) ,
'min_samples_leaf' : list(range(1,5))
}
estimator = DecisionTreeClassifier(random_state=1)
best_model_decision_tree = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | accuracy_score(YUCIPred, YUCITest )<choose_model_class> | evaluate_model(best_model_decision_tree.best_estimator_, 'decision_tree' ) | Titanic - Machine Learning from Disaster |
22,360,789 | knn = KNeighborsClassifier(n_neighbors=30 )<train_model> | hyperparameters = {
'n_estimators' : list(range(10, 50, 10)) ,
'max_features' : ['auto', 'sqrt', 'log2'],
'criterion' : ['gini', 'entropy'],
'max_depth' : [None, 1, 2, 3, 4, 5],
'min_samples_split' : list(range(2,5)) ,
'min_samples_leaf' : list(range(1,5))
}
estimator = RandomForestClassifier(random_state=1)
best_model_random_forest = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | knn.fit(Xadult, Yadult )<train_model> | evaluate_model(best_model_random_forest.best_estimator_, 'random_forest' ) | Titanic - Machine Learning from Disaster |
22,360,789 | knn.fit(Xadult, Yadult )<compute_train_metric> | hyperparameters = {
'learning_rate' : [0.3, 0.4, 0.5],
'gamma' : [0, 0.4, 0.8],
'max_depth' : [2, 3, 4],
'reg_lambda' : [0, 0.1, 1],
'reg_alpha' : [0.1, 1]
}
fit_params = {
'verbose' : False,
'early_stopping_rounds' : 40,
'eval_metric' : 'logloss',
'eval_set' : [(val_X, val_y)]
}
estimator = XGBClassifier(seed=1, tree_method='gpu_hist', predictor='gpu_predictor', use_label_encoder=False)
best_model_xgb = get_best_model(estimator, hyperparameters, fit_params ) | Titanic - Machine Learning from Disaster |
22,360,789 | scores = cross_val_score(knn, Xadult, Yadult, cv=10 )<predict_on_test> | evaluate_model(best_model_xgb.best_estimator_, 'xgb' ) | Titanic - Machine Learning from Disaster |
22,360,789 | YUCIPred = knn.predict(XUCITest )<compute_test_metric> | hyperparameters = {
'boosting_type' : ['gbdt', 'dart', 'goss'],
'num_leaves' : [4, 8, 16, 32],
'learning_rate' : [0.01, 0.1, 1],
'n_estimators' : [25, 50, 100],
'reg_alpha' : [0, 0.1, 1],
'reg_lambda' : [0, 0.1, 1],
}
estimator = LGBMClassifier(random_state=1, device='gpu')
best_model_lgbm = get_best_model(estimator, hyperparameters ) | Titanic - Machine Learning from Disaster |
22,360,789 | accuracy_score(YUCITest, YUCIPred )<find_best_params> | evaluate_model(best_model_lgbm.best_estimator_, 'lgbm' ) | Titanic - Machine Learning from Disaster |
22,360,789 | <choose_model_class><EOS> | for model in best_models:
predictions = best_models[model].predict(test_X)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission_' + model + '.csv', index=False ) | Titanic - Machine Learning from Disaster |
22,357,080 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_train_metric> | !pip install -q -U keras-tuner
clear_output()
| Titanic - Machine Learning from Disaster |
22,357,080 | scores = cross_val_score(knn, Xadult, Yadult, cv=10)
scores<train_model> | TRAIN_PATH = ".. /input/titanic/train.csv"
TEST_PATH = ".. /input/titanic/test.csv"
SAMPLE_SUBMISSION_PATH = ".. /input/titanic/gender_submission.csv"
SUBMISSION_PATH = "submission.csv"
ID = "PassengerId"
TARGET = "Survived"
TEST_SIZE = 0.2
RANDOM_SEED = 42
MAX_TRIAL = 3
EPOCHS = 5
VALIDATION_SPLIT = 0.15 | Titanic - Machine Learning from Disaster |
22,357,080 | knn.fit(Xadult, Yadult )<train_model> | train = pd.read_csv(TRAIN_PATH)
test = pd.read_csv(TEST_PATH ) | Titanic - Machine Learning from Disaster |
22,357,080 | knn.fit(Xadult, Yadult )<categorify> | drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp','Parch']
train = train.drop(drop_elements, axis = 1)
test = test.drop(drop_elements, axis = 1)
def checkNull_fillData(df):
for col in df.columns:
if len(df.loc[df[col].isnull() == True])!= 0:
if df[col].dtype == "float64" or df[col].dtype == "int64":
df.loc[df[col].isnull() == True,col] = df[col].mean()
else:
df.loc[df[col].isnull() == True,col] = df[col].mode() [0]
checkNull_fillData(train)
checkNull_fillData(test)
str_list = []
num_list = []
for colname, colvalue in train.iteritems() :
if type(colvalue[1])== str:
str_list.append(colname)
else:
num_list.append(colname)
train = pd.get_dummies(train, columns=str_list)
test = pd.get_dummies(test, columns=str_list ) | Titanic - Machine Learning from Disaster |
22,357,080 | adult['Sex'] = adult['Sex'].transform(lambda x: 1 if x=='Male' else 0 if x==x else x )<predict_on_test> | y = train[TARGET]
X = train.drop([TARGET],axis=1)
X_test = test
gc.collect() | Titanic - Machine Learning from Disaster |
22,357,080 | predictions = knn.predict(testAdult[["Age","Education-Num","Capital Gain", "Capital Loss", "Hours per week"]] )<create_dataframe> | X_train,X_val,y_train,y_val=train_test_split(X,y,test_size=TEST_SIZE,random_state=RANDOM_SEED ) | Titanic - Machine Learning from Disaster |
22,357,080 | result = np.vstack(( testAdult.index.values, predictions)).T
x = ['Id','income']
resultado = pd.DataFrame(columns=x, data=result)
resultado.set_index('Id', inplace=True )<save_to_csv> | def build_random_forest(hp):
model = ensemble.RandomForestClassifier(
n_estimators=hp.Int('n_estimators', 10, 50, step=10),
max_depth=hp.Int('max_depth', 3, 10))
return model
tuner = kt.tuners.Sklearn(
oracle=kt.oracles.BayesianOptimization(
objective=kt.Objective('score', 'max'),
max_trials=10),
hypermodel= build_random_forest,
directory='.',
project_name='random_forest')
tuner.search(X_train.values, y_train.values.ravel())
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0] | Titanic - Machine Learning from Disaster |
22,357,080 | resultado.to_csv('mypredictions.csv' )<install_modules> | model = tuner.hypermodel.build(best_hp)
model.fit(X_train, y_train.values ) | Titanic - Machine Learning from Disaster |
22,357,080 | !pip show fastai<set_options> | pred_val = model.predict(X_val)
print(accuracy_score(y_val, pred_val)) | Titanic - Machine Learning from Disaster |
22,357,080 | %matplotlib inline
<define_variables> | pred_test = model.predict(X_test ) | Titanic - Machine Learning from Disaster |
22,357,080 | <compute_test_metric><EOS> | sub = pd.read_csv(SAMPLE_SUBMISSION_PATH)
sub[TARGET]=(pred_test > 0.5 ).astype(int)
sub.to_csv(SUBMISSION_PATH,index=False)
sub.head() | Titanic - Machine Learning from Disaster |
22,184,906 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv> | warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
22,184,906 | train_df = pd.read_csv(".. /input/train.csv")
test_df = pd.read_csv(".. /input/sample.csv")
train_df.head()<count_values> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
test=pd.read_csv('/kaggle/input/titanic/test.csv')
sub = pd.read_csv('/kaggle/input/titanic/gender_submission.csv' ) | Titanic - Machine Learning from Disaster |
22,184,906 | train_df.Category.value_counts()<train_model> | train.isnull().sum().sort_values(ascending = False ) | Titanic - Machine Learning from Disaster |
22,184,906 | image = Image.open(DATA_PATH+'/train/train/0/100380.jpg')
imgplot = plt.imshow(image)
plt.show()
image.size<feature_engineering> | test.isnull().sum().sort_values(ascending = False ) | Titanic - Machine Learning from Disaster |
22,184,906 | tfms = get_transforms(max_zoom=1., max_warp=0.2, max_lighting=0.3,
xtra_tfms=[cutout(n_holes=(1,20)) ]
)<define_variables> | train.loc[train.Cabin.notnull() ,'Cabin']=1
train.loc[train.Cabin.isnull() ,'Cabin']=0 | Titanic - Machine Learning from Disaster |
22,184,906 | data = ImageDataBunch.from_folder(DATA_PATH, train=PATH+'train/train/', test='test/test_upload/',
ds_tfms=tfms, padding_mode='zeros',
valid_pct=0.1, size=SIZE,
classes=['0','1','2','3','4','5','6','7','8','9'],
bs=BATCH, num_workers=0 ).normalize(imagenet_stats)
data.path = pathlib.Path(PATH )<define_variables> | test.loc[test.Cabin.notnull() ,'Cabin']=1
test.loc[test.Cabin.isnull() ,'Cabin']=0 | Titanic - Machine Learning from Disaster |
22,184,906 | data.show_batch(rows=4, figsize=(12,9))<choose_model_class> | train.Cabin.isnull().sum() | Titanic - Machine Learning from Disaster |
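
Each row above pairs a notebook cell's code prompt (tagged with an operation label such as `<load_from_csv>` or `<train_model>`) with its completion and the name of the source competition. As a minimal sketch of how a table with this schema could be loaded and inspected — the file name `kaggle_code_cells.csv` and the CSV layout are assumptions for illustration, not stated anywhere in the dump — one might use pandas:

```python
import pandas as pd

# Hypothetical path and format: the dump above does not say how the table is stored.
df = pd.read_csv("kaggle_code_cells.csv")

# Columns per the header row: kernel_id, prompt, completion, comp_name.
print(df.columns.tolist())
print(len(df), "rows")

# Look at one prompt/completion pair from the Titanic competition.
titanic = df[df["comp_name"] == "Titanic - Machine Learning from Disaster"]
row = titanic.iloc[0]
print("kernel_id:", row["kernel_id"])
print("--- prompt (truncated) ---")
print(str(row["prompt"])[:500])        # prompt lengths range up to ~1.85M characters
print("--- completion (truncated) ---")
print(str(row["completion"])[:500])    # completion lengths range up to ~182k characters
```

Truncating the two text columns keeps the preview readable, since the header statistics indicate that individual prompts and completions can run to hundreds of thousands of characters.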