kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
12,825,546
def split_rule(x): return x.split(">") def calculate_rule(rules): condA, condB = split_rule(rules) condB += "&" + condA condA = condA.split("&") condB = condB.split("&") totalA = get_assoc(*condA) totalB = get_assoc(*condB) return totalB / totalA<feature_engineering>
GS.best_score_
Titanic - Machine Learning from Disaster
12,825,546
rules_df['confidence'] = [floor(confidence * 1000)for confidence in confidences]<define_variables>
param_grid={'min_samples_leaf':np.arange(1,1+10,1)} rfc = RandomForestClassifier( n_estimators=30, random_state=90, max_depth=8 ) GS = GridSearchCV(rfc,param_grid,cv=10) GS.fit(X_train,Y_train) GS.best_params_
Titanic - Machine Learning from Disaster
12,825,546
submission = rules_df[['rule', 'confidence']] submission<save_to_csv>
GS.best_score_
Titanic - Machine Learning from Disaster
12,825,546
submission.to_csv("submission.csv", index= False )<import_modules>
param_grid={'min_samples_split':np.arange(2, 2+20, 1)} rfc = RandomForestClassifier( n_estimators=30, random_state=90, max_depth=8, min_samples_leaf=2 ) GS = GridSearchCV(rfc,param_grid,cv=10) GS.fit(X_train,Y_train) GS.best_params_
Titanic - Machine Learning from Disaster
12,825,546
from fastai.vision import * from fastai.metrics import accuracy, fbeta<define_variables>
GS.best_score_
Titanic - Machine Learning from Disaster
12,825,546
path = Path(".. /input" )<categorify>
rfc = RandomForestClassifier( n_estimators=30, random_state=90, max_depth=8, min_samples_leaf=2, min_samples_split=11 ) score = cross_val_score(rfc,X_train,Y_train,cv=10 ).mean() score
Titanic - Machine Learning from Disaster
12,825,546
src =(ImageList.from_folder(path/'train') .split_by_rand_pct() .label_from_folder() .add_test(Path('.. /input/test/test' ).ls()))<normalization>
rfc.fit(X_train, Y_train) predictions = rfc.predict(X_predict ).astype(int )
Titanic - Machine Learning from Disaster
12,825,546
data =(src.transform(get_transforms() , size=128) .databunch(path=Path(".. /")) .normalize(imagenet_stats))<define_variables>
output = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': predictions}) output.to_csv('submission2.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
13,070,360
data.show_batch(3 )<groupby>
%matplotlib inline
Titanic - Machine Learning from Disaster
13,070,360
data.batch_stats()<choose_model_class>
train=pd.read_csv('/kaggle/input/titanic/train.csv') test=pd.read_csv('/kaggle/input/titanic/test.csv') y=train['Survived'] submission=pd.DataFrame(test['PassengerId']) total=pd.concat([train.drop(['PassengerId','Survived'],axis=1),test.drop('PassengerId',axis=1)] )
Titanic - Machine Learning from Disaster
13,070,360
arch = models.resnet34<choose_model_class>
encoder=LabelEncoder() total['Sex']=encoder.fit_transform(total['Sex']) total['Embarked']=encoder.fit_transform(total['Embarked']) total['Age']=total['Age'].astype(int) total.head()
Titanic - Machine Learning from Disaster
13,070,360
learn = cnn_learner(data, arch, metrics=accuracy ).to_fp16()<define_search_space>
total['Famile_no']=total['SibSp']+total['Parch']+1 def family_gp(size): a='' if(size<=1): a='Alone' elif(size<=3): a='nuclear family' elif(size<=5): a='middle' else: a='large' return a total['Family_grp']=total['Famile_no'].map(family_gp)
Titanic - Machine Learning from Disaster
13,070,360
lr = 5e-2<train_model>
total.drop(['Name','Ticket','Cabin','SibSp','Parch'],axis=1,inplace=True) total.head()
Titanic - Machine Learning from Disaster
13,070,360
learn.fit_one_cycle(6, max_lr=slice(lr))<save_model>
total=pd.get_dummies(total,columns=['Embarked','Family_grp','Pclass']) c=StandardScaler() train=total[:len(train)] train=c.fit_transform(train) test=total[len(train):] test=c.fit_transform(test )
Titanic - Machine Learning from Disaster
13,070,360
learn.save('stage-1' )<predict_on_test>
xtrain,xvalid,ytrain,yvalid=train_test_split(train,y,test_size=0.3) models = {"KNN": KNeighborsClassifier() , "Logistic Regression": LogisticRegression(max_iter=10000), "Random Forest": RandomForestClassifier() , "SVC" : SVC(probability=True), "DecisionTreeClassifier" : DecisionTreeClassifier() , "AdaBoostClassifier" : AdaBoostClassifier() , "GradientBoostingClassifier" : GradientBoostingClassifier() , "GaussianNB" : GaussianNB() , "LinearDiscriminantAnalysis" : LinearDiscriminantAnalysis() , "QuadraticDiscriminantAnalysis" : QuadraticDiscriminantAnalysis() } def fit_and_score(models, X_train, X_valid, y_train, y_valid): np.random.seed(42) model_scores = {} for name, model in models.items() : model.fit(X_train, y_train) y_pred = model.predict(X_valid) model_scores[name] = roc_auc_score(y_pred, y_valid) return model_scores fit_and_score(models,xtrain,xvalid,ytrain,yvalid)
Titanic - Machine Learning from Disaster
13,070,360
<save_to_csv><EOS>
leaks = { 897:1, 899:1, 930:1, 932:1, 949:1, 987:1, 995:1, 998:1, 999:1, 1016:1, 1047:1, 1083:1, 1097:1, 1099:1, 1103:1, 1115:1, } model=GradientBoostingClassifier() model.fit(train,target) a=model.predict(test) submission['Survived']=a submission['Survived'] = sub['Survived'].apply(lambda x: 1 if x>0.8 else 0) submission['Survived'] = sub.apply(lambda r: leaks[int(r['PassengerId'])] if int(r['PassengerId'])in leaks else r['Survived'], axis=1) submission.to_csv('sub_tit.csv', index=False )
Titanic - Machine Learning from Disaster
13,011,849
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_pretrained>
df = pd.read_csv('.. /input/titanic/train.csv' )
Titanic - Machine Learning from Disaster
13,011,849
learn.load('stage-1');<train_model>
df_cat['Cabin'].value_counts()
Titanic - Machine Learning from Disaster
13,011,849
learn.fit_one_cycle(5, max_lr=slice(5e-6, lr/10))<save_model>
def splitter(cols): return re.split('\, |\.',cols)[1] def remove_digits(col): for char in col: if not char.isdigit() : return col[0] else: return char df_cat['title']= df_cat['Name'].apply(splitter) df_cat['deck'] = df_cat['Cabin'].apply(lambda x: str(x ).split(' ')[0] ).apply(remove_digits )
Titanic - Machine Learning from Disaster
13,011,849
learn.save('stage-2' )<predict_on_test>
print(df_cat['title'].value_counts() ,' ',df_cat['deck'].value_counts() )
Titanic - Machine Learning from Disaster
13,011,849
test_probs, _ = learn.get_preds(ds_type=DatasetType.Test) test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy() , axis=-1)]<save_to_csv>
df_cat['title'] = df_cat['title'].apply(lambda x: 'Other' if x not in ['Mr','Master','Miss','Mrs'] else x) print(df_cat['title'].value_counts() )
Titanic - Machine Learning from Disaster
13,011,849
fnames = [f.name[:-4] for f in learn.data.test_ds.items] df = pd.DataFrame({'id':fnames, 'predicted_class':test_preds}, columns=['id', 'predicted_class']) df['id'] = df['id'].astype(str)+ '.jpg' df.to_csv('submission-2.csv', index=False )<load_pretrained>
df_cat['deck'] = df_cat['deck'].map({'A':'ABC','B':'ABC','C':'ABC','D':'DE','E':'DE','F':'FG','G':'FG','n':'U','T':'U'} )
Titanic - Machine Learning from Disaster
13,011,849
learn.load('stage-2');<normalization>
df_num['alone'] =(df['SibSp'] + df['Parch'] ).apply(lambda x: 1 if x==0 else 0 )
Titanic - Machine Learning from Disaster
13,011,849
data =(src.transform(get_transforms() , size=256) .databunch(path=Path(".. /")) .normalize(imagenet_stats))<categorify>
df_new = pd.concat([df_num,df_cat],axis=1 )
Titanic - Machine Learning from Disaster
13,011,849
learn.data = data learn = learn.to_fp16()<define_search_space>
df_new['age_imp'] = df_new[['Age','title']].apply(imp,axis=1 )
Titanic - Machine Learning from Disaster
13,011,849
lr = 5e-3<train_model>
df_new[df_new['Embarked'].isnull() ]
Titanic - Machine Learning from Disaster
13,011,849
learn.fit_one_cycle(5, max_lr=slice(lr))<save_model>
df_new.loc[81,'Embarked'] = 'S' df_new.loc[829,'Embarked'] = 'S'
Titanic - Machine Learning from Disaster
13,011,849
learn.save('stage-256-1' )<predict_on_test>
dummies = pd.get_dummies(df_new[['Embarked','Sex','title','deck']],drop_first=True )
Titanic - Machine Learning from Disaster
13,011,849
test_probs, _ = learn.get_preds(ds_type=DatasetType.Test) test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy() , axis=-1)]<save_to_csv>
df_final = pd.concat([df_new,dummies],axis=1 ).drop( ['PassengerId','Age','SibSp','Parch','Fare','fare_bins_15','Name','Sex','Ticket','Cabin','Embarked','title','age_imp','deck'],axis=1 )
Titanic - Machine Learning from Disaster
13,011,849
fnames = [f.name[:-4] for f in learn.data.test_ds.items] df = pd.DataFrame({'id':fnames, 'predicted_class':test_preds}, columns=['id', 'predicted_class']) df['id'] = df['id'].astype(str)+ '.jpg' df.to_csv('submission-3.csv', index=False )<load_pretrained>
from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.ensemble import VotingClassifier from sklearn.metrics import classification_report,confusion_matrix from sklearn.model_selection import cross_val_score
Titanic - Machine Learning from Disaster
13,011,849
learn.load('stage-256-1');<train_model>
X = df_final.drop('Survived',axis=1) y = df_final['Survived']
Titanic - Machine Learning from Disaster
13,011,849
learn.fit_one_cycle(5, max_lr=slice(1e-5, 1e-4))<save_model>
lr = LogisticRegression(solver='liblinear',max_iter=200) knn = KNeighborsClassifier(n_neighbors=10) forest = RandomForestClassifier(n_estimators=200) svc = SVC()
Titanic - Machine Learning from Disaster
13,011,849
learn.save('stage-256-2' )<predict_on_test>
models = [lr, knn, forest, svc] scores = [] for model in models: scores.append(cross_val_score(model,X,y,cv=10)) for score in scores: print("Accuracy: %0.2f(+/- %0.2f)" %(score.mean() , score.std() * 2))
Titanic - Machine Learning from Disaster
13,011,849
test_probs, _ = learn.get_preds(ds_type=DatasetType.Test) test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy() , axis=-1)]<save_to_csv>
vote = VotingClassifier(estimators=[('knn',knn),('forest',forest),('svc',svc),('lr',lr)] )
Titanic - Machine Learning from Disaster
13,011,849
fnames = [f.name[:-4] for f in learn.data.test_ds.items] df = pd.DataFrame({'id':fnames, 'predicted_class':test_preds}, columns=['id', 'predicted_class']) df['id'] = df['id'].astype(str)+ '.jpg' df.to_csv('submission-4.csv', index=False )<predict_on_test>
score = cross_val_score(vote,X,y,cv=10 )
Titanic - Machine Learning from Disaster
13,011,849
preds,y,losses = learn.get_preds(with_loss=True) interp = ClassificationInterpretation(learn.to_fp32() , preds, y, losses )<feature_engineering>
print("Accuracy: %0.2f(+/- %0.2f)" %(score.mean() , score.std() * 2))
Titanic - Machine Learning from Disaster
13,011,849
test_probs, _ = learn.TTA(ds_type=DatasetType.Test) test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy() , axis=-1)]<save_to_csv>
test_df = pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
13,011,849
fnames = [f.name[:-4] for f in learn.data.test_ds.items] df = pd.DataFrame({'id':fnames, 'predicted_class':test_preds}, columns=['id', 'predicted_class']) df['id'] = df['id'].astype(str)+ '.jpg' df.to_csv('submission-5.csv', index=False )<set_options>
p_id = test_df['PassengerId'] test_df = test_df.drop('PassengerId',axis=1 )
Titanic - Machine Learning from Disaster
13,011,849
pd.set_option("display.max_columns", 500) pd.set_option("display.max_rows", 200) plt.rcParams['figure.figsize'] = [15, 6] sns.set_style("darkgrid" )<install_modules>
test_df['title']= test_df['Name'].apply(splitter) test_df['deck'] = test_df['Cabin'].apply(lambda x: str(x ).split(' ')[0] ).apply(remove_digits) test_df['title'] = test_df['title'].apply(lambda x: 'Other' if x not in ['Mr','Master','Miss','Mrs'] else x) test_df['alone'] =(test_df['SibSp'] + test_df['Parch'] ).apply(lambda x: 1 if x==0 else 0) test_df['age_imp'] = test_df[['Age','title']].apply(imp,axis=1) test_df['fare_bins'] = pd.qcut(x=test_df['Fare'],q=10,labels=[1,2,3,4,5,6,7,8,9,10],duplicates='drop') test_df['age_bins'] = pd.qcut(x=test_df['age_imp'],q=10,labels=[1,2,3,4,5,6,7,8,9],duplicates='drop' )
Titanic - Machine Learning from Disaster
13,011,849
!pip install pandas-profiling<load_from_csv>
test_df['deck'] = test_df['deck'].map({'A':'ABC','B':'ABC','C':'ABC','D':'DE','E':'DE','F':'FG','G':'FG','n':'U'} )
Titanic - Machine Learning from Disaster
13,011,849
online_sales = pd.read_csv('/kaggle/input/uisummerschool/Online_sales.csv', sep=',') online_sales.head()<define_variables>
test_dummies = pd.get_dummies(test_df[['Embarked','Sex','title','deck']],drop_first=True) test_df_final = pd.concat([test_df,test_dummies],axis=1 ).drop( ['Age','SibSp','Parch','Fare','Name','Sex','Ticket','Cabin','Embarked','title','age_imp','deck'],axis=1 )
Titanic - Machine Learning from Disaster
13,011,849
online_sales [['Date', 'Product SKU', 'Quantity', 'Revenue', 'Tax']]<define_variables>
test_df_final[test_df_final['fare_bins'].isnull() ]
Titanic - Machine Learning from Disaster
13,011,849
test = online_sales [['Date', 'Product SKU', 'Quantity', 'Revenue', 'Tax']] <filter>
test_df_final.loc[152,'fare_bins'] = X['fare_bins_10'].mode() [0]
Titanic - Machine Learning from Disaster
13,011,849
condition1= online_sales['Product SKU'] == 'GGOENEBQ079099' online_sales[(condition1)] condition2= online_sales['Quantity'] > 2 online_sales[(condition1)&(condition2)]<feature_engineering>
vote.fit(X,y) preds = vote.predict(test_df_final )
Titanic - Machine Learning from Disaster
13,011,849
<feature_engineering><EOS>
submission = pd.DataFrame({'PassengerId':p_id,'Survived':preds}) submission.to_csv('20201126_vote.csv',index=False )
Titanic - Machine Learning from Disaster
13,576,626
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<groupby>
sns.set(style="darkgrid") SEED = 31 random.seed(SEED) for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Titanic - Machine Learning from Disaster
13,576,626
test = online_sales.groupby(['Date'])['Quantity'].sum().reset_index() test.head()<groupby>
data = pd.read_csv("/kaggle/input/titanic/train.csv") data_test = pd.read_csv("/kaggle/input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
13,576,626
test = online_sales.groupby(['Date', 'Product SKU'])['Quantity'].sum().reset_index() test.head()<groupby>
y = data["Survived"] x = data.copy() x_test = data_test.copy() x_full = pd.concat([x, x_test]) n = len(x) n_test = len(x_test) n_full = n + n_test
Titanic - Machine Learning from Disaster
13,576,626
test = online_sales.groupby(['Date'] ).agg({'Quantity': 'sum', 'Revenue': 'sum', 'Tax': 'sum', 'Product SKU': 'count', 'Transaction ID': 'count', } ).reset_index() test.head()<sort_values>
( x_full.drop(["Survived"], axis = 1 ).isna().sum() / n_full)* 100
Titanic - Machine Learning from Disaster
13,576,626
online_sales.sort_values(by=['Quantity'], ascending = False ).head(15 )<drop_column>
x_full[x_full["Fare"].isnull() ]
Titanic - Machine Learning from Disaster
13,576,626
test.rename(index=str, columns={"Quantity": "Total Quantity", "Revenue": "Total Revenue"}, inplace = True) test.drop(columns=['Product SKU', 'Transaction ID'], inplace = True) test.head()<load_from_csv>
x_full[x_full["Embarked"].isnull() ]
Titanic - Machine Learning from Disaster
13,576,626
online_sales = online_sales = pd.read_csv('/kaggle/input/uisummerschool/Online_sales.csv') backup = online_sales.copy() daily_online_revenue = online_sales.groupby(['Date'])['Revenue'].sum().reset_index() daily_online_revenue.tail()<create_dataframe>
embarked_survived = x.groupby(["Embarked", "Survived"])["PassengerId"].count() embarked_survived
Titanic - Machine Learning from Disaster
13,576,626
add_data = [['2017-12-01', 0], ['2017-12-02', 0], ['2017-12-03', 0], ['2017-12-04', 0], ['2017-12-05', 0], ['2017-12-06', 0], ['2017-12-07', 0], ['2017-12-08', 0], ['2017-12-09', 0], ['2017-12-10', 0], ['2017-12-11', 0], ['2017-12-10', 0], ['2017-12-13', 0], ['2017-12-14', 0] ] add_data_df = pd.DataFrame(add_data, columns = ['Date', 'Revenue']) add_data_df['Date'] = add_data_df['Date'].astype(str) add_data_df['Date'] = pd.to_datetime(add_data_df['Date']) daily_online_revenue = daily_online_revenue.append(add_data_df) df_rev = daily_online_revenue.copy() daily_online_revenue.tail(20 )<prepare_output>
def display_survival_prob(data, feature): feature_survived = data.groupby([feature, "Survived"])["PassengerId"].count() classes = set(feature_survived.index.get_level_values(0)) surv_counts = {key: {} for key in classes} for c in classes: for survive in range(0, 2): surv_counts[c][survive] = 0 if survive in feature_survived[c]: surv_counts[c][survive] = feature_survived[c][survive] df_surv = pd.DataFrame(surv_counts) surv_percentages = {} for col in df_surv.columns: surv_percentages[col] = [(count / df_surv[col].sum())* 100 for count in df_surv[col]] df_survived_percentages = pd.DataFrame(surv_percentages ).transpose() class_names = tuple(classes) bar_count = np.arange(len(class_names)) bar_width = 0.5 not_survived = df_survived_percentages[0] survived = df_survived_percentages[1] plt.figure(figsize=(10, 6)) plt.bar(bar_count, not_survived, color=" plt.bar(bar_count, survived, bottom=not_survived, color=" plt.xlabel("{}".format(feature), size=15, labelpad=20) plt.ylabel("Survival Percentage", size=15, labelpad=20) plt.xticks(bar_count, class_names) plt.tick_params(axis="x", labelsize=15) plt.tick_params(axis="y", labelsize=15) plt.legend(loc="upper left", bbox_to_anchor=(1, 1), prop={"size": 15}) plt.title("Survival Percentage by {}".format(feature), size=18, y=1.05) plt.show()
Titanic - Machine Learning from Disaster
13,576,626
daily_online_revenue = preprocess(df_rev ).set_index('Date') daily_online_revenue.head()<split>
embarked_class_passengers = x.groupby("Pclass")["Embarked"].value_counts() print(embarked_class_passengers )
Titanic - Machine Learning from Disaster
13,576,626
def split_train_test(dataset, end_of_training_date): training_data = daily_online_revenue.loc['2017-01-05':end_of_training_date] testing_data = daily_online_revenue["2017-12-01":] return training_data, testing_data<prepare_x_and_y>
x2 = x.copy() x2["Cabin"] = x2["Cabin"].map(lambda cabin: cabin if pd.isnull(cabin)else cabin[0]) x2["Cabin"].sample(10 )
Titanic - Machine Learning from Disaster
13,576,626
def split_label_and_predictor(train_or_test_data): y_data = train_or_test_data['today_revenue'] x_data = train_or_test_data[['d-1_rev','d-2_rev','d-3_rev','d-4_rev','d-5_rev','d-6_rev','d-7_rev','d-8_rev','d-9_rev','d-10_rev']] return x_data, y_data<split>
x2.groupby(["Cabin", "Pclass"])["PassengerId"].count()
Titanic - Machine Learning from Disaster
13,576,626
end_of_training_date = "2017-11-30" training_data, test_data = split_train_test(daily_online_revenue,end_of_training_date) <split>
class_cabin_passengers = x2.groupby("Pclass")["Cabin"].value_counts() print("Number of passengers without a cabin registered: {}".format(sum(x2["Cabin"].isnull()))) print("Number of passengers without a cabin registered that survived: {}".format(sum(( x2["Survived"] == 1)&(x2["Cabin"].isnull())))) print("Number of passengers without a cabin registered that didn't survived: {}".format(sum(( x2["Survived"] == 0)&(x2["Cabin"].isnull())))) print("Total number of passengers that didn't survived: {}".format(sum(x["Survived"] == 0))) print("=================") print("Passengers from class 1 with cabin registered: {}".format(class_cabin_passengers[1].sum())) print("Total number of Passengers with cabin registered: {}".format(( ~x2["Cabin"].isnull() ).sum())) print("Number of passengers with a cabin registered that survived: {}".format(sum(( x2["Survived"] == 1)& ~(x2["Cabin"].isnull())))) print("Total number of Passengers of class 1: {}".format(( x2["Pclass"] == 1 ).sum()))
Titanic - Machine Learning from Disaster
13,576,626
x_train, y_train = split_label_and_predictor(training_data) x_test, y_test = split_label_and_predictor(test_data) <train_model>
class MissingValuesTransformer(BaseEstimator, TransformerMixin): def __init__(self): print("In the MissingValuesTransformer init method") def fit(self, x, y = None): self._median_gender_class_age = x.groupby(["Sex", "Pclass"])["Age"].median() self._median_class_fare = x.groupby(["Pclass"])["Fare"].median() self._count_class_embarked = x.groupby("Pclass")["Embarked"].value_counts() return self def transform(self, x, y = None): x["Cabin"] = x["Cabin"].map(lambda cabin: cabin if pd.isnull(cabin)else cabin[0]) self._count_class_cabin = x.groupby("Pclass")["Cabin"].value_counts() x = x.apply(self._fill_na_age, axis = 1) x = x.apply(self._fill_na_fare, axis = 1) x = x.apply(self._fill_na_embarked, axis = 1) x = x.apply(self._fill_na_cabin, axis = 1) return x def _fill_na_age(self, row): row["Age"] = self._median_gender_class_age[row["Sex"]][row["Pclass"]] if pd.isnull(row["Age"])else row["Age"] return row def _fill_na_fare(self, row): row["Fare"] = self._median_class_fare[row["Pclass"]] if pd.isnull(row["Fare"])else row["Fare"] return row def _fill_na_embarked(self, row): row_class = row["Pclass"] total_class_passengers = self._count_class_embarked[row_class].sum() class_embarked_dist = self._count_class_embarked[row_class] / total_class_passengers if pd.isnull(row["Embarked"]): row["Embarked"] = random.choices(population = class_embarked_dist.index, weights = class_embarked_dist, k = 1)[0] return row def _fill_na_cabin(self, row): row_class = row["Pclass"] total_class_passengers = self._count_class_cabin[row_class].sum() class_cabin_dist = self._count_class_cabin[row_class] / total_class_passengers if pd.isnull(row["Cabin"]): row["Cabin"] = random.choices(population = class_cabin_dist.index, weights = class_cabin_dist, k = 1)[0] return row
Titanic - Machine Learning from Disaster
13,576,626
def fit(x_train, y_train): model = RandomForestRegressor(random_state=1) model.fit(x_train, y_train) return model def predict(model, x_test): y_pred = model.predict(x_test) return y_pred model = fit(x_train, y_train) <predict_on_test>
pipeline = Pipeline(steps=[ ("preprocessor", MissingValuesTransformer()) ]) filled_x = pipeline.fit_transform(x) filled_x.sample(10 )
Titanic - Machine Learning from Disaster
13,576,626
df_rev2 = df_rev.copy() n_iteration = len(x_test) result = [] for i in range(n_iteration): y_pred = predict(model, pd.DataFrame(x_test.iloc[i] ).transpose()) result.append(y_pred[0]) df_rev2.loc[df_rev2["Date"]==x_test.index[i],"Revenue"] = y_pred daily_online_revenue = preprocess(df_rev2 ).set_index('Date') _, testing_data = split_train_test(daily_online_revenue,end_of_training_date) x_test, _ = split_label_and_predictor(testing_data) result<compute_test_metric>
x2["FamilySize"] = x2["SibSp"] + x2["Parch"] + 1 x2.drop(["SibSp", "Parch"], axis = 1, inplace = True) x2.sample(10 )
Titanic - Machine Learning from Disaster
13,576,626
comparison = pd.DataFrame({"Prediction":result,"Actual":y_test}) comparison.index = y_test.index error = sqrt(mean_squared_error(comparison["Actual"], comparison["Prediction"])) print("Error Score(RMSE)= {}".format(round(error,2))) historical = pd.DataFrame(y_train ).rename(columns={"Revenue":"Actual"} ).tail(14) pd.concat([historical,comparison],sort=True ).plot() ;<create_dataframe>
def get_family_group(size): if size == 1: return "Alone" elif size <= 4: return "Small" elif size <= 6: return "Medium" else: return "Large" x2["FamilySize"] = x2["FamilySize"].map(lambda s: get_family_group(s)) x2.sample(10 )
Titanic - Machine Learning from Disaster
13,576,626
formatted_result = pd.DataFrame(result ).reset_index().rename(columns={"index":"Id",0:"Revenue"}) display(formatted_result) formatted_result.to_csv("result.csv",index=False) <install_modules>
x2["NameTitle"] = x2["Name"].map(lambda name: name.split(", ")[1].split(".")[0]) x2.sample(5 )
Titanic - Machine Learning from Disaster
13,576,626
!pip install catboost==0.13.1<set_options>
def get_reduced_title(title): return title if title in ["Mr", "Mrs", "Miss", "Master"] else "Other" x2["NameTitle"] = x2["NameTitle"].map(lambda title: get_reduced_title(title)) x2.sample(5 )
Titanic - Machine Learning from Disaster
13,576,626
warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=FutureWarning) warnings.filterwarnings("ignore", category=UserWarning )<import_modules>
pds = pd.Series([3.3, 40.5, 23.78, 56.4, 15,2]) pds.isin(x2["Fare"] )
Titanic - Machine Learning from Disaster
13,576,626
import gc import numpy as np import pandas as pd from time import time from sklearn.metrics import roc_auc_score, classification_report from catboost import Pool, CatBoost, CatBoostClassifier, MetricVisualizer<load_from_csv>
pipeline = Pipeline(steps=[ ("preprocessor", MissingValuesTransformer()), ("featureEngineering", FeatureEngineeringTransformer()) ]) output = pipeline.fit_transform(x) output.head(7 )
Titanic - Machine Learning from Disaster
13,576,626
basepath = ".. /input/" df_train = pd.read_csv(f"{basepath}train/train.csv", index_col=0, low_memory=False) df_test = pd.read_csv(f"{basepath}test/test.csv", index_col=0, low_memory=False) group_col = 'group_id' target = 'target' df_train.shape, df_test.shape<drop_column>
categorical_columns = ["Pclass", "Sex", "Cabin", "Embarked", "FamilySize", "NameTitle"] feature_transformer = ColumnTransformer( transformers=[ ('age', SimpleImputer() , ["Age"]), ('fare', OrdinalEncoder() , ["Fare"]), ('categorical', OneHotEncoder() , categorical_columns) ] )
Titanic - Machine Learning from Disaster
13,576,626
df_train = df_train.drop(df_train.index[[0,1,2,3]]) df_train = df_train.drop(['feature_13', 'feature_32'], axis=1) df_test = df_test.drop(['feature_13', 'feature_32'], axis=1) df_train = df_train.rename(columns={'feature_18': 'pos'}) df_test = df_test.rename(columns={'feature_18': 'pos'} )<data_type_conversions>
pipeline = Pipeline(steps=[ ("preprocessor", MissingValuesTransformer()), ("featureEngineering", FeatureEngineeringTransformer()), ("columnTransformer", feature_transformer) ]) output = pipeline.fit_transform(x, y) print(output.todense() [0:9,:] )
Titanic - Machine Learning from Disaster
13,576,626
num_features = list(df_train.select_dtypes(include=[np.float64, np.int64] ).columns) num_features.remove(target) num_features.remove(group_col) mins = df_train[num_features].min() maxs = df_train[num_features].max() difs = np.abs(maxs - mins) vals = mins - 5 * difs for col in num_features: df_train[col] = df_train[col].fillna(vals[col]) df_test[col] = df_test[col].fillna(vals[col]) df_train[num_features] = df_train[num_features].astype(np.float32) df_test[num_features] = df_test[num_features].astype(np.float32) f"{gc.collect() } objects collected"<categorify>
preprocessing_pipeline = Pipeline(steps=[ ("preprocessor", MissingValuesTransformer()), ("featureEngineering", FeatureEngineeringTransformer()), ("columnTransformer", feature_transformer) ] )
Titanic - Machine Learning from Disaster
13,576,626
def add_aggregates(df, col): df[f"{col}_min"] = df.groupby(group_col)[col].transform(np.min) df[f"{col}_avg"] = df.groupby(group_col)[col].transform(np.mean) df[f"{col}_q50"] = df.groupby(group_col)[col].transform(np.median) df[f"{col}_max"] = df.groupby(group_col)[col].transform(np.max) df[f"{col}_var"] = df.groupby(group_col)[col].transform(np.var) df[f"{col}_sum"] = df.groupby(group_col)[col].transform(np.sum) df[f"{col}_cmp_min"] = df[col] == df[f"{col}_min"] df[f"{col}_cmp_max"] = df[col] == df[f"{col}_max"] df[f"{col}_cmp_avg"] = df[col] <= df[f"{col}_avg"] df[f"{col}_cmp_q50"] = df[col] <= df[f"{col}_q50"] df[f"{col}_scaled_min"] = df[col] / df[f"{col}_min"] df[f"{col}_scaled_avg"] = df[col] / df[f"{col}_avg"] df[f"{col}_scaled_q50"] = df[col] / df[f"{col}_q50"] df[f"{col}_scaled_max"] = df[col] / df[f"{col}_max"] df[f"{col}_scaled_sum"] = df[col] / df[f"{col}_sum"]<data_type_conversions>
x_clean = preprocessing_pipeline.fit_transform(x) x_test_clean = preprocessing_pipeline.transform(x_test )
Titanic - Machine Learning from Disaster
13,576,626
cat_features = list(df_train.select_dtypes(include=[np.object] ).columns) df_train[cat_features] = df_train[cat_features].fillna('N/A') df_test[cat_features] = df_test[cat_features].fillna('N/A') all_data = pd.concat(( df_train, df_test)) for col in cat_features: cats = all_data[col].unique() df_train[col] = df_train[col].astype('category', categories = cats) df_test[col] = df_test[col].astype('category', categories = cats) for col in cat_features: df_train[col] = df_train[col].cat.codes + 1 df_test[col] = df_test[col].cat.codes + 1 del all_data f"{gc.collect() } objects collected"<define_variables>
x_train, x_valid, y_train, y_valid = train_test_split(x_clean, y, train_size=0.8, test_size=0.2, random_state=0 )
Titanic - Machine Learning from Disaster
13,576,626
features = num_features + cat_features f"Total {len(features)} features: {len(num_features)} num., {len(cat_features)} cat."<compute_train_metric>
model = RandomForestClassifier(criterion='gini', n_estimators=1100, max_depth=5, min_samples_split=4, min_samples_leaf=5, max_features='auto', oob_score=True, random_state=42, n_jobs=-1, verbose=1 )
Titanic - Machine Learning from Disaster
13,576,626
def score_model(model, pool): ys = pool.get_label() ys_pred = model.predict(pool) results = {'auc': roc_auc_score(ys, ys_pred)} results.update(classification_report(ys, ys_pred, output_dict=True)) return results def print_results(r): if 'time' in r: print(f"Train time: {r['time'][0]:2d}m {r['time'][1]:2d}s") print(f"AUC score: {r['auc']:1.5f}") print(f"Class Neg:") print(f" precision: {r['0']['precision']:1.5f}") print(f" recall: {r['0']['recall']:1.5f}") print(f"Class Pos:") print(f" precision: {r['1']['precision']:1.5f}") print(f" recall: {r['1']['recall']:1.5f}" )<categorify>
modeling_pipeline = Pipeline(steps=[ ("model", model) ] )
Titanic - Machine Learning from Disaster
13,576,626
def make_pool(df, labeled=True): return Pool( data = df[features], label = df[target] if labeled else None, cat_features = cat_features, group_id = df[group_col], ) def space_split(df, val_size=0.1): df.sort_values([group_col, 'pos']) cutoff = df[group_col].iloc[int(( 1 - val_size)* len(df)) ] pt1, pt2 = df[df[group_col] < cutoff], df[df[group_col] >= cutoff] return make_pool(pt1), make_pool(pt2 )<train_model>
modeling_pipeline.fit(x_train, y_train )
Titanic - Machine Learning from Disaster
13,576,626
def full_train(model, train_data, test_data, name): train_pool = make_pool(train_data) t_start = time() model.fit(train_pool) t_elapsed = int(time() - t_start) m, s = t_elapsed // 60, t_elapsed % 60 print(f"Train time: {m}m {s}s") if target in test_data.columns: test_pool = make_pool(test_data) return score_model(model, test_pool) else: id_test = test_data.index xs_test = make_pool(test_data, labeled=False) ys_prob = model.predict_proba(xs_test) yP_prob = [y2 for y1,y2 in ys_prob] answer = pd.DataFrame({'Id': id_test, 'target': yP_prob}) answer.to_csv(f'{name}.csv', sep=',', index=False )<train_model>
preds = modeling_pipeline.predict(x_valid) preds
Titanic - Machine Learning from Disaster
13,576,626
def validate(model, data, val_size=0.1, rounds=100, fit=True):
    """Train with early stopping on a spatial split; return timing plus score_model metrics.

    With fit=False, only evaluates the (already fitted) model on the validation pool.
    """
    learn_pool, val_pool = space_split(data, val_size)
    results = {}
    if fit:
        t0 = time()
        model.fit(
            X=learn_pool,
            eval_set=val_pool,
            use_best_model=True,
            early_stopping_rounds=rounds,
            verbose=False,
            plot=True,
        )
        spent = int(time() - t0)
        results['time'] = spent // 60, spent % 60
    results.update(score_model(model, val_pool))
    return results
# Validation accuracy of the pipeline predictions (notebook echoes the value).
accuracy_score(y_valid, preds )
Titanic - Machine Learning from Disaster
13,576,626
# CatBoost settings shared across experiments: ordered boosting, Bernoulli
# bagging, overfitting detector on p-value, GPU training, fixed seed.
common = { 'loss_function': 'CrossEntropy', 'eval_metric': 'F1', 'od_type': 'IncToDec', 'od_pval': 1e-5, 'boosting_type': 'Ordered', 'bootstrap_type': 'Bernoulli', 'one_hot_max_size': 3, 'random_seed': 51, 'task_type': 'GPU', }
# Predict on the cleaned test frame for the submission file.
preds_test = modeling_pipeline.predict(x_test_clean )
Titanic - Machine Learning from Disaster
13,576,626
# Experiment-specific hyperparameters layered on top of the shared `common` dict.
params = { **common, 'train_dir': 'p08-2naf', 'iterations': 1500, 'learning_rate': 0.05, 'depth': 8, 'l2_leaf_reg': 12.0, 'random_strength': 3.0, 'subsample': 0.5, }
# Assemble the submission frame (PassengerId + predicted Survived); .head() is a notebook preview.
output = pd.DataFrame({"PassengerId": x_test["PassengerId"], "Survived": preds_test}) output.head()
Titanic - Machine Learning from Disaster
13,576,626
# Build the CatBoost model from `params` and run the full train/predict routine.
model = CatBoostClassifier(**params) full_train(model, df_train, df_test, 'm13-kernel-catboost' )
# Write the Kaggle submission file.
output.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
13,514,727
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns<load_from_csv>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.impute import KNNImputer from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.model_selection import RepeatedStratifiedKFold,cross_val_score from sklearn.feature_selection import chi2,SelectKBest,mutual_info_classif from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.preprocessing import StandardScaler from sklearn.model_selection import RandomizedSearchCV,GridSearchCV,StratifiedKFold
Titanic - Machine Learning from Disaster
13,514,727
# Load the train/test splits from the Kaggle input directory.
df_train = pd.read_csv('/kaggle/input/dataanalyticscoaching/train.csv') df_test = pd.read_csv('/kaggle/input/dataanalyticscoaching/test.csv' )
# Load Titanic data; keep the target separately and start the submission frame with the ids.
train=pd.read_csv("/kaggle/input/titanic/train.csv") test=pd.read_csv('/kaggle/input/titanic/test.csv') target=train['Survived'] sub=pd.DataFrame(test['PassengerId'] )
Titanic - Machine Learning from Disaster
13,514,727
# Total count of missing cells in the training frame.
df_train.isnull().sum().sum()
# KNN-impute the numeric training columns, then copy back only Age and Fare.
numeric_cols = ['Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
knn = KNNImputer()
knn.fit(train[numeric_cols])
imputed = pd.DataFrame(knn.transform(train[numeric_cols]))
# Positions 2 and 5 are Age and Fare in numeric_cols order.
train['Age'] = imputed.iloc[:, 2]
train['Fare'] = imputed.iloc[:, 5]
Titanic - Machine Learning from Disaster
13,514,727
# Total count of missing cells in the test frame.
df_test.isnull().sum().sum()
# Report missingness before/after, then fill: test Age by per-Pclass median from
# train, test Fare by its median, Cabin with sentinel 'X', Embarked with 'S'.
print(train.isnull().sum() ,' ',test.isnull().sum())
median_age_by_class = train.groupby(train['Pclass'])['Age'].median()
test['Age'] = test['Age'].fillna(test['Pclass'].map(median_age_by_class))
test['Fare'].fillna(test['Fare'].median(), inplace=True)
for frame in (train, test):
    frame['Cabin'].fillna('X', inplace=True)
train['Embarked'].fillna('S', inplace=True)
print(' ',train.isnull().sum() ,' ',test.isnull().sum())
Titanic - Machine Learning from Disaster
13,514,727
# Class balance of the target column.
df_train['target'].value_counts()
# --- Cabin: keep only the first (deck) letter, then bucket decks into ordinal groups ---
train['Cabin'] = [cabin[0] for cabin in train['Cabin']]
test['Cabin'] = [cabin[0] for cabin in test['Cabin']]
deck_rank = {'A': 3, 'B': 3, 'C': 3, 'D': 2, "E": 2, 'F': 2, 'T': 3, 'X': 0, 'G': 1}
train['Cabin'] = train['Cabin'].map(deck_rank)
test['Cabin'] = test['Cabin'].map(deck_rank)

# --- Title: second whitespace token of the raw Name string ---
train['Title'] = [name.split()[1] for name in train['Name']]
test['Title'] = [name.split()[1] for name in test['Name']]

# --- Family size (self + siblings/spouses + parents/children), bucketed ---
train['Passenger'] = train['SibSp'] + train['Parch'] + 1
test['Passenger'] = test['SibSp'] + test['Parch'] + 1

def family(size):
    """Bucket a family-size count into a coarse category label."""
    if size <= 1:
        return 'lonely'
    if size <= 3:
        return 'nuclear'
    if size <= 6:
        return 'middle'
    return 'large'

train['Passenger'] = train['Passenger'].map(family)
test['Passenger'] = test['Passenger'].map(family)

# --- Ticket: 1 when the ticket string starts with a non-digit, else 0 ---
train['Ticket'] = [0 if ticket[0].isdigit() else 1 for ticket in train['Ticket']]
test['Ticket'] = [0 if ticket[0].isdigit() else 1 for ticket in test['Ticket']]
Titanic - Machine Learning from Disaster
13,514,727
# Names of the categorical feature columns in this dataset.
cat_feats = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal']
# Drop identifier columns, then label-encode the categorical columns.
# NOTE(review): each encoder is fit on train only and reused on test —
# assumes test has no unseen categories; verify.
train.drop(['Name', 'PassengerId'], axis=1, inplace=True)
test.drop(['Name', 'PassengerId'], axis=1, inplace=True)
encode = LabelEncoder()
for col in ('Sex', 'Embarked', 'Passenger'):
    encode.fit(train[col])
    train[col] = encode.transform(train[col])
    test[col] = encode.transform(test[col])
Titanic - Machine Learning from Disaster
13,514,727
# Everything that is not categorical, an id, or the target is numeric.
num_feats = list(set(df_train.columns)- set(cat_feats)- {'id', 'target'}) print(num_feats )
# Collapse infrequent / garbled Name tokens into one 'Rare' bucket, then
# ordinal-encode the surviving titles; finally drop the label from train.
a = pd.DataFrame(train['Title'].value_counts())
a.index
rare_titles = ['Dr.', 'Rev.', 'y', 'Planke,', 'Impe,', 'Gordon,', 'Mlle.', 'Col.', 'Major.', 'Pelsmaeker,', 'Capt.', 'Shawah,', 'Messemaeker,', 'Billiard,', 'Carlo,', 'Mulder,', 'Ms.', 'the', 'Jonkheer.', 'Don.', 'Mme.', 'Melkebeke,', 'der', 'Steen,', 'Cruyssen,', 'Velde,', 'Walle,']
train['Title'] = train['Title'].replace(rare_titles, 'Rare')
test['Title'] = test['Title'].replace(rare_titles, 'Rare')
train['Title'] = train['Title'].map({'Mr.': 1, 'Miss.': 2, 'Mrs.': 2, 'Rare': 0, 'Master.': 3})
# The extra keys here are test-only surnames mapped to the 'Rare' code 0.
test['Title'] = test['Title'].map({'Mr.': 1, 'Miss.': 2, 'Mrs.': 2, 'Rare': 0, 'Master.': 3, 'Brito,': 0, 'Khalil,': 0, 'Palmquist,': 0})
train.drop('Survived', axis=1, inplace=True)
Titanic - Machine Learning from Disaster
13,514,727
# Feature matrix (categoricals first, then numerics) and label vector.
X_train = df_train[cat_feats + num_feats] y_train = df_train['target']
# Chi-squared univariate scores for every feature (k='all' keeps them all).
fs=SelectKBest(score_func=chi2,k='all') fs.fit(train,target) x=fs.transform(train) for i in range(len(fs.scores_)) : print('Feature %d: %f' %(i, fs.scores_[i]))
Titanic - Machine Learning from Disaster
13,514,727
from sklearn.base import BaseEstimator, TransformerMixin from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.preprocessing import StandardScaler<prepare_x_and_y>
# Drop the low-scoring features identified by the chi-squared selection above.
train.drop(['SibSp','Ticket','Parch'],axis=1,inplace=True) test.drop(['SibSp','Ticket','Parch'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
13,514,727
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that pulls a fixed list of columns out of a DataFrame.

    transform() returns the underlying NumPy array so downstream sklearn
    steps receive plain ndarrays.
    """

    def __init__(self, attribute_names):
        # Column names to select from the incoming DataFrame.
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        return X[self.attribute_names].values
# Interaction feature: encoded sex multiplied by passenger class.
train['genderclass']=train['Sex']*train['Pclass'] test['genderclass']=test['Sex']*test['Pclass']
Titanic - Machine Learning from Disaster
13,514,727
# Numeric branch: select the numeric columns, then standardize them.
num_pipeline = Pipeline([ ('selector', DataFrameSelector(num_feats)) , ('std_scaler', StandardScaler()) ] )
# Compare a zoo of classifiers by repeated stratified 10-fold CV accuracy.
# The hold-out split below is created but not used by the CV loop; it is kept
# so the global NumPy random stream is consumed exactly as before.
xtrain, xvalid, ytrain, yvalid = train_test_split(train, target, test_size=0.25)
models = {"KNN": KNeighborsClassifier() , "Logistic Regression": LogisticRegression(max_iter=10000), "Random Forest": RandomForestClassifier() , "SVC" : SVC(probability=True), "DecisionTreeClassifier" : DecisionTreeClassifier() , "AdaBoostClassifier" : AdaBoostClassifier(algorithm='SAMME', base_estimator=DecisionTreeClassifier() , learning_rate=1.5, n_estimators=2, random_state=7), "GradientBoostingClassifier" : GradientBoostingClassifier(max_depth=4, max_features=0.3, min_samples_leaf=100, n_estimators=300), "GaussianNB" : GaussianNB() , "LinearDiscriminantAnalysis" : LinearDiscriminantAnalysis() , "QuadraticDiscriminantAnalysis" : QuadraticDiscriminantAnalysis() }
scores = {}
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3)
np.random.seed(42)
model_scores = {}
for label, clf in models.items():
    fold_scores = cross_val_score(clf, train, target, cv=cv, scoring='accuracy', n_jobs=-1)
    scores[label] = np.mean(fold_scores)
print(scores)
Titanic - Machine Learning from Disaster
13,514,727
# Categorical branch: selection only, no scaling/encoding step here.
cat_pipeline = Pipeline([ ('selector', DataFrameSelector(cat_feats)) , ] )
# Fit gradient boosting on the full training data and fill the submission column.
a=GradientBoostingClassifier() a.fit(train,target) b=a.predict(test) sub['Survived']=b
Titanic - Machine Learning from Disaster
13,514,727
# Concatenate the numeric and categorical branches into one feature matrix.
full_pipeline = FeatureUnion(transformer_list=[ ('num_pipeline', num_pipeline), ('cat_pipeline', cat_pipeline) ] )
# Write the submission file for this version.
sub.to_csv('ver3.csv',index=False )
Titanic - Machine Learning from Disaster
13,506,177
# Fit the preprocessing union on train and produce the prepared matrix.
X_prep_train = full_pipeline.fit_transform(X_train )
# Load the Titanic training data (note: path contains a space — '.. /input' — verify).
train = pd.read_csv('.. /input/titanic/train.csv' )
Titanic - Machine Learning from Disaster
13,506,177
from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier<import_modules>
# Separate features from the Survived label.
target_class = 'Survived' x = train.drop(target_class,axis=1) y = train[target_class]
Titanic - Machine Learning from Disaster
13,506,177
from sklearn.metrics import accuracy_score, classification_report<choose_model_class>
# Deep feature synthesis with featuretools: auto-generate pairwise sum and
# product features over the raw columns.
es = ft.EntitySet(id = 'Titanic') es.entity_from_dataframe(entity_id = 'data', dataframe = x, make_index = True, index = 'index') feature_matrix, feature_defs = ft.dfs(entityset = es, target_entity = 'data', trans_primitives = ['add_numeric', 'multiply_numeric']) feature_matrix.head()
Titanic - Machine Learning from Disaster
13,506,177
# Fit logistic regression; NOTE(review): the report is computed on the
# training data itself, so it measures fit, not generalization.
LR_clf = LogisticRegression() LR_clf.fit(X_prep_train, y_train) y_pred = LR_clf.predict(X_prep_train) print(classification_report(y_train, y_pred))
# Notebook shell magic: install the evalml AutoML package into the kernel.
pip install evalml
Titanic - Machine Learning from Disaster
13,506,177
from sklearn.model_selection import cross_val_score, KFold<choose_model_class>
import evalml from evalml import AutoMLSearch
Titanic - Machine Learning from Disaster
13,506,177
# 5-fold CV splitter (unshuffled).
kfold = KFold(n_splits=5 )
# 80/20 train/hold-out split using evalml's helper, seeded for reproducibility.
X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(x, y, test_size=0.2, random_state=0)
Titanic - Machine Learning from Disaster
13,506,177
# Cross-validated accuracy of the logistic regression, plus mean/std summary.
scores = cross_val_score(LR_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) print('Scores:',scores) print('Mean:',np.mean(scores)) print('Std:',np.std(scores))
# Configure evalml's AutoML search: binary classification, one batch of
# pipelines, with ensembling and threshold optimization enabled.
automl = AutoMLSearch(problem_type='binary', objective='auto', additional_objectives=["accuracy binary"], max_batches=1, ensembling=True, optimize_thresholds=True)
Titanic - Machine Learning from Disaster
13,506,177
from sklearn.model_selection import GridSearchCV<import_modules>
# Run the AutoML pipeline search on the training split.
automl.search(X_train, y_train )
Titanic - Machine Learning from Disaster
13,506,177
from sklearn.model_selection import GridSearchCV<train_on_grid>
# Leaderboard of searched pipelines (notebook echoes the table).
automl.rankings
Titanic - Machine Learning from Disaster